V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/heap.h"
6 
7 #include <unordered_map>
8 #include <unordered_set>
9 
10 #include "src/accessors.h"
11 #include "src/api-inl.h"
12 #include "src/assembler-inl.h"
13 #include "src/base/bits.h"
14 #include "src/base/once.h"
15 #include "src/base/utils/random-number-generator.h"
16 #include "src/bootstrapper.h"
17 #include "src/code-stubs.h"
18 #include "src/compilation-cache.h"
19 #include "src/conversions.h"
20 #include "src/debug/debug.h"
21 #include "src/deoptimizer.h"
22 #include "src/feedback-vector.h"
23 #include "src/global-handles.h"
24 #include "src/heap/array-buffer-collector.h"
25 #include "src/heap/array-buffer-tracker-inl.h"
26 #include "src/heap/barrier.h"
27 #include "src/heap/code-stats.h"
28 #include "src/heap/concurrent-marking.h"
29 #include "src/heap/embedder-tracing.h"
30 #include "src/heap/gc-idle-time-handler.h"
31 #include "src/heap/gc-tracer.h"
32 #include "src/heap/heap-controller.h"
33 #include "src/heap/heap-write-barrier-inl.h"
34 #include "src/heap/incremental-marking.h"
35 #include "src/heap/mark-compact-inl.h"
36 #include "src/heap/mark-compact.h"
37 #include "src/heap/memory-reducer.h"
38 #include "src/heap/object-stats.h"
39 #include "src/heap/objects-visiting-inl.h"
40 #include "src/heap/objects-visiting.h"
41 #include "src/heap/remembered-set.h"
42 #include "src/heap/scavenge-job.h"
43 #include "src/heap/scavenger-inl.h"
44 #include "src/heap/store-buffer.h"
45 #include "src/heap/stress-marking-observer.h"
46 #include "src/heap/stress-scavenge-observer.h"
47 #include "src/heap/sweeper.h"
48 #include "src/interpreter/interpreter.h"
49 #include "src/microtask-queue.h"
50 #include "src/objects/data-handler.h"
51 #include "src/objects/hash-table-inl.h"
52 #include "src/objects/maybe-object.h"
53 #include "src/objects/shared-function-info.h"
54 #include "src/objects/slots-inl.h"
55 #include "src/regexp/jsregexp.h"
56 #include "src/runtime-profiler.h"
57 #include "src/snapshot/embedded-data.h"
58 #include "src/snapshot/natives.h"
59 #include "src/snapshot/serializer-common.h"
60 #include "src/snapshot/snapshot.h"
61 #include "src/tracing/trace-event.h"
62 #include "src/unicode-decoder.h"
63 #include "src/unicode-inl.h"
64 #include "src/utils-inl.h"
65 #include "src/utils.h"
66 #include "src/v8.h"
67 #include "src/vm-state-inl.h"
68 
69 // Has to be the last include (doesn't have include guards):
70 #include "src/objects/object-macros.h"
71 
72 namespace v8 {
73 namespace internal {
74 
75 void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
76  DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
77  set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
78 }
79 
80 void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
81  DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
82  set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
83 }
84 
85 void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
86  DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
87  set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
88 }
89 
90 void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
91  DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
92  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
93 }
94 
95 void Heap::SetSerializedObjects(FixedArray objects) {
96  DCHECK(isolate()->serializer_enabled());
97  set_serialized_objects(objects);
98 }
99 
100 void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
101  DCHECK(isolate()->serializer_enabled());
102  set_serialized_global_proxy_sizes(sizes);
103 }
104 
105 bool Heap::GCCallbackTuple::operator==(
106  const Heap::GCCallbackTuple& other) const {
107  return other.callback == callback && other.data == data;
108 }
109 
110 Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
111  const Heap::GCCallbackTuple& other) = default;
112 
113 struct Heap::StrongRootsList {
114  ObjectSlot start;
115  ObjectSlot end;
116  StrongRootsList* next;
117 };
118 
119 class IdleScavengeObserver : public AllocationObserver {
120  public:
121  IdleScavengeObserver(Heap& heap, intptr_t step_size)
122  : AllocationObserver(step_size), heap_(heap) {}
123 
124  void Step(int bytes_allocated, Address, size_t) override {
125  heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
126  }
127 
128  private:
129  Heap& heap_;
130 };
131 
132 Heap::Heap()
133  : isolate_(isolate()),
134  initial_max_old_generation_size_(max_old_generation_size_),
135  initial_old_generation_size_(max_old_generation_size_ /
136  kInitalOldGenerationLimitFactor),
137  memory_pressure_level_(MemoryPressureLevel::kNone),
138  old_generation_allocation_limit_(initial_old_generation_size_),
139  global_pretenuring_feedback_(kInitialFeedbackCapacity),
140  current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
141  external_string_table_(this) {
142  // Ensure old_generation_size_ is a multiple of kPageSize.
143  DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
144 
145  set_native_contexts_list(nullptr);
146  set_allocation_sites_list(Smi::kZero);
147  // Put a dummy entry in the remembered pages so we can find the list in
148  // the minidump even if there are no real unmapped pages.
149  RememberUnmappedPage(kNullAddress, false);
150 }
151 
152 size_t Heap::MaxReserved() {
153  return static_cast<size_t>(2 * max_semi_space_size_ +
154  max_old_generation_size_);
155 }
156 
157 size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
158  const size_t old_space_physical_memory_factor = 4;
159  size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
160  old_space_physical_memory_factor *
161  kPointerMultiplier);
162  return Max(Min(computed_size, HeapController::kMaxSize),
163  HeapController::kMinSize);
164 }
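
The computation above scales the old-generation budget with physical memory and clamps it to the HeapController bounds. The standalone sketch below mirrors that arithmetic; the multiplier and the min/max bounds are illustrative assumptions for a 64-bit build, not the real HeapController constants.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins; the real bounds live in HeapController and the
// real multiplier depends on the pointer size of the build.
constexpr uint64_t kMBSketch = 1024 * 1024;
constexpr size_t kAssumedMinSizeMB = 128;        // assumption
constexpr size_t kAssumedMaxSizeMB = 1024;       // assumption
constexpr size_t kAssumedPointerMultiplier = 2;  // assumption (64-bit build)

size_t SketchMaxOldGenerationSizeMB(uint64_t physical_memory) {
  const size_t old_space_physical_memory_factor = 4;
  size_t computed = static_cast<size_t>(physical_memory / kMBSketch /
                                        old_space_physical_memory_factor *
                                        kAssumedPointerMultiplier);
  return std::max(std::min(computed, kAssumedMaxSizeMB), kAssumedMinSizeMB);
}

int main() {
  // 8 GiB of physical memory: 8192 / 4 * 2 = 4096 MB, clamped to the max.
  std::printf("%zu MB\n", SketchMaxOldGenerationSizeMB(8192 * kMBSketch));
}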
165 
166 size_t Heap::Capacity() {
167  if (!HasBeenSetUp()) return 0;
168 
169  return new_space_->Capacity() + OldGenerationCapacity();
170 }
171 
172 size_t Heap::OldGenerationCapacity() {
173  if (!HasBeenSetUp()) return 0;
174  PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
175  size_t total = 0;
176  for (PagedSpace* space = spaces.next(); space != nullptr;
177  space = spaces.next()) {
178  total += space->Capacity();
179  }
180  return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
181 }
182 
183 size_t Heap::CommittedOldGenerationMemory() {
184  if (!HasBeenSetUp()) return 0;
185 
186  PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
187  size_t total = 0;
188  for (PagedSpace* space = spaces.next(); space != nullptr;
189  space = spaces.next()) {
190  total += space->CommittedMemory();
191  }
192  return total + lo_space_->Size() + code_lo_space_->Size();
193 }
194 
195 size_t Heap::CommittedMemoryOfUnmapper() {
196  if (!HasBeenSetUp()) return 0;
197 
198  return memory_allocator()->unmapper()->CommittedBufferedMemory();
199 }
200 
201 size_t Heap::CommittedMemory() {
202  if (!HasBeenSetUp()) return 0;
203 
204  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
205 }
206 
207 
208 size_t Heap::CommittedPhysicalMemory() {
209  if (!HasBeenSetUp()) return 0;
210 
211  size_t total = 0;
212  for (SpaceIterator it(this); it.has_next();) {
213  total += it.next()->CommittedPhysicalMemory();
214  }
215 
216  return total;
217 }
218 
219 size_t Heap::CommittedMemoryExecutable() {
220  if (!HasBeenSetUp()) return 0;
221 
222  return static_cast<size_t>(memory_allocator()->SizeExecutable());
223 }
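
For embedders, the committed and used sizes that these accessors track internally are exposed through v8::HeapStatistics. A minimal sketch of reading them via the public API; the correspondences noted in the comments are approximate rather than a documented one-to-one guarantee.

#include <cstdio>

#include "include/v8.h"

// Minimal sketch: querying heap sizes from embedder code.
void PrintCommittedSizes(v8::Isolate* isolate) {
  v8::HeapStatistics stats;
  isolate->GetHeapStatistics(&stats);
  // total_heap_size()            ~ Heap::CommittedMemory()
  // total_heap_size_executable() ~ Heap::CommittedMemoryExecutable()
  // used_heap_size()             ~ Heap::SizeOfObjects()
  std::printf("committed: %zu, executable: %zu, used: %zu\n",
              stats.total_heap_size(), stats.total_heap_size_executable(),
              stats.used_heap_size());
}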
224 
225 
226 void Heap::UpdateMaximumCommitted() {
227  if (!HasBeenSetUp()) return;
228 
229  const size_t current_committed_memory = CommittedMemory();
230  if (current_committed_memory > maximum_committed_) {
231  maximum_committed_ = current_committed_memory;
232  }
233 }
234 
235 size_t Heap::Available() {
236  if (!HasBeenSetUp()) return 0;
237 
238  size_t total = 0;
239 
240  for (SpaceIterator it(this); it.has_next();) {
241  total += it.next()->Available();
242  }
243 
244  total += memory_allocator()->Available();
245  return total;
246 }
247 
248 bool Heap::CanExpandOldGeneration(size_t size) {
249  if (force_oom_) return false;
250  if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
251  // The OldGenerationCapacity does not account for compaction spaces used
252  // during evacuation. Ensure that expanding the old generation does not
253  // push the total allocated memory size over the maximum heap size.
254  return memory_allocator()->Size() + size <= MaxReserved();
255 }
256 
257 bool Heap::HasBeenSetUp() {
258  // We will always have a new space when the heap is set up.
259  return new_space_ != nullptr;
260 }
261 
262 
263 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
264  const char** reason) {
265  // Is global GC requested?
266  if (space != NEW_SPACE) {
267  isolate_->counters()->gc_compactor_caused_by_request()->Increment();
268  *reason = "GC in old space requested";
269  return MARK_COMPACTOR;
270  }
271 
272  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
273  *reason = "GC in old space forced by flags";
274  return MARK_COMPACTOR;
275  }
276 
277  if (incremental_marking()->NeedsFinalization() &&
278  AllocationLimitOvershotByLargeMargin()) {
279  *reason = "Incremental marking needs finalization";
280  return MARK_COMPACTOR;
281  }
282 
283  // Over-estimate the new space size using capacity to allow some slack.
284  if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
285  isolate_->counters()
286  ->gc_compactor_caused_by_oldspace_exhaustion()
287  ->Increment();
288  *reason = "scavenge might not succeed";
289  return MARK_COMPACTOR;
290  }
291 
292  // Default
293  *reason = nullptr;
294  return YoungGenerationCollector();
295 }
296 
297 void Heap::SetGCState(HeapState state) {
298  gc_state_ = state;
299 }
300 
301 void Heap::PrintShortHeapStatistics() {
302  if (!FLAG_trace_gc_verbose) return;
303  PrintIsolate(isolate_,
304  "Memory allocator, used: %6" PRIuS
305  " KB,"
306  " available: %6" PRIuS " KB\n",
307  memory_allocator()->Size() / KB,
308  memory_allocator()->Available() / KB);
309  PrintIsolate(isolate_,
310  "Read-only space, used: %6" PRIuS
311  " KB"
312  ", available: %6" PRIuS
313  " KB"
314  ", committed: %6" PRIuS " KB\n",
315  read_only_space_->Size() / KB,
316  read_only_space_->Available() / KB,
317  read_only_space_->CommittedMemory() / KB);
318  PrintIsolate(isolate_,
319  "New space, used: %6" PRIuS
320  " KB"
321  ", available: %6" PRIuS
322  " KB"
323  ", committed: %6" PRIuS " KB\n",
324  new_space_->Size() / KB, new_space_->Available() / KB,
325  new_space_->CommittedMemory() / KB);
326  PrintIsolate(isolate_,
327  "New large object space, used: %6" PRIuS
328  " KB"
329  ", available: %6" PRIuS
330  " KB"
331  ", committed: %6" PRIuS " KB\n",
332  new_lo_space_->SizeOfObjects() / KB,
333  new_lo_space_->Available() / KB,
334  new_lo_space_->CommittedMemory() / KB);
335  PrintIsolate(isolate_,
336  "Old space, used: %6" PRIuS
337  " KB"
338  ", available: %6" PRIuS
339  " KB"
340  ", committed: %6" PRIuS " KB\n",
341  old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
342  old_space_->CommittedMemory() / KB);
343  PrintIsolate(isolate_,
344  "Code space, used: %6" PRIuS
345  " KB"
346  ", available: %6" PRIuS
347  " KB"
348  ", committed: %6" PRIuS " KB\n",
349  code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
350  code_space_->CommittedMemory() / KB);
351  PrintIsolate(isolate_,
352  "Map space, used: %6" PRIuS
353  " KB"
354  ", available: %6" PRIuS
355  " KB"
356  ", committed: %6" PRIuS " KB\n",
357  map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
358  map_space_->CommittedMemory() / KB);
359  PrintIsolate(isolate_,
360  "Large object space, used: %6" PRIuS
361  " KB"
362  ", available: %6" PRIuS
363  " KB"
364  ", committed: %6" PRIuS " KB\n",
365  lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
366  lo_space_->CommittedMemory() / KB);
367  PrintIsolate(isolate_,
368  "Code large object space, used: %6" PRIuS
369  " KB"
370  ", available: %6" PRIuS
371  " KB"
372  ", committed: %6" PRIuS " KB\n",
373  code_lo_space_->SizeOfObjects() / KB,
374  code_lo_space_->Available() / KB,
375  code_lo_space_->CommittedMemory() / KB);
376  PrintIsolate(isolate_,
377  "All spaces, used: %6" PRIuS
378  " KB"
379  ", available: %6" PRIuS
380  " KB"
381  ", committed: %6" PRIuS " KB\n",
382  this->SizeOfObjects() / KB, this->Available() / KB,
383  this->CommittedMemory() / KB);
384  PrintIsolate(isolate_,
385  "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
386  memory_allocator()->unmapper()->NumberOfCommittedChunks(),
387  CommittedMemoryOfUnmapper() / KB);
388  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
389  isolate()->isolate_data()->external_memory_ / KB);
390  PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
391  backing_store_bytes_ / KB);
392  PrintIsolate(isolate_, "External memory global %zu KB\n",
393  external_memory_callback_() / KB);
394  PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
395  total_gc_time_ms_);
396 }
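
PrintShortHeapStatistics is gated on --trace-gc-verbose. A minimal sketch of enabling it programmatically before the isolate is created; the flag string is just an example combination.

#include <cstring>

#include "include/v8.h"

// Minimal sketch: turn on the per-space GC statistics printed above.
void EnableVerboseGCTracing() {
  const char flags[] = "--trace-gc --trace-gc-verbose";
  v8::V8::SetFlagsFromString(flags, static_cast<int>(std::strlen(flags)));
}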
397 
398 void Heap::ReportStatisticsAfterGC() {
399  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
400  ++i) {
401  int count = deferred_counters_[i];
402  deferred_counters_[i] = 0;
403  while (count > 0) {
404  count--;
405  isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
406  }
407  }
408 }
409 
410 void Heap::AddHeapObjectAllocationTracker(
411  HeapObjectAllocationTracker* tracker) {
412  if (allocation_trackers_.empty()) DisableInlineAllocation();
413  allocation_trackers_.push_back(tracker);
414 }
415 
416 void Heap::RemoveHeapObjectAllocationTracker(
417  HeapObjectAllocationTracker* tracker) {
418  allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
419  allocation_trackers_.end(), tracker),
420  allocation_trackers_.end());
421  if (allocation_trackers_.empty()) EnableInlineAllocation();
422 }
423 
424 void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
425  RetainingPathOption option) {
426  if (!FLAG_track_retaining_path) {
427  PrintF("Retaining path tracking requires --track-retaining-path\n");
428  } else {
429  Handle<WeakArrayList> array(retaining_path_targets(), isolate());
430  int index = array->length();
431  array = WeakArrayList::AddToEnd(isolate(), array,
432  MaybeObjectHandle::Weak(object));
433  set_retaining_path_targets(*array);
434  DCHECK_EQ(array->length(), index + 1);
435  retaining_path_target_option_[index] = option;
436  }
437 }
438 
439 bool Heap::IsRetainingPathTarget(HeapObject* object,
440  RetainingPathOption* option) {
441  WeakArrayList* targets = retaining_path_targets();
442  int length = targets->length();
443  MaybeObject object_to_check = HeapObjectReference::Weak(object);
444  for (int i = 0; i < length; i++) {
445  MaybeObject target = targets->Get(i);
446  DCHECK(target->IsWeakOrCleared());
447  if (target == object_to_check) {
448  DCHECK(retaining_path_target_option_.count(i));
449  *option = retaining_path_target_option_[i];
450  return true;
451  }
452  }
453  return false;
454 }
455 
456 void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
457  PrintF("\n\n\n");
458  PrintF("#################################################\n");
459  PrintF("Retaining path for %p:\n", static_cast<void*>(target));
460  HeapObject* object = target;
461  std::vector<std::pair<HeapObject*, bool>> retaining_path;
462  Root root = Root::kUnknown;
463  bool ephemeron = false;
464  while (true) {
465  retaining_path.push_back(std::make_pair(object, ephemeron));
466  if (option == RetainingPathOption::kTrackEphemeronPath &&
467  ephemeron_retainer_.count(object)) {
468  object = ephemeron_retainer_[object];
469  ephemeron = true;
470  } else if (retainer_.count(object)) {
471  object = retainer_[object];
472  ephemeron = false;
473  } else {
474  if (retaining_root_.count(object)) {
475  root = retaining_root_[object];
476  }
477  break;
478  }
479  }
480  int distance = static_cast<int>(retaining_path.size());
481  for (auto node : retaining_path) {
482  HeapObject* object = node.first;
483  bool ephemeron = node.second;
484  PrintF("\n");
485  PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
486  PrintF("Distance from root %d%s: ", distance,
487  ephemeron ? " (ephemeron)" : "");
488  object->ShortPrint();
489  PrintF("\n");
490 #ifdef OBJECT_PRINT
491  object->Print();
492  PrintF("\n");
493 #endif
494  --distance;
495  }
496  PrintF("\n");
497  PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
498  PrintF("Root: %s\n", RootVisitor::RootName(root));
499  PrintF("-------------------------------------------------\n");
500 }
501 
502 void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
503  if (retainer_.count(object)) return;
504  retainer_[object] = retainer;
505  RetainingPathOption option = RetainingPathOption::kDefault;
506  if (IsRetainingPathTarget(object, &option)) {
507  // Check if the retaining path was already printed in
508  // AddEphemeronRetainer().
509  if (ephemeron_retainer_.count(object) == 0 ||
510  option == RetainingPathOption::kDefault) {
511  PrintRetainingPath(object, option);
512  }
513  }
514 }
515 
516 void Heap::AddEphemeronRetainer(HeapObject* retainer, HeapObject* object) {
517  if (ephemeron_retainer_.count(object)) return;
518  ephemeron_retainer_[object] = retainer;
519  RetainingPathOption option = RetainingPathOption::kDefault;
520  if (IsRetainingPathTarget(object, &option) &&
521  option == RetainingPathOption::kTrackEphemeronPath) {
522  // Check if the retaining path was already printed in AddRetainer().
523  if (retainer_.count(object) == 0) {
524  PrintRetainingPath(object, option);
525  }
526  }
527 }
528 
529 void Heap::AddRetainingRoot(Root root, HeapObject* object) {
530  if (retaining_root_.count(object)) return;
531  retaining_root_[object] = root;
532  RetainingPathOption option = RetainingPathOption::kDefault;
533  if (IsRetainingPathTarget(object, &option)) {
534  PrintRetainingPath(object, option);
535  }
536 }
537 
538 void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
539  deferred_counters_[feature]++;
540 }
541 
542 bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
543 
544 void Heap::GarbageCollectionPrologue() {
545  TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
546  {
547  AllowHeapAllocation for_the_first_part_of_prologue;
548  gc_count_++;
549 
550 #ifdef VERIFY_HEAP
551  if (FLAG_verify_heap) {
552  Verify();
553  }
554 #endif
555  }
556 
557  // Reset GC statistics.
558  promoted_objects_size_ = 0;
559  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
560  semi_space_copied_object_size_ = 0;
561  nodes_died_in_new_space_ = 0;
562  nodes_copied_in_new_space_ = 0;
563  nodes_promoted_ = 0;
564 
565  UpdateMaximumCommitted();
566 
567 #ifdef DEBUG
568  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
569 
570  if (FLAG_gc_verbose) Print();
571 #endif // DEBUG
572 
573  if (new_space_->IsAtMaximumCapacity()) {
574  maximum_size_scavenges_++;
575  } else {
576  maximum_size_scavenges_ = 0;
577  }
578  CheckNewSpaceExpansionCriteria();
579  UpdateNewSpaceAllocationCounter();
580  if (FLAG_track_retaining_path) {
581  retainer_.clear();
582  ephemeron_retainer_.clear();
583  retaining_root_.clear();
584  }
585 }
586 
587 size_t Heap::SizeOfObjects() {
588  size_t total = 0;
589 
590  for (SpaceIterator it(this); it.has_next();) {
591  total += it.next()->SizeOfObjects();
592  }
593  return total;
594 }
595 
596 
597 const char* Heap::GetSpaceName(int idx) {
598  switch (idx) {
599  case NEW_SPACE:
600  return "new_space";
601  case OLD_SPACE:
602  return "old_space";
603  case MAP_SPACE:
604  return "map_space";
605  case CODE_SPACE:
606  return "code_space";
607  case LO_SPACE:
608  return "large_object_space";
609  case NEW_LO_SPACE:
610  return "new_large_object_space";
611  case CODE_LO_SPACE:
612  return "code_large_object_space";
613  case RO_SPACE:
614  return "read_only_space";
615  default:
616  UNREACHABLE();
617  }
618  return nullptr;
619 }
620 
621 void Heap::MergeAllocationSitePretenuringFeedback(
622  const PretenuringFeedbackMap& local_pretenuring_feedback) {
623  AllocationSite* site = nullptr;
624  for (auto& site_and_count : local_pretenuring_feedback) {
625  site = site_and_count.first;
626  MapWord map_word = site_and_count.first->map_word();
627  if (map_word.IsForwardingAddress()) {
628  site = AllocationSite::cast(map_word.ToForwardingAddress());
629  }
630 
631  // We have not validated the allocation site yet, since we have not
632  // dereferenced the site while collecting the information.
633  // This is an inlined check of AllocationMemento::IsValid.
634  if (!site->IsAllocationSite() || site->IsZombie()) continue;
635 
636  const int value = static_cast<int>(site_and_count.second);
637  DCHECK_LT(0, value);
638  if (site->IncrementMementoFoundCount(value)) {
639  // For sites in the global map the count is accessed through the site.
640  global_pretenuring_feedback_.insert(std::make_pair(site, 0));
641  }
642  }
643 }
644 
645 void Heap::AddAllocationObserversToAllSpaces(
646  AllocationObserver* observer, AllocationObserver* new_space_observer) {
647  DCHECK(observer && new_space_observer);
648 
649  for (SpaceIterator it(this); it.has_next();) {
650  Space* space = it.next();
651  if (space == new_space()) {
652  space->AddAllocationObserver(new_space_observer);
653  } else {
654  space->AddAllocationObserver(observer);
655  }
656  }
657 }
658 
659 void Heap::RemoveAllocationObserversFromAllSpaces(
660  AllocationObserver* observer, AllocationObserver* new_space_observer) {
661  DCHECK(observer && new_space_observer);
662 
663  for (SpaceIterator it(this); it.has_next();) {
664  Space* space = it.next();
665  if (space == new_space()) {
666  space->RemoveAllocationObserver(new_space_observer);
667  } else {
668  space->RemoveAllocationObserver(observer);
669  }
670  }
671 }
672 
673 class Heap::SkipStoreBufferScope {
674  public:
675  explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
676  : store_buffer_(store_buffer) {
677  store_buffer_->MoveAllEntriesToRememberedSet();
678  store_buffer_->SetMode(StoreBuffer::IN_GC);
679  }
680 
681  ~SkipStoreBufferScope() {
682  DCHECK(store_buffer_->Empty());
683  store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
684  }
685 
686  private:
687  StoreBuffer* store_buffer_;
688 };
689 
690 namespace {
691 inline bool MakePretenureDecision(
692  AllocationSite* site, AllocationSite::PretenureDecision current_decision,
693  double ratio, bool maximum_size_scavenge) {
694  // Here we just allow state transitions from undecided or maybe tenure
695  // to don't tenure, maybe tenure, or tenure.
696  if ((current_decision == AllocationSite::kUndecided ||
697  current_decision == AllocationSite::kMaybeTenure)) {
698  if (ratio >= AllocationSite::kPretenureRatio) {
699  // We only transition into the tenure state when the semi-space was at
700  // maximum capacity.
701  if (maximum_size_scavenge) {
702  site->set_deopt_dependent_code(true);
703  site->set_pretenure_decision(AllocationSite::kTenure);
704  // Currently we just need to deopt when we make a state transition to
705  // tenure.
706  return true;
707  }
708  site->set_pretenure_decision(AllocationSite::kMaybeTenure);
709  } else {
710  site->set_pretenure_decision(AllocationSite::kDontTenure);
711  }
712  }
713  return false;
714 }
715 
716 inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
717  bool maximum_size_scavenge) {
718  bool deopt = false;
719  int create_count = site->memento_create_count();
720  int found_count = site->memento_found_count();
721  bool minimum_mementos_created =
722  create_count >= AllocationSite::kPretenureMinimumCreated;
723  double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
724  ? static_cast<double>(found_count) / create_count
725  : 0.0;
726  AllocationSite::PretenureDecision current_decision =
727  site->pretenure_decision();
728 
729  if (minimum_mementos_created) {
730  deopt = MakePretenureDecision(site, current_decision, ratio,
731  maximum_size_scavenge);
732  }
733 
734  if (FLAG_trace_pretenuring_statistics) {
735  PrintIsolate(isolate,
736  "pretenuring: AllocationSite(%p): (created, found, ratio) "
737  "(%d, %d, %f) %s => %s\n",
738  static_cast<void*>(site), create_count, found_count, ratio,
739  site->PretenureDecisionName(current_decision),
740  site->PretenureDecisionName(site->pretenure_decision()));
741  }
742 
743  // Clear feedback calculation fields until the next gc.
744  site->set_memento_found_count(0);
745  site->set_memento_create_count(0);
746  return deopt;
747 }
748 } // namespace
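
The tenuring decision above boils down to a ratio of mementos found versus created, compared against AllocationSite::kPretenureRatio once enough mementos exist. The sketch below replays that decision order with hard-coded thresholds that are assumptions for illustration; see AllocationSite for the real constants.

#include <cstdio>

constexpr double kAssumedPretenureRatio = 0.85;  // assumption
constexpr int kAssumedMinimumCreated = 100;      // assumption

// Minimal sketch of the decision order in MakePretenureDecision().
const char* SketchPretenureDecision(int created, int found,
                                    bool maximum_size_scavenge) {
  if (created < kAssumedMinimumCreated) return "undecided (too few mementos)";
  const double ratio = static_cast<double>(found) / created;
  if (ratio < kAssumedPretenureRatio) return "don't tenure";
  // Above the ratio: only a scavenge at maximum semi-space capacity moves the
  // site all the way to "tenure" (and marks dependent code for deopt).
  return maximum_size_scavenge ? "tenure" : "maybe tenure";
}

int main() {
  // 92 of 100 allocated objects still had their mementos found at GC time.
  std::printf("%s\n", SketchPretenureDecision(100, 92, true));
}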
749 
750 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
751  global_pretenuring_feedback_.erase(site);
752 }
753 
754 bool Heap::DeoptMaybeTenuredAllocationSites() {
755  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
756 }
757 
758 void Heap::ProcessPretenuringFeedback() {
759  bool trigger_deoptimization = false;
760  if (FLAG_allocation_site_pretenuring) {
761  int tenure_decisions = 0;
762  int dont_tenure_decisions = 0;
763  int allocation_mementos_found = 0;
764  int allocation_sites = 0;
765  int active_allocation_sites = 0;
766 
767  AllocationSite* site = nullptr;
768 
769  // Step 1: Digest feedback for recorded allocation sites.
770  bool maximum_size_scavenge = MaximumSizeScavenge();
771  for (auto& site_and_count : global_pretenuring_feedback_) {
772  allocation_sites++;
773  site = site_and_count.first;
774  // The count is always accessed through the site.
775  DCHECK_EQ(0, site_and_count.second);
776  int found_count = site->memento_found_count();
777  // An entry in the storage does not imply that the count is > 0 because
778  // allocation sites might have been reset due to too many objects dying
779  // in old space.
780  if (found_count > 0) {
781  DCHECK(site->IsAllocationSite());
782  active_allocation_sites++;
783  allocation_mementos_found += found_count;
784  if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
785  trigger_deoptimization = true;
786  }
787  if (site->GetPretenureMode() == TENURED) {
788  tenure_decisions++;
789  } else {
790  dont_tenure_decisions++;
791  }
792  }
793  }
794 
795  // Step 2: Deopt maybe tenured allocation sites if necessary.
796  bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
797  if (deopt_maybe_tenured) {
798  ForeachAllocationSite(
799  allocation_sites_list(),
800  [&allocation_sites, &trigger_deoptimization](AllocationSite* site) {
801  DCHECK(site->IsAllocationSite());
802  allocation_sites++;
803  if (site->IsMaybeTenure()) {
804  site->set_deopt_dependent_code(true);
805  trigger_deoptimization = true;
806  }
807  });
808  }
809 
810  if (trigger_deoptimization) {
811  isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
812  }
813 
814  if (FLAG_trace_pretenuring_statistics &&
815  (allocation_mementos_found > 0 || tenure_decisions > 0 ||
816  dont_tenure_decisions > 0)) {
817  PrintIsolate(isolate(),
818  "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
819  "active_sites=%d "
820  "mementos=%d tenured=%d not_tenured=%d\n",
821  deopt_maybe_tenured ? 1 : 0, allocation_sites,
822  active_allocation_sites, allocation_mementos_found,
823  tenure_decisions, dont_tenure_decisions);
824  }
825 
826  global_pretenuring_feedback_.clear();
827  global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
828  }
829 }
830 
831 void Heap::InvalidateCodeDeoptimizationData(Code code) {
832  MemoryChunk* chunk = MemoryChunk::FromAddress(code->ptr());
833  CodePageMemoryModificationScope modification_scope(chunk);
834  code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
835 }
836 
837 void Heap::DeoptMarkedAllocationSites() {
838  // TODO(hpayer): If iterating over the allocation sites list becomes a
839  // performance issue, use a cache data structure in heap instead.
840 
841  ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite* site) {
842  if (site->deopt_dependent_code()) {
843  site->dependent_code()->MarkCodeForDeoptimization(
844  isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
845  site->set_deopt_dependent_code(false);
846  }
847  });
848 
849  Deoptimizer::DeoptimizeMarkedCode(isolate_);
850 }
851 
852 
853 void Heap::GarbageCollectionEpilogue() {
854  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
855  if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
856  ZapFromSpace();
857  }
858 
859 #ifdef VERIFY_HEAP
860  if (FLAG_verify_heap) {
861  Verify();
862  }
863 #endif
864 
865  AllowHeapAllocation for_the_rest_of_the_epilogue;
866 
867 #ifdef DEBUG
868  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
869  if (FLAG_print_handles) PrintHandles();
870  if (FLAG_gc_verbose) Print();
871  if (FLAG_code_stats) ReportCodeStatistics("After GC");
872  if (FLAG_check_handle_count) CheckHandleCount();
873 #endif
874 
875  UpdateMaximumCommitted();
876 
877  isolate_->counters()->alive_after_last_gc()->Set(
878  static_cast<int>(SizeOfObjects()));
879 
880  isolate_->counters()->string_table_capacity()->Set(
881  string_table()->Capacity());
882  isolate_->counters()->number_of_symbols()->Set(
883  string_table()->NumberOfElements());
884 
885  if (CommittedMemory() > 0) {
886  isolate_->counters()->external_fragmentation_total()->AddSample(
887  static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
888 
889  isolate_->counters()->heap_sample_total_committed()->AddSample(
890  static_cast<int>(CommittedMemory() / KB));
891  isolate_->counters()->heap_sample_total_used()->AddSample(
892  static_cast<int>(SizeOfObjects() / KB));
893  isolate_->counters()->heap_sample_map_space_committed()->AddSample(
894  static_cast<int>(map_space()->CommittedMemory() / KB));
895  isolate_->counters()->heap_sample_code_space_committed()->AddSample(
896  static_cast<int>(code_space()->CommittedMemory() / KB));
897 
898  isolate_->counters()->heap_sample_maximum_committed()->AddSample(
899  static_cast<int>(MaximumCommittedMemory() / KB));
900  }
901 
902 #define UPDATE_COUNTERS_FOR_SPACE(space) \
903  isolate_->counters()->space##_bytes_available()->Set( \
904  static_cast<int>(space()->Available())); \
905  isolate_->counters()->space##_bytes_committed()->Set( \
906  static_cast<int>(space()->CommittedMemory())); \
907  isolate_->counters()->space##_bytes_used()->Set( \
908  static_cast<int>(space()->SizeOfObjects()));
909 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
910  if (space()->CommittedMemory() > 0) { \
911  isolate_->counters()->external_fragmentation_##space()->AddSample( \
912  static_cast<int>(100 - \
913  (space()->SizeOfObjects() * 100.0) / \
914  space()->CommittedMemory())); \
915  }
916 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
917  UPDATE_COUNTERS_FOR_SPACE(space) \
918  UPDATE_FRAGMENTATION_FOR_SPACE(space)
919 
920  UPDATE_COUNTERS_FOR_SPACE(new_space)
921  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
922  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
923  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
924  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
925 #undef UPDATE_COUNTERS_FOR_SPACE
926 #undef UPDATE_FRAGMENTATION_FOR_SPACE
927 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
928 
929 #ifdef DEBUG
930  ReportStatisticsAfterGC();
931 #endif // DEBUG
932 
933  last_gc_time_ = MonotonicallyIncreasingTimeInMs();
934 
935  {
936  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
937  ReduceNewSpaceSize();
938  }
939 
940  if (FLAG_harmony_weak_refs) {
941  // TODO(marja): (spec): The exact condition on when to schedule the cleanup
942  // task is unclear. This version schedules the cleanup task for a factory
943  // whenever the GC has discovered new dirty WeakCells for it (at that point
944  // it might have leftover dirty WeakCells since an earlier invocation of the
945  // cleanup function didn't iterate through them). See
946  // https://github.com/tc39/proposal-weakrefs/issues/34
947  HandleScope handle_scope(isolate());
948  while (
949  !isolate()->heap()->dirty_js_weak_factories()->IsUndefined(isolate())) {
950  // Enqueue one microtask per JSWeakFactory.
951  Handle<JSWeakFactory> weak_factory(
952  JSWeakFactory::cast(isolate()->heap()->dirty_js_weak_factories()),
953  isolate());
954  isolate()->heap()->set_dirty_js_weak_factories(weak_factory->next());
955  weak_factory->set_next(ReadOnlyRoots(isolate()).undefined_value());
956  Handle<Context> context(weak_factory->native_context(), isolate());
957  // GC has no native context, but we use the creation context of the
958  // JSWeakFactory for the EnqueueTask operation. This is consistent with the
959  // Promise implementation, assuming the JSWeakFactory creation context is the
960  // "caller's context" in promise functions. An alternative would be to use
961  // the native context of the cleanup function. This difference shouldn't
962  // be observable from JavaScript, since we enter the native context of the
963  // cleanup function before calling it. TODO(marja): Revisit when the spec
964  // clarifies this. See also
965  // https://github.com/tc39/proposal-weakrefs/issues/38 .
966  Handle<WeakFactoryCleanupJobTask> task =
967  isolate()->factory()->NewWeakFactoryCleanupJobTask(weak_factory);
968  isolate()->EnqueueMicrotask(task);
969  }
970  }
971 }
972 
973 class GCCallbacksScope {
974  public:
975  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
976  heap_->gc_callbacks_depth_++;
977  }
978  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
979 
980  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
981 
982  private:
983  Heap* heap_;
984 };
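
GCCallbacksScope guards re-entry while the embedder's GC callbacks run. A minimal sketch of registering such a callback through the public API; the callback body is just an example.

#include <cstdio>

#include "include/v8.h"

// Example prologue callback; invoked via Heap::CallGCPrologueCallbacks().
void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                  v8::GCCallbackFlags flags) {
  std::printf("GC starting, type=%d\n", static_cast<int>(type));
}

void RegisterGCPrologueCallback(v8::Isolate* isolate) {
  isolate->AddGCPrologueCallback(OnGCPrologue);
}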
985 
986 
987 void Heap::HandleGCRequest() {
988  if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
989  CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
990  stress_scavenge_observer_->RequestedGCDone();
991  } else if (HighMemoryPressure()) {
992  incremental_marking()->reset_request_type();
993  CheckMemoryPressure();
994  } else if (incremental_marking()->request_type() ==
995  IncrementalMarking::COMPLETE_MARKING) {
996  incremental_marking()->reset_request_type();
997  CollectAllGarbage(current_gc_flags_,
998  GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
999  current_gc_callback_flags_);
1000  } else if (incremental_marking()->request_type() ==
1001  IncrementalMarking::FINALIZATION &&
1002  incremental_marking()->IsMarking() &&
1003  !incremental_marking()->finalize_marking_completed()) {
1004  incremental_marking()->reset_request_type();
1005  FinalizeIncrementalMarkingIncrementally(
1006  GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
1007  }
1008 }
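
The HighMemoryPressure() branch above is driven by the embedder-facing memory pressure API. A minimal sketch:

#include "include/v8.h"

// Minimal sketch: reporting critical memory pressure from the embedder. On
// the isolate's own thread this may collect synchronously; from other threads
// it requests an interrupt that ends up in Heap::HandleGCRequest().
void NotifyCriticalMemoryPressure(v8::Isolate* isolate) {
  isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
}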
1009 
1010 
1011 void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
1012  DCHECK(FLAG_idle_time_scavenge);
1013  DCHECK_NOT_NULL(scavenge_job_);
1014  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
1015 }
1016 
1017 TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
1018  if (IsYoungGenerationCollector(collector)) {
1019  if (isolate_->IsIsolateInBackground()) {
1020  return isolate_->counters()->gc_scavenger_background();
1021  }
1022  return isolate_->counters()->gc_scavenger_foreground();
1023  } else {
1024  if (!incremental_marking()->IsStopped()) {
1025  if (ShouldReduceMemory()) {
1026  if (isolate_->IsIsolateInBackground()) {
1027  return isolate_->counters()->gc_finalize_reduce_memory_background();
1028  }
1029  return isolate_->counters()->gc_finalize_reduce_memory_foreground();
1030  } else {
1031  if (isolate_->IsIsolateInBackground()) {
1032  return isolate_->counters()->gc_finalize_background();
1033  }
1034  return isolate_->counters()->gc_finalize_foreground();
1035  }
1036  } else {
1037  if (isolate_->IsIsolateInBackground()) {
1038  return isolate_->counters()->gc_compactor_background();
1039  }
1040  return isolate_->counters()->gc_compactor_foreground();
1041  }
1042  }
1043 }
1044 
1045 TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
1046  if (IsYoungGenerationCollector(collector)) {
1047  return isolate_->counters()->gc_scavenger();
1048  } else {
1049  if (!incremental_marking()->IsStopped()) {
1050  if (ShouldReduceMemory()) {
1051  return isolate_->counters()->gc_finalize_reduce_memory();
1052  } else {
1053  return isolate_->counters()->gc_finalize();
1054  }
1055  } else {
1056  return isolate_->counters()->gc_compactor();
1057  }
1058  }
1059 }
1060 
1061 void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
1062  const v8::GCCallbackFlags gc_callback_flags) {
1063  // Since we are ignoring the return value, the exact choice of space does
1064  // not matter, so long as we do not specify NEW_SPACE, which would not
1065  // cause a full GC.
1066  set_current_gc_flags(flags);
1067  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1068  set_current_gc_flags(kNoGCFlags);
1069 }
1070 
1071 namespace {
1072 
1073 intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
1074  int words = size / kPointerSize;
1075  DCHECK_EQ(a->Size(), size);
1076  DCHECK_EQ(b->Size(), size);
1077  intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
1078  intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
1079  for (int i = 0; i < words; i++) {
1080  if (*slot_a != *slot_b) {
1081  return *slot_a - *slot_b;
1082  }
1083  slot_a++;
1084  slot_b++;
1085  }
1086  return 0;
1087 }
1088 
1089 void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
1090  if (objects.size() == 0) return;
1091 
1092  sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
1093  intptr_t c = CompareWords(size, a, b);
1094  if (c != 0) return c < 0;
1095  return a < b;
1096  });
1097 
1098  std::vector<std::pair<int, HeapObject*>> duplicates;
1099  HeapObject* current = objects[0];
1100  int count = 1;
1101  for (size_t i = 1; i < objects.size(); i++) {
1102  if (CompareWords(size, current, objects[i]) == 0) {
1103  count++;
1104  } else {
1105  if (count > 1) {
1106  duplicates.push_back(std::make_pair(count - 1, current));
1107  }
1108  count = 1;
1109  current = objects[i];
1110  }
1111  }
1112  if (count > 1) {
1113  duplicates.push_back(std::make_pair(count - 1, current));
1114  }
1115 
1116  int threshold = FLAG_trace_duplicate_threshold_kb * KB;
1117 
1118  sort(duplicates.begin(), duplicates.end());
1119  for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
1120  int duplicate_bytes = it->first * size;
1121  if (duplicate_bytes < threshold) break;
1122  PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
1123  duplicate_bytes / KB);
1124  PrintF("Sample object: ");
1125  it->second->Print();
1126  PrintF("============================\n");
1127  }
1128 }
1129 } // anonymous namespace
1130 
1131 void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1132  // Since we are ignoring the return value, the exact choice of space does
1133  // not matter, so long as we do not specify NEW_SPACE, which would not
1134  // cause a full GC.
1135  // Major GC would invoke weak handle callbacks on weakly reachable
1136  // handles, but won't collect weakly reachable objects until next
1137  // major GC. Therefore if we collect aggressively and weak handle callback
1138  // has been invoked, we rerun major GC to release objects which become
1139  // garbage.
1140  // Note: as weak callbacks can execute arbitrary code, we cannot
1141  // hope that eventually there will be no weak callback invocations.
1142  // Therefore stop recollecting after several attempts.
1143  if (gc_reason == GarbageCollectionReason::kLastResort) {
1144  InvokeNearHeapLimitCallback();
1145  }
1146  RuntimeCallTimerScope runtime_timer(
1147  isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
1148 
1149  // The optimizing compiler may be unnecessarily holding on to memory.
1150  isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1151  isolate()->ClearSerializerData();
1152  set_current_gc_flags(kReduceMemoryFootprintMask);
1153  isolate_->compilation_cache()->Clear();
1154  const int kMaxNumberOfAttempts = 7;
1155  const int kMinNumberOfAttempts = 2;
1156  const v8::GCCallbackFlags callback_flags =
1157  gc_reason == GarbageCollectionReason::kLowMemoryNotification
1158  ? v8::kGCCallbackFlagForced
1159  : v8::kGCCallbackFlagCollectAllAvailableGarbage;
1160  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1161  if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
1162  attempt + 1 >= kMinNumberOfAttempts) {
1163  break;
1164  }
1165  }
1166 
1167  set_current_gc_flags(kNoGCFlags);
1168  new_space_->Shrink();
1169  UncommitFromSpace();
1170  EagerlyFreeExternalMemory();
1171 
1172  if (FLAG_trace_duplicate_threshold_kb) {
1173  std::map<int, std::vector<HeapObject*>> objects_by_size;
1174  PagedSpaces spaces(this);
1175  for (PagedSpace* space = spaces.next(); space != nullptr;
1176  space = spaces.next()) {
1177  HeapObjectIterator it(space);
1178  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
1179  objects_by_size[obj->Size()].push_back(obj);
1180  }
1181  }
1182  {
1183  LargeObjectIterator it(lo_space());
1184  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
1185  objects_by_size[obj->Size()].push_back(obj);
1186  }
1187  }
1188  for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
1189  ++it) {
1190  ReportDuplicates(it->first, it->second);
1191  }
1192  }
1193 }
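
From the embedder side, the usual way to request this kind of aggressive, repeated collection is the low-memory notification. A minimal sketch:

#include "include/v8.h"

// Minimal sketch: ask V8 to collect as much as possible, e.g. in response to
// a system memory warning. This ends up in CollectAllAvailableGarbage() with
// GarbageCollectionReason::kLowMemoryNotification.
void OnSystemLowMemory(v8::Isolate* isolate) {
  isolate->LowMemoryNotification();
}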
1194 
1195 void Heap::PreciseCollectAllGarbage(int flags,
1196  GarbageCollectionReason gc_reason,
1197  const GCCallbackFlags gc_callback_flags) {
1198  if (!incremental_marking()->IsStopped()) {
1199  FinalizeIncrementalMarkingAtomically(gc_reason);
1200  }
1201  CollectAllGarbage(flags, gc_reason, gc_callback_flags);
1202 }
1203 
1204 void Heap::ReportExternalMemoryPressure() {
1205  const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1206  static_cast<GCCallbackFlags>(
1207  kGCCallbackFlagSynchronousPhantomCallbackProcessing |
1208  kGCCallbackFlagCollectAllExternalMemory);
1209  if (isolate()->isolate_data()->external_memory_ >
1210  (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
1211  external_memory_hard_limit())) {
1212  CollectAllGarbage(
1213  kReduceMemoryFootprintMask,
1214  GarbageCollectionReason::kExternalMemoryPressure,
1215  static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
1216  kGCCallbackFlagsForExternalMemory));
1217  return;
1218  }
1219  if (incremental_marking()->IsStopped()) {
1220  if (incremental_marking()->CanBeActivated()) {
1221  StartIncrementalMarking(GCFlagsForIncrementalMarking(),
1222  GarbageCollectionReason::kExternalMemoryPressure,
1223  kGCCallbackFlagsForExternalMemory);
1224  } else {
1225  CollectAllGarbage(i::Heap::kNoGCFlags,
1226  GarbageCollectionReason::kExternalMemoryPressure,
1227  kGCCallbackFlagsForExternalMemory);
1228  }
1229  } else {
1230  // Incremental marking is turned on and has already been started.
1231  const double kMinStepSize = 5;
1232  const double kMaxStepSize = 10;
1233  const double ms_step = Min(
1234  kMaxStepSize,
1235  Max(kMinStepSize,
1236  static_cast<double>(isolate()->isolate_data()->external_memory_) /
1237  isolate()->isolate_data()->external_memory_limit_ *
1238  kMinStepSize));
1239  const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
1240  // Extend the gc callback flags with external memory flags.
1241  current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
1242  current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1243  incremental_marking()->AdvanceIncrementalMarking(
1244  deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
1245  }
1246 }
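
The external-memory accounting that feeds ReportExternalMemoryPressure comes from the embedder via AdjustAmountOfExternalAllocatedMemory. A minimal sketch, with the allocation itself being a placeholder:

#include <cstdlib>

#include "include/v8.h"

// Minimal sketch: account for off-heap memory that is kept alive by JS
// objects. Crossing the internal external-memory limits is what triggers the
// incremental-marking step or full GC above.
void* AllocateBackingStore(v8::Isolate* isolate, size_t bytes) {
  void* data = std::malloc(bytes);
  if (data != nullptr) {
    isolate->AdjustAmountOfExternalAllocatedMemory(
        static_cast<int64_t>(bytes));
  }
  return data;
}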
1247 
1248 void Heap::EnsureFillerObjectAtTop() {
1249  // There may be an allocation memento behind objects in new space. Upon
1250  // evacuation of a non-full new space (or if we are on the last page) there
1251  // may be uninitialized memory behind top. We fill the remainder of the page
1252  // with a filler.
1253  Address to_top = new_space_->top();
1254  Page* page = Page::FromAddress(to_top - kPointerSize);
1255  if (page->Contains(to_top)) {
1256  int remaining_in_page = static_cast<int>(page->area_end() - to_top);
1257  CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
1258  }
1259 }
1260 
1261 bool Heap::CollectGarbage(AllocationSpace space,
1262  GarbageCollectionReason gc_reason,
1263  const v8::GCCallbackFlags gc_callback_flags) {
1264  const char* collector_reason = nullptr;
1265  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1266 
1267  if (!CanExpandOldGeneration(new_space()->Capacity())) {
1268  InvokeNearHeapLimitCallback();
1269  }
1270 
1271  // Ensure that all pending phantom callbacks are invoked.
1272  isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
1273 
1274  // The VM is in the GC state until exiting this function.
1275  VMState<GC> state(isolate());
1276 
1277 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1278  // Reset the allocation timeout, but make sure to allow at least a few
1279  // allocations after a collection. The reason for this is that we have a lot
1280  // of allocation sequences and we assume that a garbage collection will allow
1281  // the subsequent allocation attempts to go through.
1282  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
1283  allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
1284  }
1285 #endif
1286 
1287  EnsureFillerObjectAtTop();
1288 
1289  if (IsYoungGenerationCollector(collector) &&
1290  !incremental_marking()->IsStopped()) {
1291  if (FLAG_trace_incremental_marking) {
1292  isolate()->PrintWithTimestamp(
1293  "[IncrementalMarking] Scavenge during marking.\n");
1294  }
1295  }
1296 
1297  bool next_gc_likely_to_collect_more = false;
1298  size_t committed_memory_before = 0;
1299 
1300  if (collector == MARK_COMPACTOR) {
1301  committed_memory_before = CommittedOldGenerationMemory();
1302  }
1303 
1304  {
1305  tracer()->Start(collector, gc_reason, collector_reason);
1306  DCHECK(AllowHeapAllocation::IsAllowed());
1307  DisallowHeapAllocation no_allocation_during_gc;
1308  GarbageCollectionPrologue();
1309 
1310  {
1311  TimedHistogram* gc_type_timer = GCTypeTimer(collector);
1312  TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
1313  TRACE_EVENT0("v8", gc_type_timer->name());
1314 
1315  TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
1316  OptionalTimedHistogramScopeMode mode =
1317  isolate_->IsMemorySavingsModeActive()
1318  ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
1319  : OptionalTimedHistogramScopeMode::TAKE_TIME;
1320  OptionalTimedHistogramScope histogram_timer_priority_scope(
1321  gc_type_priority_timer, isolate_, mode);
1322 
1323  next_gc_likely_to_collect_more =
1324  PerformGarbageCollection(collector, gc_callback_flags);
1325  if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
1326  tracer()->RecordGCPhasesHistograms(gc_type_timer);
1327  }
1328  }
1329 
1330  GarbageCollectionEpilogue();
1331  if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1332  isolate()->CheckDetachedContextsAfterGC();
1333  }
1334 
1335  if (collector == MARK_COMPACTOR) {
1336  size_t committed_memory_after = CommittedOldGenerationMemory();
1337  size_t used_memory_after = OldGenerationSizeOfObjects();
1338  MemoryReducer::Event event;
1339  event.type = MemoryReducer::kMarkCompact;
1340  event.time_ms = MonotonicallyIncreasingTimeInMs();
1341  // Trigger one more GC if
1342  // - this GC decreased committed memory,
1343  // - there is high fragmentation,
1344  // - there are live detached contexts.
1345  event.next_gc_likely_to_collect_more =
1346  (committed_memory_before > committed_memory_after + MB) ||
1347  HasHighFragmentation(used_memory_after, committed_memory_after) ||
1348  (detached_contexts()->length() > 0);
1349  event.committed_memory = committed_memory_after;
1350  if (deserialization_complete_) {
1351  memory_reducer_->NotifyMarkCompact(event);
1352  }
1353  }
1354 
1355  tracer()->Stop(collector);
1356  }
1357 
1358  if (collector == MARK_COMPACTOR &&
1359  (gc_callback_flags & (kGCCallbackFlagForced |
1360  kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1361  isolate()->CountUsage(v8::Isolate::kForcedGC);
1362  }
1363 
1364  // Start incremental marking for the next cycle. We do this only for scavenger
1365  // to avoid a loop where mark-compact causes another mark-compact.
1366  if (IsYoungGenerationCollector(collector)) {
1367  StartIncrementalMarkingIfAllocationLimitIsReached(
1368  GCFlagsForIncrementalMarking(),
1369  kGCCallbackScheduleIdleGarbageCollection);
1370  }
1371 
1372  return next_gc_likely_to_collect_more;
1373 }
1374 
1375 
1376 int Heap::NotifyContextDisposed(bool dependant_context) {
1377  if (!dependant_context) {
1378  tracer()->ResetSurvivalEvents();
1379  old_generation_size_configured_ = false;
1380  MemoryReducer::Event event;
1381  event.type = MemoryReducer::kPossibleGarbage;
1382  event.time_ms = MonotonicallyIncreasingTimeInMs();
1383  memory_reducer_->NotifyPossibleGarbage(event);
1384  }
1385  isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1386 
1387  number_of_disposed_maps_ = retained_maps()->length();
1388  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
1389  return ++contexts_disposed_;
1390 }
1391 
1392 void Heap::StartIncrementalMarking(int gc_flags,
1393  GarbageCollectionReason gc_reason,
1394  GCCallbackFlags gc_callback_flags) {
1395  DCHECK(incremental_marking()->IsStopped());
1396  set_current_gc_flags(gc_flags);
1397  current_gc_callback_flags_ = gc_callback_flags;
1398  incremental_marking()->Start(gc_reason);
1399 }
1400 
1401 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1402  int gc_flags, const GCCallbackFlags gc_callback_flags) {
1403  if (incremental_marking()->IsStopped()) {
1404  IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
1405  if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
1406  incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1407  } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
1408  StartIncrementalMarking(gc_flags,
1409  GarbageCollectionReason::kAllocationLimit,
1410  gc_callback_flags);
1411  }
1412  }
1413 }
1414 
1415 void Heap::StartIdleIncrementalMarking(
1416  GarbageCollectionReason gc_reason,
1417  const GCCallbackFlags gc_callback_flags) {
1418  gc_idle_time_handler_->ResetNoProgressCounter();
1419  StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
1420  gc_callback_flags);
1421 }
1422 
1423 void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
1424  WriteBarrierMode mode) {
1425  if (len == 0) return;
1426 
1427  DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
1428  ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
1429  ObjectSlot src = array->RawFieldOfElementAt(src_index);
1430  if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
1431  if (dst < src) {
1432  for (int i = 0; i < len; i++) {
1433  dst.Relaxed_Store(src.Relaxed_Load());
1434  ++dst;
1435  ++src;
1436  }
1437  } else {
1438  // Copy backwards.
1439  dst += len - 1;
1440  src += len - 1;
1441  for (int i = 0; i < len; i++) {
1442  dst.Relaxed_Store(src.Relaxed_Load());
1443  --dst;
1444  --src;
1445  }
1446  }
1447  } else {
1448  MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kPointerSize);
1449  }
1450  if (mode == SKIP_WRITE_BARRIER) return;
1451  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
1452 }
1453 
1454 #ifdef VERIFY_HEAP
1455 // Helper class for verifying the string table.
1456 class StringTableVerifier : public ObjectVisitor {
1457  public:
1458  explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
1459 
1460  void VisitPointers(HeapObject* host, ObjectSlot start,
1461  ObjectSlot end) override {
1462  // Visit all HeapObject pointers in [start, end).
1463  for (ObjectSlot p = start; p < end; ++p) {
1464  DCHECK(!HasWeakHeapObjectTag(*p));
1465  if ((*p)->IsHeapObject()) {
1466  HeapObject* object = HeapObject::cast(*p);
1467  // Check that the string is actually internalized.
1468  CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
1469  object->IsInternalizedString());
1470  }
1471  }
1472  }
1473  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
1474  MaybeObjectSlot end) override {
1475  UNREACHABLE();
1476  }
1477 
1478  private:
1479  Isolate* isolate_;
1480 };
1481 
1482 static void VerifyStringTable(Isolate* isolate) {
1483  StringTableVerifier verifier(isolate);
1484  isolate->heap()->string_table()->IterateElements(&verifier);
1485 }
1486 #endif // VERIFY_HEAP
1487 
1488 bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
1489  bool gc_performed = true;
1490  int counter = 0;
1491  static const int kThreshold = 20;
1492  while (gc_performed && counter++ < kThreshold) {
1493  gc_performed = false;
1494  for (int space = FIRST_SPACE;
1495  space < SerializerDeserializer::kNumberOfSpaces; space++) {
1496  Reservation* reservation = &reservations[space];
1497  DCHECK_LE(1, reservation->size());
1498  if (reservation->at(0).size == 0) {
1499  DCHECK_EQ(1, reservation->size());
1500  continue;
1501  }
1502  bool perform_gc = false;
1503  if (space == MAP_SPACE) {
1504  // We allocate each map individually to avoid fragmentation.
1505  maps->clear();
1506  DCHECK_LE(reservation->size(), 2);
1507  int reserved_size = 0;
1508  for (const Chunk& c : *reservation) reserved_size += c.size;
1509  DCHECK_EQ(0, reserved_size % Map::kSize);
1510  int num_maps = reserved_size / Map::kSize;
1511  for (int i = 0; i < num_maps; i++) {
1512  // The deserializer will update the skip list.
1513  AllocationResult allocation = map_space()->AllocateRawUnaligned(
1514  Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
1515  HeapObject* free_space = nullptr;
1516  if (allocation.To(&free_space)) {
1517  // Mark with a free list node, in case we have a GC before
1518  // deserializing.
1519  Address free_space_address = free_space->address();
1520  CreateFillerObjectAt(free_space_address, Map::kSize,
1521  ClearRecordedSlots::kNo);
1522  maps->push_back(free_space_address);
1523  } else {
1524  perform_gc = true;
1525  break;
1526  }
1527  }
1528  } else if (space == LO_SPACE) {
1529  // Just check that we can allocate during deserialization.
1530  DCHECK_LE(reservation->size(), 2);
1531  int reserved_size = 0;
1532  for (const Chunk& c : *reservation) reserved_size += c.size;
1533  perform_gc = !CanExpandOldGeneration(reserved_size);
1534  } else {
1535  for (auto& chunk : *reservation) {
1536  AllocationResult allocation;
1537  int size = chunk.size;
1538  DCHECK_LE(static_cast<size_t>(size),
1539  MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
1540  static_cast<AllocationSpace>(space)));
1541  if (space == NEW_SPACE) {
1542  allocation = new_space()->AllocateRawUnaligned(size);
1543  } else {
1544  // The deserializer will update the skip list.
1545  allocation = paged_space(space)->AllocateRawUnaligned(
1546  size, PagedSpace::IGNORE_SKIP_LIST);
1547  }
1548  HeapObject* free_space = nullptr;
1549  if (allocation.To(&free_space)) {
1550  // Mark with a free list node, in case we have a GC before
1551  // deserializing.
1552  Address free_space_address = free_space->address();
1553  CreateFillerObjectAt(free_space_address, size,
1554  ClearRecordedSlots::kNo);
1555  DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
1556  space);
1557  chunk.start = free_space_address;
1558  chunk.end = free_space_address + size;
1559  } else {
1560  perform_gc = true;
1561  break;
1562  }
1563  }
1564  }
1565  if (perform_gc) {
1566  // We cannot perform a GC with an uninitialized isolate. This check
1567  // fails for example if the max old space size is chosen unwisely,
1568  // so that we cannot allocate space to deserialize the initial heap.
1569  if (!deserialization_complete_) {
1570  V8::FatalProcessOutOfMemory(
1571  isolate(), "insufficient memory to create an Isolate");
1572  }
1573  if (space == NEW_SPACE) {
1574  CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
1575  } else {
1576  if (counter > 1) {
1577  CollectAllGarbage(kReduceMemoryFootprintMask,
1578  GarbageCollectionReason::kDeserializer);
1579  } else {
1580  CollectAllGarbage(kNoGCFlags,
1581  GarbageCollectionReason::kDeserializer);
1582  }
1583  }
1584  gc_performed = true;
1585  break; // Abort for-loop over spaces and retry.
1586  }
1587  }
1588  }
1589 
1590  return !gc_performed;
1591 }
1592 
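// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] ReserveSpace()
// above retries the per-space reservations for up to kThreshold passes,
// running a GC whenever an allocation fails, and reports success only when a
// pass completes without needing a GC. A minimal standalone version of that
// retry shape; the callback parameters and the helper name are hypothetical.
namespace reserve_space_retry_sketch {

// Returns true once a full pass of 'try_allocate' succeeds; runs 'collect'
// after a failed pass and gives up after 'max_attempts' passes.
template <typename TryAllocateFn, typename CollectFn>
bool RetryWithGC(TryAllocateFn try_allocate, CollectFn collect,
                 int max_attempts) {
  bool gc_performed = true;
  int counter = 0;
  while (gc_performed && counter++ < max_attempts) {
    gc_performed = false;
    if (!try_allocate()) {
      collect();           // free some memory, then retry the whole pass
      gc_performed = true;
    }
  }
  return !gc_performed;    // success iff the last pass needed no GC
}

}  // namespace reserve_space_retry_sketch
// ---------------------------------------------------------------------------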
1593 
1594 void Heap::EnsureFromSpaceIsCommitted() {
1595  if (new_space_->CommitFromSpaceIfNeeded()) return;
1596 
1597  // Committing memory to from space failed.
1598  // Memory is exhausted and we will die.
1599  FatalProcessOutOfMemory("Committing semi space failed.");
1600 }
1601 
1602 
1603 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1604  if (start_new_space_size == 0) return;
1605 
1606  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1607  static_cast<double>(start_new_space_size) * 100);
1608 
1609  if (previous_semi_space_copied_object_size_ > 0) {
1610  promotion_rate_ =
1611  (static_cast<double>(promoted_objects_size_) /
1612  static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1613  } else {
1614  promotion_rate_ = 0;
1615  }
1616 
1617  semi_space_copied_rate_ =
1618  (static_cast<double>(semi_space_copied_object_size_) /
1619  static_cast<double>(start_new_space_size) * 100);
1620 
1621  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1622  tracer()->AddSurvivalRatio(survival_rate);
1623 }
1624 
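// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] The statistics
// above are plain percentages of the new-space size at GC start (promotion
// ratio, semi-space copied rate) or of the previously copied bytes (promotion
// rate). A standalone recomputation with invented byte counts, using powers of
// two so the comparisons are exact.
namespace survival_statistics_sketch {

constexpr double Percentage(double part, double whole) {
  return part / whole * 100;
}

// Assume 1 MB promoted and 2 MB copied out of an 8 MB new space, with 4 MB
// copied in the previous cycle (all numbers invented for the example).
static_assert(Percentage(1, 8) == 12.5, "promotion ratio");
static_assert(Percentage(2, 8) == 25.0, "semi-space copied rate");
static_assert(Percentage(1, 4) == 25.0, "promotion rate");

}  // namespace survival_statistics_sketch
// ---------------------------------------------------------------------------
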
1625 bool Heap::PerformGarbageCollection(
1626  GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1627  int freed_global_handles = 0;
1628 
1629  if (!IsYoungGenerationCollector(collector)) {
1630  PROFILE(isolate_, CodeMovingGCEvent());
1631  }
1632 
1633 #ifdef VERIFY_HEAP
1634  if (FLAG_verify_heap) {
1635  VerifyStringTable(this->isolate());
1636  }
1637 #endif
1638 
1639  GCType gc_type =
1640  collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1641 
1642  {
1643  GCCallbacksScope scope(this);
1644  // Temporarily override any embedder stack state as callbacks may create their
1645  // own state on the stack and recursively trigger GC.
1646  EmbedderStackStateScope embedder_scope(
1647  local_embedder_heap_tracer(),
1648  EmbedderHeapTracer::EmbedderStackState::kUnknown);
1649  if (scope.CheckReenter()) {
1650  AllowHeapAllocation allow_allocation;
1651  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1652  VMState<EXTERNAL> state(isolate_);
1653  HandleScope handle_scope(isolate_);
1654  CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1655  }
1656  }
1657 
1658  EnsureFromSpaceIsCommitted();
1659 
1660  size_t start_new_space_size = Heap::new_space()->Size();
1661 
1662  {
1663  Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
1664 
1665  switch (collector) {
1666  case MARK_COMPACTOR:
1667  UpdateOldGenerationAllocationCounter();
1668  // Perform mark-sweep with optional compaction.
1669  MarkCompact();
1670  old_generation_size_configured_ = true;
1671  // This should be updated before PostGarbageCollectionProcessing, which
1672  // can cause another GC. Take into account the objects promoted during
1673  // GC.
1674  old_generation_allocation_counter_at_last_gc_ +=
1675  static_cast<size_t>(promoted_objects_size_);
1676  old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
1677  break;
1678  case MINOR_MARK_COMPACTOR:
1679  MinorMarkCompact();
1680  break;
1681  case SCAVENGER:
1682  if ((fast_promotion_mode_ &&
1683  CanExpandOldGeneration(new_space()->Size()))) {
1684  tracer()->NotifyYoungGenerationHandling(
1685  YoungGenerationHandling::kFastPromotionDuringScavenge);
1686  EvacuateYoungGeneration();
1687  } else {
1688  tracer()->NotifyYoungGenerationHandling(
1689  YoungGenerationHandling::kRegularScavenge);
1690 
1691  Scavenge();
1692  }
1693  break;
1694  }
1695 
1696  ProcessPretenuringFeedback();
1697  }
1698 
1699  UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
1700  ConfigureInitialOldGenerationSize();
1701 
1702  if (collector != MARK_COMPACTOR) {
1703  // Objects that died in the new space might have been accounted
1704  // as bytes marked ahead of schedule by the incremental marker.
1705  incremental_marking()->UpdateMarkedBytesAfterScavenge(
1706  start_new_space_size - SurvivedNewSpaceObjectSize());
1707  }
1708 
1709  if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
1710  ComputeFastPromotionMode();
1711  }
1712 
1713  isolate_->counters()->objs_since_last_young()->Set(0);
1714 
1715  {
1716  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1717  // First round weak callbacks are not supposed to allocate and trigger
1718  // nested GCs.
1719  freed_global_handles =
1720  isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
1721  }
1722 
1723  if (collector == MARK_COMPACTOR) {
1724  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
1725  // TraceEpilogue may trigger operations that invalidate global handles. It
1726  // has to be called *after* all other operations that potentially touch and
1727  // reset global handles. It is also still part of the main garbage
1728  // collection pause and thus needs to be called *before* any operation that
1729  // can potentially trigger recursive garbage collections.
1730  local_embedder_heap_tracer()->TraceEpilogue();
1731  }
1732 
1733  {
1734  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1735  gc_post_processing_depth_++;
1736  {
1737  AllowHeapAllocation allow_allocation;
1738  freed_global_handles +=
1739  isolate_->global_handles()->PostGarbageCollectionProcessing(
1740  collector, gc_callback_flags);
1741  }
1742  gc_post_processing_depth_--;
1743  }
1744 
1745  isolate_->eternal_handles()->PostGarbageCollectionProcessing();
1746 
1747  // Update relocatables.
1748  Relocatable::PostGarbageCollectionProcessing(isolate_);
1749 
1750  double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1751  double mutator_speed =
1752  tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1753  size_t old_gen_size = OldGenerationSizeOfObjects();
1754  if (collector == MARK_COMPACTOR) {
1755  // Register the amount of external allocated memory.
1756  isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
1757  isolate()->isolate_data()->external_memory_;
1758  isolate()->isolate_data()->external_memory_limit_ =
1759  isolate()->isolate_data()->external_memory_ +
1760  kExternalAllocationSoftLimit;
1761 
1762  double max_factor =
1763  heap_controller()->MaxGrowingFactor(max_old_generation_size_);
1764  size_t new_limit = heap_controller()->CalculateAllocationLimit(
1765  old_gen_size, max_old_generation_size_, max_factor, gc_speed,
1766  mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
1767  old_generation_allocation_limit_ = new_limit;
1768 
1769  CheckIneffectiveMarkCompact(
1770  old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
1771  } else if (HasLowYoungGenerationAllocationRate() &&
1772  old_generation_size_configured_) {
1773  double max_factor =
1774  heap_controller()->MaxGrowingFactor(max_old_generation_size_);
1775  size_t new_limit = heap_controller()->CalculateAllocationLimit(
1776  old_gen_size, max_old_generation_size_, max_factor, gc_speed,
1777  mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
1778  if (new_limit < old_generation_allocation_limit_) {
1779  old_generation_allocation_limit_ = new_limit;
1780  }
1781  }
1782 
1783  {
1784  GCCallbacksScope scope(this);
1785  if (scope.CheckReenter()) {
1786  AllowHeapAllocation allow_allocation;
1787  TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1788  VMState<EXTERNAL> state(isolate_);
1789  HandleScope handle_scope(isolate_);
1790  CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1791  }
1792  }
1793 
1794 #ifdef VERIFY_HEAP
1795  if (FLAG_verify_heap) {
1796  VerifyStringTable(this->isolate());
1797  }
1798 #endif
1799 
1800  return freed_global_handles > 0;
1801 }
1802 
1803 
1804 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1805  RuntimeCallTimerScope runtime_timer(
1806  isolate(), RuntimeCallCounterId::kGCPrologueCallback);
1807  for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
1808  if (gc_type & info.gc_type) {
1809  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1810  info.callback(isolate, gc_type, flags, info.data);
1811  }
1812  }
1813 }
1814 
1815 void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1816  RuntimeCallTimerScope runtime_timer(
1817  isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
1818  for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
1819  if (gc_type & info.gc_type) {
1820  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1821  info.callback(isolate, gc_type, flags, info.data);
1822  }
1823  }
1824 }
1825 
1826 
1827 void Heap::MarkCompact() {
1828  PauseAllocationObserversScope pause_observers(this);
1829 
1830  SetGCState(MARK_COMPACT);
1831 
1832  LOG(isolate_, ResourceEvent("markcompact", "begin"));
1833 
1834  uint64_t size_of_objects_before_gc = SizeOfObjects();
1835 
1836  CodeSpaceMemoryModificationScope code_modification(this);
1837 
1838  mark_compact_collector()->Prepare();
1839 
1840  ms_count_++;
1841 
1842  MarkCompactPrologue();
1843 
1844  mark_compact_collector()->CollectGarbage();
1845 
1846  LOG(isolate_, ResourceEvent("markcompact", "end"));
1847 
1848  MarkCompactEpilogue();
1849 
1850  if (FLAG_allocation_site_pretenuring) {
1851  EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1852  }
1853 }
1854 
1855 void Heap::MinorMarkCompact() {
1856 #ifdef ENABLE_MINOR_MC
1857  DCHECK(FLAG_minor_mc);
1858 
1859  PauseAllocationObserversScope pause_observers(this);
1860  SetGCState(MINOR_MARK_COMPACT);
1861  LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
1862 
1863  TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
1864  AlwaysAllocateScope always_allocate(isolate());
1865  IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
1866  incremental_marking());
1867  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1868 
1869  minor_mark_compact_collector()->CollectGarbage();
1870 
1871  LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
1872  SetGCState(NOT_IN_GC);
1873 #else
1874  UNREACHABLE();
1875 #endif // ENABLE_MINOR_MC
1876 }
1877 
1878 void Heap::MarkCompactEpilogue() {
1879  TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
1880  SetGCState(NOT_IN_GC);
1881 
1882  isolate_->counters()->objs_since_last_full()->Set(0);
1883 
1884  incremental_marking()->Epilogue();
1885 
1886  DCHECK(incremental_marking()->IsStopped());
1887 }
1888 
1889 
1890 void Heap::MarkCompactPrologue() {
1891  TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
1892  isolate_->descriptor_lookup_cache()->Clear();
1893  RegExpResultsCache::Clear(string_split_cache());
1894  RegExpResultsCache::Clear(regexp_multiple_cache());
1895 
1896  isolate_->compilation_cache()->MarkCompactPrologue();
1897 
1898  FlushNumberStringCache();
1899 }
1900 
1901 
1902 void Heap::CheckNewSpaceExpansionCriteria() {
1903  if (FLAG_experimental_new_space_growth_heuristic) {
1904  if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1905  survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
1906  // Grow the size of new space if there is room to grow, and more than 10%
1907  // have survived the last scavenge.
1908  new_space_->Grow();
1909  survived_since_last_expansion_ = 0;
1910  }
1911  } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1912  survived_since_last_expansion_ > new_space_->TotalCapacity()) {
1913  // Grow the size of new space if there is room to grow, and enough data
1914  // has survived scavenge since the last expansion.
1915  new_space_->Grow();
1916  survived_since_last_expansion_ = 0;
1917  }
1918 }
1919 
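// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] The two growth
// criteria above reduce to simple comparisons: with the experimental heuristic
// new space grows once at least 10% of its capacity survived the last
// scavenge; otherwise it grows once the bytes survived since the last
// expansion exceed the current capacity. A standalone restatement (size_t is
// already available here; all sizes in the example are invented).
namespace new_space_growth_sketch {

constexpr bool ShouldGrowNewSpace(size_t total_capacity,
                                  size_t maximum_capacity,
                                  size_t survived_last_scavenge,
                                  size_t survived_since_last_expansion,
                                  bool experimental_heuristic) {
  return total_capacity < maximum_capacity &&
         (experimental_heuristic
              ? survived_last_scavenge * 100 / total_capacity >= 10
              : survived_since_last_expansion > total_capacity);
}

// A 4 MB new space (8 MB maximum) with 512 KB surviving the last scavenge sits
// at 12.5% survival, so the experimental heuristic grows it.
static_assert(ShouldGrowNewSpace(4u << 20, 8u << 20, 512u << 10, 0, true),
              "experimental heuristic grows new space");

}  // namespace new_space_growth_sketch
// ---------------------------------------------------------------------------
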
1920 void Heap::EvacuateYoungGeneration() {
1921  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
1922  base::MutexGuard guard(relocation_mutex());
1923  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1924  if (!FLAG_concurrent_marking) {
1925  DCHECK(fast_promotion_mode_);
1926  DCHECK(CanExpandOldGeneration(new_space()->Size()));
1927  }
1928 
1929  mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
1930 
1931  SetGCState(SCAVENGE);
1932  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1933 
1934  // Move pages from new->old generation.
1935  PageRange range(new_space()->first_allocatable_address(), new_space()->top());
1936  for (auto it = range.begin(); it != range.end();) {
1937  Page* p = (*++it)->prev_page();
1938  new_space()->from_space().RemovePage(p);
1939  Page::ConvertNewToOld(p);
1940  if (incremental_marking()->IsMarking())
1941  mark_compact_collector()->RecordLiveSlotsOnPage(p);
1942  }
1943 
1944  // Reset new space.
1945  if (!new_space()->Rebalance()) {
1946  FatalProcessOutOfMemory("NewSpace::Rebalance");
1947  }
1948  new_space()->ResetLinearAllocationArea();
1949  new_space()->set_age_mark(new_space()->top());
1950 
1951  // Fix up special trackers.
1952  external_string_table_.PromoteAllNewSpaceStrings();
1953  // GlobalHandles are updated in PostGarbageCollectionProcessing.
1954 
1955  IncrementYoungSurvivorsCounter(new_space()->Size());
1956  IncrementPromotedObjectsSize(new_space()->Size());
1957  IncrementSemiSpaceCopiedObjectSize(0);
1958 
1959  LOG(isolate_, ResourceEvent("scavenge", "end"));
1960  SetGCState(NOT_IN_GC);
1961 }
1962 
1963 void Heap::Scavenge() {
1964  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
1965  base::MutexGuard guard(relocation_mutex());
1966  ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1967  // There are soft limits in the allocation code, designed to trigger a mark
1968  // sweep collection by failing allocations. There is no sense in trying to
1969  // trigger one during scavenge: scavenge allocations should always succeed.
1970  AlwaysAllocateScope scope(isolate());
1971 
1972  // Bump-pointer allocations done during scavenge are not real allocations.
1973  // Pause the inline allocation steps.
1974  PauseAllocationObserversScope pause_observers(this);
1975  IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
1976  incremental_marking());
1977 
1978 
1979  mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
1980 
1981  SetGCState(SCAVENGE);
1982 
1983  // Flip the semispaces. After flipping, to space is empty, from space has
1984  // live objects.
1985  new_space()->Flip();
1986  new_space()->ResetLinearAllocationArea();
1987 
1988  // We also flip the young generation large object space. All large objects
1989  // will be in the from space.
1990  new_lo_space()->Flip();
1991 
1992  // Implements Cheney's copying algorithm
1993  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1994 
1995  scavenger_collector_->CollectGarbage();
1996 
1997  LOG(isolate_, ResourceEvent("scavenge", "end"));
1998 
1999  SetGCState(NOT_IN_GC);
2000 }
2001 
2002 void Heap::ComputeFastPromotionMode() {
2003  const size_t survived_in_new_space =
2004  survived_last_scavenge_ * 100 / new_space_->Capacity();
2005  fast_promotion_mode_ =
2006  !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
2007  !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
2008  survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
2009  if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
2010  PrintIsolate(
2011  isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
2012  fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
2013  }
2014 }
2015 
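// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] Fast promotion
// mode above turns on only when several flags and capacity conditions hold at
// once; the quantitative piece is the survival percentage, computed with
// truncating integer division and compared against
// kMinPromotedPercentForFastPromotionMode (defined elsewhere in the heap).
// The percentage itself, standalone, with invented sizes:
namespace fast_promotion_sketch {

constexpr size_t SurvivedPercent(size_t survived_last_scavenge,
                                 size_t new_space_capacity) {
  return survived_last_scavenge * 100 / new_space_capacity;
}

// 3 MB surviving out of a 4 MB new space is a 75% survival rate.
static_assert(SurvivedPercent(3u << 20, 4u << 20) == 75, "worked example");

}  // namespace fast_promotion_sketch
// ---------------------------------------------------------------------------
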
2016 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
2017  if (unprotected_memory_chunks_registry_enabled_) {
2018  base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
2019  if (unprotected_memory_chunks_.insert(chunk).second) {
2020  chunk->SetReadAndWritable();
2021  }
2022  }
2023 }
2024 
2025 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
2026  UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
2027 }
2028 
2029 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
2030  unprotected_memory_chunks_.erase(chunk);
2031 }
2032 
2033 void Heap::ProtectUnprotectedMemoryChunks() {
2034  DCHECK(unprotected_memory_chunks_registry_enabled_);
2035  for (auto chunk = unprotected_memory_chunks_.begin();
2036  chunk != unprotected_memory_chunks_.end(); chunk++) {
2037  CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
2038  (*chunk)->SetReadAndExecutable();
2039  }
2040  unprotected_memory_chunks_.clear();
2041 }
2042 
2043 bool Heap::ExternalStringTable::Contains(String string) {
2044  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
2045  if (new_space_strings_[i] == string) return true;
2046  }
2047  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
2048  if (old_space_strings_[i] == string) return true;
2049  }
2050  return false;
2051 }
2052 
2053 String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2054  ObjectSlot p) {
2055  MapWord first_word = HeapObject::cast(*p)->map_word();
2056 
2057  if (!first_word.IsForwardingAddress()) {
2058  // Unreachable external string can be finalized.
2059  String string = String::cast(*p);
2060  if (!string->IsExternalString()) {
2061  // Original external string has been internalized.
2062  DCHECK(string->IsThinString());
2063  return String();
2064  }
2065  heap->FinalizeExternalString(string);
2066  return String();
2067  }
2068 
2069  // String is still reachable.
2070  String new_string = String::cast(first_word.ToForwardingAddress());
2071  if (new_string->IsThinString()) {
2072  // Filtering Thin strings out of the external string table.
2073  return String();
2074  } else if (new_string->IsExternalString()) {
2075  MemoryChunk::MoveExternalBackingStoreBytes(
2076  ExternalBackingStoreType::kExternalString,
2077  Page::FromAddress(reinterpret_cast<Address>(*p)),
2078  Page::FromHeapObject(new_string),
2079  ExternalString::cast(new_string)->ExternalPayloadSize());
2080  return new_string;
2081  }
2082 
2083  // Internalization can replace external strings with non-external strings.
2084  return new_string->IsExternalString() ? new_string : String();
2085 }
2086 
2087 void Heap::ExternalStringTable::VerifyNewSpace() {
2088 #ifdef DEBUG
2089  std::set<String> visited_map;
2090  std::map<MemoryChunk*, size_t> size_map;
2091  ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2092  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
2093  String obj = String::cast(new_space_strings_[i]);
2094  MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2095  DCHECK(mc->InNewSpace());
2096  DCHECK(heap_->InNewSpace(obj));
2097  DCHECK(!obj->IsTheHole(heap_->isolate()));
2098  DCHECK(obj->IsExternalString());
2099  // Note: we can have repeated elements in the table.
2100  DCHECK_EQ(0, visited_map.count(obj));
2101  visited_map.insert(obj);
2102  size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
2103  }
2104  for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2105  it != size_map.end(); it++)
2106  DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2107 #endif
2108 }
2109 
2110 void Heap::ExternalStringTable::Verify() {
2111 #ifdef DEBUG
2112  std::set<String> visited_map;
2113  std::map<MemoryChunk*, size_t> size_map;
2114  ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2115  VerifyNewSpace();
2116  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
2117  String obj = String::cast(old_space_strings_[i]);
2118  MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2119  DCHECK(!mc->InNewSpace());
2120  DCHECK(!heap_->InNewSpace(obj));
2121  DCHECK(!obj->IsTheHole(heap_->isolate()));
2122  DCHECK(obj->IsExternalString());
2123  // Note: we can have repeated elements in the table.
2124  DCHECK_EQ(0, visited_map.count(obj));
2125  visited_map.insert(obj);
2126  size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
2127  }
2128  for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2129  it != size_map.end(); it++)
2130  DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2131 #endif
2132 }
2133 
2134 void Heap::ExternalStringTable::UpdateNewSpaceReferences(
2135  Heap::ExternalStringTableUpdaterCallback updater_func) {
2136  if (new_space_strings_.empty()) return;
2137 
2138  ObjectSlot start(new_space_strings_.data());
2139  ObjectSlot end(new_space_strings_.data() + new_space_strings_.size());
2140  ObjectSlot last = start;
2141 
2142  for (ObjectSlot p = start; p < end; ++p) {
2143  String target = updater_func(heap_, p);
2144 
2145  if (target.is_null()) continue;
2146 
2147  DCHECK(target->IsExternalString());
2148 
2149  if (InNewSpace(target)) {
2150  // String is still in new space. Update the table entry.
2151  last.store(target);
2152  ++last;
2153  } else {
2154  // String got promoted. Move it to the old string list.
2155  old_space_strings_.push_back(target);
2156  }
2157  }
2158 
2159  DCHECK(last <= end);
2160  new_space_strings_.resize(last - start);
2161 #ifdef VERIFY_HEAP
2162  if (FLAG_verify_heap) {
2163  VerifyNewSpace();
2164  }
2165 #endif
2166 }
2167 
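// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.]
// UpdateNewSpaceReferences() above compacts the new-space string list in
// place: entries that stay young are written back through a trailing 'last'
// cursor, promoted entries are appended to the old list, dead entries are
// dropped, and the list is finally resized to the cursor. The same idiom on a
// plain std::vector of raw pointers (the element type, callbacks, and names
// are hypothetical; <vector> is already available here).
namespace string_table_compaction_sketch {

template <typename T, typename UpdateFn, typename IsYoungFn>
void CompactAndPromote(std::vector<T*>* young, std::vector<T*>* old,
                       UpdateFn update, IsYoungFn is_young) {
  size_t last = 0;
  for (size_t i = 0; i < young->size(); ++i) {
    T* target = update((*young)[i]);
    if (target == nullptr) continue;   // entry died; drop it
    if (is_young(target)) {
      (*young)[last++] = target;       // still young: keep (possibly updated)
    } else {
      old->push_back(target);          // promoted: move to the old list
    }
  }
  young->resize(last);                 // shrink to the surviving entries
}

}  // namespace string_table_compaction_sketch
// ---------------------------------------------------------------------------
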
2168 void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
2169  old_space_strings_.reserve(old_space_strings_.size() +
2170  new_space_strings_.size());
2171  std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
2172  std::back_inserter(old_space_strings_));
2173  new_space_strings_.clear();
2174 }
2175 
2176 void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
2177  if (!new_space_strings_.empty()) {
2178  v->VisitRootPointers(
2179  Root::kExternalStringsTable, nullptr,
2180  ObjectSlot(new_space_strings_.data()),
2181  ObjectSlot(new_space_strings_.data() + new_space_strings_.size()));
2182  }
2183 }
2184 
2185 void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2186  IterateNewSpaceStrings(v);
2187  if (!old_space_strings_.empty()) {
2188  v->VisitRootPointers(
2189  Root::kExternalStringsTable, nullptr,
2190  ObjectSlot(old_space_strings_.data()),
2191  ObjectSlot(old_space_strings_.data() + old_space_strings_.size()));
2192  }
2193 }
2194 
2195 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
2196  ExternalStringTableUpdaterCallback updater_func) {
2197  external_string_table_.UpdateNewSpaceReferences(updater_func);
2198 }
2199 
2200 void Heap::ExternalStringTable::UpdateReferences(
2201  Heap::ExternalStringTableUpdaterCallback updater_func) {
2202  if (old_space_strings_.size() > 0) {
2203  ObjectSlot start(old_space_strings_.data());
2204  ObjectSlot end(old_space_strings_.data() + old_space_strings_.size());
2205  for (ObjectSlot p = start; p < end; ++p) p.store(updater_func(heap_, p));
2206  }
2207 
2208  UpdateNewSpaceReferences(updater_func);
2209 }
2210 
2211 void Heap::UpdateReferencesInExternalStringTable(
2212  ExternalStringTableUpdaterCallback updater_func) {
2213  external_string_table_.UpdateReferences(updater_func);
2214 }
2215 
2216 
2217 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
2218  ProcessNativeContexts(retainer);
2219  ProcessAllocationSites(retainer);
2220 }
2221 
2222 
2223 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
2224  ProcessNativeContexts(retainer);
2225 }
2226 
2227 
2228 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
2229  Object* head =
2230  VisitWeakList2<Context>(this, native_contexts_list(), retainer);
2231  // Update the head of the list of contexts.
2232  set_native_contexts_list(head);
2233 }
2234 
2235 
2236 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
2237  Object* allocation_site_obj =
2238  VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
2239  set_allocation_sites_list(allocation_site_obj);
2240 }
2241 
2242 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
2243  set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
2244  set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
2245 }
2246 
2247 void Heap::ForeachAllocationSite(
2248  Object* list, const std::function<void(AllocationSite*)>& visitor) {
2249  DisallowHeapAllocation disallow_heap_allocation;
2250  Object* current = list;
2251  while (current->IsAllocationSite()) {
2252  AllocationSite* site = AllocationSite::cast(current);
2253  visitor(site);
2254  Object* current_nested = site->nested_site();
2255  while (current_nested->IsAllocationSite()) {
2256  AllocationSite* nested_site = AllocationSite::cast(current_nested);
2257  visitor(nested_site);
2258  current_nested = nested_site->nested_site();
2259  }
2260  current = site->weak_next();
2261  }
2262 }
2263 
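// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.]
// ForeachAllocationSite() above walks the singly linked list of sites through
// weak_next(), and for each site also walks its nested_site() chain, applying
// the visitor to every node it reaches. The same traversal over a stand-in
// struct, with nullptr playing the role of the non-site end marker (the struct
// and field names are hypothetical).
namespace allocation_site_walk_sketch {

struct SiteNode {
  SiteNode* weak_next = nullptr;  // links top-level sites
  SiteNode* nested = nullptr;     // links sites nested under this one
};

template <typename Visitor>
void ForEachSite(SiteNode* list, Visitor visitor) {
  for (SiteNode* current = list; current != nullptr;
       current = current->weak_next) {
    visitor(current);
    for (SiteNode* nested = current->nested; nested != nullptr;
         nested = nested->nested) {
      visitor(nested);
    }
  }
}

}  // namespace allocation_site_walk_sketch
// ---------------------------------------------------------------------------
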
2264 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
2265  DisallowHeapAllocation no_allocation_scope;
2266  bool marked = false;
2267 
2268  ForeachAllocationSite(allocation_sites_list(),
2269  [&marked, flag, this](AllocationSite* site) {
2270  if (site->GetPretenureMode() == flag) {
2271  site->ResetPretenureDecision();
2272  site->set_deopt_dependent_code(true);
2273  marked = true;
2274  RemoveAllocationSitePretenuringFeedback(site);
2275  return;
2276  }
2277  });
2278  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2279 }
2280 
2281 
2282 void Heap::EvaluateOldSpaceLocalPretenuring(
2283  uint64_t size_of_objects_before_gc) {
2284  uint64_t size_of_objects_after_gc = SizeOfObjects();
2285  double old_generation_survival_rate =
2286  (static_cast<double>(size_of_objects_after_gc) * 100) /
2287  static_cast<double>(size_of_objects_before_gc);
2288 
2289  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2290  // Too many objects died in the old generation; pretenuring of wrong
2291  // allocation sites may be the cause for that. We have to deopt all
2292  // dependent code registered in the allocation sites to re-evaluate
2293  // our pretenuring decisions.
2294  ResetAllAllocationSitesDependentCode(TENURED);
2295  if (FLAG_trace_pretenuring) {
2296  PrintF(
2297  "Deopt all allocation sites dependent code due to low survival "
2298  "rate in the old generation %f\n",
2299  old_generation_survival_rate);
2300  }
2301  }
2302 }
2303 
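// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] The trigger
// above is a single ratio: old-generation bytes after the GC over bytes before
// it, as a percentage, compared against kOldSurvivalRateLowThreshold (defined
// elsewhere in the heap). A standalone recomputation with invented sizes:
namespace old_space_pretenuring_sketch {

constexpr double OldGenerationSurvivalRate(double size_before_gc,
                                           double size_after_gc) {
  return size_after_gc * 100 / size_before_gc;
}

// 64 MB before the GC and 16 MB after it means only 25% survived, the kind of
// low survival rate that resets pretenuring decisions above.
static_assert(OldGenerationSurvivalRate(64, 16) == 25.0, "worked example");

}  // namespace old_space_pretenuring_sketch
// ---------------------------------------------------------------------------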
2304 
2305 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2306  DisallowHeapAllocation no_allocation;
2307  // All external strings are listed in the external string table.
2308 
2309  class ExternalStringTableVisitorAdapter : public RootVisitor {
2310  public:
2311  explicit ExternalStringTableVisitorAdapter(
2312  Isolate* isolate, v8::ExternalResourceVisitor* visitor)
2313  : isolate_(isolate), visitor_(visitor) {}
2314  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
2315  ObjectSlot end) override {
2316  for (ObjectSlot p = start; p < end; ++p) {
2317  DCHECK((*p)->IsExternalString());
2318  visitor_->VisitExternalString(
2319  Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
2320  }
2321  }
2322 
2323  private:
2324  Isolate* isolate_;
2325  v8::ExternalResourceVisitor* visitor_;
2326  } external_string_table_visitor(isolate(), visitor);
2327 
2328  external_string_table_.IterateAll(&external_string_table_visitor);
2329 }
2330 
2331 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
2332  0); // NOLINT
2333 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
2334  0); // NOLINT
2335 #ifdef V8_HOST_ARCH_32_BIT
2336 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
2337  0); // NOLINT
2338 #endif
2339 
2340 
2341 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
2342  switch (alignment) {
2343  case kWordAligned:
2344  return 0;
2345  case kDoubleAligned:
2346  case kDoubleUnaligned:
2347  return kDoubleSize - kPointerSize;
2348  default:
2349  UNREACHABLE();
2350  }
2351  return 0;
2352 }
2353 
2354 
2355 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2356  if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
2357  return kPointerSize;
2358  if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
2359  return kDoubleSize - kPointerSize; // No fill if double is always aligned.
2360  return 0;
2361 }
2362 
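// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] The fill
// computed above only matters when pointers are smaller than doubles (a 32-bit
// heap): a word-aligned but not double-aligned address needs one extra word of
// filler before a double-aligned object. A standalone restatement assuming
// 4-byte words and 8-byte doubles (constants invented for the example;
// uintptr_t is already available here).
namespace fill_to_align_sketch {

constexpr int kExampleWordSize = 4;
constexpr uintptr_t kExampleDoubleAlignmentMask = 8 - 1;

constexpr int FillToDoubleAlign(uintptr_t address) {
  return (address & kExampleDoubleAlignmentMask) != 0 ? kExampleWordSize : 0;
}

// 0x1004 is word-aligned but not double-aligned, so one 4-byte filler word
// moves the object start to 0x1008; 0x1008 needs no filler.
static_assert(FillToDoubleAlign(0x1004) == 4, "needs one filler word");
static_assert(FillToDoubleAlign(0x1008) == 0, "already double aligned");

}  // namespace fill_to_align_sketch
// ---------------------------------------------------------------------------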
2363 
2364 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
2365  CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
2366  return HeapObject::FromAddress(object->address() + filler_size);
2367 }
2368 
2369 
2370 HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2371  int allocation_size,
2372  AllocationAlignment alignment) {
2373  int filler_size = allocation_size - object_size;
2374  DCHECK_LT(0, filler_size);
2375  int pre_filler = GetFillToAlign(object->address(), alignment);
2376  if (pre_filler) {
2377  object = PrecedeWithFiller(object, pre_filler);
2378  filler_size -= pre_filler;
2379  }
2380  if (filler_size)
2381  CreateFillerObjectAt(object->address() + object_size, filler_size,
2382  ClearRecordedSlots::kNo);
2383  return object;
2384 }
2385 
2386 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
2387  ArrayBufferTracker::RegisterNew(this, buffer);
2388 }
2389 
2390 
2391 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2392  ArrayBufferTracker::Unregister(this, buffer);
2393 }
2394 
2395 void Heap::ConfigureInitialOldGenerationSize() {
2396  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2397  old_generation_allocation_limit_ =
2398  Max(heap_controller()->MinimumAllocationLimitGrowingStep(
2399  CurrentHeapGrowingMode()),
2400  static_cast<size_t>(
2401  static_cast<double>(old_generation_allocation_limit_) *
2402  (tracer()->AverageSurvivalRatio() / 100)));
2403  }
2404 }
2405 
2406 void Heap::CreateJSEntryStub() {
2407  JSEntryStub stub(isolate(), StackFrame::ENTRY);
2408  set_js_entry_code(*stub.GetCode());
2409 }
2410 
2411 
2412 void Heap::CreateJSConstructEntryStub() {
2413  JSEntryStub stub(isolate(), StackFrame::CONSTRUCT_ENTRY);
2414  set_js_construct_entry_code(*stub.GetCode());
2415 }
2416 
2417 void Heap::CreateJSRunMicrotasksEntryStub() {
2418  JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks);
2419  set_js_run_microtasks_entry_code(*stub.GetCode());
2420 }
2421 
2422 void Heap::CreateFixedStubs() {
2423  // Here we create roots for fixed stubs. They are needed at GC
2424  // for cooking and uncooking (check out frames.cc).
2425  // This eliminates the need for doing dictionary lookups in the
2426  // stub cache for these stubs.
2427  HandleScope scope(isolate());
2428  // Canonicalize handles, so that we can share constant pool entries pointing
2429  // to code targets without dereferencing their handles.
2430  CanonicalHandleScope canonical(isolate());
2431  // gcc-4.4 has problem generating correct code of following snippet:
2432  // { JSEntryStub stub;
2433  // js_entry_code_ = *stub.GetCode();
2434  // }
2435  // { JSConstructEntryStub stub;
2436  // js_construct_entry_code_ = *stub.GetCode();
2437  // }
2438  // To workaround the problem, make separate functions without inlining.
2439  Heap::CreateJSEntryStub();
2440  Heap::CreateJSConstructEntryStub();
2441  Heap::CreateJSRunMicrotasksEntryStub();
2442 }
2443 
2444 void Heap::FlushNumberStringCache() {
2445  // Flush the number to string cache.
2446  int len = number_string_cache()->length();
2447  for (int i = 0; i < len; i++) {
2448  number_string_cache()->set_undefined(i);
2449  }
2450 }
2451 
2452 HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
2453  ClearRecordedSlots clear_slots_mode,
2454  ClearFreedMemoryMode clear_memory_mode) {
2455  if (size == 0) return nullptr;
2456  HeapObject* filler = HeapObject::FromAddress(addr);
2457  if (size == kPointerSize) {
2458  filler->set_map_after_allocation(
2459  Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
2460  SKIP_WRITE_BARRIER);
2461  } else if (size == 2 * kPointerSize) {
2462  filler->set_map_after_allocation(
2463  Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
2464  SKIP_WRITE_BARRIER);
2465  if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2466  Memory<Address>(addr + kPointerSize) =
2467  static_cast<Address>(kClearedFreeMemoryValue);
2468  }
2469  } else {
2470  DCHECK_GT(size, 2 * kPointerSize);
2471  filler->set_map_after_allocation(
2472  Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
2473  SKIP_WRITE_BARRIER);
2474  FreeSpace::cast(filler)->relaxed_write_size(size);
2475  if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2476  memset(reinterpret_cast<void*>(addr + 2 * kPointerSize),
2477  kClearedFreeMemoryValue, size - 2 * kPointerSize);
2478  }
2479  }
2480  if (clear_slots_mode == ClearRecordedSlots::kYes) {
2481  ClearRecordedSlotRange(addr, addr + size);
2482  }
2483 
2484  // At this point, we may be deserializing the heap from a snapshot, and
2485  // none of the maps have been created yet, so they are still nullptr.
2486  DCHECK((filler->map().is_null() && !deserialization_complete_) ||
2487  filler->map()->IsMap());
2488  return filler;
2489 }
2490 
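// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.]
// CreateFillerObjectAt() above picks one of three filler shapes purely by
// size: a one-word filler, a two-word filler, or a FreeSpace object that
// records its own length for anything larger. A standalone restatement of that
// dispatch for an assumed word size (the enum and names are hypothetical).
namespace filler_dispatch_sketch {

enum class FillerKind { kNone, kOneWord, kTwoWord, kFreeSpace };

constexpr FillerKind ClassifyFiller(int size, int word_size) {
  return size == 0 ? FillerKind::kNone
                   : size == word_size
                         ? FillerKind::kOneWord
                         : size == 2 * word_size ? FillerKind::kTwoWord
                                                 : FillerKind::kFreeSpace;
}

// With 8-byte words: 8 -> one-word filler map, 16 -> two-word filler map,
// anything bigger -> FreeSpace with an explicit size field.
static_assert(ClassifyFiller(8, 8) == FillerKind::kOneWord, "");
static_assert(ClassifyFiller(16, 8) == FillerKind::kTwoWord, "");
static_assert(ClassifyFiller(24, 8) == FillerKind::kFreeSpace, "");

}  // namespace filler_dispatch_sketch
// ---------------------------------------------------------------------------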
2491 
2492 bool Heap::CanMoveObjectStart(HeapObject* object) {
2493  if (!FLAG_move_object_start) return false;
2494 
2495  // Sampling heap profiler may have a reference to the object.
2496  if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
2497 
2498  Address address = object->address();
2499 
2500  if (IsLargeObject(object)) return false;
2501 
2502  // We can move the object start if the page was already swept.
2503  return Page::FromAddress(address)->SweepingDone();
2504 }
2505 
2506 bool Heap::IsImmovable(HeapObject* object) {
2507  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
2508  return chunk->NeverEvacuate() || IsLargeObject(object);
2509 }
2510 
2511 bool Heap::IsLargeObject(HeapObject* object) {
2512  return lo_space()->Contains(object) || code_lo_space()->Contains(object) ||
2513  new_lo_space()->Contains(object);
2514 }
2515 
2516 bool Heap::IsInYoungGeneration(HeapObject* object) {
2517  if (MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace()) {
2518  return !object->map_word().IsForwardingAddress();
2519  }
2520  return Heap::InNewSpace(object);
2521 }
2522 
2523 #ifdef ENABLE_SLOW_DCHECKS
2524 namespace {
2525 
2526 class LeftTrimmerVerifierRootVisitor : public RootVisitor {
2527  public:
2528  explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
2529  : to_check_(to_check) {}
2530 
2531  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
2532  ObjectSlot end) override {
2533  for (ObjectSlot p = start; p < end; ++p) {
2534  DCHECK_NE(*p, to_check_);
2535  }
2536  }
2537 
2538  private:
2539  FixedArrayBase to_check_;
2540 
2541  DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
2542 };
2543 } // namespace
2544 #endif // ENABLE_SLOW_DCHECKS
2545 
2546 namespace {
2547 bool MayContainRecordedSlots(HeapObject* object) {
2548  // New space objects do not have recorded slots.
2549  if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
2550  // Whitelist objects that definitely do not have pointers.
2551  if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
2552  // Conservatively return true for other objects.
2553  return true;
2554 }
2555 } // namespace
2556 
2557 FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
2558  int elements_to_trim) {
2559  if (elements_to_trim == 0) {
2560  // This simplifies reasoning in the rest of the function.
2561  return object;
2562  }
2563  CHECK(!object.is_null());
2564  DCHECK(CanMoveObjectStart(object));
2565  // Add custom visitor to concurrent marker if new left-trimmable type
2566  // is added.
2567  DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
2568  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
2569  const int bytes_to_trim = elements_to_trim * element_size;
2570  Map map = object->map();
2571 
2572  // For now this trick is only applied to fixed arrays which may be in new
2573  // space or old space. In a large object space the object's start must
2574  // coincide with the chunk start, and thus the trick is just not applicable.
2575  DCHECK(!IsLargeObject(object));
2576  DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
2577 
2578  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
2579  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
2580  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
2581 
2582  const int len = object->length();
2583  DCHECK(elements_to_trim <= len);
2584 
2585  // Calculate location of new array start.
2586  Address old_start = object->address();
2587  Address new_start = old_start + bytes_to_trim;
2588 
2589  if (incremental_marking()->IsMarking()) {
2590  incremental_marking()->NotifyLeftTrimming(
2591  object, HeapObject::FromAddress(new_start));
2592  }
2593 
2594  // Technically in new space this write might be omitted (except for
2595  // debug mode which iterates through the heap), but to play safer
2596  // we still do it.
2597  HeapObject* filler =
2598  CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
2599 
2600  // Initialize header of the trimmed array. Since left trimming is only
2601  // performed on pages which are not concurrently swept, creating a filler
2602  // object does not require synchronization.
2603  RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
2604  RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
2605  Smi::FromInt(len - elements_to_trim));
2606 
2607  FixedArrayBase new_object =
2608  FixedArrayBase::cast(HeapObject::FromAddress(new_start));
2609 
2610  // Remove recorded slots for the new map and length offset.
2611  ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
2612  ClearRecordedSlot(new_object, HeapObject::RawField(
2613  new_object, FixedArrayBase::kLengthOffset));
2614 
2615  // Handle invalidated old-to-old slots.
2616  if (incremental_marking()->IsCompacting() &&
2617  MayContainRecordedSlots(new_object)) {
2618  // If the array was right-trimmed before, then it is registered in
2619  // the invalidated_slots.
2620  MemoryChunk::FromHeapObject(new_object)
2621  ->MoveObjectWithInvalidatedSlots(filler, new_object);
2622  // We have to clear slots in the free space to avoid stale old-to-old slots.
2623  // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
2624  // we need pointer granularity writes to avoid race with the concurrent
2625  // marking.
2626  if (filler->Size() > FreeSpace::kSize) {
2627  MemsetPointer(HeapObject::RawField(filler, FreeSpace::kSize),
2628  ReadOnlyRoots(this).undefined_value(),
2629  (filler->Size() - FreeSpace::kSize) / kPointerSize);
2630  }
2631  }
2632  // Notify the heap profiler of change in object layout.
2633  OnMoveEvent(new_object, object, new_object->Size());
2634 
2635 #ifdef ENABLE_SLOW_DCHECKS
2636  if (FLAG_enable_slow_asserts) {
2637  // Make sure the stack or other roots (e.g., Handles) don't contain pointers
2638  // to the original FixedArray (which is now the filler object).
2639  LeftTrimmerVerifierRootVisitor root_visitor(object);
2640  ReadOnlyRoots(this).Iterate(&root_visitor);
2641  IterateRoots(&root_visitor, VISIT_ALL);
2642  }
2643 #endif // ENABLE_SLOW_DCHECKS
2644 
2645  return new_object;
2646 }
2647 
2648 void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
2649  const int len = object->length();
2650  DCHECK_LE(elements_to_trim, len);
2651  DCHECK_GE(elements_to_trim, 0);
2652 
2653  int bytes_to_trim;
2654  DCHECK(!object->IsFixedTypedArrayBase());
2655  if (object->IsByteArray()) {
2656  int new_size = ByteArray::SizeFor(len - elements_to_trim);
2657  bytes_to_trim = ByteArray::SizeFor(len) - new_size;
2658  DCHECK_GE(bytes_to_trim, 0);
2659  } else if (object->IsFixedArray()) {
2660  CHECK_NE(elements_to_trim, len);
2661  bytes_to_trim = elements_to_trim * kPointerSize;
2662  } else {
2663  DCHECK(object->IsFixedDoubleArray());
2664  CHECK_NE(elements_to_trim, len);
2665  bytes_to_trim = elements_to_trim * kDoubleSize;
2666  }
2667 
2668  CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
2669 }
2670 
2671 void Heap::RightTrimWeakFixedArray(WeakFixedArray* object,
2672  int elements_to_trim) {
2673  // This function is safe to use only at the end of the mark compact
2674  // collection: When marking, we record the weak slots, and shrinking
2675  // invalidates them.
2676  DCHECK_EQ(gc_state(), MARK_COMPACT);
2677  CreateFillerForArray<WeakFixedArray*>(object, elements_to_trim,
2678  elements_to_trim * kPointerSize);
2679 }
2680 
2681 template <typename T>
2682 void Heap::CreateFillerForArray(T object, int elements_to_trim,
2683  int bytes_to_trim) {
2684  DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
2685  object->IsWeakFixedArray());
2686 
2687  // For now this trick is only applied to objects in new and paged space.
2688  DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
2689 
2690  if (bytes_to_trim == 0) {
2691  DCHECK_EQ(elements_to_trim, 0);
2692  // No need to create filler and update live bytes counters.
2693  return;
2694  }
2695 
2696  // Calculate location of new array end.
2697  int old_size = object->Size();
2698  Address old_end = object->address() + old_size;
2699  Address new_end = old_end - bytes_to_trim;
2700 
2701  // Register the array as an object with invalidated old-to-old slots. We
2702  // cannot use NotifyObjectLayoutChange as it would mark the array black,
2703  // which is not safe for left-trimming because left-trimming re-pushes
2704  // only grey arrays onto the marking worklist.
2705  if (incremental_marking()->IsCompacting() &&
2706  MayContainRecordedSlots(object)) {
2707  // Ensure that the object survives because the InvalidatedSlotsFilter will
2708  // compute its size from its map during pointers updating phase.
2709  incremental_marking()->WhiteToGreyAndPush(object);
2710  MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
2711  object, old_size);
2712  }
2713 
2714  // Technically in new space this write might be omitted (except for
2715  // debug mode which iterates through the heap), but to play safer
2716  // we still do it.
2717  // We do not create a filler for objects in a large object space.
2718  if (!IsLargeObject(object)) {
2719  HeapObject* filler =
2720  CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
2721  DCHECK_NOT_NULL(filler);
2722  // Clear the mark bits of the black area that belongs now to the filler.
2723  // This is an optimization. The sweeper will release black fillers anyway.
2724  if (incremental_marking()->black_allocation() &&
2725  incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
2726  Page* page = Page::FromAddress(new_end);
2727  incremental_marking()->marking_state()->bitmap(page)->ClearRange(
2728  page->AddressToMarkbitIndex(new_end),
2729  page->AddressToMarkbitIndex(new_end + bytes_to_trim));
2730  }
2731  }
2732 
2733  // Initialize header of the trimmed array. We are storing the new length
2734  // using release store after creating a filler for the left-over space to
2735  // avoid races with the sweeper thread.
2736  object->synchronized_set_length(object->length() - elements_to_trim);
2737 
2738  // Notify the heap object allocation tracker of change in object layout. The
2739  // array may not be moved during GC, but its size has to be adjusted nevertheless.
2740  for (auto& tracker : allocation_trackers_) {
2741  tracker->UpdateObjectSizeEvent(object->address(), object->Size());
2742  }
2743 }
2744 
2745 void Heap::MakeHeapIterable() {
2746  mark_compact_collector()->EnsureSweepingCompleted();
2747 }
2748 
2749 
2750 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
2751  const double kMinMutatorUtilization = 0.0;
2752  const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
2753  if (mutator_speed == 0) return kMinMutatorUtilization;
2754  if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
2755  // Derivation:
2756  // mutator_utilization = mutator_time / (mutator_time + gc_time)
2757  // mutator_time = 1 / mutator_speed
2758  // gc_time = 1 / gc_speed
2759  // mutator_utilization = (1 / mutator_speed) /
2760  // (1 / mutator_speed + 1 / gc_speed)
2761  // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
2762  return gc_speed / (mutator_speed + gc_speed);
2763 }
2764 
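// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] The derivation
// in the comment above collapses to gc_speed / (mutator_speed + gc_speed); the
// zero-speed special cases are handled separately in the function. One worked
// point of the formula, standalone, with invented throughputs:
namespace mutator_utilization_sketch {

constexpr double MutatorUtilization(double mutator_speed, double gc_speed) {
  return gc_speed / (mutator_speed + gc_speed);
}

// Allocating at 100 KB/ms against a GC that keeps up at 300 KB/ms leaves the
// mutator 300 / (100 + 300) = 75% of the wall clock.
static_assert(MutatorUtilization(100, 300) == 0.75, "worked example");

}  // namespace mutator_utilization_sketch
// ---------------------------------------------------------------------------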
2765 
2766 double Heap::YoungGenerationMutatorUtilization() {
2767  double mutator_speed = static_cast<double>(
2768  tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
2769  double gc_speed =
2770  tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
2771  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
2772  if (FLAG_trace_mutator_utilization) {
2773  isolate()->PrintWithTimestamp(
2774  "Young generation mutator utilization = %.3f ("
2775  "mutator_speed=%.f, gc_speed=%.f)\n",
2776  result, mutator_speed, gc_speed);
2777  }
2778  return result;
2779 }
2780 
2781 
2782 double Heap::OldGenerationMutatorUtilization() {
2783  double mutator_speed = static_cast<double>(
2784  tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
2785  double gc_speed = static_cast<double>(
2786  tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
2787  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
2788  if (FLAG_trace_mutator_utilization) {
2789  isolate()->PrintWithTimestamp(
2790  "Old generation mutator utilization = %.3f ("
2791  "mutator_speed=%.f, gc_speed=%.f)\n",
2792  result, mutator_speed, gc_speed);
2793  }
2794  return result;
2795 }
2796 
2797 
2798 bool Heap::HasLowYoungGenerationAllocationRate() {
2799  const double high_mutator_utilization = 0.993;
2800  return YoungGenerationMutatorUtilization() > high_mutator_utilization;
2801 }
2802 
2803 
2804 bool Heap::HasLowOldGenerationAllocationRate() {
2805  const double high_mutator_utilization = 0.993;
2806  return OldGenerationMutatorUtilization() > high_mutator_utilization;
2807 }
2808 
2809 
2810 bool Heap::HasLowAllocationRate() {
2811  return HasLowYoungGenerationAllocationRate() &&
2812  HasLowOldGenerationAllocationRate();
2813 }
2814 
2815 bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
2816  double mutator_utilization) {
2817  const double kHighHeapPercentage = 0.8;
2818  const double kLowMutatorUtilization = 0.4;
2819  return old_generation_size >=
2820  kHighHeapPercentage * max_old_generation_size_ &&
2821  mutator_utilization < kLowMutatorUtilization;
2822 }
2823 
2824 void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
2825  double mutator_utilization) {
2826  const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
2827  if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
2828  if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
2829  consecutive_ineffective_mark_compacts_ = 0;
2830  return;
2831  }
2832  ++consecutive_ineffective_mark_compacts_;
2833  if (consecutive_ineffective_mark_compacts_ ==
2834  kMaxConsecutiveIneffectiveMarkCompacts) {
2835  if (InvokeNearHeapLimitCallback()) {
2836  // The callback increased the heap limit.
2837  consecutive_ineffective_mark_compacts_ = 0;
2838  return;
2839  }
2840  FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
2841  }
2842 }
2843 
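// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] A mark-compact
// is judged ineffective above when the old generation sits at 80% or more of
// its maximum while mutator utilization is below 0.4, and four such GCs in a
// row are fatal unless a near-heap-limit callback raises the limit. A
// standalone restatement of the detection and counting (class and names are
// hypothetical; the thresholds restate the constants above).
namespace ineffective_mark_compact_sketch {

constexpr bool IsIneffective(double old_gen_size, double max_old_gen_size,
                             double mutator_utilization) {
  return old_gen_size >= 0.8 * max_old_gen_size && mutator_utilization < 0.4;
}

class ConsecutiveIneffectiveGCs {
 public:
  // Returns true when the caller should treat the situation as fatal
  // (assuming no callback managed to raise the heap limit).
  bool Observe(bool ineffective) {
    consecutive_ = ineffective ? consecutive_ + 1 : 0;
    return consecutive_ >= kMaxConsecutive;
  }

 private:
  static const int kMaxConsecutive = 4;
  int consecutive_ = 0;
};

}  // namespace ineffective_mark_compact_sketch
// ---------------------------------------------------------------------------
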
2844 bool Heap::HasHighFragmentation() {
2845  size_t used = OldGenerationSizeOfObjects();
2846  size_t committed = CommittedOldGenerationMemory();
2847  return HasHighFragmentation(used, committed);
2848 }
2849 
2850 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
2851  const size_t kSlack = 16 * MB;
2852  // Fragmentation is high if committed > 2 * used + kSlack.
2853  // Rewrite the expression to avoid overflow.
2854  DCHECK_GE(committed, used);
2855  return committed - used > used + kSlack;
2856 }
2857 
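// ---------------------------------------------------------------------------
// [Editorial sketch -- illustration only, not part of heap.cc.] The rewrite in
// the comment above avoids computing 2 * used, which could overflow size_t:
// given committed >= used, "committed > 2 * used + kSlack" is equivalent to
// "committed - used > used + kSlack". A standalone check with invented sizes
// (size_t is already available here; the 16 MB slack restates kSlack above):
namespace fragmentation_sketch {

constexpr size_t kSlackBytes = 16u * 1024 * 1024;

constexpr bool HasHighFragmentation(size_t used, size_t committed) {
  return committed - used > used + kSlackBytes;  // overflow-safe form
}

// 100 MB used with 300 MB committed is fragmented; 100 MB used with 180 MB
// committed is not, since 180 < 2 * 100 + 16.
static_assert(HasHighFragmentation(100u << 20, 300u << 20), "fragmented");
static_assert(!HasHighFragmentation(100u << 20, 180u << 20), "acceptable");

}  // namespace fragmentation_sketch
// ---------------------------------------------------------------------------
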
2858 bool Heap::ShouldOptimizeForMemoryUsage() {
2859  const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
2860  return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
2861  isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
2862  !CanExpandOldGeneration(kOldGenerationSlack);
2863 }
2864 
2865 void Heap::ActivateMemoryReducerIfNeeded() {
2866  // Activate memory reducer when switching to background if
2867  // - there was no mark compact since the start.
2868  // - the committed memory can be potentially reduced.
2869  // 2 pages for the old, code, and map space + 1 page for new space.
2870  const int kMinCommittedMemory = 7 * Page::kPageSize;
2871  if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
2872  isolate()->IsIsolateInBackground()) {
2873  MemoryReducer::Event event;
2874  event.type = MemoryReducer::kPossibleGarbage;
2875  event.time_ms = MonotonicallyIncreasingTimeInMs();
2876  memory_reducer_->NotifyPossibleGarbage(event);
2877  }
2878 }
2879 
2880 void Heap::ReduceNewSpaceSize() {
2881  // TODO(ulan): Unify this constant with the similar constant in
2882  // GCIdleTimeHandler once the change is merged to 4.5.
2883  static const size_t kLowAllocationThroughput = 1000;
2884  const double allocation_throughput =
2885  tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
2886 
2887  if (FLAG_predictable) return;
2888 
2889  if (ShouldReduceMemory() ||
2890  ((allocation_throughput != 0) &&
2891  (allocation_throughput < kLowAllocationThroughput))) {
2892  new_space_->Shrink();
2893  UncommitFromSpace();
2894  }
2895 }
2896 
2897 void Heap::FinalizeIncrementalMarkingIfComplete(
2898  GarbageCollectionReason gc_reason) {
2899  if (incremental_marking()->IsMarking() &&
2900  (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
2901  (!incremental_marking()->finalize_marking_completed() &&
2902  mark_compact_collector()->marking_worklist()->IsEmpty() &&
2903  local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
2904  FinalizeIncrementalMarkingIncrementally(gc_reason);
2905  } else if (incremental_marking()->IsComplete() ||
2906  (mark_compact_collector()->marking_worklist()->IsEmpty() &&
2907  local_embedder_heap_tracer()
2908  ->ShouldFinalizeIncrementalMarking())) {
2909  CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
2910  }
2911 }
2912 
2913 void Heap::FinalizeIncrementalMarkingAtomically(
2914  GarbageCollectionReason gc_reason) {
2915  DCHECK(!incremental_marking()->IsStopped());
2916  CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
2917 }
2918 
2919 void Heap::FinalizeIncrementalMarkingIncrementally(
2920  GarbageCollectionReason gc_reason) {
2921  if (FLAG_trace_incremental_marking) {
2922  isolate()->PrintWithTimestamp(
2923  "[IncrementalMarking] (%s).\n",
2924  Heap::GarbageCollectionReasonToString(gc_reason));
2925  }
2926 
2927  HistogramTimerScope incremental_marking_scope(
2928  isolate()->counters()->gc_incremental_marking_finalize());
2929  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
2930  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
2931 
2932  {
2933  GCCallbacksScope scope(this);
2934  if (scope.CheckReenter()) {
2935  AllowHeapAllocation allow_allocation;
2936  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
2937  VMState<EXTERNAL> state(isolate_);
2938  HandleScope handle_scope(isolate_);
2939  CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
2940  }
2941  }
2942  incremental_marking()->FinalizeIncrementally();
2943  {
2944  GCCallbacksScope scope(this);
2945  if (scope.CheckReenter()) {
2946  AllowHeapAllocation allow_allocation;
2947  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
2948  VMState<EXTERNAL> state(isolate_);
2949  HandleScope handle_scope(isolate_);
2950  CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
2951  }
2952  }
2953 }
2954 
2955 void Heap::RegisterDeserializedObjectsForBlackAllocation(
2956  Reservation* reservations, const std::vector<HeapObject*>& large_objects,
2957  const std::vector<Address>& maps) {
2958  // TODO(ulan): pause black allocation during deserialization to avoid
2959  // iterating all these objects in one go.
2960 
2961  if (!incremental_marking()->black_allocation()) return;
2962 
2963  // Iterate black objects in old space, code space, map space, and large
2964  // object space for side effects.
2965  IncrementalMarking::MarkingState* marking_state =
2966  incremental_marking()->marking_state();
2967  for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
2968  const Heap::Reservation& res = reservations[i];
2969  for (auto& chunk : res) {
2970  Address addr = chunk.start;
2971  while (addr < chunk.end) {
2972  HeapObject* obj = HeapObject::FromAddress(addr);
2973  // Objects can have any color because incremental marking can
2974  // start in the middle of Heap::ReserveSpace().
2975  if (marking_state->IsBlack(obj)) {
2976  incremental_marking()->ProcessBlackAllocatedObject(obj);
2977  }
2978  addr += obj->Size();
2979  }
2980  }
2981  }
2982 
2983  // Large object space doesn't use reservations, so it needs custom handling.
2984  for (HeapObject* object : large_objects) {
2985  incremental_marking()->ProcessBlackAllocatedObject(object);
2986  }
2987 
2988  // Map space doesn't use reservations, so it needs custom handling.
2989  for (Address addr : maps) {
2990  incremental_marking()->ProcessBlackAllocatedObject(
2991  HeapObject::FromAddress(addr));
2992  }
2993 }
2994 
2995 void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
2996  const DisallowHeapAllocation&) {
2997  if (incremental_marking()->IsMarking()) {
2998  incremental_marking()->MarkBlackAndPush(object);
2999  if (incremental_marking()->IsCompacting() &&
3000  MayContainRecordedSlots(object)) {
3001  MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
3002  object, size);
3003  }
3004  }
3005 #ifdef VERIFY_HEAP
3006  if (FLAG_verify_heap) {
3007  DCHECK_NULL(pending_layout_change_object_);
3008  pending_layout_change_object_ = object;
3009  }
3010 #endif
3011 }
3012 
3013 #ifdef VERIFY_HEAP
3014 // Helper class for collecting slot addresses.
3015 class SlotCollectingVisitor final : public ObjectVisitor {
3016  public:
3017  void VisitPointers(HeapObject* host, ObjectSlot start,
3018  ObjectSlot end) override {
3019  VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
3020  }
3021  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
3022  MaybeObjectSlot end) final {
3023  for (MaybeObjectSlot p = start; p < end; ++p) {
3024  slots_.push_back(p);
3025  }
3026  }
3027 
3028  int number_of_slots() { return static_cast<int>(slots_.size()); }
3029 
3030  MaybeObjectSlot slot(int i) { return slots_[i]; }
3031 
3032  private:
3033  std::vector<MaybeObjectSlot> slots_;
3034 };
3035 
3036 void Heap::VerifyObjectLayoutChange(HeapObject* object, Map new_map) {
3037  if (!FLAG_verify_heap) return;
3038 
3039  // Check that Heap::NotifyObjectLayoutChange was called for object transitions
3040  // that are not safe for concurrent marking.
3041  // If you see this check triggering for a freshly allocated object,
3042  // use object->set_map_after_allocation() to initialize its map.
3043  if (pending_layout_change_object_ == nullptr) {
3044  if (object->IsJSObject()) {
3045  DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
3046  } else {
3047  // Check that the set of slots before and after the transition match.
3048  SlotCollectingVisitor old_visitor;
3049  object->IterateFast(&old_visitor);
3050  MapWord old_map_word = object->map_word();
3051  // Temporarily set the new map to iterate new slots.
3052  object->set_map_word(MapWord::FromMap(new_map));
3053  SlotCollectingVisitor new_visitor;
3054  object->IterateFast(&new_visitor);
3055  // Restore the old map.
3056  object->set_map_word(old_map_word);
3057  DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
3058  for (int i = 0; i < new_visitor.number_of_slots(); i++) {
3059  DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
3060  }
3061  }
3062  } else {
3063  DCHECK_EQ(pending_layout_change_object_, object);
3064  pending_layout_change_object_ = nullptr;
3065  }
3066 }
3067 #endif
3068 
3069 GCIdleTimeHeapState Heap::ComputeHeapState() {
3070  GCIdleTimeHeapState heap_state;
3071  heap_state.contexts_disposed = contexts_disposed_;
3072  heap_state.contexts_disposal_rate =
3073  tracer()->ContextDisposalRateInMilliseconds();
3074  heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
3075  heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
3076  return heap_state;
3077 }
3078 
3079 
3080 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
3081  GCIdleTimeHeapState heap_state,
3082  double deadline_in_ms) {
3083  bool result = false;
3084  switch (action.type) {
3085  case DONE:
3086  result = true;
3087  break;
3088  case DO_INCREMENTAL_STEP: {
3089  const double remaining_idle_time_in_ms =
3090  incremental_marking()->AdvanceIncrementalMarking(
3091  deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
3092  StepOrigin::kTask);
3093  if (remaining_idle_time_in_ms > 0.0) {
3094  FinalizeIncrementalMarkingIfComplete(
3095  GarbageCollectionReason::kFinalizeMarkingViaTask);
3096  }
3097  result = incremental_marking()->IsStopped();
3098  break;
3099  }
3100  case DO_FULL_GC: {
3101  DCHECK_LT(0, contexts_disposed_);
3102  HistogramTimerScope scope(isolate_->counters()->gc_context());
3103  TRACE_EVENT0("v8", "V8.GCContext");
3104  CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
3105  break;
3106  }
3107  case DO_NOTHING:
3108  break;
3109  }
3110 
3111  return result;
3112 }
3113 
3114 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
3115  GCIdleTimeHeapState heap_state,
3116  double start_ms, double deadline_in_ms) {
3117  double idle_time_in_ms = deadline_in_ms - start_ms;
3118  double current_time = MonotonicallyIncreasingTimeInMs();
3119  last_idle_notification_time_ = current_time;
3120  double deadline_difference = deadline_in_ms - current_time;
3121 
3122  contexts_disposed_ = 0;
3123 
3124  if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
3125  FLAG_trace_idle_notification_verbose) {
3126  isolate_->PrintWithTimestamp(
3127  "Idle notification: requested idle time %.2f ms, used idle time %.2f "
3128  "ms, deadline usage %.2f ms [",
3129  idle_time_in_ms, idle_time_in_ms - deadline_difference,
3130  deadline_difference);
3131  action.Print();
3132  PrintF("]");
3133  if (FLAG_trace_idle_notification_verbose) {
3134  PrintF("[");
3135  heap_state.Print();
3136  PrintF("]");
3137  }
3138  PrintF("\n");
3139  }
3140 }
3141 
3142 
3143 double Heap::MonotonicallyIncreasingTimeInMs() {
3144  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
3145  static_cast<double>(base::Time::kMillisecondsPerSecond);
3146 }
3147 
3148 
3149 bool Heap::IdleNotification(int idle_time_in_ms) {
3150  return IdleNotification(
3151  V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
3152  (static_cast<double>(idle_time_in_ms) /
3153  static_cast<double>(base::Time::kMillisecondsPerSecond)));
3154 }
3155 
3156 
3157 bool Heap::IdleNotification(double deadline_in_seconds) {
3158  CHECK(HasBeenSetUp());
3159  double deadline_in_ms =
3160  deadline_in_seconds *
3161  static_cast<double>(base::Time::kMillisecondsPerSecond);
3162  HistogramTimerScope idle_notification_scope(
3163  isolate_->counters()->gc_idle_notification());
3164  TRACE_EVENT0("v8", "V8.GCIdleNotification");
3165  double start_ms = MonotonicallyIncreasingTimeInMs();
3166  double idle_time_in_ms = deadline_in_ms - start_ms;
3167 
3168  tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
3169  OldGenerationAllocationCounter());
3170 
3171  GCIdleTimeHeapState heap_state = ComputeHeapState();
3172 
3173  GCIdleTimeAction action =
3174  gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
3175 
3176  bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
3177 
3178  IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
3179  return result;
3180 }
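// ---------------------------------------------------------------------------
// Editorial example (not part of heap.cc): this overload is reached through
// the public v8::Isolate::IdleNotificationDeadline API, which takes an
// absolute deadline in seconds on the platform's monotonic clock. A minimal
// embedder-side sketch, assuming the entry points declared in include/v8.h
// and include/v8-platform.h at this revision:

void RunIdleWork(v8::Isolate* isolate, v8::Platform* platform) {
  // Hand V8 roughly 10ms of idle time; it may spend it on incremental marking
  // steps or finalization and returns true once no further idle work is left.
  double deadline_in_seconds = platform->MonotonicallyIncreasingTime() + 0.01;
  bool done = isolate->IdleNotificationDeadline(deadline_in_seconds);
  (void)done;  // Embedders typically stop posting idle tasks once this is true.
}
// ---------------------------------------------------------------------------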
3181 
3182 
3183 bool Heap::RecentIdleNotificationHappened() {
3184  return (last_idle_notification_time_ +
3185  GCIdleTimeHandler::kMaxScheduledIdleTime) >
3186  MonotonicallyIncreasingTimeInMs();
3187 }
3188 
3189 class MemoryPressureInterruptTask : public CancelableTask {
3190  public:
3191  explicit MemoryPressureInterruptTask(Heap* heap)
3192  : CancelableTask(heap->isolate()), heap_(heap) {}
3193 
3194  ~MemoryPressureInterruptTask() override = default;
3195 
3196  private:
3197  // v8::internal::CancelableTask overrides.
3198  void RunInternal() override { heap_->CheckMemoryPressure(); }
3199 
3200  Heap* heap_;
3201  DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
3202 };
3203 
3204 void Heap::CheckMemoryPressure() {
3205  if (HighMemoryPressure()) {
3206  // The optimizing compiler may be unnecessarily holding on to memory.
3207  isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
3208  }
3209  MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
3210  // Reset the memory pressure level to avoid recursive GCs triggered by
3211  // CheckMemoryPressure from AdjustAmountOfExternalAllocatedMemory called by
3212  // the finalizers.
3213  memory_pressure_level_ = MemoryPressureLevel::kNone;
3214  if (memory_pressure_level == MemoryPressureLevel::kCritical) {
3215  CollectGarbageOnMemoryPressure();
3216  } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
3217  if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3218  StartIncrementalMarking(kReduceMemoryFootprintMask,
3219  GarbageCollectionReason::kMemoryPressure);
3220  }
3221  }
3222  if (memory_reducer_) {
3223  MemoryReducer::Event event;
3224  event.type = MemoryReducer::kPossibleGarbage;
3225  event.time_ms = MonotonicallyIncreasingTimeInMs();
3226  memory_reducer_->NotifyPossibleGarbage(event);
3227  }
3228 }
3229 
3230 void Heap::CollectGarbageOnMemoryPressure() {
3231  const int kGarbageThresholdInBytes = 8 * MB;
3232  const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
3233  // This constant is the maximum response time in the RAIL performance model.
3234  const double kMaxMemoryPressurePauseMs = 100;
3235 
3236  double start = MonotonicallyIncreasingTimeInMs();
3237  CollectAllGarbage(kReduceMemoryFootprintMask,
3238  GarbageCollectionReason::kMemoryPressure,
3239  kGCCallbackFlagCollectAllAvailableGarbage);
3240  EagerlyFreeExternalMemory();
3241  double end = MonotonicallyIncreasingTimeInMs();
3242 
3243  // Estimate how much memory we can free.
3244  int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
3245  isolate()->isolate_data()->external_memory_;
3246  // If we can potentially free large amount of memory, then start GC right
3247  // away instead of waiting for memory reducer.
3248  if (potential_garbage >= kGarbageThresholdInBytes &&
3249  potential_garbage >=
3250  CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
3251  // If we spent less than half of the time budget, then perform a full GC.
3252  // Otherwise, start incremental marking.
3253  if (end - start < kMaxMemoryPressurePauseMs / 2) {
3254  CollectAllGarbage(kReduceMemoryFootprintMask,
3255  GarbageCollectionReason::kMemoryPressure,
3256  kGCCallbackFlagCollectAllAvailableGarbage);
3257  } else {
3258  if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3259  StartIncrementalMarking(kReduceMemoryFootprintMask,
3260  GarbageCollectionReason::kMemoryPressure);
3261  }
3262  }
3263  }
3264 }
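// ---------------------------------------------------------------------------
// Editorial sketch (not part of heap.cc): the retrigger decision above only
// fires when the estimated garbage clears both an absolute bar (8 MB) and a
// relative bar (10% of committed memory). A standalone illustration of that
// check, with hypothetical numbers in the comments:

bool WouldRetriggerAfterPressureGC(int64_t committed, int64_t size_of_objects,
                                   int64_t external_memory) {
  const int64_t kGarbageThresholdInBytes = 8 * 1024 * 1024;
  const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
  int64_t potential_garbage = (committed - size_of_objects) + external_memory;
  // Example: committed = 200 MB, live objects = 150 MB, external = 0
  //   -> potential_garbage = 50 MB, which is >= 8 MB and >= 20 MB (10% of
  //      200 MB), so another collection is worthwhile.
  return potential_garbage >= kGarbageThresholdInBytes &&
         potential_garbage >=
             committed * kGarbageThresholdAsFractionOfTotalMemory;
}
// ---------------------------------------------------------------------------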
3265 
3266 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
3267  bool is_isolate_locked) {
3268  MemoryPressureLevel previous = memory_pressure_level_;
3269  memory_pressure_level_ = level;
3270  if ((previous != MemoryPressureLevel::kCritical &&
3271  level == MemoryPressureLevel::kCritical) ||
3272  (previous == MemoryPressureLevel::kNone &&
3273  level == MemoryPressureLevel::kModerate)) {
3274  if (is_isolate_locked) {
3275  CheckMemoryPressure();
3276  } else {
3277  ExecutionAccess access(isolate());
3278  isolate()->stack_guard()->RequestGC();
3279  auto taskrunner = V8::GetCurrentPlatform()->GetForegroundTaskRunner(
3280  reinterpret_cast<v8::Isolate*>(isolate()));
3281  taskrunner->PostTask(
3282  base::make_unique<MemoryPressureInterruptTask>(this));
3283  }
3284  }
3285 }
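// ---------------------------------------------------------------------------
// Editorial example (not part of heap.cc): embedders reach the function above
// through v8::Isolate::MemoryPressureNotification. A hedged sketch of wiring a
// system low-memory signal into V8, assuming the public API in include/v8.h:

void OnSystemMemoryWarning(v8::Isolate* isolate, bool critical) {
  // kCritical leads to an immediate full GC via CheckMemoryPressure above;
  // kModerate only starts incremental marking if it is not already running.
  isolate->MemoryPressureNotification(
      critical ? v8::MemoryPressureLevel::kCritical
               : v8::MemoryPressureLevel::kModerate);
}
// ---------------------------------------------------------------------------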
3286 
3287 void Heap::EagerlyFreeExternalMemory() {
3288  for (Page* page : *old_space()) {
3289  if (!page->SweepingDone()) {
3290  base::MutexGuard guard(page->mutex());
3291  if (!page->SweepingDone()) {
3292  ArrayBufferTracker::FreeDead(
3293  page, mark_compact_collector()->non_atomic_marking_state());
3294  }
3295  }
3296  }
3297  memory_allocator()->unmapper()->EnsureUnmappingCompleted();
3298 }
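// ---------------------------------------------------------------------------
// Editorial example (not part of heap.cc): the "external memory" freed above
// is whatever embedders have reported as living off-heap but kept alive by JS
// objects. A minimal sketch of that reporting, assuming
// v8::Isolate::AdjustAmountOfExternalAllocatedMemory from include/v8.h:

class ExternalBuffer {
 public:
  ExternalBuffer(v8::Isolate* isolate, size_t size)
      : isolate_(isolate), size_(size), data_(new char[size]) {
    // Raising the external memory counter lets the heap factor this
    // allocation into its GC and memory-pressure heuristics.
    isolate_->AdjustAmountOfExternalAllocatedMemory(
        static_cast<int64_t>(size_));
  }
  ~ExternalBuffer() {
    delete[] data_;
    isolate_->AdjustAmountOfExternalAllocatedMemory(
        -static_cast<int64_t>(size_));
  }

 private:
  v8::Isolate* isolate_;
  size_t size_;
  char* data_;
};
// ---------------------------------------------------------------------------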
3299 
3300 void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3301  void* data) {
3302  const size_t kMaxCallbacks = 100;
3303  CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
3304  for (auto callback_data : near_heap_limit_callbacks_) {
3305  CHECK_NE(callback_data.first, callback);
3306  }
3307  near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
3308 }
3309 
3310 void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3311  size_t heap_limit) {
3312  for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
3313  if (near_heap_limit_callbacks_[i].first == callback) {
3314  near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
3315  if (heap_limit) {
3316  RestoreHeapLimit(heap_limit);
3317  }
3318  return;
3319  }
3320  }
3321  UNREACHABLE();
3322 }
3323 
3324 bool Heap::InvokeNearHeapLimitCallback() {
3325  if (near_heap_limit_callbacks_.size() > 0) {
3326  HandleScope scope(isolate());
3327  v8::NearHeapLimitCallback callback =
3328  near_heap_limit_callbacks_.back().first;
3329  void* data = near_heap_limit_callbacks_.back().second;
3330  size_t heap_limit = callback(data, max_old_generation_size_,
3331  initial_max_old_generation_size_);
3332  if (heap_limit > max_old_generation_size_) {
3333  max_old_generation_size_ = heap_limit;
3334  return true;
3335  }
3336  }
3337  return false;
3338 }
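// ---------------------------------------------------------------------------
// Editorial example (not part of heap.cc): the callbacks invoked above are
// registered through v8::Isolate::AddNearHeapLimitCallback; returning a value
// larger than the current limit raises max_old_generation_size_. A hedged
// sketch, assuming the NearHeapLimitCallback signature in include/v8.h:

size_t RaiseLimitOnce(void* data, size_t current_heap_limit,
                      size_t initial_heap_limit) {
  (void)initial_heap_limit;  // Unused in this example.
  // Grant a single extra 256 MB "last chance" before V8 reports OOM.
  bool* already_raised = static_cast<bool*>(data);
  if (*already_raised) return current_heap_limit;
  *already_raised = true;
  return current_heap_limit + 256u * 1024u * 1024u;
}

void InstallHeapLimitHandler(v8::Isolate* isolate, bool* already_raised) {
  isolate->AddNearHeapLimitCallback(RaiseLimitOnce, already_raised);
}
// ---------------------------------------------------------------------------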
3339 
3340 void Heap::CollectCodeStatistics() {
3341  TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
3342  CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
3343  // We do not look for code in new space or map space. If code
3344  // somehow ends up in those spaces, we would miss it here.
3345  CodeStatistics::CollectCodeStatistics(code_space_, isolate());
3346  CodeStatistics::CollectCodeStatistics(old_space_, isolate());
3347  CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
3348 }
3349 
3350 #ifdef DEBUG
3351 
3352 void Heap::Print() {
3353  if (!HasBeenSetUp()) return;
3354  isolate()->PrintStack(stdout);
3355 
3356  for (SpaceIterator it(this); it.has_next();) {
3357  it.next()->Print();
3358  }
3359 }
3360 
3361 
3362 void Heap::ReportCodeStatistics(const char* title) {
3363  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3364  CollectCodeStatistics();
3365  CodeStatistics::ReportCodeStatistics(isolate());
3366 }
3367 
3368 #endif // DEBUG
3369 
3370 const char* Heap::GarbageCollectionReasonToString(
3371  GarbageCollectionReason gc_reason) {
3372  switch (gc_reason) {
3373  case GarbageCollectionReason::kAllocationFailure:
3374  return "allocation failure";
3375  case GarbageCollectionReason::kAllocationLimit:
3376  return "allocation limit";
3377  case GarbageCollectionReason::kContextDisposal:
3378  return "context disposal";
3379  case GarbageCollectionReason::kCountersExtension:
3380  return "counters extension";
3381  case GarbageCollectionReason::kDebugger:
3382  return "debugger";
3383  case GarbageCollectionReason::kDeserializer:
3384  return "deserialize";
3385  case GarbageCollectionReason::kExternalMemoryPressure:
3386  return "external memory pressure";
3387  case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
3388  return "finalize incremental marking via stack guard";
3389  case GarbageCollectionReason::kFinalizeMarkingViaTask:
3390  return "finalize incremental marking via task";
3391  case GarbageCollectionReason::kFullHashtable:
3392  return "full hash-table";
3393  case GarbageCollectionReason::kHeapProfiler:
3394  return "heap profiler";
3395  case GarbageCollectionReason::kIdleTask:
3396  return "idle task";
3397  case GarbageCollectionReason::kLastResort:
3398  return "last resort";
3399  case GarbageCollectionReason::kLowMemoryNotification:
3400  return "low memory notification";
3401  case GarbageCollectionReason::kMakeHeapIterable:
3402  return "make heap iterable";
3403  case GarbageCollectionReason::kMemoryPressure:
3404  return "memory pressure";
3405  case GarbageCollectionReason::kMemoryReducer:
3406  return "memory reducer";
3407  case GarbageCollectionReason::kRuntime:
3408  return "runtime";
3409  case GarbageCollectionReason::kSamplingProfiler:
3410  return "sampling profiler";
3411  case GarbageCollectionReason::kSnapshotCreator:
3412  return "snapshot creator";
3413  case GarbageCollectionReason::kTesting:
3414  return "testing";
3415  case GarbageCollectionReason::kExternalFinalize:
3416  return "external finalize";
3417  case GarbageCollectionReason::kUnknown:
3418  return "unknown";
3419  }
3420  UNREACHABLE();
3421 }
3422 
3423 bool Heap::Contains(HeapObject* value) {
3424  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
3425  return false;
3426  }
3427  return HasBeenSetUp() &&
3428  (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
3429  code_space_->Contains(value) || map_space_->Contains(value) ||
3430  lo_space_->Contains(value) || read_only_space_->Contains(value) ||
3431  code_lo_space_->Contains(value) || new_lo_space_->Contains(value));
3432 }
3433 
3434 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3435  if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
3436  return false;
3437  }
3438  if (!HasBeenSetUp()) return false;
3439 
3440  switch (space) {
3441  case NEW_SPACE:
3442  return new_space_->ToSpaceContains(value);
3443  case OLD_SPACE:
3444  return old_space_->Contains(value);
3445  case CODE_SPACE:
3446  return code_space_->Contains(value);
3447  case MAP_SPACE:
3448  return map_space_->Contains(value);
3449  case LO_SPACE:
3450  return lo_space_->Contains(value);
3451  case CODE_LO_SPACE:
3452  return code_lo_space_->Contains(value);
3453  case NEW_LO_SPACE:
3454  return new_lo_space_->Contains(value);
3455  case RO_SPACE:
3456  return read_only_space_->Contains(value);
3457  }
3458  UNREACHABLE();
3459 }
3460 
3461 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
3462  if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
3463  return false;
3464  }
3465  if (!HasBeenSetUp()) return false;
3466 
3467  switch (space) {
3468  case NEW_SPACE:
3469  return new_space_->ToSpaceContainsSlow(addr);
3470  case OLD_SPACE:
3471  return old_space_->ContainsSlow(addr);
3472  case CODE_SPACE:
3473  return code_space_->ContainsSlow(addr);
3474  case MAP_SPACE:
3475  return map_space_->ContainsSlow(addr);
3476  case LO_SPACE:
3477  return lo_space_->ContainsSlow(addr);
3478  case CODE_LO_SPACE:
3479  return code_lo_space_->ContainsSlow(addr);
3480  case NEW_LO_SPACE:
3481  return new_lo_space_->ContainsSlow(addr);
3482  case RO_SPACE:
3483  return read_only_space_->ContainsSlow(addr);
3484  }
3485  UNREACHABLE();
3486 }
3487 
3488 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
3489  switch (space) {
3490  case NEW_SPACE:
3491  case OLD_SPACE:
3492  case CODE_SPACE:
3493  case MAP_SPACE:
3494  case LO_SPACE:
3495  case NEW_LO_SPACE:
3496  case CODE_LO_SPACE:
3497  case RO_SPACE:
3498  return true;
3499  default:
3500  return false;
3501  }
3502 }
3503 
3504 #ifdef VERIFY_HEAP
3505 class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
3506  public:
3507  explicit VerifyReadOnlyPointersVisitor(Heap* heap)
3508  : VerifyPointersVisitor(heap) {}
3509 
3510  protected:
3511  void VerifyPointers(HeapObject* host, MaybeObjectSlot start,
3512  MaybeObjectSlot end) override {
3513  if (host != nullptr) {
3514  CHECK(heap_->InReadOnlySpace(host->map()));
3515  }
3516  VerifyPointersVisitor::VerifyPointers(host, start, end);
3517 
3518  for (MaybeObjectSlot current = start; current < end; ++current) {
3519  HeapObject* object;
3520  if ((*current)->GetHeapObject(&object)) {
3521  CHECK(heap_->InReadOnlySpace(object));
3522  }
3523  }
3524  }
3525 };
3526 
3527 void Heap::Verify() {
3528  CHECK(HasBeenSetUp());
3529  HandleScope scope(isolate());
3530 
3531  // We have to wait here for the sweeper threads to have an iterable heap.
3532  mark_compact_collector()->EnsureSweepingCompleted();
3533 
3534  VerifyPointersVisitor visitor(this);
3535  IterateRoots(&visitor, VISIT_ONLY_STRONG);
3536 
3537  VerifySmisVisitor smis_visitor;
3538  IterateSmiRoots(&smis_visitor);
3539 
3540  new_space_->Verify(isolate());
3541 
3542  old_space_->Verify(isolate(), &visitor);
3543  map_space_->Verify(isolate(), &visitor);
3544 
3545  VerifyPointersVisitor no_dirty_regions_visitor(this);
3546  code_space_->Verify(isolate(), &no_dirty_regions_visitor);
3547 
3548  lo_space_->Verify(isolate());
3549  code_lo_space_->Verify(isolate());
3550  new_lo_space_->Verify(isolate());
3551 
3552  VerifyReadOnlyPointersVisitor read_only_visitor(this);
3553  read_only_space_->Verify(isolate(), &read_only_visitor);
3554 }
3555 
3556 class SlotVerifyingVisitor : public ObjectVisitor {
3557  public:
3558  SlotVerifyingVisitor(std::set<Address>* untyped,
3559  std::set<std::pair<SlotType, Address> >* typed)
3560  : untyped_(untyped), typed_(typed) {}
3561 
3562  virtual bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject target) = 0;
3563  // TODO(3770): Drop this after the migration.
3564  bool ShouldHaveBeenRecorded(Code host, MaybeObject target) {
3565  return ShouldHaveBeenRecorded(reinterpret_cast<HeapObject*>(host.ptr()),
3566  target);
3567  }
3568 
3569  void VisitPointers(HeapObject* host, ObjectSlot start,
3570  ObjectSlot end) override {
3571 #ifdef DEBUG
3572  for (ObjectSlot slot = start; slot < end; ++slot) {
3573  DCHECK(!HasWeakHeapObjectTag(*slot));
3574  }
3575 #endif // DEBUG
3576  VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
3577  }
3578 
3579  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
3580  MaybeObjectSlot end) final {
3581  for (MaybeObjectSlot slot = start; slot < end; ++slot) {
3582  if (ShouldHaveBeenRecorded(host, *slot)) {
3583  CHECK_GT(untyped_->count(slot.address()), 0);
3584  }
3585  }
3586  }
3587 
3588  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3589  Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3590  if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
3591  CHECK(
3592  InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
3593  (rinfo->IsInConstantPool() &&
3594  InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
3595  }
3596  }
3597 
3598  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3599  Object* target = rinfo->target_object();
3600  if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
3601  CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
3602  (rinfo->IsInConstantPool() &&
3603  InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
3604  }
3605  }
3606 
3607  private:
3608  bool InTypedSet(SlotType type, Address slot) {
3609  return typed_->count(std::make_pair(type, slot)) > 0;
3610  }
3611  std::set<Address>* untyped_;
3612  std::set<std::pair<SlotType, Address> >* typed_;
3613 };
3614 
3615 class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
3616  public:
3617  OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
3618  std::set<std::pair<SlotType, Address>>* typed)
3619  : SlotVerifyingVisitor(untyped, typed) {}
3620 
3621  bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject target) override {
3622  DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
3623  Heap::InToSpace(target));
3624  return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
3625  !Heap::InNewSpace(host);
3626  }
3627 };
3628 
3629 template <RememberedSetType direction>
3630 void CollectSlots(MemoryChunk* chunk, Address start, Address end,
3631  std::set<Address>* untyped,
3632  std::set<std::pair<SlotType, Address> >* typed) {
3633  RememberedSet<direction>::Iterate(
3634  chunk,
3635  [start, end, untyped](MaybeObjectSlot slot) {
3636  if (start <= slot.address() && slot.address() < end) {
3637  untyped->insert(slot.address());
3638  }
3639  return KEEP_SLOT;
3640  },
3641  SlotSet::PREFREE_EMPTY_BUCKETS);
3642  RememberedSet<direction>::IterateTyped(
3643  chunk, [start, end, typed](SlotType type, Address host, Address slot) {
3644  if (start <= slot && slot < end) {
3645  typed->insert(std::make_pair(type, slot));
3646  }
3647  return KEEP_SLOT;
3648  });
3649 }
3650 
3651 void Heap::VerifyRememberedSetFor(HeapObject* object) {
3652  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
3653  DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
3654  // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
3655  base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
3656  chunk->mutex());
3657  Address start = object->address();
3658  Address end = start + object->Size();
3659  std::set<Address> old_to_new;
3660  std::set<std::pair<SlotType, Address> > typed_old_to_new;
3661  if (!InNewSpace(object)) {
3662  store_buffer()->MoveAllEntriesToRememberedSet();
3663  CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
3664  OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
3665  object->IterateBody(&visitor);
3666  }
3667  // TODO(ulan): Add old to old slot set verification once all weak objects
3668  // have their own instance types and slots are recorded for all weak fields.
3669 }
3670 #endif
3671 
3672 #ifdef DEBUG
3673 void Heap::VerifyCountersAfterSweeping() {
3674  PagedSpaces spaces(this);
3675  for (PagedSpace* space = spaces.next(); space != nullptr;
3676  space = spaces.next()) {
3677  space->VerifyCountersAfterSweeping();
3678  }
3679 }
3680 
3681 void Heap::VerifyCountersBeforeConcurrentSweeping() {
3682  PagedSpaces spaces(this);
3683  for (PagedSpace* space = spaces.next(); space != nullptr;
3684  space = spaces.next()) {
3685  space->VerifyCountersBeforeConcurrentSweeping();
3686  }
3687 }
3688 #endif
3689 
3690 void Heap::ZapFromSpace() {
3691  if (!new_space_->IsFromSpaceCommitted()) return;
3692  for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
3693  memory_allocator()->ZapBlock(page->area_start(),
3694  page->HighWaterMark() - page->area_start(),
3695  ZapValue());
3696  }
3697 }
3698 
3699 void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
3700 #ifdef DEBUG
3701  DCHECK(IsAligned(start_address, kIntSize));
3702  for (int i = 0; i < size_in_bytes / kIntSize; i++) {
3703  Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
3704  }
3705 #endif
3706 }
3707 
3708 Code Heap::builtin(int index) {
3709  DCHECK(Builtins::IsBuiltinId(index));
3710  return Code::cast(ObjectPtr(isolate()->builtins_table()[index]));
3711 }
3712 
3713 Address Heap::builtin_address(int index) {
3714  DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
3715  return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
3716 }
3717 
3718 void Heap::set_builtin(int index, Code builtin) {
3719  DCHECK(Builtins::IsBuiltinId(index));
3720  DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
3721  // The given builtin may be completely uninitialized, thus we cannot check its
3722  // type here.
3723  isolate()->builtins_table()[index] = builtin.ptr();
3724 }
3725 
3726 void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
3727  IterateStrongRoots(v, mode);
3728  IterateWeakRoots(v, mode);
3729 }
3730 
3731 void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
3732  const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
3733  mode == VISIT_ALL_IN_MINOR_MC_MARK ||
3734  mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
3735  v->VisitRootPointer(Root::kStringTable, nullptr,
3736  ObjectSlot(&roots_table()[RootIndex::kStringTable]));
3737  v->Synchronize(VisitorSynchronization::kStringTable);
3738  if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
3739  mode != VISIT_FOR_SERIALIZATION) {
3740  // Scavenge collections have special processing for this.
3741  // Do not visit for serialization, since the external string table will
3742  // be populated from scratch upon deserialization.
3743  external_string_table_.IterateAll(v);
3744  }
3745  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
3746 }
3747 
3748 void Heap::IterateSmiRoots(RootVisitor* v) {
3749  // Acquire execution access since we are going to read stack limit values.
3750  ExecutionAccess access(isolate());
3751  v->VisitRootPointers(Root::kSmiRootList, nullptr,
3752  roots_table().smi_roots_begin(),
3753  roots_table().smi_roots_end());
3754  v->Synchronize(VisitorSynchronization::kSmiRootList);
3755 }
3756 
3757 // We cannot avoid stale handles to left-trimmed objects, but can only make
3758 // sure all handles still needed are updated. Filter out a stale pointer
3759 // and clear the slot to allow post processing of handles (needed because
3760 // the sweeper might actually free the underlying page).
3761 class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
3762  public:
3763  explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
3764  USE(heap_);
3765  }
3766 
3767  void VisitRootPointer(Root root, const char* description,
3768  ObjectSlot p) override {
3769  FixHandle(p);
3770  }
3771 
3772  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
3773  ObjectSlot end) override {
3774  for (ObjectSlot p = start; p < end; ++p) FixHandle(p);
3775  }
3776 
3777  private:
3778  inline void FixHandle(ObjectSlot p) {
3779  if (!(*p)->IsHeapObject()) return;
3780  HeapObject* current = reinterpret_cast<HeapObject*>(*p);
3781  const MapWord map_word = current->map_word();
3782  if (!map_word.IsForwardingAddress() && current->IsFiller()) {
3783 #ifdef DEBUG
3784  // We need to find a FixedArrayBase map after walking the fillers.
3785  while (current->IsFiller()) {
3786  Address next = reinterpret_cast<Address>(current);
3787  if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
3788  next += kPointerSize;
3789  } else if (current->map() ==
3790  ReadOnlyRoots(heap_).two_pointer_filler_map()) {
3791  next += 2 * kPointerSize;
3792  } else {
3793  next += current->Size();
3794  }
3795  current = reinterpret_cast<HeapObject*>(next);
3796  }
3797  DCHECK(current->IsFixedArrayBase());
3798 #endif // DEBUG
3799  p.store(nullptr);
3800  }
3801  }
3802 
3803  Heap* heap_;
3804 };
3805 
3806 void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
3807  const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
3808  mode == VISIT_ALL_IN_MINOR_MC_MARK ||
3809  mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
3810  v->VisitRootPointers(Root::kStrongRootList, nullptr,
3811  roots_table().strong_roots_begin(),
3812  roots_table().strong_roots_end());
3813  v->Synchronize(VisitorSynchronization::kStrongRootList);
3814 
3815  isolate_->bootstrapper()->Iterate(v);
3816  v->Synchronize(VisitorSynchronization::kBootstrapper);
3817  isolate_->Iterate(v);
3818  v->Synchronize(VisitorSynchronization::kTop);
3819  Relocatable::Iterate(isolate_, v);
3820  v->Synchronize(VisitorSynchronization::kRelocatable);
3821  isolate_->debug()->Iterate(v);
3822  v->Synchronize(VisitorSynchronization::kDebug);
3823 
3824  isolate_->compilation_cache()->Iterate(v);
3825  v->Synchronize(VisitorSynchronization::kCompilationCache);
3826 
3827  // Iterate over local handles in handle scopes.
3828  FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
3829  isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
3830  isolate_->handle_scope_implementer()->Iterate(v);
3831  isolate_->IterateDeferredHandles(v);
3832  v->Synchronize(VisitorSynchronization::kHandleScope);
3833 
3834  // Iterate over the builtin code objects and code stubs in the
3835  // heap. Note that it is not necessary to iterate over code objects
3836  // on scavenge collections.
3837  if (!isMinorGC) {
3838  IterateBuiltins(v);
3839  v->Synchronize(VisitorSynchronization::kBuiltins);
3840 
3841  // The dispatch table is set up directly from the builtins using
3842  // InitializeDispatchTable so there is no need to iterate to create it.
3843  if (mode != VISIT_FOR_SERIALIZATION) {
3844  // Currently we iterate the dispatch table to update pointers to possibly
3845  // moved Code objects for bytecode handlers.
3846  // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
3847  // immovable) in every build configuration.
3848  isolate_->interpreter()->IterateDispatchTable(v);
3849  v->Synchronize(VisitorSynchronization::kDispatchTable);
3850  }
3851  }
3852 
3853  // Iterate over global handles.
3854  switch (mode) {
3855  case VISIT_FOR_SERIALIZATION:
3856  // Global handles are not iterated by the serializer. Values referenced by
3857  // global handles need to be added manually.
3858  break;
3859  case VISIT_ONLY_STRONG:
3860  isolate_->global_handles()->IterateStrongRoots(v);
3861  break;
3862  case VISIT_ALL_IN_SCAVENGE:
3863  isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
3864  break;
3865  case VISIT_ALL_IN_MINOR_MC_MARK:
3866  // Global handles are processed manually by the minor MC.
3867  break;
3868  case VISIT_ALL_IN_MINOR_MC_UPDATE:
3869  // Global handles are processed manually by the minor MC.
3870  break;
3871  case VISIT_ALL_IN_SWEEP_NEWSPACE:
3872  case VISIT_ALL:
3873  isolate_->global_handles()->IterateAllRoots(v);
3874  break;
3875  }
3876  v->Synchronize(VisitorSynchronization::kGlobalHandles);
3877 
3878  // Iterate over eternal handles. Eternal handles are not iterated by the
3879  // serializer. Values referenced by eternal handles need to be added manually.
3880  if (mode != VISIT_FOR_SERIALIZATION) {
3881  if (isMinorGC) {
3882  isolate_->eternal_handles()->IterateNewSpaceRoots(v);
3883  } else {
3884  isolate_->eternal_handles()->IterateAllRoots(v);
3885  }
3886  }
3887  v->Synchronize(VisitorSynchronization::kEternalHandles);
3888 
3889  // Iterate over pointers being held by inactive threads.
3890  isolate_->thread_manager()->Iterate(v);
3891  v->Synchronize(VisitorSynchronization::kThreadManager);
3892 
3893  // Iterate over other strong roots (currently only identity maps).
3894  for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
3895  v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
3896  }
3897  v->Synchronize(VisitorSynchronization::kStrongRoots);
3898 
3899  // Iterate over pending Microtasks stored in MicrotaskQueues.
3900  MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
3901  if (default_microtask_queue) {
3902  MicrotaskQueue* microtask_queue = default_microtask_queue;
3903  do {
3904  microtask_queue->IterateMicrotasks(v);
3905  microtask_queue = microtask_queue->next();
3906  } while (microtask_queue != default_microtask_queue);
3907  }
3908 
3909  // Iterate over the partial snapshot cache unless serializing or
3910  // deserializing.
3911  if (mode != VISIT_FOR_SERIALIZATION) {
3912  SerializerDeserializer::Iterate(isolate_, v);
3913  v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
3914  }
3915 }
3916 
3917 void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
3918  isolate_->global_handles()->IterateWeakRoots(v);
3919 }
3920 
3921 void Heap::IterateBuiltins(RootVisitor* v) {
3922  for (int i = 0; i < Builtins::builtin_count; i++) {
3923  v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
3924  ObjectSlot(builtin_address(i)));
3925  }
3926 }
3927 
3928 // TODO(1236194): Since the heap size is configurable on the command line
3929 // and through the API, we should gracefully handle the case that the heap
3930 // size is not big enough to fit all the initial objects.
3931 void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
3932  size_t max_old_generation_size_in_mb,
3933  size_t code_range_size_in_mb) {
3934  // Overwrite default configuration.
3935  if (max_semi_space_size_in_kb != 0) {
3936  max_semi_space_size_ =
3937  RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
3938  }
3939  if (max_old_generation_size_in_mb != 0) {
3940  max_old_generation_size_ = max_old_generation_size_in_mb * MB;
3941  }
3942 
3943  // If max space size flags are specified, overwrite the configuration.
3944  if (FLAG_max_semi_space_size > 0) {
3945  max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
3946  }
3947  if (FLAG_max_old_space_size > 0) {
3948  max_old_generation_size_ =
3949  static_cast<size_t>(FLAG_max_old_space_size) * MB;
3950  }
3951 
3952  if (Page::kPageSize > MB) {
3953  max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
3954  max_old_generation_size_ =
3955  RoundUp<Page::kPageSize>(max_old_generation_size_);
3956  }
3957 
3958  if (FLAG_stress_compaction) {
3959  // This will cause more frequent GCs when stressing.
3960  max_semi_space_size_ = MB;
3961  }
3962 
3963  // The new space size must be a power of two to support single-bit testing
3964  // for containment.
3965  max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
3966  static_cast<uint64_t>(max_semi_space_size_)));
3967 
3968  if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
3969  // Start with at least 1*MB semi-space on machines with a lot of memory.
3970  initial_semispace_size_ =
3971  Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
3972  }
3973 
3974  if (FLAG_min_semi_space_size > 0) {
3975  size_t initial_semispace_size =
3976  static_cast<size_t>(FLAG_min_semi_space_size) * MB;
3977  if (initial_semispace_size > max_semi_space_size_) {
3978  initial_semispace_size_ = max_semi_space_size_;
3979  if (FLAG_trace_gc) {
3980  PrintIsolate(isolate_,
3981  "Min semi-space size cannot be more than the maximum "
3982  "semi-space size of %" PRIuS " MB\n",
3983  max_semi_space_size_ / MB);
3984  }
3985  } else {
3986  initial_semispace_size_ =
3987  RoundUp<Page::kPageSize>(initial_semispace_size);
3988  }
3989  }
3990 
3991  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
3992 
3993  if (FLAG_semi_space_growth_factor < 2) {
3994  FLAG_semi_space_growth_factor = 2;
3995  }
3996 
3997  // The old generation is paged and needs at least one page for each space.
3998  int paged_space_count =
3999  LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
4000  initial_max_old_generation_size_ = max_old_generation_size_ =
4001  Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
4002  max_old_generation_size_);
4003 
4004  if (FLAG_initial_old_space_size > 0) {
4005  initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
4006  } else {
4007  initial_old_generation_size_ =
4008  max_old_generation_size_ / kInitalOldGenerationLimitFactor;
4009  }
4010  old_generation_allocation_limit_ = initial_old_generation_size_;
4011 
4012  // We rely on being able to allocate new arrays in paged spaces.
4013  DCHECK(kMaxRegularHeapObjectSize >=
4014  (JSArray::kSize +
4015  FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
4016  AllocationMemento::kSize));
4017 
4018  code_range_size_ = code_range_size_in_mb * MB;
4019 
4020  configured_ = true;
4021 }
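// ---------------------------------------------------------------------------
// Editorial example (not part of heap.cc): the sizes ConfigureHeap consumes
// originate from v8::ResourceConstraints (the --max-semi-space-size and
// --max-old-space-size flags override them above). A hedged embedder-side
// sketch, assuming the ResourceConstraints setters present in include/v8.h at
// this revision:

v8::Isolate* NewIsolateWithLimits(v8::ArrayBuffer::Allocator* allocator) {
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator = allocator;
  // Cap the old generation at 512 MB and the semi-spaces at 16 MB each.
  params.constraints.set_max_old_space_size(512);               // MB
  params.constraints.set_max_semi_space_size_in_kb(16 * 1024);  // KB
  return v8::Isolate::New(params);
}
// ---------------------------------------------------------------------------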
4022 
4023 
4024 void Heap::AddToRingBuffer(const char* string) {
4025  size_t first_part =
4026  Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4027  memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4028  ring_buffer_end_ += first_part;
4029  if (first_part < strlen(string)) {
4030  ring_buffer_full_ = true;
4031  size_t second_part = strlen(string) - first_part;
4032  memcpy(trace_ring_buffer_, string + first_part, second_part);
4033  ring_buffer_end_ = second_part;
4034  }
4035 }
4036 
4037 
4038 void Heap::GetFromRingBuffer(char* buffer) {
4039  size_t copied = 0;
4040  if (ring_buffer_full_) {
4041  copied = kTraceRingBufferSize - ring_buffer_end_;
4042  memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4043  }
4044  memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4045 }
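// ---------------------------------------------------------------------------
// Editorial sketch (not part of heap.cc): the two functions above implement a
// fixed-size trace ring where a write overflowing the end wraps to the start,
// and a read replays the oldest bytes first once the buffer has wrapped. A
// standalone illustration of the same wrap-around write, using a hypothetical
// 8-byte buffer and assuming the string fits in the buffer (as the original
// assumes for kTraceRingBufferSize):

#include <algorithm>
#include <cstring>

struct TinyTraceRing {
  static const size_t kSize = 8;
  char buf[kSize] = {};
  size_t end = 0;
  bool full = false;

  void Add(const char* s, size_t len) {
    size_t first_part = std::min(len, kSize - end);
    std::memcpy(buf + end, s, first_part);
    end += first_part;
    if (first_part < len) {
      // Wrap: the tail of the string overwrites the oldest bytes at the front.
      full = true;
      size_t second_part = len - first_part;
      std::memcpy(buf, s + first_part, second_part);
      end = second_part;
    }
  }
};
// ---------------------------------------------------------------------------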
4046 
4047 void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
4048 
4049 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4050  *stats->start_marker = HeapStats::kStartMarker;
4051  *stats->end_marker = HeapStats::kEndMarker;
4052  *stats->ro_space_size = read_only_space_->Size();
4053  *stats->ro_space_capacity = read_only_space_->Capacity();
4054  *stats->new_space_size = new_space_->Size();
4055  *stats->new_space_capacity = new_space_->Capacity();
4056  *stats->old_space_size = old_space_->SizeOfObjects();
4057  *stats->old_space_capacity = old_space_->Capacity();
4058  *stats->code_space_size = code_space_->SizeOfObjects();
4059  *stats->code_space_capacity = code_space_->Capacity();
4060  *stats->map_space_size = map_space_->SizeOfObjects();
4061  *stats->map_space_capacity = map_space_->Capacity();
4062  *stats->lo_space_size = lo_space_->Size();
4063  *stats->code_lo_space_size = code_lo_space_->Size();
4064  isolate_->global_handles()->RecordStats(stats);
4065  *stats->memory_allocator_size = memory_allocator()->Size();
4066  *stats->memory_allocator_capacity =
4067  memory_allocator()->Size() + memory_allocator()->Available();
4068  *stats->os_error = base::OS::GetLastError();
4069  *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
4070  *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
4071  if (take_snapshot) {
4072  HeapIterator iterator(this);
4073  for (HeapObject* obj = iterator.next(); obj != nullptr;
4074  obj = iterator.next()) {
4075  InstanceType type = obj->map()->instance_type();
4076  DCHECK(0 <= type && type <= LAST_TYPE);
4077  stats->objects_per_type[type]++;
4078  stats->size_per_type[type] += obj->Size();
4079  }
4080  }
4081  if (stats->last_few_messages != nullptr)
4082  GetFromRingBuffer(stats->last_few_messages);
4083  if (stats->js_stacktrace != nullptr) {
4084  FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
4085  StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
4086  if (gc_state() == Heap::NOT_IN_GC) {
4087  isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
4088  } else {
4089  accumulator.Add("Cannot get stack trace in GC.");
4090  }
4091  }
4092 }
4093 
4094 size_t Heap::OldGenerationSizeOfObjects() {
4095  PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
4096  size_t total = 0;
4097  for (PagedSpace* space = spaces.next(); space != nullptr;
4098  space = spaces.next()) {
4099  total += space->SizeOfObjects();
4100  }
4101  return total + lo_space_->SizeOfObjects();
4102 }
4103 
4104 uint64_t Heap::PromotedExternalMemorySize() {
4105  IsolateData* isolate_data = isolate()->isolate_data();
4106  if (isolate_data->external_memory_ <=
4107  isolate_data->external_memory_at_last_mark_compact_) {
4108  return 0;
4109  }
4110  return static_cast<uint64_t>(
4111  isolate_data->external_memory_ -
4112  isolate_data->external_memory_at_last_mark_compact_);
4113 }
4114 
4115 bool Heap::ShouldOptimizeForLoadTime() {
4116  return isolate()->rail_mode() == PERFORMANCE_LOAD &&
4117  !AllocationLimitOvershotByLargeMargin() &&
4118  MonotonicallyIncreasingTimeInMs() <
4119  isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
4120 }
4121 
4122 // This predicate is called when an old generation space cannot allocate from
4123 // the free list and is about to add a new page. Returning false will cause a
4124 // major GC. It happens when the old generation allocation limit is reached and
4125 // - either we need to optimize for memory usage,
4126 // - or the incremental marking is not in progress and we cannot start it.
4127 bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
4128  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
4129  // We reached the old generation allocation limit.
4130 
4131  if (ShouldOptimizeForMemoryUsage()) return false;
4132 
4133  if (ShouldOptimizeForLoadTime()) return true;
4134 
4135  if (incremental_marking()->NeedsFinalization()) {
4136  return !AllocationLimitOvershotByLargeMargin();
4137  }
4138 
4139  if (incremental_marking()->IsStopped() &&
4140  IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
4141  // We cannot start incremental marking.
4142  return false;
4143  }
4144  return true;
4145 }
4146 
4147 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
4148  if (ShouldReduceMemory() || FLAG_stress_compaction) {
4149  return Heap::HeapGrowingMode::kMinimal;
4150  }
4151 
4152  if (ShouldOptimizeForMemoryUsage()) {
4153  return Heap::HeapGrowingMode::kConservative;
4154  }
4155 
4156  if (memory_reducer()->ShouldGrowHeapSlowly()) {
4157  return Heap::HeapGrowingMode::kSlow;
4158  }
4159 
4160  return Heap::HeapGrowingMode::kDefault;
4161 }
4162 
4163 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
4164 // The kNoLimit means that either incremental marking is disabled or it is too
4165 // early to start incremental marking.
4166 // The kSoftLimit means that incremental marking should be started soon.
4167 // The kHardLimit means that incremental marking should be started immediately.
4168 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
4169  // Code using an AlwaysAllocateScope assumes that the GC state does not
4170  // change; that implies that marking steps must not be performed.
4171  if (!incremental_marking()->CanBeActivated() || always_allocate()) {
4172  // Incremental marking is disabled or it is too early to start.
4173  return IncrementalMarkingLimit::kNoLimit;
4174  }
4175  if (FLAG_stress_incremental_marking) {
4176  return IncrementalMarkingLimit::kHardLimit;
4177  }
4178  if (OldGenerationSizeOfObjects() <=
4179  IncrementalMarking::kActivationThreshold) {
4180  // The old generation is still below the activation threshold; it is too
4180  // early to start incremental marking.
4181  return IncrementalMarkingLimit::kNoLimit;
4182  }
4183  if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
4184  HighMemoryPressure()) {
4185  // If there is high memory pressure or stress testing is enabled, then
4186  // start marking immediately.
4187  return IncrementalMarkingLimit::kHardLimit;
4188  }
4189 
4190  if (FLAG_stress_marking > 0) {
4191  double gained_since_last_gc =
4192  PromotedSinceLastGC() +
4193  (isolate()->isolate_data()->external_memory_ -
4194  isolate()->isolate_data()->external_memory_at_last_mark_compact_);
4195  double size_before_gc =
4196  OldGenerationObjectsAndPromotedExternalMemorySize() -
4197  gained_since_last_gc;
4198  double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
4199  if (bytes_to_limit > 0) {
4200  double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
4201 
4202  if (FLAG_trace_stress_marking) {
4203  isolate()->PrintWithTimestamp(
4204  "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
4205  current_percent);
4206  }
4207 
4208  if (FLAG_fuzzer_gc_analysis) {
4209  // Skips values >=100% since they already trigger marking.
4210  if (current_percent < 100.0) {
4211  max_marking_limit_reached_ =
4212  std::max(max_marking_limit_reached_, current_percent);
4213  }
4214  } else if (static_cast<int>(current_percent) >=
4215  stress_marking_percentage_) {
4216  stress_marking_percentage_ = NextStressMarkingLimit();
4217  return IncrementalMarkingLimit::kHardLimit;
4218  }
4219  }
4220  }
4221 
4222  size_t old_generation_space_available = OldGenerationSpaceAvailable();
4223 
4224  if (old_generation_space_available > new_space_->Capacity()) {
4225  return IncrementalMarkingLimit::kNoLimit;
4226  }
4227  if (ShouldOptimizeForMemoryUsage()) {
4228  return IncrementalMarkingLimit::kHardLimit;
4229  }
4230  if (ShouldOptimizeForLoadTime()) {
4231  return IncrementalMarkingLimit::kNoLimit;
4232  }
4233  if (old_generation_space_available == 0) {
4234  return IncrementalMarkingLimit::kHardLimit;
4235  }
4236  return IncrementalMarkingLimit::kSoftLimit;
4237 }
4238 
4239 void Heap::EnableInlineAllocation() {
4240  if (!inline_allocation_disabled_) return;
4241  inline_allocation_disabled_ = false;
4242 
4243  // Update inline allocation limit for new space.
4244  new_space()->UpdateInlineAllocationLimit(0);
4245 }
4246 
4247 
4248 void Heap::DisableInlineAllocation() {
4249  if (inline_allocation_disabled_) return;
4250  inline_allocation_disabled_ = true;
4251 
4252  // Update inline allocation limit for new space.
4253  new_space()->UpdateInlineAllocationLimit(0);
4254 
4255  // Update inline allocation limit for old spaces.
4256  PagedSpaces spaces(this);
4257  CodeSpaceMemoryModificationScope modification_scope(this);
4258  for (PagedSpace* space = spaces.next(); space != nullptr;
4259  space = spaces.next()) {
4260  space->FreeLinearAllocationArea();
4261  }
4262 }
4263 
4264 HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
4265  int object_size) {
4266  // Code objects which should stay at a fixed address are allocated either
4267  // in the first page of code space, in large object space, or (during
4268  // snapshot creation) have their containing page marked as immovable.
4269  DCHECK(heap_object);
4270  DCHECK(code_space_->Contains(heap_object));
4271  DCHECK_GE(object_size, 0);
4272  if (!Heap::IsImmovable(heap_object)) {
4273  if (isolate()->serializer_enabled() ||
4274  code_space_->first_page()->Contains(heap_object->address())) {
4275  MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
4276  } else {
4277  // Discard the first code allocation, which was on a page where it could
4278  // be moved.
4279  CreateFillerObjectAt(heap_object->address(), object_size,
4280  ClearRecordedSlots::kNo);
4281  heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
4282  UnprotectAndRegisterMemoryChunk(heap_object);
4283  ZapCodeObject(heap_object->address(), object_size);
4284  OnAllocationEvent(heap_object, object_size);
4285  }
4286  }
4287  return heap_object;
4288 }
4289 
4290 HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
4291  AllocationAlignment alignment) {
4292  HeapObject* result;
4293  AllocationResult alloc = AllocateRaw(size, space, alignment);
4294  if (alloc.To(&result)) {
4295  DCHECK(result != ReadOnlyRoots(this).exception());
4296  return result;
4297  }
4298  // Two GCs before panicking. Allocation in new space will almost always succeed.
4299  for (int i = 0; i < 2; i++) {
4300  CollectGarbage(alloc.RetrySpace(),
4301  GarbageCollectionReason::kAllocationFailure);
4302  alloc = AllocateRaw(size, space, alignment);
4303  if (alloc.To(&result)) {
4304  DCHECK(result != ReadOnlyRoots(this).exception());
4305  return result;
4306  }
4307  }
4308  return nullptr;
4309 }
4310 
4311 HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
4312  AllocationAlignment alignment) {
4313  AllocationResult alloc;
4314  HeapObject* result = AllocateRawWithLightRetry(size, space, alignment);
4315  if (result) return result;
4316 
4317  isolate()->counters()->gc_last_resort_from_handles()->Increment();
4318  CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
4319  {
4320  AlwaysAllocateScope scope(isolate());
4321  alloc = AllocateRaw(size, space, alignment);
4322  }
4323  if (alloc.To(&result)) {
4324  DCHECK(result != ReadOnlyRoots(this).exception());
4325  return result;
4326  }
4327  // TODO(1181417): Fix this.
4328  FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
4329  return nullptr;
4330 }
4331 
4332 // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
4333 // parameter and just do what's necessary.
4334 HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
4335  AllocationResult alloc = code_lo_space()->AllocateRaw(size);
4336  HeapObject* result;
4337  if (alloc.To(&result)) {
4338  DCHECK(result != ReadOnlyRoots(this).exception());
4339  return result;
4340  }
4341  // Two GCs before panicking.
4342  for (int i = 0; i < 2; i++) {
4343  CollectGarbage(alloc.RetrySpace(),
4344  GarbageCollectionReason::kAllocationFailure);
4345  alloc = code_lo_space()->AllocateRaw(size);
4346  if (alloc.To(&result)) {
4347  DCHECK(result != ReadOnlyRoots(this).exception());
4348  return result;
4349  }
4350  }
4351  isolate()->counters()->gc_last_resort_from_handles()->Increment();
4352  CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
4353  {
4354  AlwaysAllocateScope scope(isolate());
4355  alloc = code_lo_space()->AllocateRaw(size);
4356  }
4357  if (alloc.To(&result)) {
4358  DCHECK(result != ReadOnlyRoots(this).exception());
4359  return result;
4360  }
4361  // TODO(1181417): Fix this.
4362  FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
4363  return nullptr;
4364 }
4365 
4366 void Heap::SetUp() {
4367 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
4368  allocation_timeout_ = NextAllocationTimeout();
4369 #endif
4370 
4371  // Initialize heap spaces and initial maps and objects.
4372  //
4373  // If the heap is not yet configured (e.g. through the API), configure it.
4374  // Configuration is based on the flags new-space-size (really the semispace
4375  // size) and old-space-size if set or the initial values of semispace_size_
4376  // and old_generation_size_ otherwise.
4377  if (!configured_) ConfigureHeapDefault();
4378 
4379  mmap_region_base_ =
4380  reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
4381  ~kMmapRegionMask;
4382 
4383  // Set up memory allocator.
4384  memory_allocator_ =
4385  new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
4386 
4387  store_buffer_ = new StoreBuffer(this);
4388 
4389  heap_controller_ = new HeapController(this);
4390 
4391  mark_compact_collector_ = new MarkCompactCollector(this);
4392 
4393  scavenger_collector_ = new ScavengerCollector(this);
4394 
4395  incremental_marking_ =
4396  new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
4397  mark_compact_collector_->weak_objects());
4398 
4399  if (FLAG_concurrent_marking || FLAG_parallel_marking) {
4400  MarkCompactCollector::MarkingWorklist* marking_worklist =
4401  mark_compact_collector_->marking_worklist();
4402  concurrent_marking_ = new ConcurrentMarking(
4403  this, marking_worklist->shared(), marking_worklist->bailout(),
4404  marking_worklist->on_hold(), mark_compact_collector_->weak_objects(),
4405  marking_worklist->embedder());
4406  } else {
4407  concurrent_marking_ = new ConcurrentMarking(this, nullptr, nullptr, nullptr,
4408  nullptr, nullptr);
4409  }
4410 
4411  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
4412  space_[i] = nullptr;
4413  }
4414 
4415  space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
4416  space_[NEW_SPACE] = new_space_ =
4417  new NewSpace(this, memory_allocator_->data_page_allocator(),
4418  initial_semispace_size_, max_semi_space_size_);
4419  space_[OLD_SPACE] = old_space_ = new OldSpace(this);
4420  space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
4421  space_[MAP_SPACE] = map_space_ = new MapSpace(this);
4422  space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
4423  space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
4424  space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
4425 
4426  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
4427  i++) {
4428  deferred_counters_[i] = 0;
4429  }
4430 
4431  tracer_ = new GCTracer(this);
4432 #ifdef ENABLE_MINOR_MC
4433  minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
4434 #else
4435  minor_mark_compact_collector_ = nullptr;
4436 #endif // ENABLE_MINOR_MC
4437  array_buffer_collector_ = new ArrayBufferCollector(this);
4438  gc_idle_time_handler_ = new GCIdleTimeHandler();
4439  memory_reducer_ = new MemoryReducer(this);
4440  if (V8_UNLIKELY(FLAG_gc_stats)) {
4441  live_object_stats_ = new ObjectStats(this);
4442  dead_object_stats_ = new ObjectStats(this);
4443  }
4444  local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
4445 
4446  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4447  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
4448 
4449  store_buffer()->SetUp();
4450 
4451  mark_compact_collector()->SetUp();
4452 #ifdef ENABLE_MINOR_MC
4453  if (minor_mark_compact_collector() != nullptr) {
4454  minor_mark_compact_collector()->SetUp();
4455  }
4456 #endif // ENABLE_MINOR_MC
4457 
4458  if (FLAG_idle_time_scavenge) {
4459  scavenge_job_ = new ScavengeJob();
4460  idle_scavenge_observer_ = new IdleScavengeObserver(
4461  *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
4462  new_space()->AddAllocationObserver(idle_scavenge_observer_);
4463  }
4464 
4465  SetGetExternallyAllocatedMemoryInBytesCallback(
4466  DefaultGetExternallyAllocatedMemoryInBytesCallback);
4467 
4468  if (FLAG_stress_marking > 0) {
4469  stress_marking_percentage_ = NextStressMarkingLimit();
4470  stress_marking_observer_ = new StressMarkingObserver(*this);
4471  AddAllocationObserversToAllSpaces(stress_marking_observer_,
4472  stress_marking_observer_);
4473  }
4474  if (FLAG_stress_scavenge > 0) {
4475  stress_scavenge_observer_ = new StressScavengeObserver(*this);
4476  new_space()->AddAllocationObserver(stress_scavenge_observer_);
4477  }
4478 
4479  write_protect_code_memory_ = FLAG_write_protect_code_memory;
4480 }
4481 
4482 void Heap::InitializeHashSeed() {
4483  DCHECK(!deserialization_complete_);
4484  uint64_t new_hash_seed;
4485  if (FLAG_hash_seed == 0) {
4486  int64_t rnd = isolate()->random_number_generator()->NextInt64();
4487  new_hash_seed = static_cast<uint64_t>(rnd);
4488  } else {
4489  new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
4490  }
4491  ReadOnlyRoots(this).hash_seed()->copy_in(
4492  0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
4493 }
4494 
4495 void Heap::SetStackLimits() {
4496  DCHECK_NOT_NULL(isolate_);
4497  DCHECK(isolate_ == isolate());
4498  // On 64 bit machines, pointers are generally out of range of Smis. We write
4499  // something that looks like an out of range Smi to the GC.
4500 
4501  // Set up the special root array entries containing the stack limits.
4502  // These are actually addresses, but the tag makes the GC ignore it.
4503  roots_table()[RootIndex::kStackLimit] = reinterpret_cast<Object*>(
4504  (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
4505  roots_table()[RootIndex::kRealStackLimit] = reinterpret_cast<Object*>(
4506  (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
4507 }
4508 
4509 void Heap::ClearStackLimits() {
4510  roots_table()[RootIndex::kStackLimit] = Smi::kZero;
4511  roots_table()[RootIndex::kRealStackLimit] = Smi::kZero;
4512 }
4513 
4514 int Heap::NextAllocationTimeout(int current_timeout) {
4515  if (FLAG_random_gc_interval > 0) {
4516  // If the current timeout hasn't reached 0, the GC was caused by something
4517  // other than the --stress-atomic-gc flag, and we don't update the timeout.
4518  if (current_timeout <= 0) {
4519  return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
4520  } else {
4521  return current_timeout;
4522  }
4523  }
4524  return FLAG_gc_interval;
4525 }
4526 
4527 void Heap::PrintAllocationsHash() {
4528  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
4529  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
4530 }
4531 
4532 void Heap::PrintMaxMarkingLimitReached() {
4533  PrintF("\n### Maximum marking limit reached = %.02lf\n",
4534  max_marking_limit_reached_);
4535 }
4536 
4537 void Heap::PrintMaxNewSpaceSizeReached() {
4538  PrintF("\n### Maximum new space size reached = %.02lf\n",
4539  stress_scavenge_observer_->MaxNewSpaceSizeReached());
4540 }
4541 
4542 int Heap::NextStressMarkingLimit() {
4543  return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
4544 }
4545 
4546 void Heap::NotifyDeserializationComplete() {
4547  PagedSpaces spaces(this);
4548  for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
4549  if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
4550 #ifdef DEBUG
4551  // All pages right after bootstrapping must be marked as never-evacuate.
4552  for (Page* p : *s) {
4553  DCHECK(p->NeverEvacuate());
4554  }
4555 #endif // DEBUG
4556  }
4557 
4558  read_only_space()->MarkAsReadOnly();
4559  deserialization_complete_ = true;
4560 }
4561 
4562 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
4563  DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
4564  local_embedder_heap_tracer()->SetRemoteTracer(tracer);
4565 }
4566 
4567 EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
4568  return local_embedder_heap_tracer()->remote_tracer();
4569 }
4570 
4571 void Heap::RegisterExternallyReferencedObject(Address* location) {
4572  // The embedder is not aware of whether numbers are materialized as heap
4573  // objects or just passed around as Smis.
4574  ObjectPtr object(*location);
4575  if (!object->IsHeapObject()) return;
4576  HeapObject* heap_object = HeapObject::cast(object);
4577  DCHECK(Contains(heap_object));
4578  if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
4579  incremental_marking()->WhiteToGreyAndPush(heap_object);
4580  } else {
4581  DCHECK(mark_compact_collector()->in_use());
4582  mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
4583  }
4584 }
4585 
4586 void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
4587 
4588 void Heap::TearDown() {
4589  DCHECK_EQ(gc_state_, TEAR_DOWN);
4590 #ifdef VERIFY_HEAP
4591  if (FLAG_verify_heap) {
4592  Verify();
4593  }
4594 #endif
4595 
4596  UpdateMaximumCommitted();
4597 
4598  if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
4599  PrintAllocationsHash();
4600  }
4601 
4602  if (FLAG_fuzzer_gc_analysis) {
4603  if (FLAG_stress_marking > 0) {
4604  PrintMaxMarkingLimitReached();
4605  }
4606  if (FLAG_stress_scavenge > 0) {
4607  PrintMaxNewSpaceSizeReached();
4608  }
4609  }
4610 
4611  if (FLAG_idle_time_scavenge) {
4612  new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
4613  delete idle_scavenge_observer_;
4614  idle_scavenge_observer_ = nullptr;
4615  delete scavenge_job_;
4616  scavenge_job_ = nullptr;
4617  }
4618 
4619  if (FLAG_stress_marking > 0) {
4620  RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
4621  stress_marking_observer_);
4622  delete stress_marking_observer_;
4623  stress_marking_observer_ = nullptr;
4624  }
4625  if (FLAG_stress_scavenge > 0) {
4626  new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
4627  delete stress_scavenge_observer_;
4628  stress_scavenge_observer_ = nullptr;
4629  }
4630 
4631  if (heap_controller_ != nullptr) {
4632  delete heap_controller_;
4633  heap_controller_ = nullptr;
4634  }
4635 
4636  if (mark_compact_collector_ != nullptr) {
4637  mark_compact_collector_->TearDown();
4638  delete mark_compact_collector_;
4639  mark_compact_collector_ = nullptr;
4640  }
4641 
4642 #ifdef ENABLE_MINOR_MC
4643  if (minor_mark_compact_collector_ != nullptr) {
4644  minor_mark_compact_collector_->TearDown();
4645  delete minor_mark_compact_collector_;
4646  minor_mark_compact_collector_ = nullptr;
4647  }
4648 #endif // ENABLE_MINOR_MC
4649 
4650  if (scavenger_collector_ != nullptr) {
4651  delete scavenger_collector_;
4652  scavenger_collector_ = nullptr;
4653  }
4654 
4655  if (array_buffer_collector_ != nullptr) {
4656  delete array_buffer_collector_;
4657  array_buffer_collector_ = nullptr;
4658  }
4659 
4660  delete incremental_marking_;
4661  incremental_marking_ = nullptr;
4662 
4663  delete concurrent_marking_;
4664  concurrent_marking_ = nullptr;
4665 
4666  delete gc_idle_time_handler_;
4667  gc_idle_time_handler_ = nullptr;
4668 
4669  if (memory_reducer_ != nullptr) {
4670  memory_reducer_->TearDown();
4671  delete memory_reducer_;
4672  memory_reducer_ = nullptr;
4673  }
4674 
4675  if (live_object_stats_ != nullptr) {
4676  delete live_object_stats_;
4677  live_object_stats_ = nullptr;
4678  }
4679 
4680  if (dead_object_stats_ != nullptr) {
4681  delete dead_object_stats_;
4682  dead_object_stats_ = nullptr;
4683  }
4684 
4685  delete local_embedder_heap_tracer_;
4686  local_embedder_heap_tracer_ = nullptr;
4687 
4688  isolate_->global_handles()->TearDown();
4689 
4690  external_string_table_.TearDown();
4691 
4692  // Tear down all ArrayBuffers before tearing down the heap since their
4693  // byte_length may be a HeapNumber which is required for freeing the backing
4694  // store.
4695  ArrayBufferTracker::TearDown(this);
4696 
4697  delete tracer_;
4698  tracer_ = nullptr;
4699 
4700  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
4701  delete space_[i];
4702  space_[i] = nullptr;
4703  }
4704 
4705  store_buffer()->TearDown();
4706 
4707  memory_allocator()->TearDown();
4708 
4709  StrongRootsList* next = nullptr;
4710  for (StrongRootsList* list = strong_roots_list_; list; list = next) {
4711  next = list->next;
4712  delete list;
4713  }
4714  strong_roots_list_ = nullptr;
4715 
4716  delete store_buffer_;
4717  store_buffer_ = nullptr;
4718 
4719  delete memory_allocator_;
4720  memory_allocator_ = nullptr;
4721 }
4722 
4723 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
4724  GCType gc_type, void* data) {
4725  DCHECK_NOT_NULL(callback);
4726  DCHECK(gc_prologue_callbacks_.end() ==
4727  std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
4728  GCCallbackTuple(callback, gc_type, data)));
4729  gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
4730 }
4731 
4732 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
4733  void* data) {
4734  DCHECK_NOT_NULL(callback);
4735  for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
4736  if (gc_prologue_callbacks_[i].callback == callback &&
4737  gc_prologue_callbacks_[i].data == data) {
4738  gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
4739  gc_prologue_callbacks_.pop_back();
4740  return;
4741  }
4742  }
4743  UNREACHABLE();
4744 }
4745 
4746 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
4747  GCType gc_type, void* data) {
4748  DCHECK_NOT_NULL(callback);
4749  DCHECK(gc_epilogue_callbacks_.end() ==
4750  std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
4751  GCCallbackTuple(callback, gc_type, data)));
4752  gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
4753 }
4754 
4755 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
4756  void* data) {
4757  DCHECK_NOT_NULL(callback);
4758  for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
4759  if (gc_epilogue_callbacks_[i].callback == callback &&
4760  gc_epilogue_callbacks_[i].data == data) {
4761  gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
4762  gc_epilogue_callbacks_.pop_back();
4763  return;
4764  }
4765  }
4766  UNREACHABLE();
4767 }
4768 
4769 namespace {
4770 Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
4771  Handle<WeakArrayList> array,
4772  PretenureFlag pretenure) {
4773  if (array->length() == 0) {
4774  return array;
4775  }
4776  int new_length = array->CountLiveWeakReferences();
4777  if (new_length == array->length()) {
4778  return array;
4779  }
4780 
4781  Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
4782  heap->isolate(),
4783  handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
4784  new_length, pretenure);
4785  // Allocation might have caused GC and turned some of the elements into
4786  // cleared weak heap objects. Count the number of live references again and
4787  // fill in the new array.
4788  int copy_to = 0;
4789  for (int i = 0; i < array->length(); i++) {
4790  MaybeObject element = array->Get(i);
4791  if (element->IsCleared()) continue;
4792  new_array->Set(copy_to++, element);
4793  }
4794  new_array->set_length(copy_to);
4795  return new_array;
4796 }
4797 
4798 } // anonymous namespace
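// Worked example for the helper above (hypothetical contents, where "w(x)" is
// a live weak reference and "cleared" is a cleared weak cell). The second
// counting pass exists because EnsureSpace may allocate, the allocation may
// trigger a GC, and that GC may clear further references after
// CountLiveWeakReferences() has run:
//
//   before: [w(a), cleared, w(b), cleared, w(c)]   length == 5
//   after:  [w(a), w(b), w(c)]                     length == 3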
4799 
4800 void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
4801  // Find known PrototypeUsers and compact them.
4802  std::vector<Handle<PrototypeInfo>> prototype_infos;
4803  {
4804  HeapIterator iterator(this);
4805  for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
4806  if (o->IsPrototypeInfo()) {
4807  PrototypeInfo* prototype_info = PrototypeInfo::cast(o);
4808  if (prototype_info->prototype_users()->IsWeakArrayList()) {
4809  prototype_infos.emplace_back(handle(prototype_info, isolate()));
4810  }
4811  }
4812  }
4813  }
4814  for (auto& prototype_info : prototype_infos) {
4815  Handle<WeakArrayList> array(
4816  WeakArrayList::cast(prototype_info->prototype_users()), isolate());
4817  DCHECK_IMPLIES(pretenure == TENURED,
4818  InOldSpace(*array) ||
4819  *array == ReadOnlyRoots(this).empty_weak_array_list());
4820  WeakArrayList* new_array = PrototypeUsers::Compact(
4821  array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
4822  prototype_info->set_prototype_users(new_array);
4823  }
4824 
4825  // Find known WeakArrayLists and compact them.
4826  Handle<WeakArrayList> scripts(script_list(), isolate());
4827  DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
4828  scripts = CompactWeakArrayList(this, scripts, pretenure);
4829  set_script_list(*scripts);
4830 
4831  Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
4832  isolate());
4833  DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
4834  no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
4835  set_noscript_shared_function_infos(*no_script_list);
4836 }
4837 
4838 void Heap::AddRetainedMap(Handle<Map> map) {
4839  if (map->is_in_retained_map_list()) {
4840  return;
4841  }
4842  Handle<WeakArrayList> array(retained_maps(), isolate());
4843  if (array->IsFull()) {
4844  CompactRetainedMaps(*array);
4845  }
4846  array =
4847  WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
4848  array = WeakArrayList::AddToEnd(
4849  isolate(), array,
4850  MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
4851  if (*array != retained_maps()) {
4852  set_retained_maps(*array);
4853  }
4854  map->set_is_in_retained_map_list(true);
4855 }
4856 
4857 void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
4858  DCHECK_EQ(retained_maps, this->retained_maps());
4859  int length = retained_maps->length();
4860  int new_length = 0;
4861  int new_number_of_disposed_maps = 0;
4862  // This loop compacts the array by removing cleared weak cells.
4863  for (int i = 0; i < length; i += 2) {
4864  MaybeObject maybe_object = retained_maps->Get(i);
4865  if (maybe_object->IsCleared()) {
4866  continue;
4867  }
4868 
4869  DCHECK(maybe_object->IsWeak());
4870 
4871  MaybeObject age = retained_maps->Get(i + 1);
4872  DCHECK(age->IsSmi());
4873  if (i != new_length) {
4874  retained_maps->Set(new_length, maybe_object);
4875  retained_maps->Set(new_length + 1, age);
4876  }
4877  if (i < number_of_disposed_maps_) {
4878  new_number_of_disposed_maps += 2;
4879  }
4880  new_length += 2;
4881  }
4882  number_of_disposed_maps_ = new_number_of_disposed_maps;
4883  HeapObject* undefined = ReadOnlyRoots(this).undefined_value();
4884  for (int i = new_length; i < length; i++) {
4885  retained_maps->Set(i, HeapObjectReference::Strong(undefined));
4886  }
4887  if (new_length != length) retained_maps->set_length(new_length);
4888 }
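// Illustrative layout for the two functions above: retained_maps() is a flat
// WeakArrayList of (map, age) pairs, which is why AddRetainedMap appends two
// entries and why the compaction loop advances by two. Hypothetical contents:
//
//   index: 0        1  2        3  4        5
//   value: weak(M0) 3  cleared  5  weak(M2) 7
//
// After CompactRetainedMaps only the live pairs remain, shifted left:
//
//   index: 0        1  2        3
//   value: weak(M0) 3  weak(M2) 7   (trailing slots overwritten, length set to 4)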
4889 
4890 void Heap::FatalProcessOutOfMemory(const char* location) {
4891  v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
4892 }
4893 
4894 #ifdef DEBUG
4895 
4896 class PrintHandleVisitor : public RootVisitor {
4897  public:
4898  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
4899  ObjectSlot end) override {
4900  for (ObjectSlot p = start; p < end; ++p)
4901  PrintF(" handle %p to %p\n", p.ToVoidPtr(), reinterpret_cast<void*>(*p));
4902  }
4903 };
4904 
4905 
4906 void Heap::PrintHandles() {
4907  PrintF("Handles:\n");
4908  PrintHandleVisitor v;
4909  isolate_->handle_scope_implementer()->Iterate(&v);
4910 }
4911 
4912 #endif
4913 
4914 class CheckHandleCountVisitor : public RootVisitor {
4915  public:
4916  CheckHandleCountVisitor() : handle_count_(0) {}
4917  ~CheckHandleCountVisitor() override {
4918  CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
4919  }
4920  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
4921  ObjectSlot end) override {
4922  handle_count_ += end - start;
4923  }
4924 
4925  private:
4926  ptrdiff_t handle_count_;
4927 };
4928 
4929 
4930 void Heap::CheckHandleCount() {
4931  CheckHandleCountVisitor v;
4932  isolate_->handle_scope_implementer()->Iterate(&v);
4933 }
4934 
4935 Address* Heap::store_buffer_top_address() {
4936  return store_buffer()->top_address();
4937 }
4938 
4939 // static
4940 intptr_t Heap::store_buffer_mask_constant() {
4941  return StoreBuffer::kStoreBufferMask;
4942 }
4943 
4944 // static
4945 Address Heap::store_buffer_overflow_function_address() {
4946  return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
4947 }
4948 
4949 void Heap::ClearRecordedSlot(HeapObject* object, ObjectSlot slot) {
4950  Page* page = Page::FromAddress(slot.address());
4951  if (!page->InNewSpace()) {
4952  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
4953  store_buffer()->DeleteEntry(slot.address());
4954  }
4955 }
4956 
4957 #ifdef DEBUG
4958 void Heap::VerifyClearedSlot(HeapObject* object, ObjectSlot slot) {
4959  if (InNewSpace(object)) return;
4960  Page* page = Page::FromAddress(slot.address());
4961  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
4962  store_buffer()->MoveAllEntriesToRememberedSet();
4963  CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
4964  // Old to old slots are filtered with invalidated slots.
4965  CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
4966  page->RegisteredObjectWithInvalidatedSlots(object));
4967 }
4968 #endif
4969 
4970 void Heap::ClearRecordedSlotRange(Address start, Address end) {
4971  Page* page = Page::FromAddress(start);
4972  if (!page->InNewSpace()) {
4973  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
4974  store_buffer()->DeleteEntry(start, end);
4975  }
4976 }
4977 
4978 PagedSpace* PagedSpaces::next() {
4979  switch (counter_++) {
4980  case RO_SPACE:
4981  // skip NEW_SPACE
4982  counter_++;
4983  return heap_->read_only_space();
4984  case OLD_SPACE:
4985  return heap_->old_space();
4986  case CODE_SPACE:
4987  return heap_->code_space();
4988  case MAP_SPACE:
4989  return heap_->map_space();
4990  default:
4991  return nullptr;
4992  }
4993 }
4994 
4995 SpaceIterator::SpaceIterator(Heap* heap)
4996  : heap_(heap), current_space_(FIRST_SPACE - 1) {}
4997 
4998 SpaceIterator::~SpaceIterator() = default;
4999 
5000 bool SpaceIterator::has_next() {
5001  // Iterate until no more spaces.
5002  return current_space_ != LAST_SPACE;
5003 }
5004 
5005 Space* SpaceIterator::next() {
5006  DCHECK(has_next());
5007  return heap_->space(++current_space_);
5008 }
5009 
5010 
5011 class HeapObjectsFilter {
5012  public:
5013  virtual ~HeapObjectsFilter() = default;
5014  virtual bool SkipObject(HeapObject* object) = 0;
5015 };
5016 
5017 
5018 class UnreachableObjectsFilter : public HeapObjectsFilter {
5019  public:
5020  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5021  MarkReachableObjects();
5022  }
5023 
5024  ~UnreachableObjectsFilter() override {
5025  for (auto it : reachable_) {
5026  delete it.second;
5027  it.second = nullptr;
5028  }
5029  }
5030 
5031  bool SkipObject(HeapObject* object) override {
5032  if (object->IsFiller()) return true;
5033  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5034  if (reachable_.count(chunk) == 0) return true;
5035  return reachable_[chunk]->count(object) == 0;
5036  }
5037 
5038  private:
5039  bool MarkAsReachable(HeapObject* object) {
5040  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5041  if (reachable_.count(chunk) == 0) {
5042  reachable_[chunk] = new std::unordered_set<HeapObject*>();
5043  }
5044  if (reachable_[chunk]->count(object)) return false;
5045  reachable_[chunk]->insert(object);
5046  return true;
5047  }
5048 
5049  class MarkingVisitor : public ObjectVisitor, public RootVisitor {
5050  public:
5051  explicit MarkingVisitor(UnreachableObjectsFilter* filter)
5052  : filter_(filter) {}
5053 
5054  void VisitPointers(HeapObject* host, ObjectSlot start,
5055  ObjectSlot end) override {
5056  MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
5057  }
5058 
5059  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
5060  MaybeObjectSlot end) final {
5061  MarkPointers(start, end);
5062  }
5063 
5064  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
5065  ObjectSlot end) override {
5066  MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
5067  }
5068 
5069  void TransitiveClosure() {
5070  while (!marking_stack_.empty()) {
5071  HeapObject* obj = marking_stack_.back();
5072  marking_stack_.pop_back();
5073  obj->Iterate(this);
5074  }
5075  }
5076 
5077  private:
5078  void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
5079  // Treat weak references as strong.
5080  for (MaybeObjectSlot p = start; p < end; ++p) {
5081  HeapObject* heap_object;
5082  if ((*p)->GetHeapObject(&heap_object)) {
5083  if (filter_->MarkAsReachable(heap_object)) {
5084  marking_stack_.push_back(heap_object);
5085  }
5086  }
5087  }
5088  }
5089  UnreachableObjectsFilter* filter_;
5090  std::vector<HeapObject*> marking_stack_;
5091  };
5092 
5093  friend class MarkingVisitor;
5094 
5095  void MarkReachableObjects() {
5096  MarkingVisitor visitor(this);
5097  heap_->IterateRoots(&visitor, VISIT_ALL);
5098  visitor.TransitiveClosure();
5099  }
5100 
5101  Heap* heap_;
5102  DisallowHeapAllocation no_allocation_;
5103  std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
5104 };
5105 
5106 HeapIterator::HeapIterator(Heap* heap,
5107  HeapIterator::HeapObjectsFiltering filtering)
5108  : heap_(heap),
5109  filtering_(filtering),
5110  filter_(nullptr),
5111  space_iterator_(nullptr),
5112  object_iterator_(nullptr) {
5113  heap_->MakeHeapIterable();
5114  heap_->heap_iterator_start();
5115  // Start the iteration.
5116  space_iterator_ = new SpaceIterator(heap_);
5117  switch (filtering_) {
5118  case kFilterUnreachable:
5119  filter_ = new UnreachableObjectsFilter(heap_);
5120  break;
5121  default:
5122  break;
5123  }
5124  object_iterator_ = space_iterator_->next()->GetObjectIterator();
5125 }
5126 
5127 
5128 HeapIterator::~HeapIterator() {
5129  heap_->heap_iterator_end();
5130 #ifdef DEBUG
5131  // Assert that in filtering mode we have iterated through all
5132  // objects. Otherwise, the heap will be left in an inconsistent state.
5133  if (filtering_ != kNoFiltering) {
5134  DCHECK_NULL(object_iterator_);
5135  }
5136 #endif
5137  delete space_iterator_;
5138  delete filter_;
5139 }
5140 
5141 
5142 HeapObject* HeapIterator::next() {
5143  if (filter_ == nullptr) return NextObject();
5144 
5145  HeapObject* obj = NextObject();
5146  while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
5147  return obj;
5148 }
5149 
5150 
5151 HeapObject* HeapIterator::NextObject() {
5152  // No iterator means we are done.
5153  if (object_iterator_.get() == nullptr) return nullptr;
5154 
5155  if (HeapObject* obj = object_iterator_.get()->Next()) {
5156  // If the current iterator has more objects we are fine.
5157  return obj;
5158  } else {
5159  // Go through the spaces looking for one that has objects.
5160  while (space_iterator_->has_next()) {
5161  object_iterator_ = space_iterator_->next()->GetObjectIterator();
5162  if (HeapObject* obj = object_iterator_.get()->Next()) {
5163  return obj;
5164  }
5165  }
5166  }
5167  // Done with the last space.
5168  object_iterator_.reset(nullptr);
5169  return nullptr;
5170 }
5171 
5172 
5173 void Heap::UpdateTotalGCTime(double duration) {
5174  if (FLAG_trace_gc_verbose) {
5175  total_gc_time_ms_ += duration;
5176  }
5177 }
5178 
5179 void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
5180  int last = 0;
5181  Isolate* isolate = heap_->isolate();
5182  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
5183  Object* o = new_space_strings_[i];
5184  if (o->IsTheHole(isolate)) {
5185  continue;
5186  }
5187  // The real external string is already in one of these vectors and was or
5188  // will be processed. Re-processing it will add a duplicate to the vector.
5189  if (o->IsThinString()) continue;
5190  DCHECK(o->IsExternalString());
5191  if (InNewSpace(o)) {
5192  new_space_strings_[last++] = o;
5193  } else {
5194  old_space_strings_.push_back(o);
5195  }
5196  }
5197  new_space_strings_.resize(last);
5198 }
5199 
5200 void Heap::ExternalStringTable::CleanUpAll() {
5201  CleanUpNewSpaceStrings();
5202  int last = 0;
5203  Isolate* isolate = heap_->isolate();
5204  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
5205  Object* o = old_space_strings_[i];
5206  if (o->IsTheHole(isolate)) {
5207  continue;
5208  }
5209  // The real external string is already in one of these vectors and was or
5210  // will be processed. Re-processing it will add a duplicate to the vector.
5211  if (o->IsThinString()) continue;
5212  DCHECK(o->IsExternalString());
5213  DCHECK(!InNewSpace(o));
5214  old_space_strings_[last++] = o;
5215  }
5216  old_space_strings_.resize(last);
5217 #ifdef VERIFY_HEAP
5218  if (FLAG_verify_heap) {
5219  Verify();
5220  }
5221 #endif
5222 }
5223 
5224 void Heap::ExternalStringTable::TearDown() {
5225  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
5226  Object* o = new_space_strings_[i];
5227  // Don't finalize thin strings.
5228  if (o->IsThinString()) continue;
5229  heap_->FinalizeExternalString(ExternalString::cast(o));
5230  }
5231  new_space_strings_.clear();
5232  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
5233  Object* o = old_space_strings_[i];
5234  // Don't finalize thin strings.
5235  if (o->IsThinString()) continue;
5236  heap_->FinalizeExternalString(ExternalString::cast(o));
5237  }
5238  old_space_strings_.clear();
5239 }
5240 
5241 
5242 void Heap::RememberUnmappedPage(Address page, bool compacted) {
5243  // Tag the page pointer to make it findable in the dump file.
5244  if (compacted) {
5245  page ^= 0xC1EAD & (Page::kPageSize - 1); // Cleared.
5246  } else {
5247  page ^= 0x1D1ED & (Page::kPageSize - 1); // I died.
5248  }
5249  remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
5250  remembered_unmapped_pages_index_++;
5251  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
5252 }
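// Illustrative arithmetic, assuming (for the example only) 1 MiB pages so the
// whole tag fits inside Page::kPageSize - 1: the page address is page-aligned,
// so its low bits are zero and the XOR simply writes the tag into them, which
// makes recently unmapped pages easy to spot in a crash dump:
//
//   page           = 0x00007f3a40100000
//   page ^ 0xC1EAD = 0x00007f3a401c1ead   // "C1EAD" visible in the low bits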
5253 
5254 void Heap::RegisterStrongRoots(ObjectSlot start, ObjectSlot end) {
5255  StrongRootsList* list = new StrongRootsList();
5256  list->next = strong_roots_list_;
5257  list->start = start;
5258  list->end = end;
5259  strong_roots_list_ = list;
5260 }
5261 
5262 void Heap::UnregisterStrongRoots(ObjectSlot start) {
5263  StrongRootsList* prev = nullptr;
5264  StrongRootsList* list = strong_roots_list_;
5265  while (list != nullptr) {
5266  StrongRootsList* next = list->next;
5267  if (list->start == start) {
5268  if (prev) {
5269  prev->next = next;
5270  } else {
5271  strong_roots_list_ = next;
5272  }
5273  delete list;
5274  } else {
5275  prev = list;
5276  }
5277  list = next;
5278  }
5279 }
5280 
5281 void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
5282  set_builtins_constants_table(cache);
5283 }
5284 
5285 void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
5286  DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
5287  set_interpreter_entry_trampoline_for_profiling(code);
5288 }
5289 
5290 void Heap::AddDirtyJSWeakFactory(
5291  JSWeakFactory* weak_factory,
5292  std::function<void(HeapObject* object, ObjectSlot slot, Object* target)>
5293  gc_notify_updated_slot) {
5294  DCHECK(dirty_js_weak_factories()->IsUndefined(isolate()) ||
5295  dirty_js_weak_factories()->IsJSWeakFactory());
5296  DCHECK(weak_factory->next()->IsUndefined(isolate()));
5297  DCHECK(!weak_factory->scheduled_for_cleanup());
5298  weak_factory->set_scheduled_for_cleanup(true);
5299  weak_factory->set_next(dirty_js_weak_factories());
5300  gc_notify_updated_slot(
5301  weak_factory,
5302  HeapObject::RawField(weak_factory, JSWeakFactory::kNextOffset),
5303  dirty_js_weak_factories());
5304  set_dirty_js_weak_factories(weak_factory);
5305  // Roots are rescanned after objects are moved, so no need to record a slot
5306  // for the root pointing to the first JSWeakFactory.
5307 }
5308 
5309 void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
5310  DCHECK(FLAG_harmony_weak_refs);
5311  DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
5312  weak_refs_keep_during_job()->IsOrderedHashSet());
5313  Handle<OrderedHashSet> table;
5314  if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
5315  table = isolate()->factory()->NewOrderedHashSet();
5316  } else {
5317  table =
5318  handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
5319  }
5320  table = OrderedHashSet::Add(isolate(), table, target);
5321  set_weak_refs_keep_during_job(*table);
5322 }
5323 
5324 void Heap::ClearKeepDuringJobSet() {
5325  set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
5326 }
5327 
5328 size_t Heap::NumberOfTrackedHeapObjectTypes() {
5329  return ObjectStats::OBJECT_STATS_COUNT;
5330 }
5331 
5332 
5333 size_t Heap::ObjectCountAtLastGC(size_t index) {
5334  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
5335  return 0;
5336  return live_object_stats_->object_count_last_gc(index);
5337 }
5338 
5339 
5340 size_t Heap::ObjectSizeAtLastGC(size_t index) {
5341  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
5342  return 0;
5343  return live_object_stats_->object_size_last_gc(index);
5344 }
5345 
5346 
5347 bool Heap::GetObjectTypeName(size_t index, const char** object_type,
5348  const char** object_sub_type) {
5349  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
5350 
5351  switch (static_cast<int>(index)) {
5352 #define COMPARE_AND_RETURN_NAME(name) \
5353  case name: \
5354  *object_type = #name; \
5355  *object_sub_type = ""; \
5356  return true;
5357  INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
5358 #undef COMPARE_AND_RETURN_NAME
5359 
5360 #define COMPARE_AND_RETURN_NAME(name) \
5361  case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
5362  *object_type = #name; \
5363  *object_sub_type = ""; \
5364  return true;
5365  VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
5366 #undef COMPARE_AND_RETURN_NAME
5367  }
5368  return false;
5369 }
5370 
5371 size_t Heap::NumberOfNativeContexts() {
5372  int result = 0;
5373  Object* context = native_contexts_list();
5374  while (!context->IsUndefined(isolate())) {
5375  ++result;
5376  Context native_context = Context::cast(context);
5377  context = native_context->next_context_link();
5378  }
5379  return result;
5380 }
5381 
5382 size_t Heap::NumberOfDetachedContexts() {
5383  // The detached_contexts() array has two entries per detached context.
5384  return detached_contexts()->length() / 2;
5385 }
5386 
5387 const char* AllocationSpaceName(AllocationSpace space) {
5388  switch (space) {
5389  case NEW_SPACE:
5390  return "NEW_SPACE";
5391  case OLD_SPACE:
5392  return "OLD_SPACE";
5393  case CODE_SPACE:
5394  return "CODE_SPACE";
5395  case MAP_SPACE:
5396  return "MAP_SPACE";
5397  case LO_SPACE:
5398  return "LO_SPACE";
5399  case NEW_LO_SPACE:
5400  return "NEW_LO_SPACE";
5401  case RO_SPACE:
5402  return "RO_SPACE";
5403  default:
5404  UNREACHABLE();
5405  }
5406  return nullptr;
5407 }
5408 
5409 void VerifyPointersVisitor::VisitPointers(HeapObject* host, ObjectSlot start,
5410  ObjectSlot end) {
5411  VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
5412 }
5413 
5414 void VerifyPointersVisitor::VisitPointers(HeapObject* host,
5415  MaybeObjectSlot start,
5416  MaybeObjectSlot end) {
5417  VerifyPointers(host, start, end);
5418 }
5419 
5420 void VerifyPointersVisitor::VisitRootPointers(Root root,
5421  const char* description,
5422  ObjectSlot start,
5423  ObjectSlot end) {
5424  VerifyPointers(nullptr, MaybeObjectSlot(start), MaybeObjectSlot(end));
5425 }
5426 
5427 void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
5428  MaybeObjectSlot start,
5429  MaybeObjectSlot end) {
5430  for (MaybeObjectSlot current = start; current < end; ++current) {
5431  HeapObject* object;
5432  if ((*current)->GetHeapObject(&object)) {
5433  CHECK(heap_->Contains(object));
5434  CHECK(object->map()->IsMap());
5435  } else {
5436  CHECK((*current)->IsSmi() || (*current)->IsCleared());
5437  }
5438  }
5439 }
5440 
5441 void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
5442  ObjectSlot start, ObjectSlot end) {
5443  for (ObjectSlot current = start; current < end; ++current) {
5444  CHECK((*current)->IsSmi());
5445  }
5446 }
5447 
5448 bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
5449  // Object migration is governed by the following rules:
5450  //
5451  // 1) Objects in new-space can be migrated to the old space
5452  // that matches their target space or they stay in new-space.
5453  // 2) Objects in old-space stay in the same space when migrating.
5454  // 3) Fillers (two or more words) can migrate due to left-trimming of
5455  // fixed arrays in new-space or old space.
5456  // 4) Fillers (one word) can never migrate, they are skipped by
5457  // incremental marking explicitly to prevent invalid pattern.
5458  //
5459  // Since this function is used for debugging only, we do not place
5460  // asserts here, but check everything explicitly.
5461  if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
5462  InstanceType type = obj->map()->instance_type();
5463  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
5464  AllocationSpace src = chunk->owner()->identity();
5465  switch (src) {
5466  case NEW_SPACE:
5467  return dst == NEW_SPACE || dst == OLD_SPACE;
5468  case OLD_SPACE:
5469  return dst == OLD_SPACE;
5470  case CODE_SPACE:
5471  return dst == CODE_SPACE && type == CODE_TYPE;
5472  case MAP_SPACE:
5473  case LO_SPACE:
5474  case CODE_LO_SPACE:
5475  case NEW_LO_SPACE:
5476  case RO_SPACE:
5477  return false;
5478  }
5479  UNREACHABLE();
5480 }
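// Illustrative outcomes of the rules above (hypothetical objects):
//
//   AllowedToBeMigrated(string_in_new_space, OLD_SPACE)         // true,  rule 1
//   AllowedToBeMigrated(string_in_old_space, NEW_SPACE)         // false, rule 2
//   AllowedToBeMigrated(code_object_in_code_space, CODE_SPACE)  // true
//   AllowedToBeMigrated(one_word_filler, OLD_SPACE)             // false, rule 4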
5481 
5482 void Heap::CreateObjectStats() {
5483  if (V8_LIKELY(FLAG_gc_stats == 0)) return;
5484  if (!live_object_stats_) {
5485  live_object_stats_ = new ObjectStats(this);
5486  }
5487  if (!dead_object_stats_) {
5488  dead_object_stats_ = new ObjectStats(this);
5489  }
5490 }
5491 
5492 void AllocationObserver::AllocationStep(int bytes_allocated,
5493  Address soon_object, size_t size) {
5494  DCHECK_GE(bytes_allocated, 0);
5495  bytes_to_next_step_ -= bytes_allocated;
5496  if (bytes_to_next_step_ <= 0) {
5497  Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
5498  step_size_ = GetNextStepSize();
5499  bytes_to_next_step_ = step_size_;
5500  }
5501  DCHECK_GE(bytes_to_next_step_, 0);
5502 }
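// A minimal sketch of an observer driven by the countdown above (hypothetical
// class, for illustration only):
//
//   class EveryMegabyteObserver : public AllocationObserver {
//    public:
//     EveryMegabyteObserver() : AllocationObserver(1 * MB) {}
//     void Step(int bytes_allocated, Address soon_object, size_t size) override {
//       // Invoked roughly once per MB of observed allocation;
//       // bytes_allocated includes any overshoot past the step size.
//       PrintF("another ~1MB allocated\n");
//     }
//   };
//
// It would be registered and removed the same way as the stress observers near
// the top of this section, e.g. via new_space()->AddAllocationObserver(...).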
5503 
5504 namespace {
5505 
5506 Map GcSafeMapOfCodeSpaceObject(HeapObject* object) {
5507  MapWord map_word = object->map_word();
5508  return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
5509  : map_word.ToMap();
5510 }
5511 
5512 int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
5513  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
5514 }
5515 
5516 Code GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
5517  Code code = Code::unchecked_cast(object);
5518  DCHECK(!code.is_null());
5519  DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
5520  return code;
5521 }
5522 
5523 } // namespace
5524 
5525 bool Heap::GcSafeCodeContains(Code code, Address addr) {
5526  Map map = GcSafeMapOfCodeSpaceObject(code);
5527  DCHECK(map == ReadOnlyRoots(this).code_map());
5528  if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
5529  Address start = code->address();
5530  Address end = code->address() + code->SizeFromMap(map);
5531  return start <= addr && addr < end;
5532 }
5533 
5534 Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
5535  Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
5536  if (!code.is_null()) return code;
5537 
5538  // Check if the inner pointer points into a large object chunk.
5539  LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
5540  if (large_page != nullptr) {
5541  return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
5542  }
5543 
5544  DCHECK(code_space()->Contains(inner_pointer));
5545 
5546  // Iterate through the page until we reach the end or find an object starting
5547  // after the inner pointer.
5548  Page* page = Page::FromAddress(inner_pointer);
5549  DCHECK_EQ(page->owner(), code_space());
5550  mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
5551 
5552  Address addr = page->skip_list()->StartFor(inner_pointer);
5553  Address top = code_space()->top();
5554  Address limit = code_space()->limit();
5555 
5556  while (true) {
5557  if (addr == top && addr != limit) {
5558  addr = limit;
5559  continue;
5560  }
5561 
5562  HeapObject* obj = HeapObject::FromAddress(addr);
5563  int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
5564  Address next_addr = addr + obj_size;
5565  if (next_addr > inner_pointer) {
5566  return GcSafeCastToCode(this, obj, inner_pointer);
5567  }
5568  addr = next_addr;
5569  }
5570 }
5571 
5572 void Heap::WriteBarrierForCodeSlow(Code code) {
5573  for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
5574  !it.done(); it.next()) {
5575  GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
5576  MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
5577  }
5578 }
5579 
5580 void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
5581  HeapObject* value) {
5582  Heap* heap = Heap::FromWritableHeapObject(object);
5583  heap->store_buffer()->InsertEntry(slot);
5584 }
5585 
5586 void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
5587  int offset, int length) {
5588  for (int i = 0; i < length; i++) {
5589  if (!InNewSpace(array->get(offset + i))) continue;
5590  heap->store_buffer()->InsertEntry(
5591  array->RawFieldOfElementAt(offset + i).address());
5592  }
5593 }
5594 
5595 void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
5596  HeapObject* object) {
5597  DCHECK(InNewSpace(object));
5598  Page* source_page = Page::FromAddress(host.ptr());
5599  RelocInfo::Mode rmode = rinfo->rmode();
5600  Address addr = rinfo->pc();
5601  SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
5602  if (rinfo->IsInConstantPool()) {
5603  addr = rinfo->constant_pool_entry_address();
5604  if (RelocInfo::IsCodeTargetMode(rmode)) {
5605  slot_type = CODE_ENTRY_SLOT;
5606  } else {
5607  DCHECK(RelocInfo::IsEmbeddedObject(rmode));
5608  slot_type = OBJECT_SLOT;
5609  }
5610  }
5611  RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, host.ptr(), slot_type,
5612  addr);
5613 }
5614 
5615 void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
5616  HeapObject* value) {
5617  Heap* heap = Heap::FromWritableHeapObject(object);
5618  heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
5619  value);
5620 }
5621 
5622 void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
5623  if (FLAG_concurrent_marking ||
5624  heap->incremental_marking()->marking_state()->IsBlack(object)) {
5625  heap->incremental_marking()->RevisitObject(object);
5626  }
5627 }
5628 
5629 void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
5630  HeapObject* object) {
5631  Heap* heap = Heap::FromWritableHeapObject(host);
5632  DCHECK(heap->incremental_marking()->IsMarking());
5633  heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
5634 }
5635 
5636 bool Heap::PageFlagsAreConsistent(HeapObject* object) {
5637  Heap* heap = Heap::FromWritableHeapObject(object);
5638  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
5639  heap_internals::MemoryChunk* slim_chunk =
5640  heap_internals::MemoryChunk::FromHeapObject(object);
5641 
5642  const bool generation_consistency =
5643  chunk->owner()->identity() != NEW_SPACE ||
5644  (chunk->InNewSpace() && slim_chunk->InNewSpace());
5645  const bool marking_consistency =
5646  !heap->incremental_marking()->IsMarking() ||
5647  (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
5648  slim_chunk->IsMarking());
5649 
5650  return generation_consistency && marking_consistency;
5651 }
5652 
5653 static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
5654  heap_internals::MemoryChunk::kMarkingBit,
5655  "Incremental marking flag inconsistent");
5656 static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
5657  heap_internals::MemoryChunk::kFromSpaceBit,
5658  "From space flag inconsistent");
5659 static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
5660  heap_internals::MemoryChunk::kToSpaceBit,
5661  "To space flag inconsistent");
5662 static_assert(MemoryChunk::kFlagsOffset ==
5663  heap_internals::MemoryChunk::kFlagsOffset,
5664  "Flag offset inconsistent");
5665 
5666 void Heap::SetEmbedderStackStateForNextFinalizaton(
5667  EmbedderHeapTracer::EmbedderStackState stack_state) {
5668  local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
5669  stack_state);
5670 }
5671 
5672 #ifdef DEBUG
5673 void Heap::IncrementObjectCounters() {
5674  isolate_->counters()->objs_since_last_full()->Increment();
5675  isolate_->counters()->objs_since_last_young()->Increment();
5676 }
5677 #endif // DEBUG
5678 
5679 } // namespace internal
5680 } // namespace v8