V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
heap-inl.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_HEAP_INL_H_
6 #define V8_HEAP_HEAP_INL_H_
7 
8 #include <cmath>
9 
10 // Clients of this interface shouldn't depend on lots of heap internals.
11 // Do not include anything from src/heap other than src/heap/heap.h and its
12 // write barrier here!
13 #include "src/heap/heap-write-barrier.h"
14 #include "src/heap/heap.h"
15 
16 #include "src/base/atomic-utils.h"
17 #include "src/base/platform/platform.h"
18 #include "src/feedback-vector.h"
19 
20 // TODO(mstarzinger): There is one more include to remove in order to no longer
21 // leak heap internals to users of this interface!
22 #include "src/heap/spaces-inl.h"
23 #include "src/isolate-data.h"
24 #include "src/isolate.h"
25 #include "src/log.h"
26 #include "src/msan.h"
27 #include "src/objects-inl.h"
28 #include "src/objects/allocation-site-inl.h"
29 #include "src/objects/api-callbacks-inl.h"
30 #include "src/objects/descriptor-array.h"
31 #include "src/objects/literal-objects-inl.h"
32 #include "src/objects/scope-info.h"
33 #include "src/objects/script-inl.h"
34 #include "src/profiler/heap-profiler.h"
35 #include "src/string-hasher.h"
36 #include "src/zone/zone-list-inl.h"
37 
38 // The following header includes the write barrier essentials that can also be
39 // used stand-alone without including heap-inl.h.
40 // TODO(mlippautz): Remove once users of object-macros.h include this file on
41 // their own.
42 #include "src/heap/heap-write-barrier-inl.h"
43 
44 namespace v8 {
45 namespace internal {
46 
47 AllocationSpace AllocationResult::RetrySpace() {
48  DCHECK(IsRetry());
49  return static_cast<AllocationSpace>(Smi::ToInt(object_));
50 }
51 
52 HeapObject* AllocationResult::ToObjectChecked() {
53  CHECK(!IsRetry());
54  return HeapObject::cast(object_);
55 }
56 
57 Isolate* Heap::isolate() {
58  return reinterpret_cast<Isolate*>(
59  reinterpret_cast<intptr_t>(this) -
60  reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
61 }
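// Note (annotation, not an original V8 comment): Heap is embedded as a field
// of Isolate, so the owning Isolate is recovered by subtracting that field's
// offset from `this`. The sub-expression reinterpret_cast<Isolate*>(16)->heap()
// evaluates to 16 + <offset of the heap field inside Isolate> (assuming
// Isolate::heap() simply returns the address of its heap field, as in upstream
// V8), so the whole return reduces to `this - offsetof(Isolate, heap_)`. The
// dummy base 16 is presumably used instead of 0 to avoid arithmetic on a null
// pointer.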
62 
63 int64_t Heap::external_memory() {
64  return isolate()->isolate_data()->external_memory_;
65 }
66 
67 void Heap::update_external_memory(int64_t delta) {
68  isolate()->isolate_data()->external_memory_ += delta;
69 }
70 
71 void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
72  external_memory_concurrently_freed_ += freed;
73 }
74 
75 void Heap::account_external_memory_concurrently_freed() {
76  isolate()->isolate_data()->external_memory_ -=
77  external_memory_concurrently_freed_;
78  external_memory_concurrently_freed_ = 0;
79 }
80 
81 RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
82 
83 // TODO(jkummerow): Drop std::remove_pointer after the migration to ObjectPtr.
84 #define ROOT_ACCESSOR(Type, name, CamelName) \
85  Type Heap::name() { \
86  return std::remove_pointer<Type>::type::cast( \
87  roots_table()[RootIndex::k##CamelName]); \
88  }
89 MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
90 #undef ROOT_ACCESSOR
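// Note (illustrative expansion, placeholder names only): an entry such as
// V(FixedArray, some_root, SomeRoot) in MUTABLE_ROOT_LIST would generate
// roughly
//
//   FixedArray Heap::some_root() {
//     return std::remove_pointer<FixedArray>::type::cast(
//         roots_table()[RootIndex::kSomeRoot]);
//   }
//
// std::remove_pointer lets the same macro handle both pointer-typed (Object*)
// and value-typed (ObjectPtr-based) roots during the migration mentioned in
// the TODO above.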
91 
92 #define ROOT_ACCESSOR(type, name, CamelName) \
93  void Heap::set_##name(type value) { \
94  /* The deserializer makes use of the fact that these common roots are */ \
95  /* never in new space and never on a page that is being compacted. */ \
96  DCHECK_IMPLIES(deserialization_complete(), \
97  !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
98  DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName), \
99  IsImmovable(HeapObject::cast(value))); \
100  roots_table()[RootIndex::k##CamelName] = value; \
101  }
102 ROOT_LIST(ROOT_ACCESSOR)
103 #undef ROOT_ACCESSOR
104 
105 void Heap::SetRootCodeStubs(SimpleNumberDictionary value) {
106  roots_table()[RootIndex::kCodeStubs] = value;
107 }
108 
109 void Heap::SetRootMaterializedObjects(FixedArray objects) {
110  roots_table()[RootIndex::kMaterializedObjects] = objects;
111 }
112 
113 void Heap::SetRootScriptList(Object* value) {
114  roots_table()[RootIndex::kScriptList] = value;
115 }
116 
117 void Heap::SetRootStringTable(StringTable value) {
118  roots_table()[RootIndex::kStringTable] = value;
119 }
120 
121 void Heap::SetRootNoScriptSharedFunctionInfos(Object* value) {
122  roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value;
123 }
124 
125 void Heap::SetMessageListeners(TemplateList value) {
126  roots_table()[RootIndex::kMessageListeners] = value;
127 }
128 
129 PagedSpace* Heap::paged_space(int idx) {
130  DCHECK_NE(idx, LO_SPACE);
131  DCHECK_NE(idx, NEW_SPACE);
132  DCHECK_NE(idx, CODE_LO_SPACE);
133  DCHECK_NE(idx, NEW_LO_SPACE);
134  return static_cast<PagedSpace*>(space_[idx]);
135 }
136 
137 Space* Heap::space(int idx) { return space_[idx]; }
138 
139 Address* Heap::NewSpaceAllocationTopAddress() {
140  return new_space_->allocation_top_address();
141 }
142 
143 Address* Heap::NewSpaceAllocationLimitAddress() {
144  return new_space_->allocation_limit_address();
145 }
146 
147 Address* Heap::OldSpaceAllocationTopAddress() {
148  return old_space_->allocation_top_address();
149 }
150 
151 Address* Heap::OldSpaceAllocationLimitAddress() {
152  return old_space_->allocation_limit_address();
153 }
154 
155 void Heap::UpdateNewSpaceAllocationCounter() {
156  new_space_allocation_counter_ = NewSpaceAllocationCounter();
157 }
158 
159 size_t Heap::NewSpaceAllocationCounter() {
160  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
161 }
162 
163 AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
164  AllocationAlignment alignment) {
165  DCHECK(AllowHandleAllocation::IsAllowed());
166  DCHECK(AllowHeapAllocation::IsAllowed());
167  DCHECK(gc_state_ == NOT_IN_GC);
168 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
169  if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
170  if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
171  return AllocationResult::Retry(space);
172  }
173  }
174 #endif
175 #ifdef DEBUG
176  IncrementObjectCounters();
177 #endif
178 
179  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
180 
181  HeapObject* object = nullptr;
182  AllocationResult allocation;
183  if (NEW_SPACE == space) {
184  if (large_object) {
185  // TODO(hpayer): Implement a LO tenuring strategy.
186  space = FLAG_young_generation_large_objects ? NEW_LO_SPACE : LO_SPACE;
187  } else {
188  allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
189  if (allocation.To(&object)) {
190  OnAllocationEvent(object, size_in_bytes);
191  }
192  return allocation;
193  }
194  }
195 
196  // Here we only allocate in the old generation.
197  if (OLD_SPACE == space) {
198  if (large_object) {
199  allocation = lo_space_->AllocateRaw(size_in_bytes);
200  } else {
201  allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
202  }
203  } else if (CODE_SPACE == space) {
204  if (size_in_bytes <= code_space()->AreaSize()) {
205  allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
206  } else {
207  allocation = code_lo_space_->AllocateRaw(size_in_bytes);
208  }
209  } else if (LO_SPACE == space) {
210  DCHECK(large_object);
211  allocation = lo_space_->AllocateRaw(size_in_bytes);
212  } else if (NEW_LO_SPACE == space) {
213  DCHECK(FLAG_young_generation_large_objects);
214  allocation = new_lo_space_->AllocateRaw(size_in_bytes);
215  } else if (CODE_LO_SPACE == space) {
216  allocation = code_lo_space_->AllocateRaw(size_in_bytes);
217  } else if (MAP_SPACE == space) {
218  allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
219  } else if (RO_SPACE == space) {
220 #ifdef V8_USE_SNAPSHOT
221  DCHECK(isolate_->serializer_enabled());
222 #endif
223  DCHECK(!large_object);
224  DCHECK(CanAllocateInReadOnlySpace());
225  allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
226  } else {
227  // NEW_SPACE is not allowed here.
228  UNREACHABLE();
229  }
230 
231  if (allocation.To(&object)) {
232  if (space == CODE_SPACE) {
233  // Unprotect the memory chunk of the object if it was not unprotected
234  // already.
235  UnprotectAndRegisterMemoryChunk(object);
236  ZapCodeObject(object->address(), size_in_bytes);
237  }
238  OnAllocationEvent(object, size_in_bytes);
239  }
240 
241  return allocation;
242 }
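// Note (illustrative sketch, not code from this file): callers generally test
// the AllocationResult and fall back to a collect-and-retry path when it
// signals failure, along the lines of
//
//   HeapObject* obj = nullptr;
//   AllocationResult result = AllocateRaw(size_in_bytes, OLD_SPACE);
//   if (!result.To(&obj)) {
//     // Allocation failed: result.RetrySpace() names the space that should
//     // be collected before retrying.
//   }
//
// AllocateRaw itself only routes the request to the matching space; the
// actual bump-pointer or free-list work happens in the per-space AllocateRaw*
// methods invoked above.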
243 
244 void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
245  for (auto& tracker : allocation_trackers_) {
246  tracker->AllocationEvent(object->address(), size_in_bytes);
247  }
248 
249  if (FLAG_verify_predictable) {
250  ++allocations_count_;
251  // Advance synthetic time by making a time request.
252  MonotonicallyIncreasingTimeInMs();
253 
254  UpdateAllocationsHash(object);
255  UpdateAllocationsHash(size_in_bytes);
256 
257  if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
258  PrintAllocationsHash();
259  }
260  } else if (FLAG_fuzzer_gc_analysis) {
261  ++allocations_count_;
262  } else if (FLAG_trace_allocation_stack_interval > 0) {
263  ++allocations_count_;
264  if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
265  isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
266  }
267  }
268 }
269 
270 
271 void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
272  int size_in_bytes) {
273  HeapProfiler* heap_profiler = isolate_->heap_profiler();
274  if (heap_profiler->is_tracking_object_moves()) {
275  heap_profiler->ObjectMoveEvent(source->address(), target->address(),
276  size_in_bytes);
277  }
278  for (auto& tracker : allocation_trackers_) {
279  tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
280  }
281  if (target->IsSharedFunctionInfo()) {
282  LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
283  target->address()));
284  }
285 
286  if (FLAG_verify_predictable) {
287  ++allocations_count_;
288  // Advance synthetic time by making a time request.
289  MonotonicallyIncreasingTimeInMs();
290 
291  UpdateAllocationsHash(source);
292  UpdateAllocationsHash(target);
293  UpdateAllocationsHash(size_in_bytes);
294 
295  if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
296  PrintAllocationsHash();
297  }
298  } else if (FLAG_fuzzer_gc_analysis) {
299  ++allocations_count_;
300  }
301 }
302 
303 bool Heap::CanAllocateInReadOnlySpace() {
304  return !deserialization_complete_ &&
305  (isolate()->serializer_enabled() ||
306  !isolate()->initialized_from_snapshot());
307 }
308 
309 void Heap::UpdateAllocationsHash(HeapObject* object) {
310  Address object_address = object->address();
311  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
312  AllocationSpace allocation_space = memory_chunk->owner()->identity();
313 
314  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
315  uint32_t value =
316  static_cast<uint32_t>(object_address - memory_chunk->address()) |
317  (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
318 
319  UpdateAllocationsHash(value);
320 }
321 
322 
323 void Heap::UpdateAllocationsHash(uint32_t value) {
324  uint16_t c1 = static_cast<uint16_t>(value);
325  uint16_t c2 = static_cast<uint16_t>(value >> 16);
326  raw_allocations_hash_ =
327  StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
328  raw_allocations_hash_ =
329  StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
330 }
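// Note: these two overloads feed the --verify-predictable allocation digest.
// The HeapObject* overload encodes the object's offset within its page plus
// the owning space's identity; this overload then folds that 32-bit value into
// raw_allocations_hash_ sixteen bits at a time via StringHasher, which is the
// running hash that PrintAllocationsHash() reports.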
331 
332 void Heap::RegisterExternalString(String string) {
333  DCHECK(string->IsExternalString());
334  DCHECK(!string->IsThinString());
335  external_string_table_.AddString(string);
336 }
337 
338 void Heap::UpdateExternalString(String string, size_t old_payload,
339  size_t new_payload) {
340  DCHECK(string->IsExternalString());
341  Page* page = Page::FromHeapObject(string);
342 
343  if (old_payload > new_payload)
344  page->DecrementExternalBackingStoreBytes(
345  ExternalBackingStoreType::kExternalString, old_payload - new_payload);
346  else
347  page->IncrementExternalBackingStoreBytes(
348  ExternalBackingStoreType::kExternalString, new_payload - old_payload);
349 }
350 
351 void Heap::FinalizeExternalString(String string) {
352  DCHECK(string->IsExternalString());
353  Page* page = Page::FromHeapObject(string);
354  ExternalString ext_string = ExternalString::cast(string);
355 
356  page->DecrementExternalBackingStoreBytes(
357  ExternalBackingStoreType::kExternalString,
358  ext_string->ExternalPayloadSize());
359 
359 
360  v8::String::ExternalStringResourceBase** resource_addr =
361  reinterpret_cast<v8::String::ExternalStringResourceBase**>(
362  string->address() + ExternalString::kResourceOffset);
363 
364  // Dispose of the C++ object if it has not already been disposed.
365  if (*resource_addr != nullptr) {
366  (*resource_addr)->Dispose();
367  *resource_addr = nullptr;
368  }
369 }
370 
371 Address Heap::NewSpaceTop() { return new_space_->top(); }
372 
373 // static
374 bool Heap::InNewSpace(Object* object) {
375  DCHECK(!HasWeakHeapObjectTag(object));
376  return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
377 }
378 
379 // static
380 bool Heap::InNewSpace(MaybeObject object) {
381  HeapObject* heap_object;
382  return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
383 }
384 
385 // static
386 bool Heap::InNewSpace(HeapObject* heap_object) {
387  // Inlined check from NewSpace::Contains.
388  bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
389 #ifdef DEBUG
390  // If in NEW_SPACE, then check we're either not in the middle of GC or the
391  // object is in to-space.
392  if (result) {
393  // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
394  Heap* heap = Heap::FromWritableHeapObject(heap_object);
395  DCHECK(heap->gc_state_ != NOT_IN_GC || InToSpace(heap_object));
396  }
397 #endif
398  return result;
399 }
400 
401 // static
402 bool Heap::InNewSpace(HeapObjectPtr heap_object) {
403  bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
404 #ifdef DEBUG
405  // If in NEW_SPACE, then check we're either not in the middle of GC or the
406  // object is in to-space.
407  if (result) {
408  // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
409  Heap* heap = Heap::FromWritableHeapObject(&heap_object);
410  DCHECK(heap->gc_state_ != NOT_IN_GC || InToSpace(heap_object));
411  }
412 #endif
413  return result;
414 }
415 
416 // static
417 bool Heap::InFromSpace(Object* object) {
418  DCHECK(!HasWeakHeapObjectTag(object));
419  return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
420 }
421 
422 // static
423 bool Heap::InFromSpace(MaybeObject object) {
424  HeapObject* heap_object;
425  return object->GetHeapObject(&heap_object) && InFromSpace(heap_object);
426 }
427 
428 // static
429 bool Heap::InFromSpace(HeapObject* heap_object) {
430  return MemoryChunk::FromHeapObject(heap_object)
431  ->IsFlagSet(Page::IN_FROM_SPACE);
432 }
433 
434 // static
435 bool Heap::InToSpace(Object* object) {
436  DCHECK(!HasWeakHeapObjectTag(object));
437  return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
438 }
439 
440 // static
441 bool Heap::InToSpace(MaybeObject object) {
442  HeapObject* heap_object;
443  return object->GetHeapObject(&heap_object) && InToSpace(heap_object);
444 }
445 
446 // static
447 bool Heap::InToSpace(HeapObject* heap_object) {
448  return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
449 }
450 
451 // static
452 bool Heap::InToSpace(HeapObjectPtr heap_object) {
453  return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
454 }
455 
456 bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
457 
458 bool Heap::InReadOnlySpace(Object* object) {
459  return read_only_space_->Contains(object);
460 }
461 
462 // static
463 Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
464  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
465  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
466  // find a heap. The exception is when the ReadOnlySpace is writeable, during
467  // bootstrapping, so explicitly allow this case.
468  SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
469  static_cast<ReadOnlySpace*>(chunk->owner())->writable());
470  Heap* heap = chunk->heap();
471  SLOW_DCHECK(heap != nullptr);
472  return heap;
473 }
474 
475 // static
476 Heap* Heap::FromWritableHeapObject(const HeapObjectPtr* obj) {
477  MemoryChunk* chunk = MemoryChunk::FromHeapObject(*obj);
478  // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
479  // find a heap. The exception is when the ReadOnlySpace is writeable, during
480  // bootstrapping, so explicitly allow this case.
481  SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
482  static_cast<ReadOnlySpace*>(chunk->owner())->writable());
483  Heap* heap = chunk->heap();
484  SLOW_DCHECK(heap != nullptr);
485  return heap;
486 }
487 
488 bool Heap::ShouldBePromoted(Address old_address) {
489  Page* page = Page::FromAddress(old_address);
490  Address age_mark = new_space_->age_mark();
491  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
492  (!page->ContainsLimit(age_mark) || old_address < age_mark);
493 }
494 
495 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
496  CopyWords(dst, src, static_cast<size_t>(byte_size / kPointerSize));
497 }
498 
499 template <Heap::FindMementoMode mode>
500 AllocationMemento* Heap::FindAllocationMemento(Map map, HeapObject* object) {
501  Address object_address = object->address();
502  Address memento_address = object_address + object->SizeFromMap(map);
503  Address last_memento_word_address = memento_address + kPointerSize;
504  // If the memento would be on another page, bail out immediately.
505  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
506  return nullptr;
507  }
508  HeapObject* candidate = HeapObject::FromAddress(memento_address);
509  Map candidate_map = candidate->map();
510  // This fast check may peek at an uninitialized word. However, the slow check
511  // below (memento_address == top) ensures that this is safe. Mark the word as
512  // initialized to silence MemorySanitizer warnings.
513  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
514  if (candidate_map != ReadOnlyRoots(this).allocation_memento_map()) {
515  return nullptr;
516  }
517 
518  // Bail out if the memento is below the age mark, which can happen when
519  // mementos survived because a page got moved within new space.
520  Page* object_page = Page::FromAddress(object_address);
521  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
522  Address age_mark =
523  reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
524  if (!object_page->Contains(age_mark)) {
525  return nullptr;
526  }
527  // Do an exact check in the case where the age mark is on the same page.
528  if (object_address < age_mark) {
529  return nullptr;
530  }
531  }
532 
533  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
534 
535  // Depending on what the memento is used for, we might need to perform
536  // additional checks.
537  Address top;
538  switch (mode) {
539  case Heap::kForGC:
540  return memento_candidate;
541  case Heap::kForRuntime:
542  if (memento_candidate == nullptr) return nullptr;
543  // Either the object is the last object in the new space, or there is
544  // another object of at least word size (the header map word) following
545  it, so it suffices to compare ptr and top here.
546  top = NewSpaceTop();
547  DCHECK(memento_address == top ||
548  memento_address + HeapObject::kHeaderSize <= top ||
549  !Page::OnSamePage(memento_address, top - 1));
550  if ((memento_address != top) && memento_candidate->IsValid()) {
551  return memento_candidate;
552  }
553  return nullptr;
554  default:
555  UNREACHABLE();
556  }
557  UNREACHABLE();
558 }
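// Note: an AllocationMemento, when present, sits directly behind its object,
// so the lookup above peeks at the map word at object_address + object size
// (MSAN-annotated because that word may be uninitialized) and compares it
// against the allocation_memento_map root. kForGC accepts any such match;
// kForRuntime additionally rejects a memento that is only partially allocated
// at the current new-space top or that fails IsValid().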
559 
560 void Heap::UpdateAllocationSite(Map map, HeapObject* object,
561  PretenuringFeedbackMap* pretenuring_feedback) {
562  DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
563  DCHECK(
564  InFromSpace(object) ||
565  (InToSpace(object) && Page::FromAddress(object->address())
566  ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
567  (!InNewSpace(object) && Page::FromAddress(object->address())
568  ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
569  if (!FLAG_allocation_site_pretenuring ||
570  !AllocationSite::CanTrack(map->instance_type()))
571  return;
572  AllocationMemento* memento_candidate =
573  FindAllocationMemento<kForGC>(map, object);
574  if (memento_candidate == nullptr) return;
575 
576  // Entering cached feedback is used in the parallel case. We are not allowed
577  // to dereference the allocation site and rather have to postpone all checks
578  // till actually merging the data.
579  Address key = memento_candidate->GetAllocationSiteUnchecked();
580  (*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
581 }
582 
583 void Heap::ExternalStringTable::AddString(String string) {
584  DCHECK(string->IsExternalString());
585  DCHECK(!Contains(string));
586 
587  if (InNewSpace(string)) {
588  new_space_strings_.push_back(string);
589  } else {
590  old_space_strings_.push_back(string);
591  }
592 }
593 
594 Oddball* Heap::ToBoolean(bool condition) {
595  ReadOnlyRoots roots(this);
596  return condition ? roots.true_value() : roots.false_value();
597 }
598 
599 uint64_t Heap::HashSeed() {
600  uint64_t seed;
601  ReadOnlyRoots(this).hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed),
602  kInt64Size);
603  DCHECK(FLAG_randomize_hashes || seed == 0);
604  return seed;
605 }
606 
607 int Heap::NextScriptId() {
608  int last_id = last_script_id()->value();
609  if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
610  last_id++;
611  set_last_script_id(Smi::FromInt(last_id));
612  return last_id;
613 }
614 
615 int Heap::NextDebuggingId() {
616  int last_id = last_debugging_id()->value();
617  if (last_id == DebugInfo::DebuggingIdBits::kMax) {
618  last_id = DebugInfo::kNoDebuggingId;
619  }
620  last_id++;
621  set_last_debugging_id(Smi::FromInt(last_id));
622  return last_id;
623 }
624 
625 int Heap::GetNextTemplateSerialNumber() {
626  int next_serial_number = next_template_serial_number()->value() + 1;
627  set_next_template_serial_number(Smi::FromInt(next_serial_number));
628  return next_serial_number;
629 }
630 
631 int Heap::MaxNumberToStringCacheSize() const {
632  // Compute the size of the number string cache based on the max newspace size.
633  // The number string cache has a minimum size based on twice the initial cache
634  // size to ensure that it is bigger after being made 'full size'.
635  size_t number_string_cache_size = max_semi_space_size_ / 512;
636  number_string_cache_size =
637  Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
638  Min<size_t>(0x4000u, number_string_cache_size));
639  // There is a string and a number per entry so the length is twice the number
640  // of entries.
641  return static_cast<int>(number_string_cache_size * 2);
642 }
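// Note (worked example, assuming kInitialNumberStringCacheSize is well below
// the upper clamp): with an 8 MB max semi-space, 8 MB / 512 = 16384 entries,
// which the Min() leaves at 0x4000; the returned backing-store length is then
// 2 * 0x4000 = 0x8000 slots, since each entry stores a number and its cached
// string. The Max() lower bound only matters for very small semi-spaces.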
643 
644 void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
645  size_t amount) {
646  base::CheckedIncrement(&backing_store_bytes_, amount);
647  // TODO(mlippautz): Implement interrupt for global memory allocations that can
648  // trigger garbage collections.
649 }
650 
651 bool Heap::IsWithinLargeObject(Address address) {
652  if (new_lo_space()->FindPage(address) || lo_space()->FindPage(address) ||
653  code_lo_space()->FindPage(address))
654  return true;
655  return false;
656 }
657 
658 void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
659  size_t amount) {
660  base::CheckedDecrement(&backing_store_bytes_, amount);
661 }
662 
663 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
664  : heap_(isolate->heap()) {
665  heap_->always_allocate_scope_count_++;
666 }
667 
668 AlwaysAllocateScope::~AlwaysAllocateScope() {
669  heap_->always_allocate_scope_count_--;
670 }
671 
672 CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
673  : heap_(heap) {
674  if (heap_->write_protect_code_memory()) {
675  heap_->increment_code_space_memory_modification_scope_depth();
676  heap_->code_space()->SetReadAndWritable();
677  LargePage* page = heap_->code_lo_space()->first_page();
678  while (page != nullptr) {
679  DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
680  CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
681  page->SetReadAndWritable();
682  page = page->next_page();
683  }
684  }
685 }
686 
687 CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
688  if (heap_->write_protect_code_memory()) {
689  heap_->decrement_code_space_memory_modification_scope_depth();
690  heap_->code_space()->SetReadAndExecutable();
691  LargePage* page = heap_->code_lo_space()->first_page();
692  while (page != nullptr) {
693  DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
694  CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
695  page->SetReadAndExecutable();
696  page = page->next_page();
697  }
698  }
699 }
700 
701 CodePageCollectionMemoryModificationScope::
702  CodePageCollectionMemoryModificationScope(Heap* heap)
703  : heap_(heap) {
704  if (heap_->write_protect_code_memory() &&
705  !heap_->code_space_memory_modification_scope_depth()) {
706  heap_->EnableUnprotectedMemoryChunksRegistry();
707  }
708 }
709 
710 CodePageCollectionMemoryModificationScope::
711  ~CodePageCollectionMemoryModificationScope() {
712  if (heap_->write_protect_code_memory() &&
713  !heap_->code_space_memory_modification_scope_depth()) {
714  heap_->ProtectUnprotectedMemoryChunks();
715  heap_->DisableUnprotectedMemoryChunksRegistry();
716  }
717 }
718 
719 CodePageMemoryModificationScope::CodePageMemoryModificationScope(
720  MemoryChunk* chunk)
721  : chunk_(chunk),
722  scope_active_(chunk_->heap()->write_protect_code_memory() &&
723  chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
724  if (scope_active_) {
725  DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
726  (chunk_->owner()->identity() == CODE_LO_SPACE));
727  chunk_->SetReadAndWritable();
728  }
729 }
730 
731 CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
732  if (scope_active_) {
733  chunk_->SetReadAndExecutable();
734  }
735 }
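// Note (illustrative use, not code from this file): the three code-memory
// scopes above are RAII helpers for V8's write-protected code pages.
// Construction makes the affected code memory writable (directly, or by
// registering chunks to be re-protected later), destruction restores
// read+execute, and all of them are no-ops when write_protect_code_memory()
// is false. Typical shape:
//
//   {
//     CodePageMemoryModificationScope modification_scope(chunk);
//     // ... patch a Code object living on `chunk` ...
//   }  // page protection restored here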
736 
737 } // namespace internal
738 } // namespace v8
739 
740 #endif // V8_HEAP_HEAP_INL_H_