V8 API Reference, version 7.2.502.16 (as bundled with Deno 0.2.4)
File: src/isolate-allocator.cc
1 // Copyright 2018 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/isolate-allocator.h"
6 #include "src/base/bounded-page-allocator.h"
7 #include "src/isolate.h"
8 #include "src/ptr-compr.h"
9 #include "src/utils.h"
10 
11 namespace v8 {
12 namespace internal {
13 
14 IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
15 #if V8_TARGET_ARCH_64_BIT
16  if (mode == IsolateAllocationMode::kInV8Heap) {
17  Address heap_base = InitReservation();
18  CommitPagesForIsolate(heap_base);
19  return;
20  }
21 #endif // V8_TARGET_ARCH_64_BIT
22 
23  // Allocate Isolate in C++ heap.
24  CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
25  page_allocator_ = GetPlatformPageAllocator();
26  isolate_memory_ = ::operator new(sizeof(Isolate));
27  DCHECK(!reservation_.IsReserved());
28 }
29 
30 IsolateAllocator::~IsolateAllocator() {
31  if (reservation_.IsReserved()) {
32  // The actual memory will be freed when the |reservation_| will die.
33  return;
34  }
35 
36  // The memory was allocated in C++ heap.
37  ::operator delete(isolate_memory_);
38 }
39 
40 #if V8_TARGET_ARCH_64_BIT
// Reserves a |kPtrComprHeapReservationSize| address-space region whose point
// at offset |kPtrComprIsolateRootBias| is |kPtrComprIsolateRootAlignment|
// aligned, as required by pointer compression. Returns that aligned address
// (the heap base). Dies with a fatal OOM if no such region can be reserved.
Address IsolateAllocator::InitReservation() {
  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  // Reserve a 4Gb region so that the middle is 4Gb aligned.
  // The VirtualMemory API does not support such an constraint so we have to
  // implement it manually here.
  size_t reservation_size = kPtrComprHeapReservationSize;
  size_t base_alignment = kPtrComprIsolateRootAlignment;

  const int kMaxAttempts = 3;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Randomized hint, pre-biased so that hint + bias is aligned.
    Address hint = RoundDown(reinterpret_cast<Address>(
                                 platform_page_allocator->GetRandomMmapAddr()),
                             base_alignment) +
                   kPtrComprIsolateRootBias;

    // Reserve twice the needed size: within this reservation there will be a
    // sub-region with proper alignment.
    VirtualMemory padded_reservation(platform_page_allocator,
                                     reservation_size * 2,
                                     reinterpret_cast<void*>(hint));
    if (!padded_reservation.IsReserved()) break;

    // Find such a sub-region inside the reservation that it's middle is
    // |base_alignment|-aligned.
    Address address =
        RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias,
                base_alignment) -
        kPtrComprIsolateRootBias;
    CHECK(padded_reservation.InVM(address, reservation_size));

    // Now free the padded reservation and immediately try to reserve an exact
    // region at aligned address. We have to do this dancing because the
    // reservation address requirement is more complex than just a certain
    // alignment and not all operating systems support freeing parts of reserved
    // address space regions.
    padded_reservation.Free();

    VirtualMemory reservation(platform_page_allocator, reservation_size,
                              reinterpret_cast<void*>(address));
    if (!reservation.IsReserved()) break;

    // The reservation could still be somewhere else but we can accept it
    // if the reservation has the required alignment.
    // NOTE(review): between Free() and the re-reservation another thread may
    // grab the region, hence the retry loop — presumably why kMaxAttempts > 1.
    Address aligned_address =
        RoundUp(reservation.address() + kPtrComprIsolateRootBias,
                base_alignment) -
        kPtrComprIsolateRootBias;

    if (reservation.address() == aligned_address) {
      // Success: keep this reservation; it is released in the destructor
      // when |reservation_| dies.
      reservation_ = std::move(reservation);
      break;
    }
  }
  if (!reservation_.IsReserved()) {
    V8::FatalProcessOutOfMemory(nullptr,
                                "Failed to reserve memory for new V8 Isolate");
  }

  CHECK_EQ(reservation_.size(), reservation_size);

  Address heap_base = reservation_.address() + kPtrComprIsolateRootBias;
  CHECK(IsAligned(heap_base, base_alignment));

  return heap_base;
}
106 
107 void IsolateAllocator::CommitPagesForIsolate(Address heap_base) {
108  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
109 
110  // Simplify BoundedPageAllocator's life by configuring it to use same page
111  // size as the Heap will use (MemoryChunk::kPageSize).
112  size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
113  platform_page_allocator->AllocatePageSize());
114 
115  page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
116  platform_page_allocator, reservation_.address(), reservation_.size(),
117  page_size);
118  page_allocator_ = page_allocator_instance_.get();
119 
120  Address isolate_address = heap_base - Isolate::isolate_root_bias();
121  Address isolate_end = isolate_address + sizeof(Isolate);
122 
123  // Inform the bounded page allocator about reserved pages.
124  {
125  Address reserved_region_address = RoundDown(isolate_address, page_size);
126  size_t reserved_region_size =
127  RoundUp(isolate_end, page_size) - reserved_region_address;
128 
129  CHECK(page_allocator_instance_->AllocatePagesAt(
130  reserved_region_address, reserved_region_size,
131  PageAllocator::Permission::kNoAccess));
132  }
133 
134  // Commit pages where the Isolate will be stored.
135  {
136  size_t commit_page_size = platform_page_allocator->CommitPageSize();
137  Address committed_region_address =
138  RoundDown(isolate_address, commit_page_size);
139  size_t committed_region_size =
140  RoundUp(isolate_end, commit_page_size) - committed_region_address;
141 
142  // We are using |reservation_| directly here because |page_allocator_| has
143  // bigger commit page size than we actually need.
144  CHECK(reservation_.SetPermissions(committed_region_address,
145  committed_region_size,
146  PageAllocator::kReadWrite));
147 
148  if (Heap::ShouldZapGarbage()) {
149  for (Address address = committed_region_address;
150  address < committed_region_size; address += kPointerSize) {
151  Memory<Address>(address) = static_cast<Address>(kZapValue);
152  }
153  }
154  }
155  isolate_memory_ = reinterpret_cast<void*>(isolate_address);
156 }
157 #endif // V8_TARGET_ARCH_64_BIT
158 
159 } // namespace internal
160 } // namespace v8
Related v8::PageAllocator interface methods referenced above (pure virtuals,
declared in libplatform.h, line 13):
  virtual void* GetRandomMmapAddr() = 0;
  virtual size_t AllocatePageSize() = 0;