// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// allocation.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/allocation.h"
6 
7 #include <stdlib.h> // For free, malloc.
8 #include "src/base/bits.h"
9 #include "src/base/lazy-instance.h"
10 #include "src/base/logging.h"
11 #include "src/base/lsan-page-allocator.h"
12 #include "src/base/page-allocator.h"
13 #include "src/base/platform/platform.h"
14 #include "src/utils.h"
15 #include "src/v8.h"
16 
17 #if V8_LIBC_BIONIC
18 #include <malloc.h> // NOLINT
19 #endif
20 
21 namespace v8 {
22 namespace internal {
23 
24 namespace {
25 
// Platform-specific aligned-allocation primitive used by AlignedAlloc below.
// Returns nullptr on failure. The result must be released with AlignedFree.
void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  // posix_memalign returns 0 on success and an error code on failure without
  // touching |ptr|; normalize failure to nullptr.
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}
39 
// TODO(bbudge) Simplify this once all embedders implement a page allocator.
// LazyInstance construction trait: resolves the process-wide page allocator
// exactly once, preferring the embedder's implementation and falling back to
// V8's default one. Under LeakSanitizer the chosen allocator is additionally
// wrapped so LSan can track page allocations.
struct InitializePageAllocator {
  static void Construct(void* page_allocator_ptr_arg) {
    auto page_allocator_ptr =
        reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
    v8::PageAllocator* page_allocator =
        V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator == nullptr) {
      // On the heap and leaked so that no destructor needs to run at exit time.
      static auto* default_allocator = new v8::base::PageAllocator;
      page_allocator = default_allocator;
    }
#if defined(LEAK_SANITIZER)
    {
      // On the heap and leaked so that no destructor needs to run at exit time.
      static auto* lsan_allocator =
          new v8::base::LsanPageAllocator(page_allocator);
      page_allocator = lsan_allocator;
    }
#endif
    *page_allocator_ptr = page_allocator;
  }
};
63 
// Process-wide page allocator, initialized lazily on first access via
// InitializePageAllocator above.
static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
    page_allocator = LAZY_INSTANCE_INITIALIZER;
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
69 
70 } // namespace
71 
72 v8::PageAllocator* GetPlatformPageAllocator() {
73  DCHECK_NOT_NULL(page_allocator.Get());
74  return page_allocator.Get();
75 }
76 
77 v8::PageAllocator* SetPlatformPageAllocatorForTesting(
78  v8::PageAllocator* new_page_allocator) {
79  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
80  *page_allocator.Pointer() = new_page_allocator;
81  return old_page_allocator;
82 }
83 
84 void* Malloced::New(size_t size) {
85  void* result = AllocWithRetry(size);
86  if (result == nullptr) {
87  V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
88  }
89  return result;
90 }
91 
92 void Malloced::Delete(void* p) {
93  free(p);
94 }
95 
96 char* StrDup(const char* str) {
97  int length = StrLength(str);
98  char* result = NewArray<char>(length + 1);
99  MemCopy(result, str, length);
100  result[length] = '\0';
101  return result;
102 }
103 
104 char* StrNDup(const char* str, int n) {
105  int length = StrLength(str);
106  if (n < length) length = n;
107  char* result = NewArray<char>(length + 1);
108  MemCopy(result, str, length);
109  result[length] = '\0';
110  return result;
111 }
112 
113 void* AllocWithRetry(size_t size) {
114  void* result = nullptr;
115  for (int i = 0; i < kAllocationTries; ++i) {
116  result = malloc(size);
117  if (result != nullptr) break;
118  if (!OnCriticalMemoryPressure(size)) break;
119  }
120  return result;
121 }
122 
123 void* AlignedAlloc(size_t size, size_t alignment) {
124  DCHECK_LE(V8_ALIGNOF(void*), alignment);
125  DCHECK(base::bits::IsPowerOfTwo(alignment));
126  void* result = nullptr;
127  for (int i = 0; i < kAllocationTries; ++i) {
128  result = AlignedAllocInternal(size, alignment);
129  if (result != nullptr) break;
130  if (!OnCriticalMemoryPressure(size + alignment)) break;
131  }
132  if (result == nullptr) {
133  V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
134  }
135  return result;
136 }
137 
// Releases memory previously obtained from AlignedAlloc.
void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#else
  // Covers both the posix_memalign and the Android memalign paths (the two
  // non-Windows branches were identical). Using free is not correct for
  // memalign in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#endif
}
148 
149 size_t AllocatePageSize() {
150  return GetPlatformPageAllocator()->AllocatePageSize();
151 }
152 
153 size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
154 
155 void SetRandomMmapSeed(int64_t seed) {
156  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
157 }
158 
159 void* GetRandomMmapAddr() {
160  return GetPlatformPageAllocator()->GetRandomMmapAddr();
161 }
162 
// Reserves |size| bytes of pages from |page_allocator| aligned to
// |alignment|, near the (possibly null) hint |address|, with the given
// |access| permissions. Retries under memory pressure; returns nullptr on
// failure.
void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
                    size_t size, size_t alignment,
                    PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(address, AlignedAddress(address, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(address, size, alignment, access);
    if (result != nullptr) break;
    // Ask the embedder to free the worst-case footprint of an aligned
    // reservation: |size| plus up to alignment - page_size of alignment slack.
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}
178 
179 bool FreePages(v8::PageAllocator* page_allocator, void* address,
180  const size_t size) {
181  DCHECK_NOT_NULL(page_allocator);
182  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
183  return page_allocator->FreePages(address, size);
184 }
185 
186 bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
187  size_t new_size) {
188  DCHECK_NOT_NULL(page_allocator);
189  DCHECK_LT(new_size, size);
190  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
191  return page_allocator->ReleasePages(address, size, new_size);
192 }
193 
194 bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
195  size_t size, PageAllocator::Permission access) {
196  DCHECK_NOT_NULL(page_allocator);
197  return page_allocator->SetPermissions(address, size, access);
198 }
199 
200 byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
201  size_t* allocated) {
202  DCHECK_NOT_NULL(page_allocator);
203  size_t page_size = page_allocator->AllocatePageSize();
204  void* result = AllocatePages(page_allocator, address, page_size, page_size,
205  PageAllocator::kReadWrite);
206  if (result != nullptr) *allocated = page_size;
207  return static_cast<byte*>(result);
208 }
209 
210 bool OnCriticalMemoryPressure(size_t length) {
211  // TODO(bbudge) Rework retry logic once embedders implement the more
212  // informative overload.
213  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
214  V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
215  }
216  return true;
217 }
218 
// Reserves (with no access permissions) |size| bytes of address space,
// rounded up to the allocator's page size, aligned to |alignment| and near
// the hint |hint|. On failure region_ stays empty, so callers must check
// IsReserved().
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  Address address = reinterpret_cast<Address>(
      AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
                    PageAllocator::kNoAccess));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    // region_ records the requested |size|, not the page-rounded reservation
    // size; Free() rounds back up before unmapping.
    region_ = base::AddressRegion(address, size);
  }
}
234 
235 VirtualMemory::~VirtualMemory() {
236  if (IsReserved()) {
237  Free();
238  }
239 }
240 
241 void VirtualMemory::Reset() {
242  page_allocator_ = nullptr;
243  region_ = base::AddressRegion();
244 }
245 
246 bool VirtualMemory::SetPermissions(Address address, size_t size,
247  PageAllocator::Permission access) {
248  CHECK(InVM(address, size));
249  bool result =
250  v8::internal::SetPermissions(page_allocator_, address, size, access);
251  DCHECK(result);
252  return result;
253 }
254 
// Returns the tail of the reservation, from commit-page-aligned |free_start|
// to the end, back to the allocator and shrinks region_ accordingly.
// Returns the number of bytes released.
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  // Shrink the bookkeeping region before touching the pages themselves.
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}
269 
// Unmaps the whole reservation. Members are copied to locals and Reset()
// *before* unmapping because this VirtualMemory object may itself live
// inside the region being freed.
void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects size to be aligned to allocation granularity however
  // ReleasePages may leave size at only commit granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
282 
// Transfers ownership of |from|'s reservation to this (currently empty)
// VirtualMemory, leaving |from| unreserved. The copies must happen before
// from->Reset() clears the source.
void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  page_allocator_ = from->page_allocator_;
  region_ = from->region_;
  from->Reset();
}
289 
290 } // namespace internal
291 } // namespace v8
// Cross-references scraped from the v8::PageAllocator interface
// (Definition: libplatform.h:13):
//   virtual bool FreePages(void* address, size_t length) = 0;
//   virtual void SetRandomMmapSeed(int64_t seed) = 0;
//   virtual void* GetRandomMmapAddr() = 0;
//   virtual bool ReleasePages(void* address, size_t length,
//                             size_t new_length) = 0;
//   virtual void* AllocatePages(void* address, size_t length, size_t alignment,
//                               Permission permissions) = 0;
//   virtual size_t AllocatePageSize() = 0;
//   virtual size_t CommitPageSize() = 0;
//   virtual bool SetPermissions(void* address, size_t length,
//                               Permission permissions) = 0;