V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
wasm-memory.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>

#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"

namespace v8 {
namespace internal {
namespace wasm {

namespace {

constexpr size_t kNegativeGuardSize = 1u << 31;  // 2GiB

void AddAllocationStatusSample(Isolate* isolate,
                               WasmMemoryTracker::AllocationStatus status) {
  isolate->counters()->wasm_memory_allocation_result()->AddSample(
      static_cast<int>(status));
}

void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                              size_t size, void** allocation_base,
                              size_t* allocation_length) {
  using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_64_BIT
  bool require_full_guard_regions = true;
#else
  bool require_full_guard_regions = false;
#endif
  // Let the WasmMemoryTracker know we are going to reserve a bunch of
  // address space.
  // Try up to three times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC may be incremental and may have
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  bool did_retry = false;
  for (int trial = 0;; ++trial) {
    // For guard regions, we always allocate the largest possible offset into
    // the heap, so the addressable memory after the guard page can be made
    // inaccessible.
    //
    // To protect against 32-bit integer overflow issues, we also protect the
    // 2GiB before the valid part of the memory buffer.
    // TODO(7881): do not use static_cast<uint32_t>() here
    *allocation_length =
        require_full_guard_regions
            ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
            : RoundUp(base::bits::RoundUpToPowerOfTwo32(
                          static_cast<uint32_t>(size)),
                      kWasmPageSize);
    DCHECK_GE(*allocation_length, size);
    DCHECK_GE(*allocation_length, kWasmPageSize);

    auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
                                            : WasmMemoryTracker::kHardLimit;
    if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;

    did_retry = true;
    // After first and second GC: retry.
    if (trial == kAllocationRetries) {
      // If we fail to allocate guard regions and the fallback is enabled,
      // then retry without full guard regions.
      if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
        require_full_guard_regions = false;
        --trial;  // one more try.
        continue;
      }

      // We are over the address space limit. Fail.
      //
      // When running under the correctness fuzzer (i.e.
      // --abort-on-stack-or-string-length-overflow is preset), we crash
      // instead so it is not incorrectly reported as a correctness
      // violation. See https://crbug.com/828293#c4
      if (FLAG_abort_on_stack_or_string_length_overflow) {
        FATAL("could not allocate wasm memory");
      }
      AddAllocationStatusSample(
          heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
      return nullptr;
    }
    // Collect garbage and retry.
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
  }

  // The reservation makes the whole region inaccessible by default.
  DCHECK_NULL(*allocation_base);
  for (int trial = 0;; ++trial) {
    *allocation_base =
        AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
                      kWasmPageSize, PageAllocator::kNoAccess);
    if (*allocation_base != nullptr) break;
    if (trial == kAllocationRetries) {
      memory_tracker->ReleaseReservation(*allocation_length);
      AddAllocationStatusSample(heap->isolate(),
                                AllocationStatus::kOtherFailure);
      return nullptr;
    }
    heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
  }
  byte* memory = reinterpret_cast<byte*>(*allocation_base);
  if (require_full_guard_regions) {
    memory += kNegativeGuardSize;
  }

  // Make the part we care about accessible.
  if (size > 0) {
    bool result =
        SetPermissions(GetPlatformPageAllocator(), memory,
                       RoundUp(size, kWasmPageSize), PageAllocator::kReadWrite);
    // SetPermissions commits the extra memory, which may put us over the
    // process memory limit. If so, report this as an OOM.
    if (!result) {
      V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
    }
  }

  memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
                                     *allocation_length, memory, size);
  AddAllocationStatusSample(heap->isolate(),
                            did_retry ? AllocationStatus::kSuccessAfterRetry
                                      : AllocationStatus::kSuccess);
  return memory;
}
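
// A rough sketch of the layout TryAllocateBackingStore produces when
// require_full_guard_regions is true (widths not to scale). The returned
// `memory` pointer sits kNegativeGuardSize (2GiB) past allocation_base, so a
// 32-bit index that overflows into a "negative" displacement still lands on
// protected pages and traps:
//
//   allocation_base            memory           memory + size
//   |<-- 2GiB negative guard -->|<-- size bytes, kReadWrite -->|<-- guard -->|
//   |<--------------- *allocation_length, all kNoAccess at first ---------->|
//
// Only [memory, memory + size) is committed by the SetPermissions call above;
// every other page in the reservation keeps PageAllocator::kNoAccess.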

#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the address
// space limits need to be smaller.
constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L;  // 132 GiB
constexpr size_t kAddressSpaceHardLimit = 0x4000000000L;  // 256 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L;   // 384 GiB
constexpr size_t kAddressSpaceHardLimit = 0x10100000000L;  // 1 TiB + 4 GiB
#else
constexpr size_t kAddressSpaceSoftLimit = 0x90000000;  // 2 GiB + 256 MiB
constexpr size_t kAddressSpaceHardLimit = 0xC0000000;  // 3 GiB
#endif
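
// A note on how the two limits are used above: full-guard-region reservations
// (several GiB of address space apiece) are checked against the soft limit,
// while plain reservations go up to the hard limit. This leaves headroom so
// that, once guarded reservations start failing, --wasm-trap-handler-fallback
// can still satisfy smaller, unguarded reservations under the hard limit.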

}  // namespace

WasmMemoryTracker::~WasmMemoryTracker() {
  // All reserved address space should be released before the allocation
  // tracker is destroyed.
  DCHECK_EQ(reserved_address_space_, 0u);
  DCHECK_EQ(allocated_address_space_, 0u);
}

bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
                                            ReservationLimit limit) {
  size_t reservation_limit =
      limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
  while (true) {
    size_t old_count = reserved_address_space_.load();
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
    if (reserved_address_space_.compare_exchange_weak(old_count,
                                                      old_count + num_bytes)) {
      return true;
    }
  }
}
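
// A minimal, self-contained sketch of the compare-exchange pattern above.
// `SketchTryReserve` and `sketch_reserved` are hypothetical names used for
// illustration only; they are not part of V8. Note the limit check is phrased
// as a subtraction so that `old_count + num_bytes` is never computed when it
// could overflow size_t:
//
//   #include <atomic>
//   #include <cstddef>
//
//   std::atomic<size_t> sketch_reserved{0};
//
//   bool SketchTryReserve(size_t num_bytes, size_t limit) {
//     while (true) {
//       size_t old_count = sketch_reserved.load();
//       if (old_count > limit) return false;              // already over
//       if (limit - old_count < num_bytes) return false;  // would go over
//       // compare_exchange_weak fails if another thread updated the counter
//       // first (or spuriously); in that case, reload and retry.
//       if (sketch_reserved.compare_exchange_weak(old_count,
//                                                 old_count + num_bytes)) {
//         return true;
//       }
//     }
//   }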

void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
  size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
  USE(old_reserved);
  DCHECK_LE(num_bytes, old_reserved);
}

void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
                                           void* allocation_base,
                                           size_t allocation_length,
                                           void* buffer_start,
                                           size_t buffer_length) {
  base::MutexGuard scope_lock(&mutex_);

  allocated_address_space_ += allocation_length;
  AddAddressSpaceSample(isolate);

  allocations_.emplace(buffer_start,
                       AllocationData{allocation_base, allocation_length,
                                      buffer_start, buffer_length});
}
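
// Note that allocations_ is keyed on buffer_start (the first accessible
// byte), not on allocation_base: with full guard regions the two differ by
// kNegativeGuardSize, and lookups such as IsWasmMemory() below receive the
// JSArrayBuffer's backing-store pointer, which is buffer_start.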

WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
    Isolate* isolate, const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);

  auto find_result = allocations_.find(buffer_start);
  CHECK_NE(find_result, allocations_.end());

  if (find_result != allocations_.end()) {
    size_t num_bytes = find_result->second.allocation_length;
    DCHECK_LE(num_bytes, reserved_address_space_);
    DCHECK_LE(num_bytes, allocated_address_space_);
    reserved_address_space_ -= num_bytes;
    allocated_address_space_ -= num_bytes;
    // ReleaseAllocation might be called with a nullptr as isolate if the
    // embedder is releasing the allocation and not a specific isolate. This
    // happens if the allocation was shared between multiple isolates
    // (threads).
    if (isolate) AddAddressSpaceSample(isolate);

    AllocationData allocation_data = find_result->second;
    allocations_.erase(find_result);
    return allocation_data;
  }
  UNREACHABLE();
}
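
// Releasing an allocation unwinds both counters at once: the address space
// reserved in ReserveAddressSpace and the allocated_address_space_ recorded
// in RegisterAllocation each shrink by allocation_length, so a reservation
// is never released separately once its backing store has been registered.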

const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
    const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  const auto& result = allocations_.find(buffer_start);
  if (result != allocations_.end()) {
    return &result->second;
  }
  return nullptr;
}

bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  return allocations_.find(buffer_start) != allocations_.end();
}

bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
  base::MutexGuard scope_lock(&mutex_);
  const auto allocation = allocations_.find(buffer_start);

  if (allocation == allocations_.end()) {
    return false;
  }

  Address start = reinterpret_cast<Address>(buffer_start);
  Address limit =
      reinterpret_cast<Address>(allocation->second.allocation_base) +
      allocation->second.allocation_length;
  return start + kWasmMaxHeapOffset < limit;
}
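
// The comparison above distinguishes the two allocation shapes produced by
// TryAllocateBackingStore: a guarded reservation extends roughly
// kWasmMaxHeapOffset bytes past buffer_start, while an unguarded one only
// covers the memory itself rounded up to a wasm page, far short of that.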

bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
                                                 const void* buffer_start) {
  if (IsWasmMemory(buffer_start)) {
    const AllocationData allocation = ReleaseAllocation(isolate, buffer_start);
    CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
                    allocation.allocation_length));
    return true;
  }
  return false;
}

void WasmMemoryTracker::AddAddressSpaceSample(Isolate* isolate) {
  // Report address space usage in MiB so the full range fits in an int on all
  // platforms.
  isolate->counters()->wasm_address_space_usage_mb()->AddSample(
      static_cast<int>(allocated_address_space_ >> 20));
}
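
// The >> 20 divides by 2^20, i.e. converts bytes to MiB. Even the 64-bit
// hard limit of 0x10100000000 bytes (1 TiB + 4 GiB) becomes 1052672 MiB,
// comfortably inside a 32-bit int, whereas the raw byte count would not fit.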

Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
                                       size_t size, bool is_external,
                                       SharedFlag shared) {
  Handle<JSArrayBuffer> buffer =
      isolate->factory()->NewJSArrayBuffer(shared, TENURED);
  constexpr bool is_wasm_memory = true;
  JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
                       shared, is_wasm_memory);
  buffer->set_is_neuterable(false);
  buffer->set_is_growable(true);
  return buffer;
}

MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                          SharedFlag shared) {
  // Enforce flag-limited maximum allocation size.
  if (size > max_mem_bytes()) return {};

  WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();

  // Set by TryAllocateBackingStore on success.
  void* allocation_base = nullptr;
  size_t allocation_length = 0;

  void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
                                         &allocation_base, &allocation_length);
  if (memory == nullptr) return {};

#if DEBUG
  // Double check the API allocator actually zero-initialized the memory.
  const byte* bytes = reinterpret_cast<const byte*>(memory);
  for (size_t i = 0; i < size; ++i) {
    DCHECK_EQ(0, bytes[i]);
  }
#endif

  reinterpret_cast<v8::Isolate*>(isolate)
      ->AdjustAmountOfExternalAllocatedMemory(size);

  constexpr bool is_external = false;
  return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}
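
// A hypothetical call-site sketch (not from this file), assuming an
// Isolate* `isolate` is already in scope; it allocates one wasm page and
// handles the failure path that NewArrayBuffer reports via MaybeHandle:
//
//   MaybeHandle<JSArrayBuffer> maybe_buffer =
//       NewArrayBuffer(isolate, kWasmPageSize, SharedFlag::kNotShared);
//   Handle<JSArrayBuffer> buffer;
//   if (!maybe_buffer.ToHandle(&buffer)) {
//     // Over max_mem_bytes(), over the address-space limit, or OOM.
//   }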

void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
                        bool free_memory) {
  if (buffer->is_shared()) return;  // Detaching shared buffers is impossible.
  DCHECK(!buffer->is_neuterable());

  const bool is_external = buffer->is_external();
  if (!is_external) {
    buffer->set_is_external(true);
    isolate->heap()->UnregisterArrayBuffer(*buffer);
    if (free_memory) {
      // We need to free the memory before neutering the buffer because
      // FreeBackingStore reads buffer->allocation_base(), which is nulled out
      // by Neuter. This means there is a dangling pointer until we neuter the
      // buffer. Since there is no way for the user to directly call
      // FreeBackingStore, we can ensure this is safe.
      buffer->FreeBackingStoreFromMainThread();
    }
  }

  DCHECK(buffer->is_external());
  buffer->set_is_wasm_memory(false);
  buffer->set_is_neuterable(true);
  buffer->Neuter();
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8