V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
wasm-code-manager.cc
1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/wasm/wasm-code-manager.h"
6 
7 #include <iomanip>
8 
9 #include "src/assembler-inl.h"
10 #include "src/base/adapters.h"
11 #include "src/base/macros.h"
12 #include "src/base/platform/platform.h"
13 #include "src/codegen.h"
14 #include "src/disassembler.h"
15 #include "src/globals.h"
16 #include "src/macro-assembler-inl.h"
17 #include "src/macro-assembler.h"
18 #include "src/objects-inl.h"
19 #include "src/wasm/compilation-environment.h"
20 #include "src/wasm/function-compiler.h"
21 #include "src/wasm/jump-table-assembler.h"
22 #include "src/wasm/wasm-import-wrapper-cache-inl.h"
23 #include "src/wasm/wasm-module.h"
24 #include "src/wasm/wasm-objects-inl.h"
25 #include "src/wasm/wasm-objects.h"
26 
27 #define TRACE_HEAP(...) \
28  do { \
29  if (FLAG_trace_wasm_native_heap) PrintF(__VA_ARGS__); \
30  } while (false)
31 
32 namespace v8 {
33 namespace internal {
34 namespace wasm {
35 
36 namespace {
37 
38 // Binary predicate to perform lookups in {NativeModule::owned_code_} with a
39 // given address into a code object. Use with {std::upper_bound} for example.
40 struct WasmCodeUniquePtrComparator {
41  bool operator()(Address pc, const std::unique_ptr<WasmCode>& code) const {
42  DCHECK_NE(kNullAddress, pc);
43  DCHECK_NOT_NULL(code);
44  return pc < code->instruction_start();
45  }
46 };
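// Example use: NativeModule::Lookup(pc) runs std::upper_bound over the
// address-sorted {owned_code_} with this comparator, steps back one element,
// and then checks {candidate->contains(pc)} to confirm the hit.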
47 
48 } // namespace
49 
50 void DisjointAllocationPool::Merge(base::AddressRegion region) {
51  auto dest_it = regions_.begin();
52  auto dest_end = regions_.end();
53 
54  // Skip over dest regions strictly before {region}.
55  while (dest_it != dest_end && dest_it->end() < region.begin()) ++dest_it;
56 
57  // After last dest region: insert and done.
58  if (dest_it == dest_end) {
59  regions_.push_back(region);
60  return;
61  }
62 
63  // Adjacent (from below) to dest: merge and done.
64  if (dest_it->begin() == region.end()) {
65  base::AddressRegion merged_region{region.begin(),
66  region.size() + dest_it->size()};
67  DCHECK_EQ(merged_region.end(), dest_it->end());
68  *dest_it = merged_region;
69  return;
70  }
71 
72  // Before dest: insert and done.
73  if (dest_it->begin() > region.end()) {
74  regions_.insert(dest_it, region);
75  return;
76  }
77 
78  // Src is adjacent from above. Merge and check whether the merged region is
79  // now adjacent to the next region.
80  DCHECK_EQ(dest_it->end(), region.begin());
81  dest_it->set_size(dest_it->size() + region.size());
82  DCHECK_EQ(dest_it->end(), region.end());
83  auto next_dest = dest_it;
84  ++next_dest;
85  if (next_dest != dest_end && dest_it->end() == next_dest->begin()) {
86  dest_it->set_size(dest_it->size() + next_dest->size());
87  DCHECK_EQ(dest_it->end(), next_dest->end());
88  regions_.erase(next_dest);
89  }
90 }
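// Worked example: with regions_ == {[0x1000,0x2000), [0x3000,0x4000)},
// Merge([0x2000,0x3000)) hits the "adjacent from above" case, growing the
// lower region to [0x1000,0x3000), and then folds in the next region,
// leaving the single region [0x1000,0x4000).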
91 
92 base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
93  for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
94  if (size > it->size()) continue;
95  base::AddressRegion ret{it->begin(), size};
96  if (size == it->size()) {
97  regions_.erase(it);
98  } else {
99  *it = base::AddressRegion{it->begin() + size, it->size() - size};
100  }
101  return ret;
102  }
103  return {};
104 }
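// Worked example: with regions_ == {[0x1000,0x2000)}, Allocate(0x100) returns
// [0x1000,0x1100) and shrinks the pool to {[0x1100,0x2000)}. If no region is
// large enough for the request, the empty region {} is returned.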
105 
106 Address WasmCode::constant_pool() const {
107  if (FLAG_enable_embedded_constant_pool) {
108  if (constant_pool_offset_ < instructions().size()) {
109  return instruction_start() + constant_pool_offset_;
110  }
111  }
112  return kNullAddress;
113 }
114 
115 size_t WasmCode::trap_handler_index() const {
116  CHECK(HasTrapHandlerIndex());
117  return static_cast<size_t>(trap_handler_index_);
118 }
119 
120 void WasmCode::set_trap_handler_index(size_t value) {
121  trap_handler_index_ = value;
122 }
123 
124 void WasmCode::RegisterTrapHandlerData() {
125  DCHECK(!HasTrapHandlerIndex());
126  if (kind() != WasmCode::kFunction) return;
127 
128  Address base = instruction_start();
129 
130  size_t size = instructions().size();
131  const int index =
132  RegisterHandlerData(base, size, protected_instructions().size(),
133  protected_instructions().start());
134 
135  // TODO(eholk): if index is negative, fail.
136  CHECK_LE(0, index);
137  set_trap_handler_index(static_cast<size_t>(index));
138 }
139 
140 bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
141 
142 bool WasmCode::ShouldBeLogged(Isolate* isolate) {
143  return isolate->logger()->is_listening_to_code_events() ||
144  isolate->is_profiling();
145 }
146 
147 void WasmCode::LogCode(Isolate* isolate) const {
148  DCHECK(ShouldBeLogged(isolate));
149  if (IsAnonymous()) return;
150 
151  ModuleWireBytes wire_bytes(native_module()->wire_bytes());
152  // TODO(herhut): Allow logging code without an on-heap round-trip of the name.
153  WireBytesRef name_ref =
154  native_module()->module()->LookupFunctionName(wire_bytes, index());
155  WasmName name_vec = wire_bytes.GetNameOrNull(name_ref);
156  if (!name_vec.is_empty()) {
157  HandleScope scope(isolate);
158  MaybeHandle<String> maybe_name = isolate->factory()->NewStringFromUtf8(
159  Vector<const char>::cast(name_vec));
160  Handle<String> name;
161  if (!maybe_name.ToHandle(&name)) {
162  name = isolate->factory()->NewStringFromAsciiChecked("<name too long>");
163  }
164  int name_length;
165  auto cname =
166  name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
167  RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
168  PROFILE(isolate,
169  CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
170  {cname.get(), static_cast<size_t>(name_length)}));
171  } else {
172  EmbeddedVector<char, 32> generated_name;
173  int length = SNPrintF(generated_name, "wasm-function[%d]", index());
174  generated_name.Truncate(length);
175  PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
176  generated_name));
177  }
178 
179  if (!source_positions().is_empty()) {
180  LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
181  source_positions()));
182  }
183 }
184 
185 const char* WasmCode::GetRuntimeStubName() const {
186  DCHECK_EQ(WasmCode::kRuntimeStub, kind());
187 #define RETURN_NAME(Name) \
188  if (native_module_->runtime_stub_table_[WasmCode::k##Name] == this) { \
189  return #Name; \
190  }
191 #define RETURN_NAME_TRAP(Name) RETURN_NAME(ThrowWasm##Name)
192  WASM_RUNTIME_STUB_LIST(RETURN_NAME, RETURN_NAME_TRAP)
193 #undef RETURN_NAME_TRAP
194 #undef RETURN_NAME
195  return "<unknown>";
196 }
197 
198 void WasmCode::Validate() const {
199 #ifdef DEBUG
200  // We expect certain relocation info modes to never appear in {WasmCode}
201  // objects or to be restricted to a small set of valid values. Hence the
202  // iteration below does not use a mask, but visits all relocation data.
203  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
204  !it.done(); it.next()) {
205  RelocInfo::Mode mode = it.rinfo()->rmode();
206  switch (mode) {
207  case RelocInfo::WASM_CALL: {
208  Address target = it.rinfo()->wasm_call_address();
209  WasmCode* code = native_module_->Lookup(target);
210  CHECK_NOT_NULL(code);
211  CHECK_EQ(WasmCode::kJumpTable, code->kind());
212  CHECK(code->contains(target));
213  break;
214  }
215  case RelocInfo::WASM_STUB_CALL: {
216  Address target = it.rinfo()->wasm_stub_call_address();
217  WasmCode* code = native_module_->Lookup(target);
218  CHECK_NOT_NULL(code);
219  CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
220  CHECK_EQ(target, code->instruction_start());
221  break;
222  }
223  case RelocInfo::INTERNAL_REFERENCE:
224  case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
225  Address target = it.rinfo()->target_internal_reference();
226  CHECK(contains(target));
227  break;
228  }
229  case RelocInfo::EXTERNAL_REFERENCE:
230  case RelocInfo::COMMENT:
231  case RelocInfo::CONST_POOL:
232  case RelocInfo::VENEER_POOL:
233  // These are OK to appear.
234  break;
235  default:
236  FATAL("Unexpected mode: %d", mode);
237  }
238  }
239 #endif
240 }
241 
242 void WasmCode::Print(const char* name) const {
243  StdoutStream os;
244  os << "--- WebAssembly code ---\n";
245  Disassemble(name, os);
246  os << "--- End code ---\n";
247 }
248 
249 void WasmCode::Disassemble(const char* name, std::ostream& os,
250  Address current_pc) const {
251  if (name) os << "name: " << name << "\n";
252  if (!IsAnonymous()) os << "index: " << index() << "\n";
253  os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
254  os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
255  size_t body_size = instructions().size();
256  os << "Body (size = " << body_size << ")\n";
257 
258 #ifdef ENABLE_DISASSEMBLER
259  size_t instruction_size = body_size;
260  if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
261  instruction_size = constant_pool_offset_;
262  }
263  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
264  instruction_size = safepoint_table_offset_;
265  }
266  if (handler_table_offset_ && handler_table_offset_ < instruction_size) {
267  instruction_size = handler_table_offset_;
268  }
269  DCHECK_LT(0, instruction_size);
270  os << "Instructions (size = " << instruction_size << ")\n";
271  Disassembler::Decode(nullptr, &os, instructions().start(),
272  instructions().start() + instruction_size,
273  CodeReference(this), current_pc);
274  os << "\n";
275 
276  if (handler_table_offset_ > 0) {
277  HandlerTable table(instruction_start(), handler_table_offset_);
278  os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
279  << "):\n";
280  table.HandlerTableReturnPrint(os);
281  os << "\n";
282  }
283 
284  if (!protected_instructions_.is_empty()) {
285  os << "Protected instructions:\n pc offset land pad\n";
286  for (auto& data : protected_instructions()) {
287  os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
288  << std::hex << data.landing_offset << "\n";
289  }
290  os << "\n";
291  }
292 
293  if (!source_positions().is_empty()) {
294  os << "Source positions:\n pc offset position\n";
295  for (SourcePositionTableIterator it(source_positions()); !it.done();
296  it.Advance()) {
297  os << std::setw(10) << std::hex << it.code_offset() << std::dec
298  << std::setw(10) << it.source_position().ScriptOffset()
299  << (it.is_statement() ? " statement" : "") << "\n";
300  }
301  os << "\n";
302  }
303 
304  os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
305  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
306  !it.done(); it.next()) {
307  it.rinfo()->Print(nullptr, os);
308  }
309  os << "\n";
310 #endif // ENABLE_DISASSEMBLER
311 }
312 
313 const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
314  switch (kind) {
315  case WasmCode::kFunction:
316  return "wasm function";
317  case WasmCode::kWasmToJsWrapper:
318  return "wasm-to-js";
319  case WasmCode::kLazyStub:
320  return "lazy-compile";
321  case WasmCode::kRuntimeStub:
322  return "runtime-stub";
323  case WasmCode::kInterpreterEntry:
324  return "interpreter entry";
325  case WasmCode::kJumpTable:
326  return "jump table";
327  }
328  return "unknown kind";
329 }
330 
331 WasmCode::~WasmCode() {
332  if (HasTrapHandlerIndex()) {
333  CHECK_LT(trap_handler_index(),
334  static_cast<size_t>(std::numeric_limits<int>::max()));
335  trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
336  }
337 }
338 
339 NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
340  bool can_request_more, VirtualMemory code_space,
341  WasmCodeManager* code_manager,
342  std::shared_ptr<const WasmModule> module)
343  : enabled_features_(enabled),
344  module_(std::move(module)),
345  compilation_state_(CompilationState::New(isolate, this)),
346  import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
347  new WasmImportWrapperCache(this))),
348  free_code_space_(code_space.region()),
349  code_manager_(code_manager),
350  can_request_more_memory_(can_request_more),
351  use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
352  : kNoTrapHandler) {
353  DCHECK_NOT_NULL(module_);
354  owned_code_space_.emplace_back(std::move(code_space));
355  owned_code_.reserve(num_functions());
356 
357  uint32_t num_wasm_functions = module_->num_declared_functions;
358  if (num_wasm_functions > 0) {
359  code_table_.reset(new WasmCode*[num_wasm_functions]);
360  memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
361 
362  jump_table_ = CreateEmptyJumpTable(num_wasm_functions);
363  }
364 }
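// Note: {code_table_} has one slot per declared (non-imported) function;
// InstallCode below stores code at {code->index() - num_imported_functions},
// and the jump table created here has one slot per declared function as well.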
365 
366 void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
367  DCHECK_LE(num_functions(), max_functions);
368  WasmCode** new_table = new WasmCode*[max_functions];
369  memset(new_table, 0, max_functions * sizeof(*new_table));
370  memcpy(new_table, code_table_.get(),
371  module_->num_declared_functions * sizeof(*new_table));
372  code_table_.reset(new_table);
373 
374  // Re-allocate jump table.
375  jump_table_ = CreateEmptyJumpTable(max_functions);
376 }
377 
378 void NativeModule::LogWasmCodes(Isolate* isolate) {
379  if (!WasmCode::ShouldBeLogged(isolate)) return;
380 
381  // TODO(titzer): we skip the logging of the import wrappers
382  // here, but they should be included somehow.
383  for (WasmCode* code : code_table()) {
384  if (code != nullptr) code->LogCode(isolate);
385  }
386 }
387 
388 CompilationEnv NativeModule::CreateCompilationEnv() const {
389  return {module(), use_trap_handler_, kRuntimeExceptionSupport};
390 }
391 
392 WasmCode* NativeModule::AddOwnedCode(
393  uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
394  size_t safepoint_table_offset, size_t handler_table_offset,
395  size_t constant_pool_offset,
396  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
397  OwnedVector<const byte> reloc_info,
398  OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
399  WasmCode::Tier tier) {
400  WasmCode* code;
401  {
402  // Both allocation and insertion in owned_code_ happen in the same critical
403  // section, thus ensuring owned_code_'s elements are rarely if ever moved.
404  base::MutexGuard lock(&allocation_mutex_);
405  Vector<byte> executable_buffer = AllocateForCode(instructions.size());
406  // Ownership will be transferred to {owned_code_} below.
407  code = new WasmCode(this, index, executable_buffer, stack_slots,
408  safepoint_table_offset, handler_table_offset,
409  constant_pool_offset, std::move(protected_instructions),
410  std::move(reloc_info), std::move(source_position_table),
411  kind, tier);
412 
413  if (owned_code_.empty() ||
414  code->instruction_start() > owned_code_.back()->instruction_start()) {
415  // Common case.
416  owned_code_.emplace_back(code);
417  } else {
418  // Slow but unlikely case.
419  // TODO(mtrofin): We allocate in increasing address order, and
420  // even if we end up with segmented memory, we may end up only with a few
421  // large moves - if, for example, a new segment is below the current ones.
422  auto insert_before = std::upper_bound(
423  owned_code_.begin(), owned_code_.end(), code->instruction_start(),
424  WasmCodeUniquePtrComparator{});
425  owned_code_.emplace(insert_before, code);
426  }
427  }
428  memcpy(reinterpret_cast<void*>(code->instruction_start()),
429  instructions.start(), instructions.size());
430 
431  return code;
432 }
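// Note: only the code-space allocation and the insertion into {owned_code_}
// happen under {allocation_mutex_}; the instruction bytes are copied after the
// lock is released, and the callers (AddCode, AddAnonymousCode) flush the
// i-cache once relocation has been applied.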
433 
434 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
435  WasmCode* ret = AddAnonymousCode(code, WasmCode::kFunction);
436  return ret;
437 }
438 
439 void NativeModule::SetLazyBuiltin(Handle<Code> code) {
440  uint32_t num_wasm_functions = module_->num_declared_functions;
441  if (num_wasm_functions == 0) return;
442  WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
443  // Fill the jump table with jumps to the lazy compile stub.
444  Address lazy_compile_target = lazy_builtin->instruction_start();
445  for (uint32_t i = 0; i < num_wasm_functions; ++i) {
446  JumpTableAssembler::EmitLazyCompileJumpSlot(
447  jump_table_->instruction_start(), i,
448  i + module_->num_imported_functions, lazy_compile_target,
449  WasmCode::kNoFlushICache);
450  }
451  Assembler::FlushICache(jump_table_->instructions().start(),
452  jump_table_->instructions().size());
453 }
454 
455 void NativeModule::SetRuntimeStubs(Isolate* isolate) {
456  HandleScope scope(isolate);
457  DCHECK_NULL(runtime_stub_table_[0]); // Only called once.
458 #define COPY_BUILTIN(Name) \
459  runtime_stub_table_[WasmCode::k##Name] = \
460  AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
461  WasmCode::kRuntimeStub, #Name);
462 #define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
463  WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP)
464 #undef COPY_BUILTIN_TRAP
465 #undef COPY_BUILTIN
466 }
467 
468 WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code, WasmCode::Kind kind,
469  const char* name) {
470  // For off-heap builtins, we create a copy of the off-heap instruction stream
471  // instead of the on-heap code object containing the trampoline. Ensure that
472  // we do not apply the on-heap reloc info to the off-heap instructions.
473  const size_t relocation_size =
474  code->is_off_heap_trampoline() ? 0 : code->relocation_size();
475  OwnedVector<byte> reloc_info = OwnedVector<byte>::New(relocation_size);
476  memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
477  Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
478  code->GetIsolate());
479  OwnedVector<byte> source_pos =
480  OwnedVector<byte>::New(source_pos_table->length());
481  source_pos_table->copy_out(0, source_pos.start(), source_pos_table->length());
482  Vector<const byte> instructions(
483  reinterpret_cast<byte*>(code->InstructionStart()),
484  static_cast<size_t>(code->InstructionSize()));
485  int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
486  int safepoint_table_offset =
487  code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
488  WasmCode* ret =
489  AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
490  instructions, // instructions
491  stack_slots, // stack_slots
492  safepoint_table_offset, // safepoint_table_offset
493  code->handler_table_offset(), // handler_table_offset
494  code->constant_pool_offset(), // constant_pool_offset
495  {}, // protected_instructions
496  std::move(reloc_info), // reloc_info
497  std::move(source_pos), // source positions
498  kind, // kind
499  WasmCode::kOther); // tier
500 
501  // Apply the relocation delta by iterating over the RelocInfo.
502  intptr_t delta = ret->instruction_start() - code->InstructionStart();
503  int mode_mask = RelocInfo::kApplyMask |
504  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
505  RelocIterator orig_it(*code, mode_mask);
506  for (RelocIterator it(ret->instructions(), ret->reloc_info(),
507  ret->constant_pool(), mode_mask);
508  !it.done(); it.next(), orig_it.next()) {
509  RelocInfo::Mode mode = it.rinfo()->rmode();
510  if (RelocInfo::IsWasmStubCall(mode)) {
511  uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
512  DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
513  WasmCode* code =
514  runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
515  it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
516  SKIP_ICACHE_FLUSH);
517  } else {
518  it.rinfo()->apply(delta);
519  }
520  }
521 
522  // Flush the i-cache here instead of in AddOwnedCode, to include the changes
523  // made while iterating over the RelocInfo above.
524  Assembler::FlushICache(ret->instructions().start(),
525  ret->instructions().size());
526  if (FLAG_print_code || FLAG_print_wasm_code) ret->Print(name);
527  ret->Validate();
528  return ret;
529 }
530 
531 WasmCode* NativeModule::AddCode(
532  uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
533  size_t safepoint_table_offset, size_t handler_table_offset,
534  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
535  OwnedVector<const byte> source_pos_table, WasmCode::Kind kind,
536  WasmCode::Tier tier) {
537  OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
538  memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
539  desc.reloc_size);
540  WasmCode* ret =
541  AddOwnedCode(index, {desc.buffer, static_cast<size_t>(desc.instr_size)},
542  stack_slots, safepoint_table_offset, handler_table_offset,
543  desc.instr_size - desc.constant_pool_size,
544  std::move(protected_instructions), std::move(reloc_info),
545  std::move(source_pos_table), kind, tier);
546 
547  // Apply the relocation delta by iterating over the RelocInfo.
548  intptr_t delta = ret->instructions().start() - desc.buffer;
549  int mode_mask = RelocInfo::kApplyMask |
550  RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
551  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
552  for (RelocIterator it(ret->instructions(), ret->reloc_info(),
553  ret->constant_pool(), mode_mask);
554  !it.done(); it.next()) {
555  RelocInfo::Mode mode = it.rinfo()->rmode();
556  if (RelocInfo::IsWasmCall(mode)) {
557  uint32_t call_tag = it.rinfo()->wasm_call_tag();
558  Address target = GetCallTargetForFunction(call_tag);
559  it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
560  } else if (RelocInfo::IsWasmStubCall(mode)) {
561  uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
562  DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
563  WasmCode* code =
564  runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
565  it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
566  SKIP_ICACHE_FLUSH);
567  } else {
568  it.rinfo()->apply(delta);
569  }
570  }
571 
572  // Flush the i-cache here instead of in AddOwnedCode, to include the changes
573  // made while iterating over the RelocInfo above.
574  Assembler::FlushICache(ret->instructions().start(),
575  ret->instructions().size());
576  if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
577  ret->Validate();
578  return ret;
579 }
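// Note: direct WASM_CALL sites are patched to the callee's jump table slot
// (see GetCallTargetForFunction), so publishing a newer version of a function
// only needs its jump table slot patched in InstallCode, not every call site.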
580 
581 WasmCode* NativeModule::AddDeserializedCode(
582  uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
583  size_t safepoint_table_offset, size_t handler_table_offset,
584  size_t constant_pool_offset,
585  OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
586  OwnedVector<const byte> reloc_info,
587  OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
588  WasmCode* code =
589  AddOwnedCode(index, instructions, stack_slots, safepoint_table_offset,
590  handler_table_offset, constant_pool_offset,
591  std::move(protected_instructions), std::move(reloc_info),
592  std::move(source_position_table), WasmCode::kFunction, tier);
593 
594  if (!code->protected_instructions_.is_empty()) {
595  code->RegisterTrapHandlerData();
596  }
597  base::MutexGuard lock(&allocation_mutex_);
598  InstallCode(code);
599  // Note: we do not flush the i-cache here, since the code needs to be
600  // relocated anyway. The caller is responsible for flushing the i-cache later.
601  return code;
602 }
603 
604 void NativeModule::PublishCode(WasmCode* code) {
605  base::MutexGuard lock(&allocation_mutex_);
606  // Skip publishing code if there is an active redirection to the interpreter
607  // for the given function index, in order to preserve the redirection.
608  if (has_interpreter_redirection(code->index())) return;
609 
610  if (!code->protected_instructions_.is_empty()) {
611  code->RegisterTrapHandlerData();
612  }
613  InstallCode(code);
614 }
615 
616 void NativeModule::PublishInterpreterEntry(WasmCode* code,
617  uint32_t func_index) {
618  code->index_ = func_index;
619  base::MutexGuard lock(&allocation_mutex_);
620  InstallCode(code);
621  SetInterpreterRedirection(func_index);
622 }
623 
624 std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
625  base::MutexGuard lock(&allocation_mutex_);
626  std::vector<WasmCode*> result;
627  result.reserve(code_table().size());
628  for (WasmCode* code : code_table()) result.push_back(code);
629  return result;
630 }
631 
632 WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
633  // Only call this if we really need a jump table.
634  DCHECK_LT(0, num_wasm_functions);
635  OwnedVector<byte> instructions = OwnedVector<byte>::New(
636  JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
637  memset(instructions.start(), 0, instructions.size());
638  return AddOwnedCode(WasmCode::kAnonymousFuncIndex, // index
639  instructions.as_vector(), // instructions
640  0, // stack_slots
641  0, // safepoint_table_offset
642  0, // handler_table_offset
643  0, // constant_pool_offset
644  {}, // protected_instructions
645  {}, // reloc_info
646  {}, // source_pos
647  WasmCode::kJumpTable, // kind
648  WasmCode::kOther); // tier
649 }
650 
651 void NativeModule::InstallCode(WasmCode* code) {
652  DCHECK_LT(code->index(), num_functions());
653  DCHECK_LE(module_->num_imported_functions, code->index());
654 
655  // Update code table, except for interpreter entries.
656  if (code->kind() != WasmCode::kInterpreterEntry) {
657  code_table_[code->index() - module_->num_imported_functions] = code;
658  }
659 
660  // Patch jump table.
661  uint32_t slot_idx = code->index() - module_->num_imported_functions;
662  JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
663  slot_idx, code->instruction_start(),
664  WasmCode::kFlushICache);
665 }
666 
667 Vector<byte> NativeModule::AllocateForCode(size_t size) {
668  DCHECK_LT(0, size);
669  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
670  // This happens under a lock assumed by the caller.
671  size = RoundUp(size, kCodeAlignment);
672  base::AddressRegion code_space = free_code_space_.Allocate(size);
673  if (code_space.is_empty()) {
674  if (!can_request_more_memory_) {
675  V8::FatalProcessOutOfMemory(nullptr,
676  "NativeModule::AllocateForCode reservation");
677  UNREACHABLE();
678  }
679 
680  Address hint = owned_code_space_.empty() ? kNullAddress
681  : owned_code_space_.back().end();
682 
683  VirtualMemory new_mem =
684  code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
685  if (!new_mem.IsReserved()) {
686  V8::FatalProcessOutOfMemory(nullptr,
687  "NativeModule::AllocateForCode reservation");
688  UNREACHABLE();
689  }
690  code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
691 
692  free_code_space_.Merge(new_mem.region());
693  owned_code_space_.emplace_back(std::move(new_mem));
694  code_space = free_code_space_.Allocate(size);
695  DCHECK(!code_space.is_empty());
696  }
697  const Address page_size = page_allocator->AllocatePageSize();
698  Address commit_start = RoundUp(code_space.begin(), page_size);
699  Address commit_end = RoundUp(code_space.end(), page_size);
700  // {commit_start} will be either code_space.start or the start of the next
701  // page. {commit_end} will be the start of the page after the one in which
702  // the allocation ends.
703  // We start from an aligned start, and we know we allocated vmem in
704  // page multiples.
705  // We just need to commit what's not committed. The page in which we
706  // start is already committed (or we start at the beginning of a page).
707  // The end needs to be committed all through the end of the page.
708  if (commit_start < commit_end) {
709  committed_code_space_.fetch_add(commit_end - commit_start);
710  // Committed code cannot grow bigger than maximum code space size.
711  DCHECK_LE(committed_code_space_.load(), kMaxWasmCodeMemory);
712 #if V8_OS_WIN
713  // On Windows, we cannot commit a region that straddles different
714  // reservations of virtual memory. Because we bump-allocate, and because, if
715  // we need more memory, we append that memory at the end of the
716  // owned_code_space_ list, we traverse that list in reverse order to find
717  // the reservation(s) that guide how to chunk the region to commit.
718  for (auto& vmem : base::Reversed(owned_code_space_)) {
719  if (commit_end <= vmem.address() || vmem.end() <= commit_start) continue;
720  Address start = std::max(commit_start, vmem.address());
721  Address end = std::min(commit_end, vmem.end());
722  size_t commit_size = static_cast<size_t>(end - start);
723  if (!code_manager_->Commit(start, commit_size)) {
724  V8::FatalProcessOutOfMemory(nullptr,
725  "NativeModule::AllocateForCode commit");
726  UNREACHABLE();
727  }
728  // Opportunistically reduce the commit range. This might terminate the
729  // loop early.
730  if (commit_start == start) commit_start = end;
731  if (commit_end == end) commit_end = start;
732  if (commit_start >= commit_end) break;
733  }
734 #else
735  if (!code_manager_->Commit(commit_start, commit_end - commit_start)) {
736  V8::FatalProcessOutOfMemory(nullptr,
737  "NativeModule::AllocateForCode commit");
738  UNREACHABLE();
739  }
740 #endif
741  }
742  DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
743  allocated_code_space_.Merge(code_space);
744  TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, code_space.begin(),
745  size);
746  return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
747 }
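// Worked example: with a 4 KiB page size, an allocation occupying
// [0x5F00,0x6180) yields commit_start == 0x6000 and commit_end == 0x7000, so
// only the page at 0x6000 is newly committed; the page containing 0x5F00 was
// already committed by the preceding allocation.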
748 
749 namespace {
750 class NativeModuleWireBytesStorage final : public WireBytesStorage {
751  public:
752  explicit NativeModuleWireBytesStorage(NativeModule* native_module)
753  : native_module_(native_module) {}
754 
755  Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
756  return native_module_->wire_bytes().SubVector(ref.offset(),
757  ref.end_offset());
758  }
759 
760  private:
761  NativeModule* const native_module_;
762 };
763 } // namespace
764 
765 void NativeModule::SetWireBytes(OwnedVector<const byte> wire_bytes) {
766  wire_bytes_ = std::move(wire_bytes);
767  if (!wire_bytes.is_empty()) {
768  compilation_state_->SetWireBytesStorage(
769  std::make_shared<NativeModuleWireBytesStorage>(this));
770  }
771 }
772 
773 WasmCode* NativeModule::Lookup(Address pc) const {
774  base::MutexGuard lock(&allocation_mutex_);
775  if (owned_code_.empty()) return nullptr;
776  auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
777  WasmCodeUniquePtrComparator());
778  if (iter == owned_code_.begin()) return nullptr;
779  --iter;
780  WasmCode* candidate = iter->get();
781  DCHECK_NOT_NULL(candidate);
782  return candidate->contains(pc) ? candidate : nullptr;
783 }
784 
785 Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
786  // TODO(clemensh): Measure performance win of returning instruction start
787  // directly if we have turbofan code. Downside: Redirecting functions (e.g.
788  // for debugging) gets much harder.
789 
790  // Return the jump table slot for that function index.
791  DCHECK_NOT_NULL(jump_table_);
792  uint32_t slot_idx = func_index - module_->num_imported_functions;
793  uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
794  DCHECK_LT(slot_offset, jump_table_->instructions().size());
795  return jump_table_->instruction_start() + slot_offset;
796 }
797 
798 uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
799  Address slot_address) const {
800  DCHECK(is_jump_table_slot(slot_address));
801  uint32_t slot_offset =
802  static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
803  uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
804  DCHECK_LT(slot_idx, module_->num_declared_functions);
805  return module_->num_imported_functions + slot_idx;
806 }
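// Note: this is the inverse of GetCallTargetForFunction: a function index maps
// to slot {func_index - num_imported_functions}, and a slot address maps back
// to {num_imported_functions + slot_idx}.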
807 
808 void NativeModule::DisableTrapHandler() {
809  // Switch {use_trap_handler_} from true to false.
810  DCHECK(use_trap_handler_);
811  use_trap_handler_ = kNoTrapHandler;
812 
813  // Clear the code table (just to increase the chances to hit an error if we
814  // forget to re-add all code).
815  uint32_t num_wasm_functions = module_->num_declared_functions;
816  memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
817 
818  // TODO(clemensh): Actually free the owned code, such that the memory can be
819  // recycled.
820 }
821 
822 NativeModule::~NativeModule() {
823  TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
824  // Cancel all background compilation before resetting any field of the
825  // NativeModule or freeing anything.
826  compilation_state_->CancelAndWait();
827  code_manager_->FreeNativeModule(this);
828 }
829 
830 WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
831  size_t max_committed)
832  : memory_tracker_(memory_tracker),
833  remaining_uncommitted_code_space_(max_committed) {
834  DCHECK_LE(max_committed, kMaxWasmCodeMemory);
835 }
836 
837 bool WasmCodeManager::Commit(Address start, size_t size) {
838  // TODO(v8:8462) Remove eager commit once perf supports remapping.
839  if (FLAG_perf_prof) return true;
840  DCHECK(IsAligned(start, AllocatePageSize()));
841  DCHECK(IsAligned(size, AllocatePageSize()));
842  // Reserve the size. Use CAS loop to avoid underflow on
843  // {remaining_uncommitted_}. Temporary underflow would allow concurrent
844  // threads to over-commit.
845  while (true) {
846  size_t old_value = remaining_uncommitted_code_space_.load();
847  if (old_value < size) return false;
848  if (remaining_uncommitted_code_space_.compare_exchange_weak(
849  old_value, old_value - size)) {
850  break;
851  }
852  }
853  PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
854  ? PageAllocator::kReadWrite
855  : PageAllocator::kReadWriteExecute;
856 
857  bool ret =
858  SetPermissions(GetPlatformPageAllocator(), start, size, permission);
859  TRACE_HEAP("Setting rw permissions for %p:%p\n",
860  reinterpret_cast<void*>(start),
861  reinterpret_cast<void*>(start + size));
862 
863  if (!ret) {
864  // Highly unlikely.
865  remaining_uncommitted_code_space_.fetch_add(size);
866  return false;
867  }
868  return ret;
869 }
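// Note: the CAS loop above reserves {size} out of
// {remaining_uncommitted_code_space_} before touching page permissions; on
// failure the reservation is handed back via fetch_add. With
// {FLAG_wasm_write_protect_code_memory} set, pages are committed read-write
// and only become executable later through NativeModule::SetExecutable.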
870 
871 void WasmCodeManager::AssignRanges(Address start, Address end,
872  NativeModule* native_module) {
873  base::MutexGuard lock(&native_modules_mutex_);
874  lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
875 }
876 
877 void WasmCodeManager::AssignRangesAndAddModule(Address start, Address end,
878  NativeModule* native_module) {
879  base::MutexGuard lock(&native_modules_mutex_);
880  lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
881  native_modules_.emplace(native_module);
882 }
883 
884 VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
885  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
886  DCHECK_GT(size, 0);
887  size = RoundUp(size, page_allocator->AllocatePageSize());
888  if (!memory_tracker_->ReserveAddressSpace(size,
889  WasmMemoryTracker::kHardLimit)) {
890  return {};
891  }
892  if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
893 
894  VirtualMemory mem(page_allocator, size, hint,
895  page_allocator->AllocatePageSize());
896  if (!mem.IsReserved()) {
897  memory_tracker_->ReleaseReservation(size);
898  return {};
899  }
900  TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
901  reinterpret_cast<void*>(mem.address()),
902  reinterpret_cast<void*>(mem.end()), mem.size());
903 
904  // TODO(v8:8462) Remove eager commit once perf supports remapping.
905  if (FLAG_perf_prof) {
906  SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
907  PageAllocator::kReadWriteExecute);
908  }
909  return mem;
910 }
911 
912 void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
913  base::MutexGuard lock(&native_modules_mutex_);
914  for (NativeModule* native_module : native_modules_) {
915  int code_size =
916  static_cast<int>(native_module->committed_code_space_.load() / MB);
917  isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size);
918  }
919 }
920 
921 void WasmCodeManager::SetMaxCommittedMemoryForTesting(size_t limit) {
922  remaining_uncommitted_code_space_.store(limit);
923 }
924 
925 namespace {
926 
927 void ModuleSamplingCallback(v8::Isolate* v8_isolate, v8::GCType type,
928  v8::GCCallbackFlags flags, void* data) {
929  Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
930  isolate->wasm_engine()->code_manager()->SampleModuleSizes(isolate);
931 }
932 
933 } // namespace
934 
935 // static
936 void WasmCodeManager::InstallSamplingGCCallback(Isolate* isolate) {
937  isolate->heap()->AddGCEpilogueCallback(ModuleSamplingCallback,
938  v8::kGCTypeMarkSweepCompact, nullptr);
939 }
940 
941 // static
942 size_t WasmCodeManager::EstimateNativeModuleCodeSize(const WasmModule* module) {
943  constexpr size_t kCodeSizeMultiplier = 4;
944  constexpr size_t kCodeOverhead = 32; // for prologue, stack check, ...
945  constexpr size_t kStaticCodeSize = 512; // runtime stubs, ...
946  constexpr size_t kImportSize = 64 * kPointerSize;
947 
948  size_t estimate = kStaticCodeSize;
949  for (auto& function : module->functions) {
950  estimate += kCodeOverhead + kCodeSizeMultiplier * function.code.length();
951  }
952  estimate +=
953  JumpTableAssembler::SizeForNumberOfSlots(module->num_declared_functions);
954  estimate += kImportSize * module->num_imported_functions;
955 
956  return estimate;
957 }
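// Worked example: a declared function whose body is 100 bytes contributes
// kCodeOverhead + kCodeSizeMultiplier * 100 == 432 bytes to the estimate, on
// top of the fixed 512-byte static portion, the architecture-dependent jump
// table size, and 64 * kPointerSize per imported function.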
958 
959 // static
960 size_t WasmCodeManager::EstimateNativeModuleNonCodeSize(
961  const WasmModule* module) {
962  size_t wasm_module_estimate = EstimateStoredSize(module);
963 
964  uint32_t num_wasm_functions = module->num_declared_functions;
965 
966  // TODO(wasm): Include wire bytes size.
967  size_t native_module_estimate =
968  sizeof(NativeModule) + /* NativeModule struct */
969  (sizeof(WasmCode*) * num_wasm_functions) + /* code table size */
970  (sizeof(WasmCode) * num_wasm_functions); /* code object size */
971 
972  return wasm_module_estimate + native_module_estimate;
973 }
974 
975 bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
976  base::MutexGuard lock(&native_modules_mutex_);
977  // TODO(titzer): we force a critical memory pressure notification
978  // when the code space is almost exhausted, but only upon the next module
979  // creation. This is only for one isolate, and it should really do this for
980  // all isolates, at the point of commit.
981  constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
982  return native_modules_.size() > 1 &&
983  remaining_uncommitted_code_space_.load() < kCriticalThreshold;
984 }
985 
986 std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
987  Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
988  bool can_request_more, std::shared_ptr<const WasmModule> module) {
989  DCHECK_EQ(this, isolate->wasm_engine()->code_manager());
990  if (ShouldForceCriticalMemoryPressureNotification()) {
991  (reinterpret_cast<v8::Isolate*>(isolate))
992  ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
993  }
994 
995  // If the code must be contiguous, reserve enough address space up front.
996  size_t code_vmem_size =
997  kRequiresCodeRange ? kMaxWasmCodeMemory : code_size_estimate;
998  // Try up to three times; getting rid of dead JSArrayBuffer allocations might
999  // require two GCs because the first GC may be incremental and may have
1000  // floating garbage.
1001  static constexpr int kAllocationRetries = 2;
1002  VirtualMemory code_space;
1003  for (int retries = 0;; ++retries) {
1004  code_space = TryAllocate(code_vmem_size);
1005  if (code_space.IsReserved()) break;
1006  if (retries == kAllocationRetries) {
1007  V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
1008  UNREACHABLE();
1009  }
1010  // Run one GC, then try the allocation again.
1011  isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
1012  true);
1013  }
1014 
1015  Address start = code_space.address();
1016  size_t size = code_space.size();
1017  Address end = code_space.end();
1018  std::unique_ptr<NativeModule> ret(new NativeModule(
1019  isolate, enabled, can_request_more, std::move(code_space),
1020  isolate->wasm_engine()->code_manager(), std::move(module)));
1021  TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
1022  size);
1023  AssignRangesAndAddModule(start, end, ret.get());
1024  return ret;
1025 }
1026 
1027 bool NativeModule::SetExecutable(bool executable) {
1028  if (is_executable_ == executable) return true;
1029  TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
1030 
1031  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
1032 
1033  if (FLAG_wasm_write_protect_code_memory) {
1034  PageAllocator::Permission permission =
1035  executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
1036 #if V8_OS_WIN
1037  // On Windows, we need to switch permissions per separate virtual memory
1038  // reservation. This is really just a problem when the NativeModule is
1039  // growable (meaning can_request_more_memory_). That's 32-bit in production,
1040  // or unittests.
1041  // For now, in that case, we commit at reserved memory granularity.
1042  // Technically, that may be a waste, because we may reserve more than we
1043  // use. On 32-bit though, the scarce resource is the address space -
1044  // committed or not.
1045  if (can_request_more_memory_) {
1046  for (auto& vmem : owned_code_space_) {
1047  if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
1048  permission)) {
1049  return false;
1050  }
1051  TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
1052  executable);
1053  }
1054  is_executable_ = executable;
1055  return true;
1056  }
1057 #endif
1058  for (auto& region : allocated_code_space_.regions()) {
1059  // allocated_code_space_ is fine-grained, so we need to
1060  // page-align it.
1061  size_t region_size =
1062  RoundUp(region.size(), page_allocator->AllocatePageSize());
1063  if (!SetPermissions(page_allocator, region.begin(), region_size,
1064  permission)) {
1065  return false;
1066  }
1067  TRACE_HEAP("Set %p:%p to executable:%d\n",
1068  reinterpret_cast<void*>(region.begin()),
1069  reinterpret_cast<void*>(region.end()), executable);
1070  }
1071  }
1072  is_executable_ = executable;
1073  return true;
1074 }
1075 
1076 void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
1077  base::MutexGuard lock(&native_modules_mutex_);
1078  DCHECK_EQ(1, native_modules_.count(native_module));
1079  native_modules_.erase(native_module);
1080  TRACE_HEAP("Freeing NativeModule %p\n", native_module);
1081  for (auto& code_space : native_module->owned_code_space_) {
1082  DCHECK(code_space.IsReserved());
1083  TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n",
1084  code_space.address(), code_space.end(), code_space.size());
1085  lookup_map_.erase(code_space.address());
1086  memory_tracker_->ReleaseReservation(code_space.size());
1087  code_space.Free();
1088  DCHECK(!code_space.IsReserved());
1089  }
1090  native_module->owned_code_space_.clear();
1091 
1092  size_t code_size = native_module->committed_code_space_.load();
1093  DCHECK(IsAligned(code_size, AllocatePageSize()));
1094  remaining_uncommitted_code_space_.fetch_add(code_size);
1095  // Remaining code space cannot grow bigger than maximum code space size.
1096  DCHECK_LE(remaining_uncommitted_code_space_.load(), kMaxWasmCodeMemory);
1097 }
1098 
1099 NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
1100  base::MutexGuard lock(&native_modules_mutex_);
1101  if (lookup_map_.empty()) return nullptr;
1102 
1103  auto iter = lookup_map_.upper_bound(pc);
1104  if (iter == lookup_map_.begin()) return nullptr;
1105  --iter;
1106  Address region_start = iter->first;
1107  Address region_end = iter->second.first;
1108  NativeModule* candidate = iter->second.second;
1109 
1110  DCHECK_NOT_NULL(candidate);
1111  return region_start <= pc && pc < region_end ? candidate : nullptr;
1112 }
1113 
1114 WasmCode* WasmCodeManager::LookupCode(Address pc) const {
1115  NativeModule* candidate = LookupNativeModule(pc);
1116  return candidate ? candidate->Lookup(pc) : nullptr;
1117 }
1118 
1119 size_t WasmCodeManager::remaining_uncommitted_code_space() const {
1120  return remaining_uncommitted_code_space_.load();
1121 }
1122 
1123 // TODO(v8:7424): Code protection scopes are not yet supported with shared code
1124 // enabled and need to be revisited to work with --wasm-shared-code as well.
1125 NativeModuleModificationScope::NativeModuleModificationScope(
1126  NativeModule* native_module)
1127  : native_module_(native_module) {
1128  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
1129  (native_module_->modification_scope_depth_++) == 0) {
1130  bool success = native_module_->SetExecutable(false);
1131  CHECK(success);
1132  }
1133 }
1134 
1135 NativeModuleModificationScope::~NativeModuleModificationScope() {
1136  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
1137  (native_module_->modification_scope_depth_--) == 1) {
1138  bool success = native_module_->SetExecutable(true);
1139  CHECK(success);
1140  }
1141 }
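// Typical usage (a minimal sketch): code memory stays writable for the
// lifetime of the scope and is switched back to executable when it ends,
// provided {FLAG_wasm_write_protect_code_memory} is enabled:
//
//   {
//     NativeModuleModificationScope scope(native_module);
//     // ... allocate or patch code in {native_module} ...
//   }  // code becomes executable again here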
1142 
1143 } // namespace wasm
1144 } // namespace internal
1145 } // namespace v8
1146 #undef TRACE_HEAP