V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
embedded-data.cc
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/embedded-data.h"

#include "src/assembler-inl.h"
#include "src/callable.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {

// static
bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
  if (FLAG_embedded_builtins) {
    const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
    return IsInRange(pc, start, start + isolate->embedded_blob_size());
  } else {
    return false;
  }
}

// static
Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
  if (!PcIsOffHeap(isolate, address)) return Code();

  EmbeddedData d = EmbeddedData::FromBlob();
  if (address < d.InstructionStartOfBuiltin(0)) return Code();

  // Note: Addresses within the padding section between builtins (i.e. within
  // start + size <= address < start + padded_size) are interpreted as
  // belonging to the preceding builtin.

  int l = 0, r = Builtins::builtin_count;
  while (l < r) {
    const int mid = (l + r) / 2;
    Address start = d.InstructionStartOfBuiltin(mid);
    Address end = start + d.PaddedInstructionSizeOfBuiltin(mid);

    if (address < start) {
      r = mid;
    } else if (address >= end) {
      l = mid + 1;
    } else {
      return isolate->builtins()->builtin(mid);
    }
  }

  UNREACHABLE();
}
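
// A minimal sketch of the lookup above: the metadata table is ordered by
// instruction start, so mapping a pc back to a builtin index is an ordinary
// binary search over half-open [start, start + padded_size) ranges. The
// names below (Range, LookupRange) are illustrative only, not part of V8.
//
//   #include <cstdint>
//   #include <vector>
//
//   struct Range {
//     uintptr_t start;
//     uint32_t padded_size;
//   };
//
//   // Returns the index of the range containing pc, or -1 if there is none.
//   // Assumes the ranges are sorted by start and do not overlap.
//   int LookupRange(const std::vector<Range>& ranges, uintptr_t pc) {
//     int l = 0, r = static_cast<int>(ranges.size());
//     while (l < r) {
//       const int mid = (l + r) / 2;
//       const uintptr_t start = ranges[mid].start;
//       const uintptr_t end = start + ranges[mid].padded_size;
//       if (pc < start) {
//         r = mid;
//       } else if (pc >= end) {
//         l = mid + 1;
//       } else {
//         return mid;
//       }
//     }
//     return -1;
//   }
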
// static
void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
                                                        uint8_t** data,
                                                        uint32_t* size) {
  EmbeddedData d = EmbeddedData::FromIsolate(isolate);

  v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
  const uint32_t page_size =
      static_cast<uint32_t>(page_allocator->AllocatePageSize());
  const uint32_t allocated_size = RoundUp(d.size(), page_size);

  uint8_t* allocated_bytes = static_cast<uint8_t*>(
      AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
                    allocated_size, page_size, PageAllocator::kReadWrite));
  CHECK_NOT_NULL(allocated_bytes);

  std::memcpy(allocated_bytes, d.data(), d.size());
  CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
                       PageAllocator::kReadExecute));

  *data = allocated_bytes;
  *size = d.size();

  d.Dispose();
}
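
// The function above follows a common W^X pattern: reserve writable pages,
// copy the code into them, then flip the mapping to read+execute. A
// stripped-down POSIX sketch of the same pattern (assuming mmap/mprotect, a
// power-of-two page size, and with most error handling omitted; this is an
// illustration, not the V8 implementation):
//
//   #include <sys/mman.h>
//   #include <cstddef>
//   #include <cstring>
//
//   void* CopyAsExecutable(const uint8_t* src, size_t size, size_t page_size) {
//     // Round the allocation up to a whole number of pages.
//     const size_t allocated = (size + page_size - 1) & ~(page_size - 1);
//     void* mem = mmap(nullptr, allocated, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//     if (mem == MAP_FAILED) return nullptr;
//     std::memcpy(mem, src, size);
//     // Drop write permission before allowing execution.
//     if (mprotect(mem, allocated, PROT_READ | PROT_EXEC) != 0) {
//       munmap(mem, allocated);
//       return nullptr;
//     }
//     return mem;
//   }
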
// static
void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
                                                     uint32_t size) {
  v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
  const uint32_t page_size =
      static_cast<uint32_t>(page_allocator->AllocatePageSize());
  CHECK(FreePages(page_allocator, data, RoundUp(size, page_size)));
}

namespace {

bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
  DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
  switch (Builtins::KindOf(code->builtin_index())) {
    case Builtins::CPP:
    case Builtins::TFC:
    case Builtins::TFH:
    case Builtins::TFJ:
    case Builtins::TFS:
      break;

    // Bytecode handlers will only ever be used by the interpreter and so
    // there will never be a need to use trampolines with them.
    case Builtins::BCH:
    case Builtins::API:
    case Builtins::ASM:
      // TODO(jgruber): Extend checks to remaining kinds.
      return false;
  }

  Callable callable = Builtins::CallableFor(
      isolate, static_cast<Builtins::Name>(code->builtin_index()));
  CallInterfaceDescriptor descriptor = callable.descriptor();

  if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
    return true;
  }

  for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
    Register reg = descriptor.GetRegisterParameter(i);
    if (reg == kOffHeapTrampolineRegister) return true;
  }

  return false;
}
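
// Context for the check above: an on-heap trampoline to an embedded builtin
// materializes the off-heap entry point in kOffHeapTrampolineRegister before
// jumping to it, so that register is clobbered on entry. A builtin whose call
// interface descriptor assigns the context or a register parameter to that
// same register would therefore lose that value, which is why such builtins
// are flagged here as unsafe to embed.
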
void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
  static const int kRelocMask =
      RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
      RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);

  for (int i = 0; i < Builtins::builtin_count; i++) {
    if (!Builtins::IsIsolateIndependent(i)) continue;

    Code code = isolate->builtins()->builtin(i);
    RelocIterator on_heap_it(code, kRelocMask);
    RelocIterator off_heap_it(blob, code, kRelocMask);

#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
    defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||  \
    defined(V8_TARGET_ARCH_IA32)
    // On these platforms we emit relative builtin-to-builtin
    // jumps for isolate independent builtins in the snapshot. This fixes up
    // the relative jumps to the right offsets in the snapshot.
    // See also: Code::IsIsolateIndependent.
    while (!on_heap_it.done()) {
      DCHECK(!off_heap_it.done());

      RelocInfo* rinfo = on_heap_it.rinfo();
      DCHECK_EQ(rinfo->rmode(), off_heap_it.rinfo()->rmode());
      Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
      CHECK(Builtins::IsIsolateIndependentBuiltin(target));

      // Do not emit write-barrier for off-heap writes.
      off_heap_it.rinfo()->set_target_address(
          blob->InstructionStartOfBuiltin(target->builtin_index()),
          SKIP_WRITE_BARRIER);

      on_heap_it.next();
      off_heap_it.next();
    }
    DCHECK(off_heap_it.done());
#else
    // Architectures other than those listed above do not use pc-relative
    // calls and thus must not contain embedded code targets. Instead, we use
    // an indirection through the root register.
    CHECK(on_heap_it.done());
    CHECK(off_heap_it.done());
#endif  // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
  }
}
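
// The loop in FinalizeEmbeddedCodeTargets above walks the relocation entries
// of the on-heap builtin and of its off-heap copy in lockstep, and retargets
// each builtin-to-builtin reference in the copy at the builtin's final
// embedded address. A simplified model of that retargeting step (RelocSlot
// and Retarget are hypothetical names, not part of V8):
//
//   #include <cstdint>
//   #include <vector>
//
//   struct RelocSlot {
//     int target_builtin;        // which builtin this entry refers to
//     uintptr_t encoded_target;  // address written into the instruction
//   };
//
//   void Retarget(std::vector<RelocSlot>& off_heap_slots,
//                 const std::vector<uintptr_t>& embedded_starts) {
//     for (RelocSlot& slot : off_heap_slots) {
//       slot.encoded_target = embedded_starts[slot.target_builtin];
//     }
//   }
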
}  // namespace

// static
EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
  Builtins* builtins = isolate->builtins();

  // Store instruction stream lengths and offsets.
  std::vector<struct Metadata> metadata(kTableSize);

  bool saw_unsafe_builtin = false;
  uint32_t raw_data_size = 0;
  for (int i = 0; i < Builtins::builtin_count; i++) {
    Code code = builtins->builtin(i);

    if (Builtins::IsIsolateIndependent(i)) {
      // Sanity-check that the given builtin is isolate-independent and does
      // not use the trampoline register in its calling convention.
      if (!code->IsIsolateIndependent(isolate)) {
        saw_unsafe_builtin = true;
        fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
      }
      if (Builtins::IsWasmRuntimeStub(i) &&
          RelocInfo::RequiresRelocation(code)) {
        // Wasm additionally requires that its runtime stubs must be
        // individually PIC (i.e. we must be able to copy each stub outside
        // the embedded area without relocations). In particular, that means
        // pc-relative calls to other builtins are disallowed.
        saw_unsafe_builtin = true;
        fprintf(stderr, "%s is a wasm runtime stub but needs relocation.\n",
                Builtins::name(i));
      }
      if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
        saw_unsafe_builtin = true;
        fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
                Builtins::name(i));
      }

      uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());

      DCHECK_EQ(0, raw_data_size % kCodeAlignment);
      metadata[i].instructions_offset = raw_data_size;
      metadata[i].instructions_length = length;

      // Align the start of each instruction stream.
      raw_data_size += PadAndAlign(length);
    } else {
      metadata[i].instructions_offset = raw_data_size;
    }
  }
  CHECK_WITH_MSG(
      !saw_unsafe_builtin,
      "One or more builtins marked as isolate-independent either contains "
      "isolate-dependent code or aliases the off-heap trampoline register. "
      "If in doubt, ask jgruber@");

  const uint32_t blob_size = RawDataOffset() + raw_data_size;
  uint8_t* const blob = new uint8_t[blob_size];
  uint8_t* const raw_data_start = blob + RawDataOffset();

  // Initially zap the entire blob, effectively padding the alignment area
  // between two builtins with int3's (on x64/ia32).
  ZapCode(reinterpret_cast<Address>(blob), blob_size);

  // Write the metadata tables.
  DCHECK_EQ(MetadataSize(), sizeof(metadata[0]) * metadata.size());
  std::memcpy(blob + MetadataOffset(), metadata.data(), MetadataSize());

  // Write the raw data section.
  for (int i = 0; i < Builtins::builtin_count; i++) {
    if (!Builtins::IsIsolateIndependent(i)) continue;
    Code code = builtins->builtin(i);
    uint32_t offset = metadata[i].instructions_offset;
    uint8_t* dst = raw_data_start + offset;
    DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
              blob_size);
    std::memcpy(dst, reinterpret_cast<uint8_t*>(code->raw_instruction_start()),
                code->raw_instruction_size());
  }

  EmbeddedData d(blob, blob_size);

  // Fix up call targets that point to other embedded builtins.
  FinalizeEmbeddedCodeTargets(isolate, &d);

  // Hash the blob and store the result.
  STATIC_ASSERT(HashSize() == kSizetSize);
  const size_t hash = d.CreateHash();
  std::memcpy(blob + HashOffset(), &hash, HashSize());

  DCHECK_EQ(hash, d.CreateHash());
  DCHECK_EQ(hash, d.Hash());

  if (FLAG_serialization_statistics) d.PrintStatistics();

  return d;
}
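
// The blob built by FromIsolate above has a flat layout that the accessors
// below read back: a hash of the remaining bytes, a fixed-size metadata table
// with one entry per builtin, and then the padded instruction streams. A
// rough picture of that layout (the real offsets come from HashOffset(),
// MetadataOffset() and RawDataOffset() in embedded-data.h; the struct below
// is only an illustration, not how the blob is actually declared):
//
//   struct BlobLayoutSketch {
//     size_t hash;                                 // HashOffset() == 0
//     Metadata metadata[Builtins::builtin_count];  // MetadataOffset()
//     // ...followed by the kCodeAlignment-padded instruction streams of the
//     // isolate-independent builtins, starting at RawDataOffset().
//   };
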
Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
  DCHECK(Builtins::IsBuiltinId(i));
  const struct Metadata* metadata = Metadata();
  const uint8_t* result = RawData() + metadata[i].instructions_offset;
  DCHECK_LE(result, data_ + size_);
  DCHECK_IMPLIES(result == data_ + size_, InstructionSizeOfBuiltin(i) == 0);
  return reinterpret_cast<Address>(result);
}

uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
  DCHECK(Builtins::IsBuiltinId(i));
  const struct Metadata* metadata = Metadata();
  return metadata[i].instructions_length;
}

size_t EmbeddedData::CreateHash() const {
  STATIC_ASSERT(HashOffset() == 0);
  STATIC_ASSERT(HashSize() == kSizetSize);
  return base::hash_range(data_ + HashSize(), data_ + size_);
}
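
// CreateHash above deliberately skips the leading HashSize() bytes, so the
// stored hash never covers itself; a consumer can then validate a blob by
// recomputing the hash over the same range and comparing it against the
// stored value, roughly like the sketch below (BlobLooksValid is a
// hypothetical helper, not part of V8):
//
//   bool BlobLooksValid(const uint8_t* data, size_t size) {
//     size_t stored;
//     std::memcpy(&stored, data + EmbeddedData::HashOffset(),
//                 EmbeddedData::HashSize());
//     const size_t computed = base::hash_range(
//         data + EmbeddedData::HashSize(), data + size);
//     return stored == computed;
//   }
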
void EmbeddedData::PrintStatistics() const {
  DCHECK(FLAG_serialization_statistics);

  constexpr int kCount = Builtins::builtin_count;

  int embedded_count = 0;
  int instruction_size = 0;
  int sizes[kCount];
  for (int i = 0; i < kCount; i++) {
    if (!Builtins::IsIsolateIndependent(i)) continue;
    const int size = InstructionSizeOfBuiltin(i);
    instruction_size += size;
    sizes[embedded_count] = size;
    embedded_count++;
  }

  // Sort for percentiles.
  std::sort(&sizes[0], &sizes[embedded_count]);

  const int k50th = embedded_count * 0.5;
  const int k75th = embedded_count * 0.75;
  const int k90th = embedded_count * 0.90;
  const int k99th = embedded_count * 0.99;

  const int metadata_size = static_cast<int>(HashSize() + MetadataSize());

  PrintF("EmbeddedData:\n");
  PrintF(" Total size: %d\n",
         static_cast<int>(size()));
  PrintF(" Metadata size: %d\n", metadata_size);
  PrintF(" Instruction size: %d\n", instruction_size);
  PrintF(" Padding: %d\n",
         static_cast<int>(size() - metadata_size - instruction_size));
  PrintF(" Embedded builtin count: %d\n", embedded_count);
  PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
  PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
  PrintF(" Instruction size (90th percentile): %d\n", sizes[k90th]);
  PrintF(" Instruction size (99th percentile): %d\n", sizes[k99th]);
  PrintF("\n");
}

}  // namespace internal
}  // namespace v8