Source listing: wasm-interpreter.cc — V8 7.2.502.16 (the V8 version bundled with Deno 0.2.4)
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <atomic>
6 #include <type_traits>
7 
8 #include "src/wasm/wasm-interpreter.h"
9 
10 #include "src/assembler-inl.h"
11 #include "src/boxed-float.h"
12 #include "src/compiler/wasm-compiler.h"
13 #include "src/conversions.h"
14 #include "src/identity-map.h"
15 #include "src/objects-inl.h"
16 #include "src/trap-handler/trap-handler.h"
17 #include "src/utils.h"
18 #include "src/wasm/decoder.h"
19 #include "src/wasm/function-body-decoder-impl.h"
20 #include "src/wasm/function-body-decoder.h"
21 #include "src/wasm/memory-tracing.h"
22 #include "src/wasm/wasm-engine.h"
23 #include "src/wasm/wasm-external-refs.h"
24 #include "src/wasm/wasm-limits.h"
25 #include "src/wasm/wasm-module.h"
26 #include "src/wasm/wasm-objects-inl.h"
27 
28 #include "src/zone/accounting-allocator.h"
29 #include "src/zone/zone-containers.h"
30 
31 namespace v8 {
32 namespace internal {
33 namespace wasm {
34 
// Tracing helper: prints only when --trace-wasm-interpreter is enabled.
#define TRACE(...) \
  do { \
  if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)

// Maps a SIMD lane index to its position in the value array: lanes are
// stored in reverse order on big-endian targets.
#if V8_TARGET_BIG_ENDIAN
#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
#else
#define LANE(i, type) (i)
#endif

// Interpreter-internal opcodes that are patched into the bytecode stream;
// 0xFF does not collide with any real single-byte wasm opcode.
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)

// Wasm value-type tag -> C type mapping used by the interpreter's value
// storage.
#define WASM_CTYPES(V) \
  V(I32, int32_t) V(I64, int64_t) V(F32, float) V(F64, double) V(S128, Simd128)
50 
51 #define FOREACH_SIMPLE_BINOP(V) \
52  V(I32Add, uint32_t, +) \
53  V(I32Sub, uint32_t, -) \
54  V(I32Mul, uint32_t, *) \
55  V(I32And, uint32_t, &) \
56  V(I32Ior, uint32_t, |) \
57  V(I32Xor, uint32_t, ^) \
58  V(I32Eq, uint32_t, ==) \
59  V(I32Ne, uint32_t, !=) \
60  V(I32LtU, uint32_t, <) \
61  V(I32LeU, uint32_t, <=) \
62  V(I32GtU, uint32_t, >) \
63  V(I32GeU, uint32_t, >=) \
64  V(I32LtS, int32_t, <) \
65  V(I32LeS, int32_t, <=) \
66  V(I32GtS, int32_t, >) \
67  V(I32GeS, int32_t, >=) \
68  V(I64Add, uint64_t, +) \
69  V(I64Sub, uint64_t, -) \
70  V(I64Mul, uint64_t, *) \
71  V(I64And, uint64_t, &) \
72  V(I64Ior, uint64_t, |) \
73  V(I64Xor, uint64_t, ^) \
74  V(I64Eq, uint64_t, ==) \
75  V(I64Ne, uint64_t, !=) \
76  V(I64LtU, uint64_t, <) \
77  V(I64LeU, uint64_t, <=) \
78  V(I64GtU, uint64_t, >) \
79  V(I64GeU, uint64_t, >=) \
80  V(I64LtS, int64_t, <) \
81  V(I64LeS, int64_t, <=) \
82  V(I64GtS, int64_t, >) \
83  V(I64GeS, int64_t, >=) \
84  V(F32Add, float, +) \
85  V(F32Sub, float, -) \
86  V(F32Eq, float, ==) \
87  V(F32Ne, float, !=) \
88  V(F32Lt, float, <) \
89  V(F32Le, float, <=) \
90  V(F32Gt, float, >) \
91  V(F32Ge, float, >=) \
92  V(F64Add, double, +) \
93  V(F64Sub, double, -) \
94  V(F64Eq, double, ==) \
95  V(F64Ne, double, !=) \
96  V(F64Lt, double, <) \
97  V(F64Le, double, <=) \
98  V(F64Gt, double, >) \
99  V(F64Ge, double, >=) \
100  V(F32Mul, float, *) \
101  V(F64Mul, double, *) \
102  V(F32Div, float, /) \
103  V(F64Div, double, /)
104 
105 #define FOREACH_OTHER_BINOP(V) \
106  V(I32DivS, int32_t) \
107  V(I32DivU, uint32_t) \
108  V(I32RemS, int32_t) \
109  V(I32RemU, uint32_t) \
110  V(I32Shl, uint32_t) \
111  V(I32ShrU, uint32_t) \
112  V(I32ShrS, int32_t) \
113  V(I64DivS, int64_t) \
114  V(I64DivU, uint64_t) \
115  V(I64RemS, int64_t) \
116  V(I64RemU, uint64_t) \
117  V(I64Shl, uint64_t) \
118  V(I64ShrU, uint64_t) \
119  V(I64ShrS, int64_t) \
120  V(I32Ror, int32_t) \
121  V(I32Rol, int32_t) \
122  V(I64Ror, int64_t) \
123  V(I64Rol, int64_t) \
124  V(F32Min, float) \
125  V(F32Max, float) \
126  V(F64Min, double) \
127  V(F64Max, double) \
128  V(I32AsmjsDivS, int32_t) \
129  V(I32AsmjsDivU, uint32_t) \
130  V(I32AsmjsRemS, int32_t) \
131  V(I32AsmjsRemU, uint32_t) \
132  V(F32CopySign, Float32) \
133  V(F64CopySign, Float64)
134 
135 #define FOREACH_I32CONV_FLOATOP(V) \
136  V(I32SConvertF32, int32_t, float) \
137  V(I32SConvertF64, int32_t, double) \
138  V(I32UConvertF32, uint32_t, float) \
139  V(I32UConvertF64, uint32_t, double)
140 
141 #define FOREACH_OTHER_UNOP(V) \
142  V(I32Clz, uint32_t) \
143  V(I32Ctz, uint32_t) \
144  V(I32Popcnt, uint32_t) \
145  V(I32Eqz, uint32_t) \
146  V(I64Clz, uint64_t) \
147  V(I64Ctz, uint64_t) \
148  V(I64Popcnt, uint64_t) \
149  V(I64Eqz, uint64_t) \
150  V(F32Abs, Float32) \
151  V(F32Neg, Float32) \
152  V(F32Ceil, float) \
153  V(F32Floor, float) \
154  V(F32Trunc, float) \
155  V(F32NearestInt, float) \
156  V(F64Abs, Float64) \
157  V(F64Neg, Float64) \
158  V(F64Ceil, double) \
159  V(F64Floor, double) \
160  V(F64Trunc, double) \
161  V(F64NearestInt, double) \
162  V(I32ConvertI64, int64_t) \
163  V(I64SConvertF32, float) \
164  V(I64SConvertF64, double) \
165  V(I64UConvertF32, float) \
166  V(I64UConvertF64, double) \
167  V(I64SConvertI32, int32_t) \
168  V(I64UConvertI32, uint32_t) \
169  V(F32SConvertI32, int32_t) \
170  V(F32UConvertI32, uint32_t) \
171  V(F32SConvertI64, int64_t) \
172  V(F32UConvertI64, uint64_t) \
173  V(F32ConvertF64, double) \
174  V(F32ReinterpretI32, int32_t) \
175  V(F64SConvertI32, int32_t) \
176  V(F64UConvertI32, uint32_t) \
177  V(F64SConvertI64, int64_t) \
178  V(F64UConvertI64, uint64_t) \
179  V(F64ConvertF32, float) \
180  V(F64ReinterpretI64, int64_t) \
181  V(I32AsmjsSConvertF32, float) \
182  V(I32AsmjsUConvertF32, float) \
183  V(I32AsmjsSConvertF64, double) \
184  V(I32AsmjsUConvertF64, double) \
185  V(F32Sqrt, float) \
186  V(F64Sqrt, double)
187 
188 namespace {
189 
// Bit masks selecting the IEEE-754 sign bit of float/double bit patterns,
// used by the abs/neg/copysign implementations below.
constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
192 
193 inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
194  if (b == 0) {
195  *trap = kTrapDivByZero;
196  return 0;
197  }
198  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
199  *trap = kTrapDivUnrepresentable;
200  return 0;
201  }
202  return a / b;
203 }
204 
205 inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
206  if (b == 0) {
207  *trap = kTrapDivByZero;
208  return 0;
209  }
210  return a / b;
211 }
212 
213 inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
214  if (b == 0) {
215  *trap = kTrapRemByZero;
216  return 0;
217  }
218  if (b == -1) return 0;
219  return a % b;
220 }
221 
222 inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
223  if (b == 0) {
224  *trap = kTrapRemByZero;
225  return 0;
226  }
227  return a % b;
228 }
229 
// 32-bit shifts: wasm reduces the shift count modulo 32 (low 5 bits of |b|),
// which also keeps the C++ shift well-defined (count < bit width).
inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
  return a << (b & 0x1F);
}

// Logical (zero-filling) right shift.
inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
  return a >> (b & 0x1F);
}

// Arithmetic (sign-extending) right shift.
inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
  return a >> (b & 0x1F);
}
241 
// i64.div_s: traps on a zero divisor and on the one overflowing case
// INT64_MIN / -1 (kTrapDivUnrepresentable); returns 0 in the trap cases.
inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

// i64.div_u: only a zero divisor traps.
inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

// i64.rem_s: traps on a zero divisor. The b == -1 special case avoids the
// C++ overflow of INT64_MIN % -1; wasm defines that result as 0.
inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

// i64.rem_u: only a zero divisor traps.
inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}
278 
// 64-bit shifts: wasm reduces the shift count modulo 64 (low 6 bits of |b|),
// which also keeps the C++ shift well-defined.
inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
  return a << (b & 0x3F);
}

// Logical (zero-filling) right shift.
inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
  return a >> (b & 0x3F);
}

// Arithmetic (sign-extending) right shift.
inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
  return a >> (b & 0x3F);
}
290 
291 inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
292  uint32_t shift = (b & 0x1F);
293  return (a >> shift) | (a << (32 - shift));
294 }
295 
296 inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
297  uint32_t shift = (b & 0x1F);
298  return (a << shift) | (a >> (32 - shift));
299 }
300 
301 inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
302  uint32_t shift = (b & 0x3F);
303  return (a >> shift) | (a << (64 - shift));
304 }
305 
306 inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
307  uint32_t shift = (b & 0x3F);
308  return (a << shift) | (a >> (64 - shift));
309 }
310 
// f32/f64 min and max delegate to JSMin/JSMax. NOTE(review): wasm requires
// NaN propagation and -0 ordered before +0 here — confirm JSMin/JSMax
// implement exactly that.
inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
  return JSMin(a, b);
}

inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
  return JSMax(a, b);
}

// copysign works on the raw bit patterns so that NaN payloads pass through
// unchanged: magnitude bits of |a| combined with the sign bit of |b|.
inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
  return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
                           (b.get_bits() & kFloat32SignBitMask));
}

inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
  return JSMin(a, b);
}

inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
  return JSMax(a, b);
}

inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
  return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
                           (b.get_bits() & kFloat64SignBitMask));
}
336 
// asm.js integer division/remainder never traps (unlike wasm proper):
// division by zero yields 0, and the overflow case INT32_MIN / -1 yields
// INT32_MIN.
inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  return a / b;
}

inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  return a / b;
}

// INT32_MIN % -1 would overflow in C++; asm.js defines the result as 0,
// hence the explicit b == -1 case.
inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1) return 0;
  return a % b;
}

inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
  if (b == 0) return 0;
  return a % b;
}
360 
// asm.js float -> int conversions never set *trap; they go through
// DoubleToInt32/DoubleToUint32, which presumably apply the JS
// ToInt32/ToUint32 truncation (NaN/out-of-range wrap instead of trapping)
// — confirm in src/conversions.h.
inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
  return DoubleToInt32(a);
}

inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
  return DoubleToUint32(a);
}

inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
  return DoubleToInt32(a);
}

inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
  return DoubleToUint32(a);
}
376 
// Bit-counting and zero-test unops, implemented via base::bits helpers.
// The signed/unsigned mix in the return types is historical; the results
// are small non-negative counts either way.
int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros(val);
}

uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros(val);
}

uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return base::bits::CountPopulation(val);
}

// i32.eqz: boolean test, 1 if zero else 0.
inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros(val);
}

inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros(val);
}

inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return base::bits::CountPopulation(val);
}

// i64.eqz produces an i32 result per the wasm spec.
inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}
408 
409 inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
410  return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
411 }
412 
413 inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
414  return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
415 }
416 
417 inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
418 
419 inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }
420 
421 inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }
422 
423 inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
424  return nearbyintf(a);
425 }
426 
427 inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
428  float result = sqrtf(a);
429  return result;
430 }
431 
// Double-precision counterparts of the F32 unops above: sign-bit
// manipulation for abs/neg (NaN-safe) and C library calls for rounding.
inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
  return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
}

inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
  return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
}

inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }

inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }

inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }

// Rounds using the current rounding mode (round-to-nearest-even by default).
inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}

inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
451 
// Trapping float -> int conversion: if |a| is representable in int_type
// (checked via is_inbounds), truncate; otherwise signal
// kTrapFloatUnrepresentable and return 0.
template <typename int_type, typename float_type>
int_type ExecuteConvert(float_type a, TrapReason* trap) {
  if (is_inbounds<int_type>(a)) {
    return static_cast<int_type>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}
460 
461 template <typename int_type, typename float_type>
462 int_type ExecuteConvertSaturate(float_type a) {
463  TrapReason base_trap = kTrapCount;
464  int32_t val = ExecuteConvert<int_type>(a, &base_trap);
465  if (base_trap == kTrapCount) {
466  return val;
467  }
468  return std::isnan(a) ? 0
469  : (a < static_cast<float_type>(0.0)
470  ? std::numeric_limits<int_type>::min()
471  : std::numeric_limits<int_type>::max());
472 }
473 
// Bridges to out-of-line conversion helpers that communicate through a raw
// buffer: the input is written unaligned, |fn| converts in place, and the
// result is read back. The buffer is sized for the larger of the two types.
template <typename dst_type, typename src_type, void (*fn)(Address)>
inline dst_type CallExternalIntToFloatFunction(src_type input) {
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  fn(data_addr);
  return ReadUnalignedValue<dst_type>(data_addr);
}

// Same protocol, but |fn| returns 0 when the value is not representable,
// which is reported as kTrapFloatUnrepresentable. The buffer is read back
// regardless. NOTE(review): the failure-path buffer contents depend on the
// wrapper implementation — confirm in wasm-external-refs.
template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
inline dst_type CallExternalFloatToIntFunction(src_type input,
                                               TrapReason* trap) {
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
  return ReadUnalignedValue<dst_type>(data_addr);
}
492 
// i64 -> i32 wraps by taking the low 32 bits.
inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<uint32_t>(a & 0xFFFFFFFF);
}

// Trapping f32/f64 -> i64/u64 conversions are routed through out-of-line
// wrapper functions (wasm-external-refs); a wrapper failure is reported as
// kTrapFloatUnrepresentable.
int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<int64_t, float,
                                        float32_to_int64_wrapper>(a, trap);
}

// Saturating variant: where the conversion above would trap, clamp to the
// target range instead (NaN -> 0).
int64_t ExecuteI64SConvertSatF32(float a) {
  TrapReason base_trap = kTrapCount;
  int64_t val = ExecuteI64SConvertF32(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<int64_t>::min()
                                  : std::numeric_limits<int64_t>::max());
}

int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<int64_t, double,
                                        float64_to_int64_wrapper>(a, trap);
}

// Saturating variant of the f64 -> i64 conversion (NaN -> 0).
int64_t ExecuteI64SConvertSatF64(double a) {
  TrapReason base_trap = kTrapCount;
  int64_t val = ExecuteI64SConvertF64(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<int64_t>::min()
                                  : std::numeric_limits<int64_t>::max());
}

uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<uint64_t, float,
                                        float32_to_uint64_wrapper>(a, trap);
}

// Saturating variant of the f32 -> u64 conversion (NaN -> 0; negative
// inputs clamp to 0 via numeric_limits<uint64_t>::min()).
uint64_t ExecuteI64UConvertSatF32(float a) {
  TrapReason base_trap = kTrapCount;
  uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
                                  : std::numeric_limits<uint64_t>::max());
}

uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
  return CallExternalFloatToIntFunction<uint64_t, double,
                                        float64_to_uint64_wrapper>(a, trap);
}
549 
550 uint64_t ExecuteI64UConvertSatF64(double a) {
551  TrapReason base_trap = kTrapCount;
552  int64_t val = ExecuteI64UConvertF64(a, &base_trap);
553  if (base_trap == kTrapCount) {
554  return val;
555  }
556  return std::isnan(a) ? 0
557  : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
558  : std::numeric_limits<uint64_t>::max());
559 }
560 
// Widening integer conversions: sign- or zero-extend 32 -> 64 bits.
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<int64_t>(a);
}

inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<uint64_t>(a);
}

// int -> float conversions via plain static_cast.
inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

// u64 -> f32 goes through an out-of-line wrapper rather than a static_cast;
// presumably because not all targets convert u64 natively — confirm in
// wasm-external-refs.
inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  return CallExternalIntToFloatFunction<float, uint64_t,
                                        uint64_to_float32_wrapper>(a);
}

inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}

// Reinterpretations copy the raw bit pattern, preserving NaN payloads.
inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return Float32::FromBits(a);
}

inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

// u64 -> f64 also goes through an out-of-line wrapper (see above).
inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  return CallExternalIntToFloatFunction<double, uint64_t,
                                        uint64_to_float64_wrapper>(a);
}

inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}

inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return Float64::FromBits(a);
}

// These take the boxed WasmValue so the exact float bit pattern (including
// NaN payloads) survives the transfer off the value stack.
inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
  return a.to_f32_boxed().get_bits();
}

inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
  return a.to_f64_boxed().get_bits();
}
626 
// Interpreter-internal opcodes (currently just the breakpoint marker),
// generated from FOREACH_INTERNAL_OPCODE.
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};

// Returns a printable name for |val|: internal opcodes are resolved first,
// everything else falls through to the regular wasm opcode table.
const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name: \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
643 
644 } // namespace
645 
646 class SideTable;
647 
648 // Code and metadata needed to execute a function.
650  const WasmFunction* function; // wasm function
651  BodyLocalDecls locals; // local declarations
652  const byte* orig_start; // start of original code
653  const byte* orig_end; // end of original code
654  byte* start; // start of (maybe altered) code
655  byte* end; // end of (maybe altered) code
656  SideTable* side_table; // precomputed side table for control flow.
657 
658  const byte* at(pc_t pc) { return start + pc; }
659 };
660 
661 // A helper class to compute the control transfers for each bytecode offset.
662 // Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
663 // be directly executed without the need to dynamically track blocks.
664 class SideTable : public ZoneObject {
665  public:
666  ControlTransferMap map_;
667  uint32_t max_stack_height_ = 0;
668 
669  SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
670  : map_(zone) {
671  // Create a zone for all temporary objects.
672  Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
673 
674  // Represents a control flow label.
675  class CLabel : public ZoneObject {
676  explicit CLabel(Zone* zone, uint32_t target_stack_height, uint32_t arity)
677  : target_stack_height(target_stack_height),
678  arity(arity),
679  refs(zone) {}
680 
681  public:
682  struct Ref {
683  const byte* from_pc;
684  const uint32_t stack_height;
685  };
686  const byte* target = nullptr;
687  uint32_t target_stack_height;
688  // Arity when branching to this label.
689  const uint32_t arity;
690  ZoneVector<Ref> refs;
691 
692  static CLabel* New(Zone* zone, uint32_t stack_height, uint32_t arity) {
693  return new (zone) CLabel(zone, stack_height, arity);
694  }
695 
696  // Bind this label to the given PC.
697  void Bind(const byte* pc) {
698  DCHECK_NULL(target);
699  target = pc;
700  }
701 
702  // Reference this label from the given location.
703  void Ref(const byte* from_pc, uint32_t stack_height) {
704  // Target being bound before a reference means this is a loop.
705  DCHECK_IMPLIES(target, *target == kExprLoop);
706  refs.push_back({from_pc, stack_height});
707  }
708 
709  void Finish(ControlTransferMap* map, const byte* start) {
710  DCHECK_NOT_NULL(target);
711  for (auto ref : refs) {
712  size_t offset = static_cast<size_t>(ref.from_pc - start);
713  auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
714  DCHECK_GE(ref.stack_height, target_stack_height);
715  spdiff_t spdiff =
716  static_cast<spdiff_t>(ref.stack_height - target_stack_height);
717  TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
718  pcdiff, ref.stack_height, target_stack_height, spdiff);
719  ControlTransferEntry& entry = (*map)[offset];
720  entry.pc_diff = pcdiff;
721  entry.sp_diff = spdiff;
722  entry.target_arity = arity;
723  }
724  }
725  };
726 
727  // An entry in the control stack.
728  struct Control {
729  const byte* pc;
730  CLabel* end_label;
731  CLabel* else_label;
732  // Arity (number of values on the stack) when exiting this control
733  // structure via |end|.
734  uint32_t exit_arity;
735  // Track whether this block was already left, i.e. all further
736  // instructions are unreachable.
737  bool unreachable = false;
738 
739  Control(const byte* pc, CLabel* end_label, CLabel* else_label,
740  uint32_t exit_arity)
741  : pc(pc),
742  end_label(end_label),
743  else_label(else_label),
744  exit_arity(exit_arity) {}
745  Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
746  : Control(pc, end_label, nullptr, exit_arity) {}
747 
748  void Finish(ControlTransferMap* map, const byte* start) {
749  end_label->Finish(map, start);
750  if (else_label) else_label->Finish(map, start);
751  }
752  };
753 
754  // Compute the ControlTransfer map.
755  // This algorithm maintains a stack of control constructs similar to the
756  // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
757  // bytecodes with their target, as well as determining whether the current
758  // bytecodes are within the true or false block of an else.
759  ZoneVector<Control> control_stack(&control_transfer_zone);
760  uint32_t stack_height = 0;
761  uint32_t func_arity =
762  static_cast<uint32_t>(code->function->sig->return_count());
763  CLabel* func_label =
764  CLabel::New(&control_transfer_zone, stack_height, func_arity);
765  control_stack.emplace_back(code->orig_start, func_label, func_arity);
766  auto control_parent = [&]() -> Control& {
767  DCHECK_LE(2, control_stack.size());
768  return control_stack[control_stack.size() - 2];
769  };
770  auto copy_unreachable = [&] {
771  control_stack.back().unreachable = control_parent().unreachable;
772  };
773  for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
774  i.has_next(); i.next()) {
775  WasmOpcode opcode = i.current();
776  if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
777  bool unreachable = control_stack.back().unreachable;
778  if (unreachable) {
779  TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
780  WasmOpcodes::OpcodeName(opcode));
781  } else {
782  auto stack_effect =
783  StackEffect(module, code->function->sig, i.pc(), i.end());
784  TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
785  WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
786  stack_effect.second);
787  DCHECK_GE(stack_height, stack_effect.first);
788  DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
789  stack_effect.first + stack_effect.second);
790  stack_height = stack_height - stack_effect.first + stack_effect.second;
791  if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
792  }
793  switch (opcode) {
794  case kExprBlock:
795  case kExprLoop: {
796  bool is_loop = opcode == kExprLoop;
797  BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
798  i.pc());
799  if (imm.type == kWasmVar) {
800  imm.sig = module->signatures[imm.sig_index];
801  }
802  TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
803  is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
804  CLabel* label =
805  CLabel::New(&control_transfer_zone, stack_height,
806  is_loop ? imm.in_arity() : imm.out_arity());
807  control_stack.emplace_back(i.pc(), label, imm.out_arity());
808  copy_unreachable();
809  if (is_loop) label->Bind(i.pc());
810  break;
811  }
812  case kExprIf: {
813  BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures, &i,
814  i.pc());
815  if (imm.type == kWasmVar) {
816  imm.sig = module->signatures[imm.sig_index];
817  }
818  TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
819  imm.in_arity(), imm.out_arity());
820  CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
821  imm.out_arity());
822  CLabel* else_label =
823  CLabel::New(&control_transfer_zone, stack_height, 0);
824  control_stack.emplace_back(i.pc(), end_label, else_label,
825  imm.out_arity());
826  copy_unreachable();
827  if (!unreachable) else_label->Ref(i.pc(), stack_height);
828  break;
829  }
830  case kExprElse: {
831  Control* c = &control_stack.back();
832  copy_unreachable();
833  TRACE("control @%u: Else\n", i.pc_offset());
834  if (!control_parent().unreachable) {
835  c->end_label->Ref(i.pc(), stack_height);
836  }
837  DCHECK_NOT_NULL(c->else_label);
838  c->else_label->Bind(i.pc() + 1);
839  c->else_label->Finish(&map_, code->orig_start);
840  c->else_label = nullptr;
841  DCHECK_GE(stack_height, c->end_label->target_stack_height);
842  stack_height = c->end_label->target_stack_height;
843  break;
844  }
845  case kExprEnd: {
846  Control* c = &control_stack.back();
847  TRACE("control @%u: End\n", i.pc_offset());
848  // Only loops have bound labels.
849  DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
850  if (!c->end_label->target) {
851  if (c->else_label) c->else_label->Bind(i.pc());
852  c->end_label->Bind(i.pc() + 1);
853  }
854  c->Finish(&map_, code->orig_start);
855  DCHECK_GE(stack_height, c->end_label->target_stack_height);
856  stack_height = c->end_label->target_stack_height + c->exit_arity;
857  control_stack.pop_back();
858  break;
859  }
860  case kExprBr: {
862  TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
863  Control* c = &control_stack[control_stack.size() - imm.depth - 1];
864  if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
865  break;
866  }
867  case kExprBrIf: {
869  TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
870  Control* c = &control_stack[control_stack.size() - imm.depth - 1];
871  if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
872  break;
873  }
874  case kExprBrTable: {
877  TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
878  imm.table_count);
879  if (!unreachable) {
880  while (iterator.has_next()) {
881  uint32_t j = iterator.cur_index();
882  uint32_t target = iterator.next();
883  Control* c = &control_stack[control_stack.size() - target - 1];
884  c->end_label->Ref(i.pc() + j, stack_height);
885  }
886  }
887  break;
888  }
889  default:
890  break;
891  }
892  if (WasmOpcodes::IsUnconditionalJump(opcode)) {
893  control_stack.back().unreachable = true;
894  }
895  }
896  DCHECK_EQ(0, control_stack.size());
897  DCHECK_EQ(func_arity, stack_height);
898  }
899 
900  ControlTransferEntry& Lookup(pc_t from) {
901  auto result = map_.find(from);
902  DCHECK(result != map_.end());
903  return result->second;
904  }
905 };
906 
907 // The main storage for interpreter code. It maps {WasmFunction} to the
908 // metadata needed to execute each function.
909 class CodeMap {
910  Zone* zone_;
911  const WasmModule* module_;
912  ZoneVector<InterpreterCode> interpreter_code_;
913  // TODO(wasm): Remove this testing wart. It is needed because interpreter
914  // entry stubs are not generated in testing the interpreter in cctests.
915  bool call_indirect_through_module_ = false;
916 
917  public:
918  CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
919  : zone_(zone), module_(module), interpreter_code_(zone) {
920  if (module == nullptr) return;
921  interpreter_code_.reserve(module->functions.size());
922  for (const WasmFunction& function : module->functions) {
923  if (function.imported) {
924  DCHECK(!function.code.is_set());
925  AddFunction(&function, nullptr, nullptr);
926  } else {
927  AddFunction(&function, module_start + function.code.offset(),
928  module_start + function.code.end_offset());
929  }
930  }
931  }
932 
933  bool call_indirect_through_module() { return call_indirect_through_module_; }
934 
935  void set_call_indirect_through_module(bool val) {
936  call_indirect_through_module_ = val;
937  }
938 
939  const WasmModule* module() const { return module_; }
940 
941  InterpreterCode* GetCode(const WasmFunction* function) {
942  InterpreterCode* code = GetCode(function->func_index);
943  DCHECK_EQ(function, code->function);
944  return code;
945  }
946 
947  InterpreterCode* GetCode(uint32_t function_index) {
948  DCHECK_LT(function_index, interpreter_code_.size());
949  return Preprocess(&interpreter_code_[function_index]);
950  }
951 
  // Resolves an indirect call: returns the interpreter code of the function
  // stored at {entry_index} in table {table_index}, or nullptr if any index
  // is out of bounds. Each in-bounds index is additionally masked (a no-op
  // for valid indices, asserted by the DCHECKs) as a mitigation against
  // speculative side-channel attacks (SSCA).
  InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
    uint32_t saved_index;
    USE(saved_index);
    if (table_index >= module_->tables.size()) return nullptr;
    // Mask table index for SSCA mitigation.
    saved_index = table_index;
    table_index &= static_cast<int32_t>((table_index - module_->tables.size()) &
                                        ~static_cast<int32_t>(table_index)) >>
                   31;
    DCHECK_EQ(table_index, saved_index);
    const WasmTable* table = &module_->tables[table_index];
    if (entry_index >= table->values.size()) return nullptr;
    // Mask entry_index for SSCA mitigation.
    saved_index = entry_index;
    entry_index &= static_cast<int32_t>((entry_index - table->values.size()) &
                                        ~static_cast<int32_t>(entry_index)) >>
                   31;
    DCHECK_EQ(entry_index, saved_index);
    // {index} is the function index stored in the table entry.
    uint32_t index = table->values[entry_index];
    if (index >= interpreter_code_.size()) return nullptr;
    // Mask index for SSCA mitigation.
    saved_index = index;
    index &= static_cast<int32_t>((index - interpreter_code_.size()) &
                                  ~static_cast<int32_t>(index)) >>
             31;
    DCHECK_EQ(index, saved_index);

    return GetCode(index);
  }
981 
982  InterpreterCode* Preprocess(InterpreterCode* code) {
983  DCHECK_EQ(code->function->imported, code->start == nullptr);
984  if (!code->side_table && code->start) {
985  // Compute the control targets map and the local declarations.
986  code->side_table = new (zone_) SideTable(zone_, module_, code);
987  }
988  return code;
989  }
990 
  // Appends an entry for {function}. {code_start}/{code_end} may be null for
  // imported functions (no bytecode). Functions must be added in index order.
  void AddFunction(const WasmFunction* function, const byte* code_start,
                   const byte* code_end) {
    // Aggregate-initializes InterpreterCode; start/end initially alias the
    // original code range, and the side table is computed lazily (nullptr).
    InterpreterCode code = {
        function, BodyLocalDecls(zone_), code_start,
        code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
        nullptr};

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
  }
1001 
  // Replaces the bytecode of an existing function entry and eagerly rebuilds
  // its side table (the old one is invalidated by resetting it to nullptr).
  void SetFunctionCode(const WasmFunction* function, const byte* start,
                       const byte* end) {
    DCHECK_LT(function->func_index, interpreter_code_.size());
    InterpreterCode* code = &interpreter_code_[function->func_index];
    DCHECK_EQ(function, code->function);
    code->orig_start = start;
    code->orig_end = end;
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
    code->side_table = nullptr;
    Preprocess(code);
  }
1014 };
1015 
1016 namespace {
1017 
// Result of attempting to call a function external to the interpreter (or of
// resolving an indirect call). Only the INTERNAL variant carries a payload.
struct ExternalCallResult {
  enum Type {
    // The function should be executed inside this interpreter.
    INTERNAL,
    // For indirect calls: Table or function does not exist.
    INVALID_FUNC,
    // For indirect calls: Signature does not match expected signature.
    SIGNATURE_MISMATCH,
    // The function was executed and returned normally.
    EXTERNAL_RETURNED,
    // The function was executed, threw an exception, and the stack was unwound.
    EXTERNAL_UNWOUND
  };
  Type type;
  // If type is INTERNAL, this field holds the function to call internally.
  InterpreterCode* interpreter_code;

  // Implicit construction from a non-INTERNAL type (no payload allowed).
  ExternalCallResult(Type type) : type(type) {  // NOLINT
    DCHECK_NE(INTERNAL, type);
  }
  // Construction of the INTERNAL variant with its code payload.
  ExternalCallResult(Type type, InterpreterCode* code)
      : type(type), interpreter_code(code) {
    DCHECK_EQ(INTERNAL, type);
  }
};
1043 
// Like a static_cast from src to dst, but specialized for boxed floats.
// This primary template performs a plain static_cast; the specializations
// below translate between raw bits and the Float32/Float64 box types.
template <typename dst, typename src>
struct converter {
  dst operator()(src input) const { return static_cast<dst>(input); }
};
// uint64_t -> Float64: reinterpret the bits as a boxed double.
template <>
struct converter<Float64, uint64_t> {
  Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
};
// uint32_t -> Float32: reinterpret the bits as a boxed float.
template <>
struct converter<Float32, uint32_t> {
  Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
};
// Float64 -> uint64_t: extract the raw bit pattern of the boxed double.
template <>
struct converter<uint64_t, Float64> {
  uint64_t operator()(Float64 val) const { return val.get_bits(); }
};
// Float32 -> uint32_t: extract the raw bit pattern of the boxed float.
template <>
struct converter<uint32_t, Float32> {
  uint32_t operator()(Float32 val) const { return val.get_bits(); }
};
1065 
// Whether a value can expose wasm nondeterminism. Only floating point NaNs
// qualify (see the specializations below); all other types must hit this
// primary template, enforced by the static_assert.
template <typename T>
V8_INLINE bool has_nondeterminism(T val) {
  static_assert(!std::is_floating_point<T>::value, "missing specialization");
  return false;
}
// float: NaN bit patterns are not fully specified by wasm, so they count as
// a source of nondeterminism.
template <>
V8_INLINE bool has_nondeterminism<float>(float val) {
  return std::isnan(val);
}
// double: same reasoning as the float specialization above.
template <>
V8_INLINE bool has_nondeterminism<double>(double val) {
  return std::isnan(val);
}
1079 
1080 } // namespace
1081 
1082 // Responsible for executing code directly.
1083 class ThreadImpl {
  // One entry of the interpreter into wasm; records where the activation's
  // frames ({fp}, index into frames_) and values ({sp}, stack height) begin.
  struct Activation {
    uint32_t fp;
    sp_t sp;
    Activation(uint32_t fp, sp_t sp) : fp(fp), sp(sp) {}
  };
1089 
1090  public:
  // Frames and activations are zone-allocated; the value stack itself
  // (stack_) is allocated lazily via EnsureStackSpace.
  ThreadImpl(Zone* zone, CodeMap* codemap,
             Handle<WasmInstanceObject> instance_object)
      : codemap_(codemap),
        instance_object_(instance_object),
        frames_(zone),
        activations_(zone) {}
1097 
1098  //==========================================================================
1099  // Implementation of public interface for WasmInterpreter::Thread.
1100  //==========================================================================
1101 
  // Current execution state (STOPPED/RUNNING/PAUSED/FINISHED/TRAPPED).
  WasmInterpreter::State state() { return state_; }
1103 
  // Sets up the first frame of a fresh activation: pushes the {args} onto the
  // value stack, then pushes a frame for {function}. Must only be called when
  // the current activation has no frames yet.
  void InitFrame(const WasmFunction* function, WasmValue* args) {
    DCHECK_EQ(current_activation().fp, frames_.size());
    InterpreterCode* code = codemap()->GetCode(function);
    size_t num_params = function->sig->parameter_count();
    // Reserve space before pushing; Push itself assumes capacity.
    EnsureStackSpace(num_params);
    Push(args, num_params);
    PushFrame(code);
  }
1112 
1113  WasmInterpreter::State Run(int num_steps = -1) {
1114  DCHECK(state_ == WasmInterpreter::STOPPED ||
1115  state_ == WasmInterpreter::PAUSED);
1116  DCHECK(num_steps == -1 || num_steps > 0);
1117  if (num_steps == -1) {
1118  TRACE(" => Run()\n");
1119  } else if (num_steps == 1) {
1120  TRACE(" => Step()\n");
1121  } else {
1122  TRACE(" => Run(%d)\n", num_steps);
1123  }
1124  state_ = WasmInterpreter::RUNNING;
1125  Execute(frames_.back().code, frames_.back().pc, num_steps);
1126  // If state_ is STOPPED, the current activation must be fully unwound.
1127  DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
1128  current_activation().fp == frames_.size());
1129  return state_;
1130  }
1131 
  // Pausing a running thread is not supported yet.
  void Pause() { UNIMPLEMENTED(); }
1133 
1134  void Reset() {
1135  TRACE("----- RESET -----\n");
1136  sp_ = stack_.get();
1137  frames_.clear();
1138  state_ = WasmInterpreter::STOPPED;
1139  trap_reason_ = kTrapCount;
1140  possible_nondeterminism_ = false;
1141  }
1142 
1143  int GetFrameCount() {
1144  DCHECK_GE(kMaxInt, frames_.size());
1145  return static_cast<int>(frames_.size());
1146  }
1147 
  // Returns the {index}-th return value of the finished current activation.
  // After a trap there are no meaningful results, so a dummy value is
  // returned instead.
  WasmValue GetReturnValue(uint32_t index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
    DCHECK_EQ(WasmInterpreter::FINISHED, state_);
    Activation act = current_activation();
    // Current activation must be finished.
    DCHECK_EQ(act.fp, frames_.size());
    // Return values sit at the activation's base stack height.
    return GetStackValue(act.sp + index);
  }
1156 
  // Reads the value at absolute stack slot {index} (must be below the
  // current stack height).
  WasmValue GetStackValue(sp_t index) {
    DCHECK_GT(StackHeight(), index);
    return stack_[index];
  }
1161 
  // Overwrites the value at absolute stack slot {index} (must be below the
  // current stack height).
  void SetStackValue(sp_t index, WasmValue value) {
    DCHECK_GT(StackHeight(), index);
    stack_[index] = value;
  }
1166 
  // Reason for the last trap; kTrapCount if no trap occurred (see Reset).
  TrapReason GetTrapReason() { return trap_reason_; }
1168 
  // pc of the last breakpoint hit; kInvalidPc if none (see SkipBreakpoint).
  pc_t GetBreakpointPc() { return break_pc_; }
1170 
  // Whether execution observed a potential source of nondeterminism (NaNs).
  bool PossibleNondeterminism() { return possible_nondeterminism_; }
1172 
  // Total number of frames pushed on this thread (see PushFrame).
  uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }
1174 
  // ORs {flags} (WasmInterpreter::BreakFlag bits) into the break flags.
  void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }
1176 
  // Resets all break flags to None.
  void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
1178 
  // Number of currently open activations on this thread.
  uint32_t NumActivations() {
    return static_cast<uint32_t>(activations_.size());
  }
1182 
  // Opens a new activation, recording the current frame count and stack
  // height as its base, and returns its id (== its index in activations_).
  // Also resets the state to STOPPED so the new activation can Run().
  uint32_t StartActivation() {
    TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
    // If you use activations, use them consistently:
    DCHECK_IMPLIES(activations_.empty(), frames_.empty());
    DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
    uint32_t activation_id = static_cast<uint32_t>(activations_.size());
    activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
                              StackHeight());
    state_ = WasmInterpreter::STOPPED;
    return activation_id;
  }
1194 
  // Closes the topmost activation {id} (activations must be finished in LIFO
  // order) and drops any values it left on the stack, e.g. return values.
  void FinishActivation(uint32_t id) {
    TRACE("----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
    DCHECK_LT(0, activations_.size());
    DCHECK_EQ(activations_.size() - 1, id);
    // Stack height must match the start of this activation (otherwise unwind
    // first).
    DCHECK_EQ(activations_.back().fp, frames_.size());
    DCHECK_LE(activations_.back().sp, StackHeight());
    sp_ = stack_.get() + activations_.back().sp;
    activations_.pop_back();
  }
1206 
  // Index into frames_ of the first frame belonging to activation {id}.
  uint32_t ActivationFrameBase(uint32_t id) {
    DCHECK_GT(activations_.size(), id);
    return activations_[id].fp;
  }
1211 
  // Handle a thrown exception. Returns whether the exception was handled inside
  // the current activation. Unwinds the interpreted stack accordingly.
  // Note: currently no wasm-level handlers exist, so this always unwinds the
  // whole activation (frames and values) and returns UNWOUND.
  WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
      Isolate* isolate) {
    DCHECK(isolate->has_pending_exception());
    // TODO(wasm): Add wasm exception handling (would return HANDLED).
    USE(isolate->pending_exception());
    TRACE("----- UNWIND -----\n");
    DCHECK_LT(0, activations_.size());
    Activation& act = activations_.back();
    DCHECK_LE(act.fp, frames_.size());
    frames_.resize(act.fp);
    DCHECK_LE(act.sp, StackHeight());
    sp_ = stack_.get() + act.sp;
    state_ = WasmInterpreter::STOPPED;
    return WasmInterpreter::Thread::UNWOUND;
  }
1229 
1230  private:
  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
    pc_t pc;
    sp_t sp;  // Stack height at frame entry; parameters start here.

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.type_list.size(); }
  };
1242 
  // Snapshot of a control block: its pc, stack height, frame index and the
  // number of values it yields ({arity}).
  struct Block {
    pc_t pc;
    sp_t sp;
    size_t fp;
    uint32_t arity;
  };
1249 
  friend class InterpretedFrameImpl;

  CodeMap* codemap_;
  Handle<WasmInstanceObject> instance_object_;
  // Value stack shared by all frames of this thread; grown on demand.
  std::unique_ptr<WasmValue[]> stack_;
  WasmValue* stack_limit_ = nullptr;  // End of allocated stack space.
  WasmValue* sp_ = nullptr;           // Current stack pointer.
  ZoneVector<Frame> frames_;
  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
  // pc of the last breakpoint hit; kInvalidPc when none (see SkipBreakpoint).
  pc_t break_pc_ = kInvalidPc;
  // kTrapCount doubles as the "no trap" sentinel (see Reset / DoTrap).
  TrapReason trap_reason_ = kTrapCount;
  bool possible_nondeterminism_ = false;
  uint8_t break_flags_ = 0;  // a combination of WasmInterpreter::BreakFlag
  uint64_t num_interpreted_calls_ = 0;
  // Store the stack height of each activation (for unwind and frame
  // inspection).
  ZoneVector<Activation> activations_;
1267 
  // The code map shared with the owning interpreter.
  CodeMap* codemap() const { return codemap_; }
  // Convenience accessor for the module behind the code map.
  const WasmModule* module() const { return codemap_->module(); }
1270 
  // Records a trap: switches the thread to TRAPPED, remembers the reason and
  // commits {pc} into the topmost frame for later inspection.
  void DoTrap(TrapReason trap, pc_t pc) {
    TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }
1277 
  // Push a frame with arguments already on the stack.
  void PushFrame(InterpreterCode* code) {
    DCHECK_NOT_NULL(code);
    DCHECK_NOT_NULL(code->side_table);
    // Reserve the worst-case stack usage of this function up front, so Push
    // never needs to grow the stack mid-execution.
    EnsureStackSpace(code->side_table->max_stack_height_ +
                     code->locals.type_list.size());

    ++num_interpreted_calls_;
    size_t arity = code->function->sig->parameter_count();
    // The parameters will overlap the arguments already on the stack.
    DCHECK_GE(StackHeight(), arity);
    frames_.push_back({code, 0, StackHeight() - arity});
    // Start execution right after the local declarations.
    frames_.back().pc = InitLocals(code);
    TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
          code->function->func_index, frames_.back().pc);
  }
1294 
  // Pushes a zero-initialized value for every declared local of {code} and
  // returns the pc just past the encoded local declarations, i.e. the first
  // executable instruction.
  pc_t InitLocals(InterpreterCode* code) {
    for (auto p : code->locals.type_list) {
      WasmValue val;
      switch (p) {
#define CASE_TYPE(wasm, ctype) \
  case kWasm##wasm:            \
    val = WasmValue(ctype{});  \
    break;
        WASM_CTYPES(CASE_TYPE)
#undef CASE_TYPE
        default:
          UNREACHABLE();
          break;
      }
      Push(val);
    }
    return code->locals.encoded_size;
  }
1313 
  // Stores {pc} into the topmost frame so the current position survives
  // across calls, traps and pauses.
  void CommitPc(pc_t pc) {
    DCHECK(!frames_.empty());
    frames_.back().pc = pc;
  }
1318 
1319  bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
1320  if (pc == break_pc_) {
1321  // Skip the previously hit breakpoint when resuming.
1322  break_pc_ = kInvalidPc;
1323  return true;
1324  }
1325  return false;
1326  }
1327 
  // Returns the pc delta for the branch at {pc}, as recorded in the side
  // table.
  int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
  }
1331 
1332  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
1333  ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
1334  DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
1335  control_transfer_entry.target_arity);
1336  return control_transfer_entry.pc_diff;
1337  }
1338 
1339  pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
1340  switch (code->orig_start[pc]) {
1341  case kExprCallFunction: {
1342  CallFunctionImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
1343  return pc + 1 + imm.length;
1344  }
1345  case kExprCallIndirect: {
1346  CallIndirectImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
1347  return pc + 1 + imm.length;
1348  }
1349  default:
1350  UNREACHABLE();
1351  }
1352  }
1353 
  // Pops the topmost frame, moving its {arity} return values down to the
  // frame's base stack height. Returns false if this was the last frame of
  // the current activation (execution FINISHED); otherwise restores the
  // caller's code/pc/limit into the out-parameters and returns true.
  bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
                size_t arity) {
    DCHECK_GT(frames_.size(), 0);
    WasmValue* sp_dest = stack_.get() + frames_.back().sp;
    frames_.pop_back();
    if (frames_.size() == current_activation().fp) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(sp_dest, arity);
      TRACE(" => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      decoder->Reset((*code)->start, (*code)->end);
      // Resume just past the call instruction in the caller.
      *pc = ReturnPc(decoder, *code, top->pc);
      *limit = top->code->end - top->code->start;
      TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
            (*code)->function->func_index, *pc);
      DoStackTransfer(sp_dest, arity);
      return true;
    }
  }
1378 
  // Returns true if the call was successful, false if the stack check failed
  // and the current activation was fully unwound.
  bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
              pc_t* limit) V8_WARN_UNUSED_RESULT {
    // Save the resume position in the caller before switching frames.
    frames_.back().pc = *pc;
    PushFrame(target);
    if (!DoStackCheck()) return false;
    // Continue at the callee's first instruction (set by PushFrame).
    *pc = frames_.back().pc;
    *limit = target->end - target->start;
    decoder->Reset(target->start, target->end);
    return true;
  }
1391 
  // Copies {arity} values on the top of the stack down the stack to {dest},
  // dropping the values in-between.
  void DoStackTransfer(WasmValue* dest, size_t arity) {
    // before: |---------------| pop_count | arity |
    //         ^ 0             ^ dest              ^ sp_
    //
    // after:  |---------------| arity |
    //         ^ 0                     ^ sp_
    DCHECK_LE(dest, sp_);
    DCHECK_LE(dest + arity, sp_);
    // memmove because source and destination ranges may overlap.
    if (arity) memmove(dest, sp_ - arity, arity * sizeof(*sp_));
    sp_ = dest + arity;
  }
1405 
  // Returns the effective address for a sizeof(mtype)-byte access at
  // {offset + index}, or kNullAddress if it would be out of bounds. The three
  // comparisons are ordered so none of the subtractions can wrap around.
  template <typename mtype>
  inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
    size_t mem_size = instance_object_->memory_size();
    if (sizeof(mtype) > mem_size) return kNullAddress;
    if (offset > (mem_size - sizeof(mtype))) return kNullAddress;
    if (index > (mem_size - sizeof(mtype) - offset)) return kNullAddress;
    // Compute the effective address of the access, making sure to condition
    // the index even in the in-bounds case.
    return reinterpret_cast<Address>(instance_object_->memory_start()) +
           offset + (index & instance_object_->memory_mask());
  }
1417 
  // Executes a load: pops the index, bounds-checks, reads a little-endian
  // value of memory type {mtype} and pushes it converted to computation type
  // {ctype}. Returns false after trapping on out-of-bounds. {len} is set to
  // the instruction length including the memory immediate.
  template <typename ctype, typename mtype>
  bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
                   MachineRepresentation rep) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
                                                    sizeof(ctype));
    uint32_t index = Pop().to<uint32_t>();
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WasmValue result(
        converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));

    Push(result);
    len = 1 + imm.length;

    if (FLAG_trace_wasm_memory) {
      MemoryTracingInfo info(imm.offset + index, false, rep);
      TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
                           code->function->func_index, static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }
1444 
  // Executes a store: pops the value (top of stack) then the index,
  // bounds-checks, and writes the value converted from {ctype} to memory
  // type {mtype} in little-endian order. Returns false after trapping on
  // out-of-bounds. {len} is set to the full instruction length.
  template <typename ctype, typename mtype>
  bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len,
                    MachineRepresentation rep) {
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc),
                                                    sizeof(ctype));
    ctype val = Pop().to<ctype>();

    uint32_t index = Pop().to<uint32_t>();
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
    if (!addr) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
    len = 1 + imm.length;

    if (FLAG_trace_wasm_memory) {
      MemoryTracingInfo info(imm.offset + index, true, rep);
      TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
                           code->function->func_index, static_cast<int>(pc),
                           instance_object_->memory_start());
    }

    return true;
  }
1470 
  // Pops the operand(s) and memory index of an atomic op and bounds-checks
  // the access. Pop order matters: {val2} (if requested) is topmost, then
  // {val}, then the index. Returns false after trapping on out-of-bounds.
  // {len} is set to 2 + immediate length (atomic prefix byte + opcode).
  template <typename type, typename op_type>
  bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
                             Address& address, pc_t pc, int& len,
                             type* val = nullptr, type* val2 = nullptr) {
    // The memory immediate starts after the prefix byte, hence pc + 1.
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
                                                    sizeof(type));
    if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
    if (val) *val = static_cast<type>(Pop().to<op_type>());
    uint32_t index = Pop().to<uint32_t>();
    address = BoundsCheckMem<type>(imm.offset, index);
    if (!address) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    len = 2 + imm.length;
    return true;
  }
1488 
  // Executes a numeric-prefixed opcode (currently the saturating float-to-int
  // conversions): pops the operand, pushes the converted result. Always
  // returns true for known opcodes; unknown opcodes are fatal.
  bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
                        InterpreterCode* code, pc_t pc, int& len) {
    switch (opcode) {
      case kExprI32SConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
        return true;
      case kExprI32UConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
        return true;
      case kExprI32SConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
        return true;
      case kExprI32UConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
        return true;
      case kExprI64SConvertSatF32:
        Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64UConvertSatF32:
        Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64SConvertSatF64:
        Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
        return true;
      case kExprI64UConvertSatF64:
        Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
        return true;
      default:
        FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
              OpcodeName(code->start[pc]));
        UNREACHABLE();
    }
    // Not reachable; kept to satisfy compilers that require a return here.
    return false;
  }
1523 
  // Executes one atomic opcode: pops operand(s) and memory index via
  // ExtractAtomicOpParams, performs the access through std::atomic free
  // functions on the wasm memory, and pushes the result (loads, RMW ops and
  // compare-exchange; stores push nothing). Returns false after trapping on
  // out-of-bounds; {len} is set to the full instruction length.
  bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
                       InterpreterCode* code, pc_t pc, int& len) {
    WasmValue result;
    switch (opcode) {
// Disabling on Mips as 32 bit atomics are not correctly laid out for load/store
// on big endian and 64 bit atomics fail to compile.
#if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
// Read-modify-write: fetches the old value, applies {operation}, pushes the
// old value (widened to {op_type}).
#define ATOMIC_BINOP_CASE(name, type, op_type, operation)                   \
  case kExpr##name: {                                                       \
    type val;                                                               \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                              &val)) {                      \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    result = WasmValue(static_cast<op_type>(                                \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)));  \
    Push(result);                                                           \
    break;                                                                  \
  }
      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange);
      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add);
      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub);
      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t, atomic_fetch_and);
      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or);
      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor);
      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
                        atomic_exchange);
      ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
                        atomic_exchange);
#undef ATOMIC_BINOP_CASE
// Compare-exchange: pops expected ({val}) and replacement ({val2}); pushes
// the value observed in memory (== {val} on success).
#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type)                   \
  case kExpr##name: {                                                       \
    type val;                                                               \
    type val2;                                                              \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                              &val, &val2)) {               \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    std::atomic_compare_exchange_strong(                                    \
        reinterpret_cast<std::atomic<type>*>(addr), &val, val2);            \
    Push(WasmValue(static_cast<op_type>(val)));                             \
    break;                                                                  \
  }
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
                                   uint64_t);
#undef ATOMIC_COMPARE_EXCHANGE_CASE
// Load: no value operand, pushes the loaded value widened to {op_type}.
#define ATOMIC_LOAD_CASE(name, type, op_type, operation)                       \
  case kExpr##name: {                                                          \
    Address addr;                                                              \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \
      return false;                                                            \
    }                                                                          \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                   \
                  "Size mismatch for types std::atomic<" #type                 \
                  ">, and " #type);                                            \
    result = WasmValue(static_cast<op_type>(                                   \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr))));          \
    Push(result);                                                              \
    break;                                                                     \
  }
      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
#undef ATOMIC_LOAD_CASE
// Store: pops the value to write; pushes nothing.
#define ATOMIC_STORE_CASE(name, type, op_type, operation)                   \
  case kExpr##name: {                                                       \
    type val;                                                               \
    Address addr;                                                           \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \
                                              &val)) {                      \
      return false;                                                         \
    }                                                                       \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                \
                  "Size mismatch for types std::atomic<" #type              \
                  ">, and " #type);                                         \
    std::operation(reinterpret_cast<std::atomic<type>*>(addr), val);        \
    break;                                                                  \
  }
      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
#undef ATOMIC_STORE_CASE
#endif  // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN)
      default:
        UNREACHABLE();
        return false;
    }
    return true;
  }
1678 
1679  byte* GetGlobalPtr(const WasmGlobal* global) {
1680  if (global->mutability && global->imported) {
1681  return reinterpret_cast<byte*>(
1682  instance_object_->imported_mutable_globals()[global->index]);
1683  } else {
1684  return instance_object_->globals_start() + global->offset;
1685  }
1686  }
1687 
1688  bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
1689  pc_t pc, int& len) {
1690  switch (opcode) {
1691 #define SPLAT_CASE(format, sType, valType, num) \
1692  case kExpr##format##Splat: { \
1693  WasmValue val = Pop(); \
1694  valType v = val.to<valType>(); \
1695  sType s; \
1696  for (int i = 0; i < num; i++) s.val[i] = v; \
1697  Push(WasmValue(Simd128(s))); \
1698  return true; \
1699  }
1700  SPLAT_CASE(I32x4, int4, int32_t, 4)
1701  SPLAT_CASE(F32x4, float4, float, 4)
1702  SPLAT_CASE(I16x8, int8, int32_t, 8)
1703  SPLAT_CASE(I8x16, int16, int32_t, 16)
1704 #undef SPLAT_CASE
1705 #define EXTRACT_LANE_CASE(format, name) \
1706  case kExpr##format##ExtractLane: { \
1707  SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
1708  ++len; \
1709  WasmValue val = Pop(); \
1710  Simd128 s = val.to_s128(); \
1711  auto ss = s.to_##name(); \
1712  Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
1713  return true; \
1714  }
1715  EXTRACT_LANE_CASE(I32x4, i32x4)
1716  EXTRACT_LANE_CASE(F32x4, f32x4)
1717  EXTRACT_LANE_CASE(I16x8, i16x8)
1718  EXTRACT_LANE_CASE(I8x16, i8x16)
1719 #undef EXTRACT_LANE_CASE
1720 #define BINOP_CASE(op, name, stype, count, expr) \
1721  case kExpr##op: { \
1722  WasmValue v2 = Pop(); \
1723  WasmValue v1 = Pop(); \
1724  stype s1 = v1.to_s128().to_##name(); \
1725  stype s2 = v2.to_s128().to_##name(); \
1726  stype res; \
1727  for (size_t i = 0; i < count; ++i) { \
1728  auto a = s1.val[LANE(i, s1)]; \
1729  auto b = s2.val[LANE(i, s1)]; \
1730  res.val[LANE(i, s1)] = expr; \
1731  } \
1732  Push(WasmValue(Simd128(res))); \
1733  return true; \
1734  }
1735  BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
1736  BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
1737  BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
1738  BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
1739  BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
1740  BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
1741  BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
1742  BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
1743  BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
1744  BINOP_CASE(I32x4MinU, i32x4, int4, 4,
1745  static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
1746  BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
1747  BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
1748  static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
1749  BINOP_CASE(S128And, i32x4, int4, 4, a & b)
1750  BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
1751  BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
1752  BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
1753  BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
1754  BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
1755  BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
1756  BINOP_CASE(I16x8MinU, i16x8, int8, 8,
1757  static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
1758  BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
1759  BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
1760  static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
1761  BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
1762  BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
1763  BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
1764  BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
1765  BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
1766  BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
1767  BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
1768  BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
1769  BINOP_CASE(I8x16MinU, i8x16, int16, 16,
1770  static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
1771  BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
1772  BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
1773  static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
1774  BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
1775  BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
1776  SaturateAdd<uint8_t>(a, b))
1777  BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
1778  BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
1779  SaturateSub<uint8_t>(a, b))
1780 #undef BINOP_CASE
1781 #define UNOP_CASE(op, name, stype, count, expr) \
1782  case kExpr##op: { \
1783  WasmValue v = Pop(); \
1784  stype s = v.to_s128().to_##name(); \
1785  stype res; \
1786  for (size_t i = 0; i < count; ++i) { \
1787  auto a = s.val[i]; \
1788  res.val[i] = expr; \
1789  } \
1790  Push(WasmValue(Simd128(res))); \
1791  return true; \
1792  }
1793  UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
1794  UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
1795  UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
1796  UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
1797  UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
1798  UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
1799  UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
1800  UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
1801 #undef UNOP_CASE
1802 #define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
1803  case kExpr##op: { \
1804  WasmValue v2 = Pop(); \
1805  WasmValue v1 = Pop(); \
1806  stype s1 = v1.to_s128().to_##name(); \
1807  stype s2 = v2.to_s128().to_##name(); \
1808  out_stype res; \
1809  for (size_t i = 0; i < count; ++i) { \
1810  auto a = s1.val[i]; \
1811  auto b = s2.val[i]; \
1812  res.val[i] = expr ? -1 : 0; \
1813  } \
1814  Push(WasmValue(Simd128(res))); \
1815  return true; \
1816  }
1817  CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
1818  CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
1819  CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
1820  CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
1821  CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
1822  CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
1823  CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
1824  CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
1825  CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
1826  CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
1827  CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
1828  CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
1829  CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
1830  static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
1831  CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
1832  static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
1833  CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
1834  static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
1835  CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
1836  static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
1837  CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
1838  CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
1839  CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
1840  CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
1841  CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
1842  CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
1843  CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
1844  static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
1845  CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
1846  static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
1847  CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
1848  static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
1849  CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
1850  static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
1851  CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
1852  CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
1853  CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
1854  CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
1855  CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
1856  CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
1857  CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
1858  static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
1859  CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
1860  static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
1861  CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
1862  static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
1863  CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
1864  static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
1865 #undef CMPOP_CASE
1866 #define REPLACE_LANE_CASE(format, name, stype, ctype) \
1867  case kExpr##format##ReplaceLane: { \
1868  SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
1869  ++len; \
1870  WasmValue new_val = Pop(); \
1871  WasmValue simd_val = Pop(); \
1872  stype s = simd_val.to_s128().to_##name(); \
1873  s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
1874  Push(WasmValue(Simd128(s))); \
1875  return true; \
1876  }
1877  REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
1878  REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
1879  REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
1880  REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
1881 #undef REPLACE_LANE_CASE
1882  case kExprS128LoadMem:
1883  return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
1884  MachineRepresentation::kSimd128);
1885  case kExprS128StoreMem:
1886  return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
1887  MachineRepresentation::kSimd128);
1888 #define SHIFT_CASE(op, name, stype, count, expr) \
1889  case kExpr##op: { \
1890  SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \
1891  ++len; \
1892  WasmValue v = Pop(); \
1893  stype s = v.to_s128().to_##name(); \
1894  stype res; \
1895  for (size_t i = 0; i < count; ++i) { \
1896  auto a = s.val[i]; \
1897  res.val[i] = expr; \
1898  } \
1899  Push(WasmValue(Simd128(res))); \
1900  return true; \
1901  }
1902  SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
1903  SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
1904  SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
1905  static_cast<uint32_t>(a) >> imm.shift)
1906  SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
1907  SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
1908  SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
1909  static_cast<uint16_t>(a) >> imm.shift)
1910  SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
1911  SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
1912  SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
1913  static_cast<uint8_t>(a) >> imm.shift)
1914 #undef SHIFT_CASE
1915 #define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
1916  expr) \
1917  case kExpr##op: { \
1918  WasmValue v = Pop(); \
1919  src_type s = v.to_s128().to_##name(); \
1920  dst_type res; \
1921  for (size_t i = 0; i < count; ++i) { \
1922  ctype a = s.val[LANE(start_index + i, s)]; \
1923  res.val[LANE(i, res)] = expr; \
1924  } \
1925  Push(WasmValue(Simd128(res))); \
1926  return true; \
1927  }
1928  CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
1929  static_cast<float>(a))
1930  CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
1931  static_cast<float>(a))
1932  CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
1933  std::isnan(a) ? 0
1934  : a<kMinInt ? kMinInt : a> kMaxInt
1935  ? kMaxInt
1936  : static_cast<int32_t>(a))
1937  CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
1938  std::isnan(a)
1939  ? 0
1940  : a<0 ? 0 : a> kMaxUInt32 ? kMaxUInt32
1941  : static_cast<uint32_t>(a))
1942  CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
1943  a)
1944  CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
1945  a)
1946  CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
1947  CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
1948  a)
1949  CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
1950  a)
1951  CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
1952  a)
1953  CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
1954  CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
1955  a)
1956 #undef CONVERT_CASE
1957 #define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \
1958  is_unsigned) \
1959  case kExpr##op: { \
1960  WasmValue v2 = Pop(); \
1961  WasmValue v1 = Pop(); \
1962  src_type s1 = v1.to_s128().to_##name(); \
1963  src_type s2 = v2.to_s128().to_##name(); \
1964  dst_type res; \
1965  int64_t min = std::numeric_limits<ctype>::min(); \
1966  int64_t max = std::numeric_limits<ctype>::max(); \
1967  for (size_t i = 0; i < count; ++i) { \
1968  int32_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
1969  : s2.val[LANE(i - count / 2, s2)]; \
1970  int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \
1971  res.val[LANE(i, res)] = \
1972  static_cast<dst_ctype>(std::max(min, std::min(max, a))); \
1973  } \
1974  Push(WasmValue(Simd128(res))); \
1975  return true; \
1976  }
1977  PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t,
1978  false)
1979  PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t,
1980  true)
1981  PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t,
1982  false)
1983  PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t,
1984  true)
1985 #undef PACK_CASE
1986  case kExprS128Select: {
1987  int4 v2 = Pop().to_s128().to_i32x4();
1988  int4 v1 = Pop().to_s128().to_i32x4();
1989  int4 bool_val = Pop().to_s128().to_i32x4();
1990  int4 res;
1991  for (size_t i = 0; i < 4; ++i) {
1992  res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
1993  }
1994  Push(WasmValue(Simd128(res)));
1995  return true;
1996  }
1997 #define ADD_HORIZ_CASE(op, name, stype, count) \
1998  case kExpr##op: { \
1999  WasmValue v2 = Pop(); \
2000  WasmValue v1 = Pop(); \
2001  stype s1 = v1.to_s128().to_##name(); \
2002  stype s2 = v2.to_s128().to_##name(); \
2003  stype res; \
2004  for (size_t i = 0; i < count / 2; ++i) { \
2005  res.val[LANE(i, s1)] = \
2006  s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
2007  res.val[LANE(i + count / 2, s1)] = \
2008  s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
2009  } \
2010  Push(WasmValue(Simd128(res))); \
2011  return true; \
2012  }
2013  ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
2014  ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
2015  ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
2016 #undef ADD_HORIZ_CASE
2017  case kExprS8x16Shuffle: {
2019  code->at(pc));
2020  len += 16;
2021  int16 v2 = Pop().to_s128().to_i8x16();
2022  int16 v1 = Pop().to_s128().to_i8x16();
2023  int16 res;
2024  for (size_t i = 0; i < kSimd128Size; ++i) {
2025  int lane = imm.shuffle[i];
2026  res.val[LANE(i, v1)] = lane < kSimd128Size
2027  ? v1.val[LANE(lane, v1)]
2028  : v2.val[LANE(lane - kSimd128Size, v1)];
2029  }
2030  Push(WasmValue(Simd128(res)));
2031  return true;
2032  }
2033 #define REDUCTION_CASE(op, name, stype, count, operation) \
2034  case kExpr##op: { \
2035  stype s = Pop().to_s128().to_##name(); \
2036  int32_t res = s.val[0]; \
2037  for (size_t i = 1; i < count; ++i) { \
2038  res = res operation static_cast<int32_t>(s.val[i]); \
2039  } \
2040  Push(WasmValue(res)); \
2041  return true; \
2042  }
2043  REDUCTION_CASE(S1x4AnyTrue, i32x4, int4, 4, |)
2044  REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
2045  REDUCTION_CASE(S1x8AnyTrue, i16x8, int8, 8, |)
2046  REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
2047  REDUCTION_CASE(S1x16AnyTrue, i8x16, int16, 16, |)
2048  REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
2049 #undef REDUCTION_CASE
2050  default:
2051  return false;
2052  }
2053  }
2054 
2055  // Check if our control stack (frames_) exceeds the limit. Trigger stack
2056  // overflow if it does, and unwinding the current frame.
2057  // Returns true if execution can continue, false if the current activation was
2058  // fully unwound.
2059  // Do call this function immediately *after* pushing a new frame. The pc of
2060  // the top frame will be reset to 0 if the stack check fails.
2061  bool DoStackCheck() V8_WARN_UNUSED_RESULT {
2062  // The goal of this stack check is not to prevent actual stack overflows,
2063  // but to simulate stack overflows during the execution of compiled code.
2064  // That is why this function uses FLAG_stack_size, even though the value
2065  // stack actually lies in zone memory.
2066  const size_t stack_size_limit = FLAG_stack_size * KB;
2067  // Sum up the value stack size and the control stack size.
2068  const size_t current_stack_size =
2069  (sp_ - stack_.get()) + frames_.size() * sizeof(Frame);
2070  if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
2071  return true;
2072  }
2073  // The pc of the top frame is initialized to the first instruction. We reset
2074  // it to 0 here such that we report the same position as in compiled code.
2075  frames_.back().pc = 0;
2076  Isolate* isolate = instance_object_->GetIsolate();
2077  HandleScope handle_scope(isolate);
2078  isolate->StackOverflow();
2079  return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
2080  }
2081 
2082  void Execute(InterpreterCode* code, pc_t pc, int max) {
2083  DCHECK_NOT_NULL(code->side_table);
2084  DCHECK(!frames_.empty());
2085  // There must be enough space on the stack to hold the arguments, locals,
2086  // and the value stack.
2087  DCHECK_LE(code->function->sig->parameter_count() +
2088  code->locals.type_list.size() +
2089  code->side_table->max_stack_height_,
2090  stack_limit_ - stack_.get() - frames_.back().sp);
2091 
2092  Decoder decoder(code->start, code->end);
2093  pc_t limit = code->end - code->start;
2094  bool hit_break = false;
2095 
2096  while (true) {
2097 #define PAUSE_IF_BREAK_FLAG(flag) \
2098  if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \
2099  hit_break = true; \
2100  max = 0; \
2101  }
2102 
2103  DCHECK_GT(limit, pc);
2104  DCHECK_NOT_NULL(code->start);
2105 
2106  // Do first check for a breakpoint, in order to set hit_break correctly.
2107  const char* skip = " ";
2108  int len = 1;
2109  byte orig = code->start[pc];
2110  WasmOpcode opcode = static_cast<WasmOpcode>(orig);
2111  if (WasmOpcodes::IsPrefixOpcode(opcode)) {
2112  opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
2113  }
2114  if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
2115  orig = code->orig_start[pc];
2116  if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
2117  opcode =
2118  static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
2119  }
2120  if (SkipBreakpoint(code, pc)) {
2121  // skip breakpoint by switching on original code.
2122  skip = "[skip] ";
2123  } else {
2124  TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
2125  TraceValueStack();
2126  TRACE("\n");
2127  hit_break = true;
2128  break;
2129  }
2130  }
2131 
2132  // If max is 0, break. If max is positive (a limit is set), decrement it.
2133  if (max == 0) break;
2134  if (max > 0) --max;
2135 
2136  USE(skip);
2137  TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
2138  TraceValueStack();
2139  TRACE("\n");
2140 
2141 #ifdef DEBUG
2142  // Compute the stack effect of this opcode, and verify later that the
2143  // stack was modified accordingly.
2144  std::pair<uint32_t, uint32_t> stack_effect =
2145  StackEffect(codemap_->module(), frames_.back().code->function->sig,
2146  code->orig_start + pc, code->orig_end);
2147  sp_t expected_new_stack_height =
2148  StackHeight() - stack_effect.first + stack_effect.second;
2149 #endif
2150 
2151  switch (orig) {
2152  case kExprNop:
2153  break;
2154  case kExprBlock: {
2155  BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
2156  &decoder, code->at(pc));
2157  len = 1 + imm.length;
2158  break;
2159  }
2160  case kExprLoop: {
2161  BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
2162  &decoder, code->at(pc));
2163  len = 1 + imm.length;
2164  break;
2165  }
2166  case kExprIf: {
2167  BlockTypeImmediate<Decoder::kNoValidate> imm(kAllWasmFeatures,
2168  &decoder, code->at(pc));
2169  WasmValue cond = Pop();
2170  bool is_true = cond.to<uint32_t>() != 0;
2171  if (is_true) {
2172  // fall through to the true block.
2173  len = 1 + imm.length;
2174  TRACE(" true => fallthrough\n");
2175  } else {
2176  len = LookupTargetDelta(code, pc);
2177  TRACE(" false => @%zu\n", pc + len);
2178  }
2179  break;
2180  }
2181  case kExprElse: {
2182  len = LookupTargetDelta(code, pc);
2183  TRACE(" end => @%zu\n", pc + len);
2184  break;
2185  }
2186  case kExprSelect: {
2187  WasmValue cond = Pop();
2188  WasmValue fval = Pop();
2189  WasmValue tval = Pop();
2190  Push(cond.to<int32_t>() != 0 ? tval : fval);
2191  break;
2192  }
2193  case kExprBr: {
2194  BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2195  len = DoBreak(code, pc, imm.depth);
2196  TRACE(" br => @%zu\n", pc + len);
2197  break;
2198  }
2199  case kExprBrIf: {
2200  BreakDepthImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2201  WasmValue cond = Pop();
2202  bool is_true = cond.to<uint32_t>() != 0;
2203  if (is_true) {
2204  len = DoBreak(code, pc, imm.depth);
2205  TRACE(" br_if => @%zu\n", pc + len);
2206  } else {
2207  TRACE(" false => fallthrough\n");
2208  len = 1 + imm.length;
2209  }
2210  break;
2211  }
2212  case kExprBrTable: {
2214  code->at(pc));
2215  BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
2216  uint32_t key = Pop().to<uint32_t>();
2217  uint32_t depth = 0;
2218  if (key >= imm.table_count) key = imm.table_count;
2219  for (uint32_t i = 0; i <= key; i++) {
2220  DCHECK(iterator.has_next());
2221  depth = iterator.next();
2222  }
2223  len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
2224  TRACE(" br[%u] => @%zu\n", key, pc + key + len);
2225  break;
2226  }
2227  case kExprReturn: {
2228  size_t arity = code->function->sig->return_count();
2229  if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
2230  PAUSE_IF_BREAK_FLAG(AfterReturn);
2231  continue;
2232  }
2233  case kExprUnreachable: {
2234  return DoTrap(kTrapUnreachable, pc);
2235  }
2236  case kExprEnd: {
2237  break;
2238  }
2239  case kExprI32Const: {
2240  ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2241  Push(WasmValue(imm.value));
2242  len = 1 + imm.length;
2243  break;
2244  }
2245  case kExprI64Const: {
2246  ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2247  Push(WasmValue(imm.value));
2248  len = 1 + imm.length;
2249  break;
2250  }
2251  case kExprF32Const: {
2252  ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2253  Push(WasmValue(imm.value));
2254  len = 1 + imm.length;
2255  break;
2256  }
2257  case kExprF64Const: {
2258  ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2259  Push(WasmValue(imm.value));
2260  len = 1 + imm.length;
2261  break;
2262  }
2263  case kExprGetLocal: {
2264  LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2265  Push(GetStackValue(frames_.back().sp + imm.index));
2266  len = 1 + imm.length;
2267  break;
2268  }
2269  case kExprSetLocal: {
2270  LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2271  WasmValue val = Pop();
2272  SetStackValue(frames_.back().sp + imm.index, val);
2273  len = 1 + imm.length;
2274  break;
2275  }
2276  case kExprTeeLocal: {
2277  LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
2278  WasmValue val = Pop();
2279  SetStackValue(frames_.back().sp + imm.index, val);
2280  Push(val);
2281  len = 1 + imm.length;
2282  break;
2283  }
2284  case kExprDrop: {
2285  Pop();
2286  break;
2287  }
2288  case kExprCallFunction: {
2290  code->at(pc));
2291  InterpreterCode* target = codemap()->GetCode(imm.index);
2292  if (target->function->imported) {
2293  CommitPc(pc);
2294  ExternalCallResult result =
2295  CallImportedFunction(target->function->func_index);
2296  switch (result.type) {
2297  case ExternalCallResult::INTERNAL:
2298  // The import is a function of this instance. Call it directly.
2299  target = result.interpreter_code;
2300  DCHECK(!target->function->imported);
2301  break;
2302  case ExternalCallResult::INVALID_FUNC:
2303  case ExternalCallResult::SIGNATURE_MISMATCH:
2304  // Direct calls are checked statically.
2305  UNREACHABLE();
2306  case ExternalCallResult::EXTERNAL_RETURNED:
2307  PAUSE_IF_BREAK_FLAG(AfterCall);
2308  len = 1 + imm.length;
2309  break;
2310  case ExternalCallResult::EXTERNAL_UNWOUND:
2311  return;
2312  }
2313  if (result.type != ExternalCallResult::INTERNAL) break;
2314  }
2315  // Execute an internal call.
2316  if (!DoCall(&decoder, target, &pc, &limit)) return;
2317  code = target;
2318  PAUSE_IF_BREAK_FLAG(AfterCall);
2319  continue; // don't bump pc
2320  } break;
2321  case kExprCallIndirect: {
2323  code->at(pc));
2324  uint32_t entry_index = Pop().to<uint32_t>();
2325  // Assume only one table for now.
2326  DCHECK_LE(module()->tables.size(), 1u);
2327  CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
2328  ExternalCallResult result =
2329  CallIndirectFunction(0, entry_index, imm.sig_index);
2330  switch (result.type) {
2331  case ExternalCallResult::INTERNAL:
2332  // The import is a function of this instance. Call it directly.
2333  if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
2334  return;
2335  code = result.interpreter_code;
2336  PAUSE_IF_BREAK_FLAG(AfterCall);
2337  continue; // don't bump pc
2338  case ExternalCallResult::INVALID_FUNC:
2339  return DoTrap(kTrapFuncInvalid, pc);
2340  case ExternalCallResult::SIGNATURE_MISMATCH:
2341  return DoTrap(kTrapFuncSigMismatch, pc);
2342  case ExternalCallResult::EXTERNAL_RETURNED:
2343  PAUSE_IF_BREAK_FLAG(AfterCall);
2344  len = 1 + imm.length;
2345  break;
2346  case ExternalCallResult::EXTERNAL_UNWOUND:
2347  return;
2348  }
2349  } break;
2350  case kExprGetGlobal: {
2352  code->at(pc));
2353  const WasmGlobal* global = &module()->globals[imm.index];
2354  byte* ptr = GetGlobalPtr(global);
2355  WasmValue val;
2356  switch (global->type) {
2357 #define CASE_TYPE(wasm, ctype) \
2358  case kWasm##wasm: \
2359  val = WasmValue( \
2360  ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \
2361  break;
2362  WASM_CTYPES(CASE_TYPE)
2363 #undef CASE_TYPE
2364  default:
2365  UNREACHABLE();
2366  }
2367  Push(val);
2368  len = 1 + imm.length;
2369  break;
2370  }
2371  case kExprSetGlobal: {
2373  code->at(pc));
2374  const WasmGlobal* global = &module()->globals[imm.index];
2375  byte* ptr = GetGlobalPtr(global);
2376  WasmValue val = Pop();
2377  switch (global->type) {
2378 #define CASE_TYPE(wasm, ctype) \
2379  case kWasm##wasm: \
2380  WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
2381  val.to<ctype>()); \
2382  break;
2383  WASM_CTYPES(CASE_TYPE)
2384 #undef CASE_TYPE
2385  default:
2386  UNREACHABLE();
2387  }
2388  len = 1 + imm.length;
2389  break;
2390  }
2391 
2392 #define LOAD_CASE(name, ctype, mtype, rep) \
2393  case kExpr##name: { \
2394  if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \
2395  MachineRepresentation::rep)) \
2396  return; \
2397  break; \
2398  }
2399 
2400  LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
2401  LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
2402  LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
2403  LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
2404  LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
2405  LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord16);
2406  LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
2407  LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
2408  LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
2409  LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
2410  LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
2411  LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
2412  LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
2413  LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
2414 #undef LOAD_CASE
2415 
2416 #define STORE_CASE(name, ctype, mtype, rep) \
2417  case kExpr##name: { \
2418  if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \
2419  MachineRepresentation::rep)) \
2420  return; \
2421  break; \
2422  }
2423 
2424  STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
2425  STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
2426  STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
2427  STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
2428  STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
2429  STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
2430  STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
2431  STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
2432  STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
2433 #undef STORE_CASE
2434 
2435 #define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
2436  case kExpr##name: { \
2437  uint32_t index = Pop().to<uint32_t>(); \
2438  ctype result; \
2439  Address addr = BoundsCheckMem<mtype>(0, index); \
2440  if (!addr) { \
2441  result = defval; \
2442  } else { \
2443  /* TODO(titzer): alignment for asmjs load mem? */ \
2444  result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
2445  } \
2446  Push(WasmValue(result)); \
2447  break; \
2448  }
2449  ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
2450  ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
2451  ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
2452  ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
2453  ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
2454  ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
2455  std::numeric_limits<float>::quiet_NaN());
2456  ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
2457  std::numeric_limits<double>::quiet_NaN());
2458 #undef ASMJS_LOAD_CASE
2459 
2460 #define ASMJS_STORE_CASE(name, ctype, mtype) \
2461  case kExpr##name: { \
2462  WasmValue val = Pop(); \
2463  uint32_t index = Pop().to<uint32_t>(); \
2464  Address addr = BoundsCheckMem<mtype>(0, index); \
2465  if (addr) { \
2466  *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
2467  } \
2468  Push(val); \
2469  break; \
2470  }
2471 
2472  ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
2473  ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
2474  ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
2475  ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
2476  ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
2477 #undef ASMJS_STORE_CASE
2478  case kExprMemoryGrow: {
2480  code->at(pc));
2481  uint32_t delta_pages = Pop().to<uint32_t>();
2482  Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
2483  instance_object_->GetIsolate());
2484  Isolate* isolate = memory->GetIsolate();
2485  int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
2486  Push(WasmValue(result));
2487  len = 1 + imm.length;
2488  // Treat one grow_memory instruction like 1000 other instructions,
2489  // because it is a really expensive operation.
2490  if (max > 0) max = std::max(0, max - 1000);
2491  break;
2492  }
2493  case kExprMemorySize: {
2495  code->at(pc));
2496  Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
2497  kWasmPageSize)));
2498  len = 1 + imm.length;
2499  break;
2500  }
2501  // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
2502  // specially to guarantee that the quiet bit of a NaN is preserved on
2503  // ia32 by the reinterpret casts.
2504  case kExprI32ReinterpretF32: {
2505  WasmValue val = Pop();
2506  Push(WasmValue(ExecuteI32ReinterpretF32(val)));
2507  break;
2508  }
2509  case kExprI64ReinterpretF64: {
2510  WasmValue val = Pop();
2511  Push(WasmValue(ExecuteI64ReinterpretF64(val)));
2512  break;
2513  }
2514 #define SIGN_EXTENSION_CASE(name, wtype, ntype) \
2515  case kExpr##name: { \
2516  ntype val = static_cast<ntype>(Pop().to<wtype>()); \
2517  Push(WasmValue(static_cast<wtype>(val))); \
2518  break; \
2519  }
2520  SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
2521  SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
2522  SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
2523  SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
2524  SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
2525 #undef SIGN_EXTENSION_CASE
2526  case kNumericPrefix: {
2527  ++len;
2528  if (!ExecuteNumericOp(opcode, &decoder, code, pc, len)) return;
2529  break;
2530  }
2531  case kAtomicPrefix: {
2532  if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
2533  break;
2534  }
2535  case kSimdPrefix: {
2536  ++len;
2537  if (!ExecuteSimdOp(opcode, &decoder, code, pc, len)) return;
2538  break;
2539  }
2540 
2541 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
2542  case kExpr##name: { \
2543  WasmValue rval = Pop(); \
2544  WasmValue lval = Pop(); \
2545  auto result = lval.to<ctype>() op rval.to<ctype>(); \
2546  possible_nondeterminism_ |= has_nondeterminism(result); \
2547  Push(WasmValue(result)); \
2548  break; \
2549  }
2550  FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
2551 #undef EXECUTE_SIMPLE_BINOP
2552 
2553 #define EXECUTE_OTHER_BINOP(name, ctype) \
2554  case kExpr##name: { \
2555  TrapReason trap = kTrapCount; \
2556  ctype rval = Pop().to<ctype>(); \
2557  ctype lval = Pop().to<ctype>(); \
2558  auto result = Execute##name(lval, rval, &trap); \
2559  possible_nondeterminism_ |= has_nondeterminism(result); \
2560  if (trap != kTrapCount) return DoTrap(trap, pc); \
2561  Push(WasmValue(result)); \
2562  break; \
2563  }
2564  FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
2565 #undef EXECUTE_OTHER_BINOP
2566 
2567 #define EXECUTE_UNOP(name, ctype, exec_fn) \
2568  case kExpr##name: { \
2569  TrapReason trap = kTrapCount; \
2570  ctype val = Pop().to<ctype>(); \
2571  auto result = exec_fn(val, &trap); \
2572  possible_nondeterminism_ |= has_nondeterminism(result); \
2573  if (trap != kTrapCount) return DoTrap(trap, pc); \
2574  Push(WasmValue(result)); \
2575  break; \
2576  }
2577 
2578 #define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
2579  FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
2580 #undef EXECUTE_OTHER_UNOP
2581 
2582 #define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
2583  EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
2584  FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
2585 #undef EXECUTE_I32CONV_FLOATOP
2586 #undef EXECUTE_UNOP
2587 
2588  default:
2589  FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
2590  OpcodeName(code->start[pc]));
2591  UNREACHABLE();
2592  }
2593 
2594 #ifdef DEBUG
2595  if (!WasmOpcodes::IsControlOpcode(opcode)) {
2596  DCHECK_EQ(expected_new_stack_height, StackHeight());
2597  }
2598 #endif
2599 
2600  pc += len;
2601  if (pc == limit) {
2602  // Fell off end of code; do an implicit return.
2603  TRACE("@%-3zu: ImplicitReturn\n", pc);
2604  if (!DoReturn(&decoder, &code, &pc, &limit,
2605  code->function->sig->return_count()))
2606  return;
2607  PAUSE_IF_BREAK_FLAG(AfterReturn);
2608  }
2609 #undef PAUSE_IF_BREAK_FLAG
2610  }
2611 
2612  state_ = WasmInterpreter::PAUSED;
2613  break_pc_ = hit_break ? pc : kInvalidPc;
2614  CommitPc(pc);
2615  }
2616 
2617  WasmValue Pop() {
2618  DCHECK_GT(frames_.size(), 0);
2619  DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
2620  return *--sp_;
2621  }
2622 
2623  void PopN(int n) {
2624  DCHECK_GE(StackHeight(), n);
2625  DCHECK_GT(frames_.size(), 0);
2626  // Check that we don't pop into locals.
2627  DCHECK_GE(StackHeight() - n, frames_.back().llimit());
2628  sp_ -= n;
2629  }
2630 
2631  WasmValue PopArity(size_t arity) {
2632  if (arity == 0) return WasmValue();
2633  CHECK_EQ(1, arity);
2634  return Pop();
2635  }
2636 
2637  void Push(WasmValue val) {
2638  DCHECK_NE(kWasmStmt, val.type());
2639  DCHECK_LE(1, stack_limit_ - sp_);
2640  *sp_++ = val;
2641  }
2642 
2643  void Push(WasmValue* vals, size_t arity) {
2644  DCHECK_LE(arity, stack_limit_ - sp_);
2645  for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
2646  DCHECK_NE(kWasmStmt, val->type());
2647  }
2648  memcpy(sp_, vals, arity * sizeof(*sp_));
2649  sp_ += arity;
2650  }
2651 
2652  void EnsureStackSpace(size_t size) {
2653  if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
2654  size_t old_size = stack_limit_ - stack_.get();
2655  size_t requested_size =
2656  base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
2657  size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
2658  std::unique_ptr<WasmValue[]> new_stack(new WasmValue[new_size]);
2659  memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
2660  sp_ = new_stack.get() + (sp_ - stack_.get());
2661  stack_ = std::move(new_stack);
2662  stack_limit_ = stack_.get() + new_size;
2663  }
2664 
  // Number of values currently on the stack (parameters, locals and operands
  // of all frames).
  sp_t StackHeight() { return sp_ - stack_.get(); }
2666 
  // Debug helper: when --trace-wasm-interpreter is set, print every slot of
  // the value stack, tagged as parameter (p), local (l) or operand (s) of
  // the topmost frame. No-op in release builds.
  void TraceValueStack() {
#ifdef DEBUG
    if (!FLAG_trace_wasm_interpreter) return;
    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
    sp_t sp = top ? top->sp : 0;
    sp_t plimit = top ? top->plimit() : 0;
    sp_t llimit = top ? top->llimit() : 0;
    for (size_t i = sp; i < StackHeight(); ++i) {
      if (i < plimit)
        PrintF(" p%zu:", i);
      else if (i < llimit)
        PrintF(" l%zu:", i);
      else
        PrintF(" s%zu:", i);
      WasmValue val = GetStackValue(i);
      switch (val.type()) {
        case kWasmI32:
          PrintF("i32:%d", val.to<int32_t>());
          break;
        case kWasmI64:
          PrintF("i64:%" PRId64 "", val.to<int64_t>());
          break;
        case kWasmF32:
          PrintF("f32:%f", val.to<float>());
          break;
        case kWasmF64:
          PrintF("f64:%lf", val.to<double>());
          break;
        case kWasmStmt:
          PrintF("void");
          break;
        default:
          UNREACHABLE();
          break;
      }
    }
#endif  // DEBUG
  }
2705 
2706  ExternalCallResult TryHandleException(Isolate* isolate) {
2707  if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
2708  return {ExternalCallResult::EXTERNAL_UNWOUND};
2709  }
2710  return {ExternalCallResult::EXTERNAL_RETURNED};
2711  }
2712 
  // Call a function outside of this interpreter instance (an imported JS
  // function, or wasm code of another instance) through the C-wasm entry
  // stub. Arguments are read from the interpreter's value stack and packed
  // into a byte buffer; the (single, if any) return value is pushed back.
  ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
                                              Handle<Object> object_ref,
                                              const WasmCode* code,
                                              FunctionSig* sig) {
    // Calls through a wasm-to-JS wrapper need a signature that is
    // representable in JS; otherwise throw a type error eagerly.
    if (code->kind() == WasmCode::kWasmToJsWrapper &&
        !IsJSCompatibleSignature(sig)) {
      isolate->Throw(*isolate->factory()->NewTypeError(
          MessageTemplate::kWasmTrapTypeError));
      return TryHandleException(isolate);
    }

    Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
    Handle<JSFunction> wasm_entry =
        WasmDebugInfo::GetCWasmEntry(debug_info, sig);

    TRACE("  => Calling external wasm function\n");

    // Copy the arguments to one buffer.
    // TODO(clemensh): Introduce a helper for all argument buffer
    // con-/destruction.
    int num_args = static_cast<int>(sig->parameter_count());
    std::vector<uint8_t> arg_buffer(num_args * 8);
    size_t offset = 0;
    // Arguments live on top of the interpreter stack, in declaration order.
    WasmValue* wasm_args = sp_ - num_args;
    for (int i = 0; i < num_args; ++i) {
      int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(i));
      if (arg_buffer.size() < offset + param_size) {
        arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
      }
      Address address = reinterpret_cast<Address>(arg_buffer.data()) + offset;
      switch (sig->GetParam(i)) {
        case kWasmI32:
          WriteUnalignedValue(address, wasm_args[i].to<uint32_t>());
          break;
        case kWasmI64:
          WriteUnalignedValue(address, wasm_args[i].to<uint64_t>());
          break;
        case kWasmF32:
          WriteUnalignedValue(address, wasm_args[i].to<float>());
          break;
        case kWasmF64:
          WriteUnalignedValue(address, wasm_args[i].to<double>());
          break;
        default:
          UNIMPLEMENTED();
      }
      offset += param_size;
    }

    // Ensure that there is enough space in the arg_buffer to hold the return
    // value(s).
    size_t return_size = 0;
    for (ValueType t : sig->returns()) {
      return_size += ValueTypes::ElementSizeInBytes(t);
    }
    if (arg_buffer.size() < return_size) {
      arg_buffer.resize(return_size);
    }

    // Wrap the arg_buffer and the code target data pointers in handles. As
    // these are aligned pointers, to the GC it will look like Smis.
    Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
                                  isolate);
    DCHECK(!arg_buffer_obj->IsHeapObject());
    Handle<Object> code_entry_obj(
        reinterpret_cast<Object*>(code->instruction_start()), isolate);
    DCHECK(!code_entry_obj->IsHeapObject());

    static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
                  "code below needs adaption");
    Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
    args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
    args[compiler::CWasmEntryParameters::kObjectRef] = object_ref;
    args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;

    Handle<Object> receiver = isolate->factory()->undefined_value();
    trap_handler::SetThreadInWasm();
    MaybeHandle<Object> maybe_retval =
        Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
    TRACE("  => External wasm function returned%s\n",
          maybe_retval.is_null() ? " with exception" : "");

    if (maybe_retval.is_null()) {
      // JSEntryStub may throw a stack overflow before we actually get to wasm
      // code or back to the interpreter, meaning the thread-in-wasm flag won't
      // be cleared.
      if (trap_handler::IsThreadInWasm()) {
        trap_handler::ClearThreadInWasm();
      }
      return TryHandleException(isolate);
    }

    trap_handler::ClearThreadInWasm();

    // Pop arguments off the stack.
    sp_ -= num_args;
    // Push return values.
    if (sig->return_count() > 0) {
      // TODO(wasm): Handle multiple returns.
      DCHECK_EQ(1, sig->return_count());
      Address address = reinterpret_cast<Address>(arg_buffer.data());
      switch (sig->GetReturn()) {
        case kWasmI32:
          Push(WasmValue(ReadUnalignedValue<uint32_t>(address)));
          break;
        case kWasmI64:
          Push(WasmValue(ReadUnalignedValue<uint64_t>(address)));
          break;
        case kWasmF32:
          Push(WasmValue(ReadUnalignedValue<float>(address)));
          break;
        case kWasmF64:
          Push(WasmValue(ReadUnalignedValue<double>(address)));
          break;
        default:
          UNIMPLEMENTED();
      }
    }
    return {ExternalCallResult::EXTERNAL_RETURNED};
  }
2833 
2834  static WasmCode* GetTargetCode(WasmCodeManager* code_manager,
2835  Address target) {
2836  NativeModule* native_module = code_manager->LookupNativeModule(target);
2837  if (native_module->is_jump_table_slot(target)) {
2838  uint32_t func_index =
2839  native_module->GetFunctionIndexFromJumpTableSlot(target);
2840  return native_module->code(func_index);
2841  }
2842  WasmCode* code = native_module->Lookup(target);
2843  DCHECK_EQ(code->instruction_start(), target);
2844  return code;
2845  }
2846 
  // Call a function imported into this instance. Imports always go through
  // the external call path (CallExternalWasmFunction).
  ExternalCallResult CallImportedFunction(uint32_t function_index) {
    DCHECK_GT(module()->num_imported_functions, function_index);
    // Use a new HandleScope to avoid leaking / accumulating handles in the
    // outer scope.
    Isolate* isolate = instance_object_->GetIsolate();
    HandleScope handle_scope(isolate);

    ImportedFunctionEntry entry(instance_object_, function_index);
    Handle<Object> object_ref(entry.object_ref(), isolate);
    WasmCode* code =
        GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
    FunctionSig* sig = module()->functions[function_index].sig;
    return CallExternalWasmFunction(isolate, object_ref, code, sig);
  }
2861 
  // Resolve and type-check an indirect call through table {table_index} at
  // {entry_index}, expecting the signature {sig_index}. The resolved callee
  // is either internal (runs in this interpreter) or external.
  ExternalCallResult CallIndirectFunction(uint32_t table_index,
                                          uint32_t entry_index,
                                          uint32_t sig_index) {
    if (codemap()->call_indirect_through_module()) {
      // Rely on the information stored in the WasmModule (testing mode, see
      // SetCallIndirectTestMode).
      InterpreterCode* code =
          codemap()->GetIndirectCode(table_index, entry_index);
      if (!code) return {ExternalCallResult::INVALID_FUNC};
      if (code->function->sig_index != sig_index) {
        // If not an exact match, we have to do a canonical check.
        int function_canonical_id =
            module()->signature_ids[code->function->sig_index];
        int expected_canonical_id = module()->signature_ids[sig_index];
        DCHECK_EQ(function_canonical_id,
                  module()->signature_map.Find(*code->function->sig));
        if (function_canonical_id != expected_canonical_id) {
          return {ExternalCallResult::SIGNATURE_MISMATCH};
        }
      }
      return {ExternalCallResult::INTERNAL, code};
    }

    Isolate* isolate = instance_object_->GetIsolate();
    uint32_t expected_sig_id = module()->signature_ids[sig_index];
    DCHECK_EQ(expected_sig_id,
              module()->signature_map.Find(*module()->signatures[sig_index]));

    // The function table is stored in the instance.
    // TODO(wasm): the wasm interpreter currently supports only one table.
    CHECK_EQ(0, table_index);
    // Bounds check against table size.
    if (entry_index >= instance_object_->indirect_function_table_size()) {
      return {ExternalCallResult::INVALID_FUNC};
    }

    IndirectFunctionTableEntry entry(instance_object_, entry_index);
    // Signature check.
    if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
      return {ExternalCallResult::SIGNATURE_MISMATCH};
    }

    HandleScope scope(isolate);
    FunctionSig* signature = module()->signatures[sig_index];
    Handle<Object> object_ref = handle(entry.object_ref(), isolate);
    WasmCode* code =
        GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());

    // Calls to imports and cross-instance calls must take the external call
    // path, even if the target turns out to be interpreted code again.
    if (!object_ref->IsWasmInstanceObject() || /* call to an import */
        !instance_object_.is_identical_to(object_ref) /* cross-instance */) {
      return CallExternalWasmFunction(isolate, object_ref, code, signature);
    }

    DCHECK(code->kind() == WasmCode::kInterpreterEntry ||
           code->kind() == WasmCode::kFunction);
    return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
  }
2918 
2919  inline Activation current_activation() {
2920  return activations_.empty() ? Activation(0, 0) : activations_.back();
2921  }
2922 };
2923 
2925  public:
  // A non-owning view on the {index}th frame of {thread}.
  InterpretedFrameImpl(ThreadImpl* thread, int index)
      : thread_(thread), index_(index) {
    DCHECK_LE(0, index);
  }
2930 
  // The wasm function executing in this frame.
  const WasmFunction* function() const { return frame()->code->function; }
2932 
2933  int pc() const {
2934  DCHECK_LE(0, frame()->pc);
2935  DCHECK_GE(kMaxInt, frame()->pc);
2936  return static_cast<int>(frame()->pc);
2937  }
2938 
2939  int GetParameterCount() const {
2940  DCHECK_GE(kMaxInt, function()->sig->parameter_count());
2941  return static_cast<int>(function()->sig->parameter_count());
2942  }
2943 
2944  int GetLocalCount() const {
2945  size_t num_locals = function()->sig->parameter_count() +
2946  frame()->code->locals.type_list.size();
2947  DCHECK_GE(kMaxInt, num_locals);
2948  return static_cast<int>(num_locals);
2949  }
2950 
2951  int GetStackHeight() const {
2952  bool is_top_frame =
2953  static_cast<size_t>(index_) + 1 == thread_->frames_.size();
2954  size_t stack_limit =
2955  is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
2956  DCHECK_LE(frame()->sp, stack_limit);
2957  size_t frame_size = stack_limit - frame()->sp;
2958  DCHECK_LE(GetLocalCount(), frame_size);
2959  return static_cast<int>(frame_size) - GetLocalCount();
2960  }
2961 
2962  WasmValue GetLocalValue(int index) const {
2963  DCHECK_LE(0, index);
2964  DCHECK_GT(GetLocalCount(), index);
2965  return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
2966  }
2967 
2968  WasmValue GetStackValue(int index) const {
2969  DCHECK_LE(0, index);
2970  // Index must be within the number of stack values of this frame.
2971  DCHECK_GT(GetStackHeight(), index);
2972  return thread_->GetStackValue(static_cast<int>(frame()->sp) +
2973  GetLocalCount() + index);
2974  }
2975 
2976  private:
2977  ThreadImpl* thread_;
2978  int index_;
2979 
  // The underlying frame in the thread's frame list.
  ThreadImpl::Frame* frame() const {
    DCHECK_GT(thread_->frames_.size(), index_);
    return &thread_->frames_[index_];
  }
2984 };
2985 
namespace {

// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
// Thread* is the public interface, without knowledge of the object layout.
// This cast is potentially risky, but as long as we always cast it back before
// accessing any data, it should be fine. UBSan is not complaining.
WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
  return reinterpret_cast<WasmInterpreter::Thread*>(impl);
}
ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
  return reinterpret_cast<ThreadImpl*>(thread);
}

// Same conversion for InterpretedFrame and InterpretedFrameImpl. Frames
// handed out through ToFrame are heap-allocated and later destroyed via
// InterpretedFrameDeleter, which casts back with ToImpl.
InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
  return reinterpret_cast<InterpretedFrame*>(impl);
}
const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
  return reinterpret_cast<const InterpretedFrameImpl*>(frame);
}

}  // namespace
3008 
3009 //============================================================================
3010 // Implementation of the pimpl idiom for WasmInterpreter::Thread.
3011 // Instead of placing a pointer to the ThreadImpl inside of the Thread object,
3012 // we just reinterpret_cast them. ThreadImpls are only allocated inside this
3013 // translation unit anyway.
3014 //============================================================================
// All Thread methods below simply forward to the ThreadImpl aliased at the
// same address (see the pimpl note above).
WasmInterpreter::State WasmInterpreter::Thread::state() {
  return ToImpl(this)->state();
}
void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
                                        WasmValue* args) {
  ToImpl(this)->InitFrame(function, args);
}
WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
  return ToImpl(this)->Run(num_steps);
}
void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
WasmInterpreter::Thread::ExceptionHandlingResult
WasmInterpreter::Thread::HandleException(Isolate* isolate) {
  return ToImpl(this)->HandleException(isolate);
}
pc_t WasmInterpreter::Thread::GetBreakpointPc() {
  return ToImpl(this)->GetBreakpointPc();
}
int WasmInterpreter::Thread::GetFrameCount() {
  return ToImpl(this)->GetFrameCount();
}
// The returned FramePtr owns the newly allocated frame view; it is released
// through InterpretedFrameDeleter.
WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
  DCHECK_LE(0, index);
  DCHECK_GT(GetFrameCount(), index);
  return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
  return ToImpl(this)->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
  return ToImpl(this)->GetTrapReason();
}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
  return ToImpl(this)->PossibleNondeterminism();
}
uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
  return ToImpl(this)->NumInterpretedCalls();
}
void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
  ToImpl(this)->AddBreakFlags(flags);
}
void WasmInterpreter::Thread::ClearBreakFlags() {
  ToImpl(this)->ClearBreakFlags();
}
uint32_t WasmInterpreter::Thread::NumActivations() {
  return ToImpl(this)->NumActivations();
}
uint32_t WasmInterpreter::Thread::StartActivation() {
  return ToImpl(this)->StartActivation();
}
void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
  ToImpl(this)->FinishActivation(id);
}
uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
  return ToImpl(this)->ActivationFrameBase(id);
}
3072 
3073 //============================================================================
3074 // The implementation details of the interpreter.
3075 //============================================================================
3077  public:
3078  // Create a copy of the module bytes for the interpreter, since the passed
3079  // pointer might be invalidated after constructing the interpreter.
3080  const ZoneVector<uint8_t> module_bytes_;
3081  CodeMap codemap_;
3082  ZoneVector<ThreadImpl> threads_;
3083 
  // Copies the wire bytes into zone memory, builds the code map on that
  // copy, and creates the single interpreter thread.
  WasmInterpreterInternals(Zone* zone, const WasmModule* module,
                           const ModuleWireBytes& wire_bytes,
                           Handle<WasmInstanceObject> instance_object)
      : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
        codemap_(module, module_bytes_.data(), zone),
        threads_(zone) {
    threads_.emplace_back(zone, &codemap_, instance_object);
  }
3092 };
3093 
3094 namespace {
// Weak-callback finalizer that only frees the global handle itself; the
// referenced object is left to the GC.
void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
  Address* global_handle_location =
      reinterpret_cast<Address*>(data.GetParameter());
  GlobalHandles::Destroy(global_handle_location);
}
3100 
3102  Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
3103  Handle<WasmInstanceObject> weak_instance =
3104  isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
3105  Address* global_handle_location = weak_instance.location();
3106  GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
3107  &NopFinalizer, v8::WeakCallbackType::kParameter);
3108  return weak_instance;
3109 }
3110 } // namespace
3111 
3112 //============================================================================
3113 // Implementation of the public interface of the interpreter.
3114 //============================================================================
// The internals are allocated inside {zone_} via placement new, so there is
// no matching delete; see ~WasmInterpreter for the explicit destructor call.
// The instance handle is weakened (MakeWeak) so the interpreter itself does
// not keep the instance object alive.
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      internals_(new (&zone_) WasmInterpreterInternals(
          &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
3121 
// {internals_} is zone-allocated: run its destructor explicitly, the zone
// reclaims the memory itself.
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }

void WasmInterpreter::Run() { internals_->threads_[0].Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
3127 
3128 bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
3129  bool enabled) {
3130  InterpreterCode* code = internals_->codemap_.GetCode(function);
3131  size_t size = static_cast<size_t>(code->end - code->start);
3132  // Check bounds for {pc}.
3133  if (pc < code->locals.encoded_size || pc >= size) return false;
3134  // Make a copy of the code before enabling a breakpoint.
3135  if (enabled && code->orig_start == code->start) {
3136  code->start = reinterpret_cast<byte*>(zone_.New(size));
3137  memcpy(code->start, code->orig_start, size);
3138  code->end = code->start + size;
3139  }
3140  bool prev = code->start[pc] == kInternalBreakpoint;
3141  if (enabled) {
3142  code->start[pc] = kInternalBreakpoint;
3143  } else {
3144  code->start[pc] = code->orig_start[pc];
3145  }
3146  return prev;
3147 }
3148 
3149 bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
3150  InterpreterCode* code = internals_->codemap_.GetCode(function);
3151  size_t size = static_cast<size_t>(code->end - code->start);
3152  // Check bounds for {pc}.
3153  if (pc < code->locals.encoded_size || pc >= size) return false;
3154  // Check if a breakpoint is present at that place in the code.
3155  return code->start[pc] == kInternalBreakpoint;
3156 }
3157 
// Per-function tracing is not implemented yet.
bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
  UNIMPLEMENTED();
  return false;
}

int WasmInterpreter::GetThreadCount() {
  return 1;  // only one thread for now.
}

WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
  CHECK_EQ(0, id);  // only one thread for now.
  return ToThread(&internals_->threads_[id]);
}

// Register {function} with the code map without providing code bytes
// (testing only; see SetFunctionCodeForTesting).
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  internals_->codemap_.AddFunction(function, nullptr, nullptr);
}

void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  internals_->codemap_.SetFunctionCode(function, start, end);
}

// Make call_indirect resolve through the module's tables instead of the
// instance (testing only).
void WasmInterpreter::SetCallIndirectTestMode() {
  internals_->codemap_.set_call_indirect_through_module(true);
}
3185 
// Compute the control-transfer side table for a raw code snippet, wrapping
// it in dummy function/signature structures (testing only).
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
  // Create some dummy structures, to avoid special-casing the implementation
  // just for testing.
  FunctionSig sig(0, 0, nullptr);
  WasmFunction function{&sig, 0, 0, {0, 0}, false, false};
  InterpreterCode code{
      &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};

  // Now compute and return the control transfers.
  SideTable side_table(zone, module, &code);
  return side_table.map_;
}
3199 
3200 //============================================================================
3201 // Implementation of the frame inspection interface.
3202 //============================================================================
// All InterpretedFrame methods forward to the InterpretedFrameImpl aliased
// at the same address (see the pimpl note above).
const WasmFunction* InterpretedFrame::function() const {
  return ToImpl(this)->function();
}
int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
int InterpretedFrame::GetParameterCount() const {
  return ToImpl(this)->GetParameterCount();
}
int InterpretedFrame::GetLocalCount() const {
  return ToImpl(this)->GetLocalCount();
}
int InterpretedFrame::GetStackHeight() const {
  return ToImpl(this)->GetStackHeight();
}
WasmValue InterpretedFrame::GetLocalValue(int index) const {
  return ToImpl(this)->GetLocalValue(index);
}
WasmValue InterpretedFrame::GetStackValue(int index) const {
  return ToImpl(this)->GetStackValue(index);
}
// Frames are allocated as InterpretedFrameImpl (see Thread::GetFrame), so
// they must be destroyed as such.
void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
  delete ToImpl(ptr);
}
3225 
3226 #undef TRACE
3227 #undef LANE
3228 #undef FOREACH_INTERNAL_OPCODE
3229 #undef WASM_CTYPES
3230 #undef FOREACH_SIMPLE_BINOP
3231 #undef FOREACH_OTHER_BINOP
3232 #undef FOREACH_I32CONV_FLOATOP
3233 #undef FOREACH_OTHER_UNOP
3234 
3235 } // namespace wasm
3236 } // namespace internal
3237 } // namespace v8
// (doc-extraction artifacts from the original reference listing, preserved
// as comments:)
// STL namespace.
// Definition: libplatform.h:13