8 #include "src/wasm/wasm-interpreter.h" 10 #include "src/assembler-inl.h" 11 #include "src/boxed-float.h" 12 #include "src/compiler/wasm-compiler.h" 13 #include "src/conversions.h" 14 #include "src/identity-map.h" 15 #include "src/objects-inl.h" 16 #include "src/trap-handler/trap-handler.h" 17 #include "src/utils.h" 18 #include "src/wasm/decoder.h" 19 #include "src/wasm/function-body-decoder-impl.h" 20 #include "src/wasm/function-body-decoder.h" 21 #include "src/wasm/memory-tracing.h" 22 #include "src/wasm/wasm-engine.h" 23 #include "src/wasm/wasm-external-refs.h" 24 #include "src/wasm/wasm-limits.h" 25 #include "src/wasm/wasm-module.h" 26 #include "src/wasm/wasm-objects-inl.h" 28 #include "src/zone/accounting-allocator.h" 29 #include "src/zone/zone-containers.h" 37 if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \ 40 #if V8_TARGET_BIG_ENDIAN 41 #define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1) 43 #define LANE(i, type) (i) 46 #define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF) 48 #define WASM_CTYPES(V) \ 49 V(I32, int32_t) V(I64, int64_t) V(F32, float) V(F64, double) V(S128, Simd128) 51 #define FOREACH_SIMPLE_BINOP(V) \ 52 V(I32Add, uint32_t, +) \ 53 V(I32Sub, uint32_t, -) \ 54 V(I32Mul, uint32_t, *) \ 55 V(I32And, uint32_t, &) \ 56 V(I32Ior, uint32_t, |) \ 57 V(I32Xor, uint32_t, ^) \ 58 V(I32Eq, uint32_t, ==) \ 59 V(I32Ne, uint32_t, !=) \ 60 V(I32LtU, uint32_t, <) \ 61 V(I32LeU, uint32_t, <=) \ 62 V(I32GtU, uint32_t, >) \ 63 V(I32GeU, uint32_t, >=) \ 64 V(I32LtS, int32_t, <) \ 65 V(I32LeS, int32_t, <=) \ 66 V(I32GtS, int32_t, >) \ 67 V(I32GeS, int32_t, >=) \ 68 V(I64Add, uint64_t, +) \ 69 V(I64Sub, uint64_t, -) \ 70 V(I64Mul, uint64_t, *) \ 71 V(I64And, uint64_t, &) \ 72 V(I64Ior, uint64_t, |) \ 73 V(I64Xor, uint64_t, ^) \ 74 V(I64Eq, uint64_t, ==) \ 75 V(I64Ne, uint64_t, !=) \ 76 V(I64LtU, uint64_t, <) \ 77 V(I64LeU, uint64_t, <=) \ 78 V(I64GtU, uint64_t, >) \ 79 V(I64GeU, uint64_t, >=) \ 80 V(I64LtS, int64_t, <) \ 81 V(I64LeS, 
int64_t, <=) \ 82 V(I64GtS, int64_t, >) \ 83 V(I64GeS, int64_t, >=) \ 92 V(F64Add, double, +) \ 93 V(F64Sub, double, -) \ 94 V(F64Eq, double, ==) \ 95 V(F64Ne, double, !=) \ 97 V(F64Le, double, <=) \ 99 V(F64Ge, double, >=) \ 100 V(F32Mul, float, *) \ 101 V(F64Mul, double, *) \ 102 V(F32Div, float, /) \ 105 #define FOREACH_OTHER_BINOP(V) \ 106 V(I32DivS, int32_t) \ 107 V(I32DivU, uint32_t) \ 108 V(I32RemS, int32_t) \ 109 V(I32RemU, uint32_t) \ 110 V(I32Shl, uint32_t) \ 111 V(I32ShrU, uint32_t) \ 112 V(I32ShrS, int32_t) \ 113 V(I64DivS, int64_t) \ 114 V(I64DivU, uint64_t) \ 115 V(I64RemS, int64_t) \ 116 V(I64RemU, uint64_t) \ 117 V(I64Shl, uint64_t) \ 118 V(I64ShrU, uint64_t) \ 119 V(I64ShrS, int64_t) \ 128 V(I32AsmjsDivS, int32_t) \ 129 V(I32AsmjsDivU, uint32_t) \ 130 V(I32AsmjsRemS, int32_t) \ 131 V(I32AsmjsRemU, uint32_t) \ 132 V(F32CopySign, Float32) \ 133 V(F64CopySign, Float64) 135 #define FOREACH_I32CONV_FLOATOP(V) \ 136 V(I32SConvertF32, int32_t, float) \ 137 V(I32SConvertF64, int32_t, double) \ 138 V(I32UConvertF32, uint32_t, float) \ 139 V(I32UConvertF64, uint32_t, double) 141 #define FOREACH_OTHER_UNOP(V) \ 142 V(I32Clz, uint32_t) \ 143 V(I32Ctz, uint32_t) \ 144 V(I32Popcnt, uint32_t) \ 145 V(I32Eqz, uint32_t) \ 146 V(I64Clz, uint64_t) \ 147 V(I64Ctz, uint64_t) \ 148 V(I64Popcnt, uint64_t) \ 149 V(I64Eqz, uint64_t) \ 155 V(F32NearestInt, float) \ 159 V(F64Floor, double) \ 160 V(F64Trunc, double) \ 161 V(F64NearestInt, double) \ 162 V(I32ConvertI64, int64_t) \ 163 V(I64SConvertF32, float) \ 164 V(I64SConvertF64, double) \ 165 V(I64UConvertF32, float) \ 166 V(I64UConvertF64, double) \ 167 V(I64SConvertI32, int32_t) \ 168 V(I64UConvertI32, uint32_t) \ 169 V(F32SConvertI32, int32_t) \ 170 V(F32UConvertI32, uint32_t) \ 171 V(F32SConvertI64, int64_t) \ 172 V(F32UConvertI64, uint64_t) \ 173 V(F32ConvertF64, double) \ 174 V(F32ReinterpretI32, int32_t) \ 175 V(F64SConvertI32, int32_t) \ 176 V(F64UConvertI32, uint32_t) \ 177 V(F64SConvertI64, int64_t) \ 178 
V(F64UConvertI64, uint64_t) \ 179 V(F64ConvertF32, float) \ 180 V(F64ReinterpretI64, int64_t) \ 181 V(I32AsmjsSConvertF32, float) \ 182 V(I32AsmjsUConvertF32, float) \ 183 V(I32AsmjsSConvertF64, double) \ 184 V(I32AsmjsUConvertF64, double) \ 191 constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
193 inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
195 *trap = kTrapDivByZero;
198 if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
199 *trap = kTrapDivUnrepresentable;
207 *trap = kTrapDivByZero;
213 inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
215 *trap = kTrapRemByZero;
218 if (b == -1)
return 0;
224 *trap = kTrapRemByZero;
231 return a << (b & 0x1F);
235 return a >> (b & 0x1F);
238 inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
239 return a >> (b & 0x1F);
244 *trap = kTrapDivByZero;
247 if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
248 *trap = kTrapDivUnrepresentable;
254 inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
256 *trap = kTrapDivByZero;
264 *trap = kTrapRemByZero;
267 if (b == -1)
return 0;
271 inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
273 *trap = kTrapRemByZero;
279 inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
280 return a << (b & 0x3F);
283 inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
284 return a >> (b & 0x3F);
288 return a >> (b & 0x3F);
293 return (a >> shift) | (a << (32 - shift));
298 return (a << shift) | (a >> (32 - shift));
301 inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
303 return (a >> shift) | (a << (64 - shift));
306 inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
308 return (a << shift) | (a >> (64 - shift));
311 inline float ExecuteF32Min(
float a,
float b, TrapReason* trap) {
315 inline float ExecuteF32Max(
float a,
float b, TrapReason* trap) {
319 inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
320 return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
321 (b.get_bits() & kFloat32SignBitMask));
324 inline double ExecuteF64Min(
double a,
double b, TrapReason* trap) {
328 inline double ExecuteF64Max(
double a,
double b, TrapReason* trap) {
332 inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
333 return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
334 (b.get_bits() & kFloat64SignBitMask));
337 inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
338 if (b == 0)
return 0;
339 if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
340 return std::numeric_limits<int32_t>::min();
346 if (b == 0)
return 0;
350 inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
351 if (b == 0)
return 0;
352 if (b == -1)
return 0;
357 if (b == 0)
return 0;
361 inline int32_t ExecuteI32AsmjsSConvertF32(
float a, TrapReason* trap) {
362 return DoubleToInt32(a);
365 inline uint32_t ExecuteI32AsmjsUConvertF32(
float a, TrapReason* trap) {
366 return DoubleToUint32(a);
369 inline int32_t ExecuteI32AsmjsSConvertF64(
double a, TrapReason* trap) {
370 return DoubleToInt32(a);
373 inline uint32_t ExecuteI32AsmjsUConvertF64(
double a, TrapReason* trap) {
374 return DoubleToUint32(a);
377 int32_t ExecuteI32Clz(
uint32_t val, TrapReason* trap) {
378 return base::bits::CountLeadingZeros(val);
382 return base::bits::CountTrailingZeros(val);
386 return base::bits::CountPopulation(val);
390 return val == 0 ? 1 : 0;
393 int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
394 return base::bits::CountLeadingZeros(val);
397 inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
398 return base::bits::CountTrailingZeros(val);
401 inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
402 return base::bits::CountPopulation(val);
405 inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
406 return val == 0 ? 1 : 0;
409 inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
410 return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
413 inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
414 return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
417 inline float ExecuteF32Ceil(
float a, TrapReason* trap) {
return ceilf(a); }
419 inline float ExecuteF32Floor(
float a, TrapReason* trap) {
return floorf(a); }
421 inline float ExecuteF32Trunc(
float a, TrapReason* trap) {
return truncf(a); }
423 inline float ExecuteF32NearestInt(
float a, TrapReason* trap) {
424 return nearbyintf(a);
427 inline float ExecuteF32Sqrt(
float a, TrapReason* trap) {
428 float result = sqrtf(a);
432 inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
433 return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
436 inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
437 return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
440 inline double ExecuteF64Ceil(
double a, TrapReason* trap) {
return ceil(a); }
442 inline double ExecuteF64Floor(
double a, TrapReason* trap) {
return floor(a); }
444 inline double ExecuteF64Trunc(
double a, TrapReason* trap) {
return trunc(a); }
446 inline double ExecuteF64NearestInt(
double a, TrapReason* trap) {
450 inline double ExecuteF64Sqrt(
double a, TrapReason* trap) {
return sqrt(a); }
452 template <
typename int_type,
typename float_type>
453 int_type ExecuteConvert(float_type a, TrapReason* trap) {
454 if (is_inbounds<int_type>(a)) {
455 return static_cast<int_type
>(a);
457 *trap = kTrapFloatUnrepresentable;
461 template <
typename int_type,
typename float_type>
462 int_type ExecuteConvertSaturate(float_type a) {
463 TrapReason base_trap = kTrapCount;
464 int32_t val = ExecuteConvert<int_type>(a, &base_trap);
465 if (base_trap == kTrapCount) {
468 return std::isnan(a) ? 0
469 : (a < static_cast<float_type>(0.0)
470 ? std::numeric_limits<int_type>::min()
471 :
std::numeric_limits<int_type>::max());
474 template <
typename dst_type,
typename src_type,
void (*fn)(Address)>
475 inline dst_type CallExternalIntToFloatFunction(src_type input) {
476 uint8_t data[std::max(
sizeof(dst_type),
sizeof(src_type))];
477 Address data_addr =
reinterpret_cast<Address
>(data);
478 WriteUnalignedValue<src_type>(data_addr, input);
480 return ReadUnalignedValue<dst_type>(data_addr);
483 template <
typename dst_type,
typename src_type,
int32_t (*fn)(Address)>
484 inline dst_type CallExternalFloatToIntFunction(src_type input,
486 uint8_t data[std::max(
sizeof(dst_type),
sizeof(src_type))];
487 Address data_addr =
reinterpret_cast<Address
>(data);
488 WriteUnalignedValue<src_type>(data_addr, input);
489 if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
490 return ReadUnalignedValue<dst_type>(data_addr);
494 return static_cast<uint32_t>(a & 0xFFFFFFFF);
497 int64_t ExecuteI64SConvertF32(
float a, TrapReason* trap) {
498 return CallExternalFloatToIntFunction<
int64_t, float,
499 float32_to_int64_wrapper>(a, trap);
502 int64_t ExecuteI64SConvertSatF32(
float a) {
503 TrapReason base_trap = kTrapCount;
504 int64_t val = ExecuteI64SConvertF32(a, &base_trap);
505 if (base_trap == kTrapCount) {
508 return std::isnan(a) ? 0
509 : (a < 0.0 ? std::numeric_limits<int64_t>::min()
513 int64_t ExecuteI64SConvertF64(
double a, TrapReason* trap) {
514 return CallExternalFloatToIntFunction<
int64_t, double,
515 float64_to_int64_wrapper>(a, trap);
518 int64_t ExecuteI64SConvertSatF64(
double a) {
519 TrapReason base_trap = kTrapCount;
520 int64_t val = ExecuteI64SConvertF64(a, &base_trap);
521 if (base_trap == kTrapCount) {
524 return std::isnan(a) ? 0
525 : (a < 0.0 ? std::numeric_limits<int64_t>::min()
529 uint64_t ExecuteI64UConvertF32(
float a, TrapReason* trap) {
530 return CallExternalFloatToIntFunction<uint64_t, float,
531 float32_to_uint64_wrapper>(a, trap);
534 uint64_t ExecuteI64UConvertSatF32(
float a) {
535 TrapReason base_trap = kTrapCount;
536 uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
537 if (base_trap == kTrapCount) {
540 return std::isnan(a) ? 0
541 : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
542 :
std::numeric_limits<uint64_t>::max());
545 uint64_t ExecuteI64UConvertF64(
double a, TrapReason* trap) {
546 return CallExternalFloatToIntFunction<uint64_t, double,
547 float64_to_uint64_wrapper>(a, trap);
550 uint64_t ExecuteI64UConvertSatF64(
double a) {
551 TrapReason base_trap = kTrapCount;
552 int64_t val = ExecuteI64UConvertF64(a, &base_trap);
553 if (base_trap == kTrapCount) {
556 return std::isnan(a) ? 0
557 : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
558 :
std::numeric_limits<uint64_t>::max());
561 inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
562 return static_cast<int64_t>(a);
566 return static_cast<uint64_t
>(a);
569 inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
570 return static_cast<float>(a);
573 inline float ExecuteF32UConvertI32(
uint32_t a, TrapReason* trap) {
574 return static_cast<float>(a);
577 inline float ExecuteF32SConvertI64(
int64_t a, TrapReason* trap) {
578 return static_cast<float>(a);
581 inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
582 return CallExternalIntToFloatFunction<float, uint64_t,
583 uint64_to_float32_wrapper>(a);
586 inline float ExecuteF32ConvertF64(
double a, TrapReason* trap) {
587 return static_cast<float>(a);
590 inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
591 return Float32::FromBits(a);
594 inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
595 return static_cast<double>(a);
598 inline double ExecuteF64UConvertI32(
uint32_t a, TrapReason* trap) {
599 return static_cast<double>(a);
602 inline double ExecuteF64SConvertI64(
int64_t a, TrapReason* trap) {
603 return static_cast<double>(a);
606 inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
607 return CallExternalIntToFloatFunction<double, uint64_t,
608 uint64_to_float64_wrapper>(a);
611 inline double ExecuteF64ConvertF32(
float a, TrapReason* trap) {
612 return static_cast<double>(a);
615 inline Float64 ExecuteF64ReinterpretI64(
int64_t a, TrapReason* trap) {
616 return Float64::FromBits(a);
619 inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
620 return a.to_f32_boxed().get_bits();
623 inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
624 return a.to_f64_boxed().get_bits();
627 enum InternalOpcode {
628 #define DECL_INTERNAL_ENUM(name, value) kInternal##name = value, 629 FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
630 #undef DECL_INTERNAL_ENUM 633 const char* OpcodeName(
uint32_t val) {
635 #define DECL_INTERNAL_CASE(name, value) \ 636 case kInternal##name: \ 637 return "Internal" #name; 638 FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
639 #undef DECL_INTERNAL_CASE 641 return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
652 const byte* orig_start;
653 const byte* orig_end;
658 const byte* at(
pc_t pc) {
return start + pc; }
672 Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
677 : target_stack_height(target_stack_height),
686 const byte* target =
nullptr;
693 return new (zone) CLabel(zone, stack_height, arity);
697 void Bind(
const byte* pc) {
703 void Ref(
const byte* from_pc,
uint32_t stack_height) {
705 DCHECK_IMPLIES(target, *target == kExprLoop);
706 refs.push_back({from_pc, stack_height});
710 DCHECK_NOT_NULL(target);
711 for (
auto ref : refs) {
712 size_t offset =
static_cast<size_t>(ref.from_pc - start);
713 auto pcdiff =
static_cast<pcdiff_t
>(target - ref.from_pc);
714 DCHECK_GE(ref.stack_height, target_stack_height);
716 static_cast<spdiff_t>(ref.stack_height - target_stack_height);
717 TRACE(
"control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
718 pcdiff, ref.stack_height, target_stack_height, spdiff);
720 entry.pc_diff = pcdiff;
721 entry.sp_diff = spdiff;
722 entry.target_arity = arity;
737 bool unreachable =
false;
739 Control(
const byte* pc, CLabel* end_label, CLabel* else_label,
742 end_label(end_label),
743 else_label(else_label),
744 exit_arity(exit_arity) {}
745 Control(
const byte* pc, CLabel* end_label,
uint32_t exit_arity)
746 : Control(pc, end_label,
nullptr, exit_arity) {}
749 end_label->Finish(map, start);
750 if (else_label) else_label->Finish(map, start);
762 static_cast<uint32_t>(code->function->sig->return_count());
764 CLabel::New(&control_transfer_zone, stack_height, func_arity);
765 control_stack.emplace_back(code->orig_start, func_label, func_arity);
766 auto control_parent = [&]() -> Control& {
767 DCHECK_LE(2, control_stack.size());
768 return control_stack[control_stack.size() - 2];
770 auto copy_unreachable = [&] {
771 control_stack.back().unreachable = control_parent().unreachable;
774 i.has_next();
i.next()) {
775 WasmOpcode opcode =
i.current();
776 if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode =
i.prefixed_opcode();
777 bool unreachable = control_stack.back().unreachable;
779 TRACE(
"@%u: %s (is unreachable)\n",
i.pc_offset(),
780 WasmOpcodes::OpcodeName(opcode));
783 StackEffect(module, code->function->sig,
i.pc(),
i.end());
784 TRACE(
"@%u: %s (sp %d - %d + %d)\n",
i.pc_offset(),
785 WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
786 stack_effect.second);
787 DCHECK_GE(stack_height, stack_effect.first);
788 DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
789 stack_effect.first + stack_effect.second);
790 stack_height = stack_height - stack_effect.first + stack_effect.second;
791 if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
796 bool is_loop = opcode == kExprLoop;
799 if (imm.type == kWasmVar) {
800 imm.sig = module->signatures[imm.sig_index];
802 TRACE(
"control @%u: %s, arity %d->%d\n",
i.pc_offset(),
803 is_loop ?
"Loop" :
"Block", imm.in_arity(), imm.out_arity());
805 CLabel::New(&control_transfer_zone, stack_height,
806 is_loop ? imm.in_arity() : imm.out_arity());
807 control_stack.emplace_back(
i.pc(), label, imm.out_arity());
809 if (is_loop) label->Bind(
i.pc());
815 if (imm.type == kWasmVar) {
816 imm.sig = module->signatures[imm.sig_index];
818 TRACE(
"control @%u: If, arity %d->%d\n",
i.pc_offset(),
819 imm.in_arity(), imm.out_arity());
820 CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
823 CLabel::New(&control_transfer_zone, stack_height, 0);
824 control_stack.emplace_back(
i.pc(), end_label, else_label,
827 if (!unreachable) else_label->Ref(
i.pc(), stack_height);
831 Control* c = &control_stack.back();
833 TRACE(
"control @%u: Else\n",
i.pc_offset());
834 if (!control_parent().unreachable) {
835 c->end_label->Ref(
i.pc(), stack_height);
837 DCHECK_NOT_NULL(c->else_label);
838 c->else_label->Bind(
i.pc() + 1);
839 c->else_label->Finish(&map_, code->orig_start);
840 c->else_label =
nullptr;
841 DCHECK_GE(stack_height, c->end_label->target_stack_height);
842 stack_height = c->end_label->target_stack_height;
846 Control* c = &control_stack.back();
847 TRACE(
"control @%u: End\n",
i.pc_offset());
849 DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
850 if (!c->end_label->target) {
851 if (c->else_label) c->else_label->Bind(
i.pc());
852 c->end_label->Bind(
i.pc() + 1);
854 c->Finish(&map_, code->orig_start);
855 DCHECK_GE(stack_height, c->end_label->target_stack_height);
856 stack_height = c->end_label->target_stack_height + c->exit_arity;
857 control_stack.pop_back();
862 TRACE(
"control @%u: Br[depth=%u]\n",
i.pc_offset(), imm.depth);
863 Control* c = &control_stack[control_stack.size() - imm.depth - 1];
864 if (!unreachable) c->end_label->Ref(
i.pc(), stack_height);
869 TRACE(
"control @%u: BrIf[depth=%u]\n",
i.pc_offset(), imm.depth);
870 Control* c = &control_stack[control_stack.size() - imm.depth - 1];
871 if (!unreachable) c->end_label->Ref(
i.pc(), stack_height);
877 TRACE(
"control @%u: BrTable[count=%u]\n",
i.pc_offset(),
880 while (iterator.has_next()) {
883 Control* c = &control_stack[control_stack.size() - target - 1];
884 c->end_label->Ref(
i.pc() + j, stack_height);
892 if (WasmOpcodes::IsUnconditionalJump(opcode)) {
893 control_stack.back().unreachable =
true;
896 DCHECK_EQ(0, control_stack.size());
897 DCHECK_EQ(func_arity, stack_height);
901 auto result = map_.find(from);
902 DCHECK(result != map_.end());
903 return result->second;
915 bool call_indirect_through_module_ =
false;
919 : zone_(zone), module_(module), interpreter_code_(zone) {
920 if (module ==
nullptr)
return;
921 interpreter_code_.reserve(module->functions.size());
922 for (
const WasmFunction&
function : module->functions) {
923 if (
function.imported) {
924 DCHECK(!
function.code.is_set());
925 AddFunction(&
function,
nullptr,
nullptr);
927 AddFunction(&
function, module_start +
function.code.offset(),
928 module_start +
function.code.end_offset());
933 bool call_indirect_through_module() {
return call_indirect_through_module_; }
935 void set_call_indirect_through_module(
bool val) {
936 call_indirect_through_module_ = val;
939 const WasmModule* module()
const {
return module_; }
943 DCHECK_EQ(
function, code->function);
948 DCHECK_LT(function_index, interpreter_code_.size());
949 return Preprocess(&interpreter_code_[function_index]);
955 if (table_index >= module_->tables.size())
return nullptr;
957 saved_index = table_index;
958 table_index &=
static_cast<int32_t
>((table_index - module_->tables.size()) &
959 ~static_cast<int32_t>(table_index)) >>
961 DCHECK_EQ(table_index, saved_index);
962 const WasmTable* table = &module_->tables[table_index];
963 if (entry_index >= table->values.size())
return nullptr;
965 saved_index = entry_index;
966 entry_index &=
static_cast<int32_t
>((entry_index - table->values.size()) &
967 ~static_cast<int32_t>(entry_index)) >>
969 DCHECK_EQ(entry_index, saved_index);
970 uint32_t index = table->values[entry_index];
971 if (index >= interpreter_code_.size())
return nullptr;
974 index &=
static_cast<int32_t
>((index - interpreter_code_.size()) &
975 ~static_cast<int32_t>(index)) >>
977 DCHECK_EQ(index, saved_index);
979 return GetCode(index);
983 DCHECK_EQ(code->function->imported, code->start ==
nullptr);
984 if (!code->side_table && code->start) {
986 code->side_table =
new (zone_)
SideTable(zone_, module_, code);
991 void AddFunction(
const WasmFunction*
function,
const byte* code_start,
992 const byte* code_end) {
995 code_end,
const_cast<byte*
>(code_start), const_cast<byte*>(code_end),
998 DCHECK_EQ(interpreter_code_.size(),
function->func_index);
999 interpreter_code_.push_back(code);
1002 void SetFunctionCode(
const WasmFunction*
function,
const byte* start,
1004 DCHECK_LT(function->func_index, interpreter_code_.size());
1006 DCHECK_EQ(
function, code->function);
1007 code->orig_start = start;
1008 code->orig_end = end;
1009 code->start =
const_cast<byte*
>(start);
1010 code->end =
const_cast<byte*
>(end);
1011 code->side_table =
nullptr;
1018 struct ExternalCallResult {
1033 InterpreterCode* interpreter_code;
1036 DCHECK_NE(INTERNAL,
type);
1038 ExternalCallResult(
Type type, InterpreterCode* code)
1039 :
type(
type), interpreter_code(code) {
1040 DCHECK_EQ(INTERNAL,
type);
1045 template <
typename dst,
typename src>
1047 dst operator()(src val)
const {
return static_cast<dst
>(val); }
1050 struct converter<Float64, uint64_t> {
1051 Float64 operator()(uint64_t val)
const {
return Float64::FromBits(val); }
1054 struct converter<Float32,
uint32_t> {
1055 Float32 operator()(
uint32_t val)
const {
return Float32::FromBits(val); }
1058 struct converter<uint64_t, Float64> {
1059 uint64_t operator()(Float64 val)
const {
return val.get_bits(); }
1062 struct converter<
uint32_t, Float32> {
1063 uint32_t operator()(Float32 val)
const {
return val.get_bits(); }
1066 template <
typename T>
1067 V8_INLINE
bool has_nondeterminism(T val) {
1068 static_assert(!std::is_floating_point<T>::value,
"missing specialization");
1072 V8_INLINE
bool has_nondeterminism<float>(
float val) {
1073 return std::isnan(val);
1076 V8_INLINE
bool has_nondeterminism<double>(
double val) {
1077 return std::isnan(val);
1093 : codemap_(codemap),
1094 instance_object_(instance_object),
1096 activations_(zone) {}
1102 WasmInterpreter::State state() {
return state_; }
1105 DCHECK_EQ(current_activation().fp, frames_.size());
1107 size_t num_params =
function->sig->parameter_count();
1108 EnsureStackSpace(num_params);
1109 Push(args, num_params);
1113 WasmInterpreter::State Run(
int num_steps = -1) {
1114 DCHECK(state_ == WasmInterpreter::STOPPED ||
1115 state_ == WasmInterpreter::PAUSED);
1116 DCHECK(num_steps == -1 || num_steps > 0);
1117 if (num_steps == -1) {
1118 TRACE(
" => Run()\n");
1119 }
else if (num_steps == 1) {
1120 TRACE(
" => Step()\n");
1122 TRACE(
" => Run(%d)\n", num_steps);
1124 state_ = WasmInterpreter::RUNNING;
1125 Execute(frames_.back().code, frames_.back().pc, num_steps);
1127 DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
1128 current_activation().fp == frames_.size());
1132 void Pause() { UNIMPLEMENTED(); }
1135 TRACE(
"----- RESET -----\n");
1138 state_ = WasmInterpreter::STOPPED;
1139 trap_reason_ = kTrapCount;
1140 possible_nondeterminism_ =
false;
1143 int GetFrameCount() {
1144 DCHECK_GE(kMaxInt, frames_.size());
1145 return static_cast<int>(frames_.size());
1149 if (state_ == WasmInterpreter::TRAPPED)
return WasmValue(0xDEADBEEF);
1150 DCHECK_EQ(WasmInterpreter::FINISHED, state_);
1151 Activation act = current_activation();
1153 DCHECK_EQ(act.fp, frames_.size());
1154 return GetStackValue(act.sp + index);
1158 DCHECK_GT(StackHeight(), index);
1159 return stack_[index];
1163 DCHECK_GT(StackHeight(), index);
1164 stack_[index] = value;
1167 TrapReason GetTrapReason() {
return trap_reason_; }
1169 pc_t GetBreakpointPc() {
return break_pc_; }
1171 bool PossibleNondeterminism() {
return possible_nondeterminism_; }
1173 uint64_t NumInterpretedCalls() {
return num_interpreted_calls_; }
1175 void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }
1177 void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
1180 return static_cast<uint32_t>(activations_.size());
1184 TRACE(
"----- START ACTIVATION %zu -----\n", activations_.size());
1186 DCHECK_IMPLIES(activations_.empty(), frames_.empty());
1187 DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
1189 activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
1191 state_ = WasmInterpreter::STOPPED;
1192 return activation_id;
1195 void FinishActivation(
uint32_t id) {
1196 TRACE(
"----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
1197 DCHECK_LT(0, activations_.size());
1198 DCHECK_EQ(activations_.size() - 1, id);
1201 DCHECK_EQ(activations_.back().fp, frames_.size());
1202 DCHECK_LE(activations_.back().sp, StackHeight());
1203 sp_ = stack_.get() + activations_.back().sp;
1204 activations_.pop_back();
1208 DCHECK_GT(activations_.size(), id);
1209 return activations_[id].fp;
1214 WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
1216 DCHECK(isolate->has_pending_exception());
1218 USE(isolate->pending_exception());
1219 TRACE(
"----- UNWIND -----\n");
1220 DCHECK_LT(0, activations_.size());
1221 Activation& act = activations_.back();
1222 DCHECK_LE(act.fp, frames_.size());
1223 frames_.resize(act.fp);
1224 DCHECK_LE(act.sp, StackHeight());
1225 sp_ = stack_.get() + act.sp;
1226 state_ = WasmInterpreter::STOPPED;
1227 return WasmInterpreter::Thread::UNWOUND;
1238 sp_t plimit() {
return sp + code->function->sig->parameter_count(); }
1240 sp_t llimit() {
return plimit() + code->locals.type_list.size(); }
1254 std::unique_ptr<WasmValue[]> stack_;
1258 WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
1259 pc_t break_pc_ = kInvalidPc;
1260 TrapReason trap_reason_ = kTrapCount;
1261 bool possible_nondeterminism_ =
false;
1262 uint8_t break_flags_ = 0;
1263 uint64_t num_interpreted_calls_ = 0;
1268 CodeMap* codemap()
const {
return codemap_; }
1269 const WasmModule* module()
const {
return codemap_->module(); }
1271 void DoTrap(TrapReason trap,
pc_t pc) {
1272 TRACE(
"TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
1273 state_ = WasmInterpreter::TRAPPED;
1274 trap_reason_ = trap;
1280 DCHECK_NOT_NULL(code);
1281 DCHECK_NOT_NULL(code->side_table);
1282 EnsureStackSpace(code->side_table->max_stack_height_ +
1283 code->locals.type_list.size());
1285 ++num_interpreted_calls_;
1286 size_t arity = code->function->sig->parameter_count();
1288 DCHECK_GE(StackHeight(), arity);
1289 frames_.push_back({code, 0, StackHeight() - arity});
1290 frames_.back().pc = InitLocals(code);
1291 TRACE(
" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
1292 code->function->func_index, frames_.back().pc);
1296 for (
auto p : code->locals.type_list) {
1299 #define CASE_TYPE(wasm, ctype) \ 1301 val = WasmValue(ctype{}); \ 1303 WASM_CTYPES(CASE_TYPE)
1311 return code->locals.encoded_size;
1314 void CommitPc(
pc_t pc) {
1315 DCHECK(!frames_.empty());
1316 frames_.back().pc = pc;
1320 if (pc == break_pc_) {
1322 break_pc_ = kInvalidPc;
1329 return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
1334 DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
1335 control_transfer_entry.target_arity);
1336 return control_transfer_entry.pc_diff;
1340 switch (code->orig_start[pc]) {
1341 case kExprCallFunction: {
1343 return pc + 1 + imm.length;
1345 case kExprCallIndirect: {
1347 return pc + 1 + imm.length;
1356 DCHECK_GT(frames_.size(), 0);
1357 WasmValue* sp_dest = stack_.get() + frames_.back().sp;
1359 if (frames_.size() == current_activation().fp) {
1361 state_ = WasmInterpreter::FINISHED;
1362 DoStackTransfer(sp_dest, arity);
1363 TRACE(
" => finish\n");
1367 Frame* top = &frames_.back();
1369 decoder->Reset((*code)->start, (*code)->end);
1370 *pc = ReturnPc(decoder, *code, top->pc);
1371 *limit = top->code->end - top->code->start;
1372 TRACE(
" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
1373 (*code)->function->func_index, *pc);
1374 DoStackTransfer(sp_dest, arity);
1382 pc_t* limit) V8_WARN_UNUSED_RESULT {
1383 frames_.back().pc = *pc;
1385 if (!DoStackCheck())
return false;
1386 *pc = frames_.back().pc;
1387 *limit = target->end - target->start;
1388 decoder->Reset(target->start, target->end);
1394 void DoStackTransfer(
WasmValue* dest,
size_t arity) {
1400 DCHECK_LE(dest, sp_);
1401 DCHECK_LE(dest + arity, sp_);
1402 if (arity) memmove(dest, sp_ - arity, arity *
sizeof(*sp_));
1406 template <
typename mtype>
1408 size_t mem_size = instance_object_->memory_size();
1409 if (
sizeof(mtype) > mem_size)
return kNullAddress;
1410 if (offset > (mem_size -
sizeof(mtype)))
return kNullAddress;
1411 if (index > (mem_size -
sizeof(mtype) - offset))
return kNullAddress;
1414 return reinterpret_cast<Address>(instance_object_->memory_start()) +
1415 offset + (index & instance_object_->memory_mask());
1418 template <
typename ctype,
typename mtype>
1420 MachineRepresentation rep) {
1424 Address addr = BoundsCheckMem<mtype>(imm.offset, index);
1426 DoTrap(kTrapMemOutOfBounds, pc);
1430 converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
1433 len = 1 + imm.length;
1435 if (FLAG_trace_wasm_memory) {
1437 TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
1438 code->function->func_index, static_cast<int>(pc),
1439 instance_object_->memory_start());
1445 template <
typename ctype,
typename mtype>
1447 MachineRepresentation rep) {
1450 ctype val = Pop().to<ctype>();
1453 Address addr = BoundsCheckMem<mtype>(imm.offset, index);
1455 DoTrap(kTrapMemOutOfBounds, pc);
1458 WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
1459 len = 1 + imm.length;
1461 if (FLAG_trace_wasm_memory) {
1463 TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
1464 code->function->func_index, static_cast<int>(pc),
1465 instance_object_->memory_start());
1471 template <
typename type,
typename op_type>
1474 type* val =
nullptr,
type* val2 =
nullptr) {
1477 if (val2) *val2 =
static_cast<type>(Pop().to<op_type>());
1478 if (val) *val =
static_cast<type>(Pop().to<op_type>());
1480 address = BoundsCheckMem<type>(imm.offset, index);
1482 DoTrap(kTrapMemOutOfBounds, pc);
1485 len = 2 + imm.length;
1489 bool ExecuteNumericOp(WasmOpcode opcode,
Decoder* decoder,
1492 case kExprI32SConvertSatF32:
1493 Push(
WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
1495 case kExprI32UConvertSatF32:
1496 Push(
WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
1498 case kExprI32SConvertSatF64:
1499 Push(
WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
1501 case kExprI32UConvertSatF64:
1502 Push(
WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
1504 case kExprI64SConvertSatF32:
1505 Push(
WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
1507 case kExprI64UConvertSatF32:
1508 Push(
WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
1510 case kExprI64SConvertSatF64:
1511 Push(
WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
1513 case kExprI64UConvertSatF64:
1514 Push(
WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
1517 FATAL(
"Unknown or unimplemented opcode #%d:%s", code->start[pc],
1518 OpcodeName(code->start[pc]));
1524 bool ExecuteAtomicOp(WasmOpcode opcode,
Decoder* decoder,
1530 #if !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN) 1531 #define ATOMIC_BINOP_CASE(name, type, op_type, operation) \ 1532 case kExpr##name: { \ 1535 if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \ 1539 static_assert(sizeof(std::atomic<type>) == sizeof(type), \ 1540 "Size mismatch for types std::atomic<" #type \ 1542 result = WasmValue(static_cast<op_type>( \ 1543 std::operation(reinterpret_cast<std::atomic<type>*>(addr), val))); \ 1548 ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t,
uint32_t, atomic_fetch_add);
1549 ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t,
uint32_t, atomic_fetch_add);
1551 ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t,
uint32_t, atomic_fetch_sub);
1552 ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t,
uint32_t, atomic_fetch_sub);
1554 ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t,
uint32_t, atomic_fetch_and);
1555 ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t,
uint32_t, atomic_fetch_and);
1557 ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t,
uint32_t, atomic_fetch_or);
1558 ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t,
uint32_t, atomic_fetch_or);
1560 ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t,
uint32_t, atomic_fetch_xor);
1561 ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t,
uint32_t, atomic_fetch_xor);
1563 ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t,
uint32_t,
1565 ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t,
uint32_t,
1567 ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add);
1568 ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add);
1569 ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add);
1570 ATOMIC_BINOP_CASE(I64AtomicAdd32U,
uint32_t, uint64_t, atomic_fetch_add);
1571 ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub);
1572 ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub);
1573 ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub);
1574 ATOMIC_BINOP_CASE(I64AtomicSub32U,
uint32_t, uint64_t, atomic_fetch_sub);
1575 ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and);
1576 ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and);
1577 ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t, atomic_fetch_and);
1578 ATOMIC_BINOP_CASE(I64AtomicAnd32U,
uint32_t, uint64_t, atomic_fetch_and);
1579 ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or);
1580 ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or);
1581 ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or);
1582 ATOMIC_BINOP_CASE(I64AtomicOr32U,
uint32_t, uint64_t, atomic_fetch_or);
1583 ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor);
1584 ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor);
1585 ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor);
1586 ATOMIC_BINOP_CASE(I64AtomicXor32U,
uint32_t, uint64_t, atomic_fetch_xor);
1587 ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange);
1588 ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t,
1590 ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
1592 ATOMIC_BINOP_CASE(I64AtomicExchange32U,
uint32_t, uint64_t,
1594 #undef ATOMIC_BINOP_CASE 1595 #define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \ 1596 case kExpr##name: { \ 1600 if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \ 1604 static_assert(sizeof(std::atomic<type>) == sizeof(type), \ 1605 "Size mismatch for types std::atomic<" #type \ 1607 std::atomic_compare_exchange_strong( \ 1608 reinterpret_cast<std::atomic<type>*>(addr), &val, val2); \ 1609 Push(WasmValue(static_cast<op_type>(val))); \ 1612 ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange,
uint32_t,
1614 ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
1616 ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
1618 ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
1620 ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
1622 ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
1624 ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U,
uint32_t,
1626 #undef ATOMIC_COMPARE_EXCHANGE_CASE 1627 #define ATOMIC_LOAD_CASE(name, type, op_type, operation) \ 1628 case kExpr##name: { \ 1630 if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len)) { \ 1633 static_assert(sizeof(std::atomic<type>) == sizeof(type), \ 1634 "Size mismatch for types std::atomic<" #type \ 1636 result = WasmValue(static_cast<op_type>( \ 1637 std::operation(reinterpret_cast<std::atomic<type>*>(addr)))); \ 1642 ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t,
uint32_t, atomic_load);
1643 ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t,
uint32_t, atomic_load);
1644 ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
1645 ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
1646 ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
1647 ATOMIC_LOAD_CASE(I64AtomicLoad32U,
uint32_t, uint64_t, atomic_load);
1648 #undef ATOMIC_LOAD_CASE 1649 #define ATOMIC_STORE_CASE(name, type, op_type, operation) \ 1650 case kExpr##name: { \ 1653 if (!ExtractAtomicOpParams<type, op_type>(decoder, code, addr, pc, len, \ 1657 static_assert(sizeof(std::atomic<type>) == sizeof(type), \ 1658 "Size mismatch for types std::atomic<" #type \ 1660 std::operation(reinterpret_cast<std::atomic<type>*>(addr), val); \ 1664 ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t,
uint32_t, atomic_store);
1665 ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t,
uint32_t, atomic_store);
1666 ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
1667 ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
1668 ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
1669 ATOMIC_STORE_CASE(I64AtomicStore32U,
uint32_t, uint64_t, atomic_store);
1670 #undef ATOMIC_STORE_CASE 1671 #endif // !(V8_TARGET_ARCH_MIPS && V8_TARGET_BIG_ENDIAN) 1679 byte* GetGlobalPtr(
const WasmGlobal* global) {
1680 if (global->mutability && global->imported) {
1681 return reinterpret_cast<byte*
>(
1682 instance_object_->imported_mutable_globals()[global->index]);
1684 return instance_object_->globals_start() + global->offset;
1689 pc_t pc,
int& len) {
1691 #define SPLAT_CASE(format, sType, valType, num) \ 1692 case kExpr##format##Splat: { \ 1693 WasmValue val = Pop(); \ 1694 valType v = val.to<valType>(); \ 1696 for (int i = 0; i < num; i++) s.val[i] = v; \ 1697 Push(WasmValue(Simd128(s))); \ 1700 SPLAT_CASE(I32x4, int4, int32_t, 4)
1701 SPLAT_CASE(F32x4, float4,
float, 4)
1702 SPLAT_CASE(I16x8, int8, int32_t, 8)
1703 SPLAT_CASE(I8x16, int16, int32_t, 16)
1705 #define EXTRACT_LANE_CASE(format, name) \ 1706 case kExpr##format##ExtractLane: { \ 1707 SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \ 1709 WasmValue val = Pop(); \ 1710 Simd128 s = val.to_s128(); \ 1711 auto ss = s.to_##name(); \ 1712 Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \ 1715 EXTRACT_LANE_CASE(I32x4, i32x4)
1716 EXTRACT_LANE_CASE(F32x4, f32x4)
1717 EXTRACT_LANE_CASE(I16x8, i16x8)
1718 EXTRACT_LANE_CASE(I8x16, i8x16)
1719 #undef EXTRACT_LANE_CASE 1720 #define BINOP_CASE(op, name, stype, count, expr) \ 1722 WasmValue v2 = Pop(); \ 1723 WasmValue v1 = Pop(); \ 1724 stype s1 = v1.to_s128().to_##name(); \ 1725 stype s2 = v2.to_s128().to_##name(); \ 1727 for (size_t i = 0; i < count; ++i) { \ 1728 auto a = s1.val[LANE(i, s1)]; \ 1729 auto b = s2.val[LANE(i, s1)]; \ 1730 res.val[LANE(i, s1)] = expr; \ 1732 Push(WasmValue(Simd128(res))); \ 1735 BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
1736 BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
1737 BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
1738 BINOP_CASE(F32x4Min, f32x4, float4, 4, a < b ? a : b)
1739 BINOP_CASE(F32x4Max, f32x4, float4, 4, a > b ? a : b)
1740 BINOP_CASE(I32x4Add, i32x4, int4, 4, a + b)
1741 BINOP_CASE(I32x4Sub, i32x4, int4, 4, a - b)
1742 BINOP_CASE(I32x4Mul, i32x4, int4, 4, a * b)
1743 BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
1744 BINOP_CASE(I32x4MinU, i32x4, int4, 4,
1745 static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
1746 BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
1747 BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
1748 static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
1749 BINOP_CASE(S128And, i32x4, int4, 4, a & b)
1750 BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
1751 BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
1752 BINOP_CASE(I16x8Add, i16x8, int8, 8, a + b)
1753 BINOP_CASE(I16x8Sub, i16x8, int8, 8, a - b)
1754 BINOP_CASE(I16x8Mul, i16x8, int8, 8, a * b)
1755 BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
1756 BINOP_CASE(I16x8MinU, i16x8, int8, 8,
1757 static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
1758 BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
1759 BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
1760 static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
1761 BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
1762 BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
1763 BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
1764 BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
1765 BINOP_CASE(I8x16Add, i8x16, int16, 16, a + b)
1766 BINOP_CASE(I8x16Sub, i8x16, int16, 16, a - b)
1767 BINOP_CASE(I8x16Mul, i8x16, int16, 16, a * b)
1768 BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
1769 BINOP_CASE(I8x16MinU, i8x16, int16, 16,
1770 static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
1771 BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
1772 BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
1773 static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
1774 BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
1775 BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
1776 SaturateAdd<uint8_t>(a, b))
1777 BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
1778 BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
1779 SaturateSub<uint8_t>(a, b))
1781 #define UNOP_CASE(op, name, stype, count, expr) \ 1783 WasmValue v = Pop(); \ 1784 stype s = v.to_s128().to_##name(); \ 1786 for (size_t i = 0; i < count; ++i) { \ 1787 auto a = s.val[i]; \ 1788 res.val[i] = expr; \ 1790 Push(WasmValue(Simd128(res))); \ 1793 UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
1794 UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
1795 UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, 1.0f / a)
1796 UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, 1.0f / std::sqrt(a))
1797 UNOP_CASE(I32x4Neg, i32x4, int4, 4, -a)
1798 UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
1799 UNOP_CASE(I16x8Neg, i16x8, int8, 8, -a)
1800 UNOP_CASE(I8x16Neg, i8x16, int16, 16, -a)
1802 #define CMPOP_CASE(op, name, stype, out_stype, count, expr) \ 1804 WasmValue v2 = Pop(); \ 1805 WasmValue v1 = Pop(); \ 1806 stype s1 = v1.to_s128().to_##name(); \ 1807 stype s2 = v2.to_s128().to_##name(); \ 1809 for (size_t i = 0; i < count; ++i) { \ 1810 auto a = s1.val[i]; \ 1811 auto b = s2.val[i]; \ 1812 res.val[i] = expr ? -1 : 0; \ 1814 Push(WasmValue(Simd128(res))); \ 1817 CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
1818 CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
1819 CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
1820 CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
1821 CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
1822 CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
1823 CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
1824 CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
1825 CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
1826 CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
1827 CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
1828 CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
1829 CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
1830 static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
1831 CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
1832 static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
1833 CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
1834 static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
1835 CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
1836 static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
1837 CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
1838 CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
1839 CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
1840 CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
1841 CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
1842 CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
1843 CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
1844 static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
1845 CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
1846 static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
1847 CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
1848 static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
1849 CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
1850 static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
1851 CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
1852 CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
1853 CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
1854 CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
1855 CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
1856 CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
1857 CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
1858 static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
1859 CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
1860 static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
1861 CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
1862 static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
1863 CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
1864 static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
1866 #define REPLACE_LANE_CASE(format, name, stype, ctype) \ 1867 case kExpr##format##ReplaceLane: { \ 1868 SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \ 1870 WasmValue new_val = Pop(); \ 1871 WasmValue simd_val = Pop(); \ 1872 stype s = simd_val.to_s128().to_##name(); \ 1873 s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \ 1874 Push(WasmValue(Simd128(s))); \ 1877 REPLACE_LANE_CASE(F32x4, f32x4, float4,
float)
1878 REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
1879 REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
1880 REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
1881 #undef REPLACE_LANE_CASE 1882 case kExprS128LoadMem:
1883 return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
1884 MachineRepresentation::kSimd128);
1885 case kExprS128StoreMem:
1886 return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
1887 MachineRepresentation::kSimd128);
1888 #define SHIFT_CASE(op, name, stype, count, expr) \ 1890 SimdShiftImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc)); \ 1892 WasmValue v = Pop(); \ 1893 stype s = v.to_s128().to_##name(); \ 1895 for (size_t i = 0; i < count; ++i) { \ 1896 auto a = s.val[i]; \ 1897 res.val[i] = expr; \ 1899 Push(WasmValue(Simd128(res))); \ 1902 SHIFT_CASE(I32x4Shl, i32x4, int4, 4, a << imm.shift)
1903 SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> imm.shift)
1904 SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
1905 static_cast<uint32_t>(a) >> imm.shift)
1906 SHIFT_CASE(I16x8Shl, i16x8, int8, 8, a << imm.shift)
1907 SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> imm.shift)
1908 SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
1909 static_cast<uint16_t>(a) >> imm.shift)
1910 SHIFT_CASE(I8x16Shl, i8x16, int16, 16, a << imm.shift)
1911 SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> imm.shift)
1912 SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
1913 static_cast<uint8_t>(a) >> imm.shift)
1915 #define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \ 1918 WasmValue v = Pop(); \ 1919 src_type s = v.to_s128().to_##name(); \ 1921 for (size_t i = 0; i < count; ++i) { \ 1922 ctype a = s.val[LANE(start_index + i, s)]; \ 1923 res.val[LANE(i, res)] = expr; \ 1925 Push(WasmValue(Simd128(res))); \ 1928 CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
1929 static_cast<float>(a))
1930 CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0,
uint32_t,
1931 static_cast<float>(a))
1932 CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0,
double,
1934 : a<kMinInt ? kMinInt : a> kMaxInt
1936 : static_cast<int32_t>(a))
1937 CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0,
double,
1940 : a<0 ? 0 : a> kMaxUInt32 ? kMaxUInt32
1941 : static_cast<uint32_t>(a))
1942 CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
1944 CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
1946 CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
1947 CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
1949 CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
1951 CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
1953 CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
1954 CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
1957 #define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype, \ 1960 WasmValue v2 = Pop(); \ 1961 WasmValue v1 = Pop(); \ 1962 src_type s1 = v1.to_s128().to_##name(); \ 1963 src_type s2 = v2.to_s128().to_##name(); \ 1965 int64_t min = std::numeric_limits<ctype>::min(); \ 1966 int64_t max = std::numeric_limits<ctype>::max(); \ 1967 for (size_t i = 0; i < count; ++i) { \ 1968 int32_t v = i < count / 2 ? s1.val[LANE(i, s1)] \ 1969 : s2.val[LANE(i - count / 2, s2)]; \ 1970 int64_t a = is_unsigned ? static_cast<int64_t>(v & 0xFFFFFFFFu) : v; \ 1971 res.val[LANE(i, res)] = \ 1972 static_cast<dst_ctype>(std::max(min, std::min(max, a))); \ 1974 Push(WasmValue(Simd128(res))); \ 1977 PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t,
1979 PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t,
1981 PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t,
1983 PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t,
1986 case kExprS128Select: {
1987 int4 v2 = Pop().to_s128().to_i32x4();
1988 int4 v1 = Pop().to_s128().to_i32x4();
1989 int4 bool_val = Pop().to_s128().to_i32x4();
1991 for (
size_t i = 0;
i < 4; ++
i) {
1992 res.val[
i] = v2.val[
i] ^ ((v1.val[
i] ^ v2.val[
i]) & bool_val.val[
i]);
1997 #define ADD_HORIZ_CASE(op, name, stype, count) \ 1999 WasmValue v2 = Pop(); \ 2000 WasmValue v1 = Pop(); \ 2001 stype s1 = v1.to_s128().to_##name(); \ 2002 stype s2 = v2.to_s128().to_##name(); \ 2004 for (size_t i = 0; i < count / 2; ++i) { \ 2005 res.val[LANE(i, s1)] = \ 2006 s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \ 2007 res.val[LANE(i + count / 2, s1)] = \ 2008 s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \ 2010 Push(WasmValue(Simd128(res))); \ 2013 ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
2014 ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
2015 ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
2016 #undef ADD_HORIZ_CASE 2017 case kExprS8x16Shuffle: {
2021 int16 v2 = Pop().to_s128().to_i8x16();
2022 int16 v1 = Pop().to_s128().to_i8x16();
2024 for (
size_t i = 0;
i < kSimd128Size; ++
i) {
2025 int lane = imm.shuffle[
i];
2026 res.val[LANE(
i, v1)] = lane < kSimd128Size
2027 ? v1.val[LANE(lane, v1)]
2028 : v2.val[LANE(lane - kSimd128Size, v1)];
2033 #define REDUCTION_CASE(op, name, stype, count, operation) \ 2035 stype s = Pop().to_s128().to_##name(); \ 2036 int32_t res = s.val[0]; \ 2037 for (size_t i = 1; i < count; ++i) { \ 2038 res = res operation static_cast<int32_t>(s.val[i]); \ 2040 Push(WasmValue(res)); \ 2043 REDUCTION_CASE(S1x4AnyTrue, i32x4, int4, 4, |)
2044 REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
2045 REDUCTION_CASE(S1x8AnyTrue, i16x8, int8, 8, |)
2046 REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
2047 REDUCTION_CASE(S1x16AnyTrue, i8x16, int16, 16, |)
2048 REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
2049 #undef REDUCTION_CASE 2061 bool DoStackCheck() V8_WARN_UNUSED_RESULT {
2066 const size_t stack_size_limit = FLAG_stack_size * KB;
2068 const size_t current_stack_size =
2069 (sp_ - stack_.get()) + frames_.size() *
sizeof(Frame);
2070 if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
2075 frames_.back().pc = 0;
2076 Isolate* isolate = instance_object_->GetIsolate();
2078 isolate->StackOverflow();
2079 return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
2083 DCHECK_NOT_NULL(code->side_table);
2084 DCHECK(!frames_.empty());
2087 DCHECK_LE(code->function->sig->parameter_count() +
2088 code->locals.type_list.size() +
2089 code->side_table->max_stack_height_,
2090 stack_limit_ - stack_.get() - frames_.back().sp);
2092 Decoder decoder(code->start, code->end);
2093 pc_t limit = code->end - code->start;
2094 bool hit_break =
false;
2097 #define PAUSE_IF_BREAK_FLAG(flag) \ 2098 if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \ 2103 DCHECK_GT(limit, pc);
2104 DCHECK_NOT_NULL(code->start);
2107 const char* skip =
" ";
2109 byte orig = code->start[pc];
2110 WasmOpcode opcode =
static_cast<WasmOpcode
>(orig);
2111 if (WasmOpcodes::IsPrefixOpcode(opcode)) {
2112 opcode =
static_cast<WasmOpcode
>(opcode << 8 | code->start[pc + 1]);
2114 if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
2115 orig = code->orig_start[pc];
2116 if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
2118 static_cast<WasmOpcode
>(orig << 8 | code->orig_start[pc + 1]);
2120 if (SkipBreakpoint(code, pc)) {
2124 TRACE(
"@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
2133 if (max == 0)
break;
2137 TRACE(
"@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
2144 std::pair<uint32_t, uint32_t> stack_effect =
2145 StackEffect(codemap_->module(), frames_.back().code->function->sig,
2146 code->orig_start + pc, code->orig_end);
2147 sp_t expected_new_stack_height =
2148 StackHeight() - stack_effect.first + stack_effect.second;
2156 &decoder, code->at(pc));
2157 len = 1 + imm.length;
2162 &decoder, code->at(pc));
2163 len = 1 + imm.length;
2168 &decoder, code->at(pc));
2170 bool is_true = cond.to<
uint32_t>() != 0;
2173 len = 1 + imm.length;
2174 TRACE(
" true => fallthrough\n");
2176 len = LookupTargetDelta(code, pc);
2177 TRACE(
" false => @%zu\n", pc + len);
2182 len = LookupTargetDelta(code, pc);
2183 TRACE(
" end => @%zu\n", pc + len);
2190 Push(cond.to<int32_t>() != 0 ? tval : fval);
2195 len = DoBreak(code, pc, imm.depth);
2196 TRACE(
" br => @%zu\n", pc + len);
2202 bool is_true = cond.to<
uint32_t>() != 0;
2204 len = DoBreak(code, pc, imm.depth);
2205 TRACE(
" br_if => @%zu\n", pc + len);
2207 TRACE(
" false => fallthrough\n");
2208 len = 1 + imm.length;
2212 case kExprBrTable: {
2218 if (key >= imm.table_count) key = imm.table_count;
2220 DCHECK(iterator.has_next());
2221 depth = iterator.next();
2223 len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
2224 TRACE(
" br[%u] => @%zu\n", key, pc + key + len);
2228 size_t arity = code->function->sig->return_count();
2229 if (!DoReturn(&decoder, &code, &pc, &limit, arity))
return;
2230 PAUSE_IF_BREAK_FLAG(AfterReturn);
2233 case kExprUnreachable: {
2234 return DoTrap(kTrapUnreachable, pc);
2239 case kExprI32Const: {
2242 len = 1 + imm.length;
2245 case kExprI64Const: {
2248 len = 1 + imm.length;
2251 case kExprF32Const: {
2254 len = 1 + imm.length;
2257 case kExprF64Const: {
2260 len = 1 + imm.length;
2263 case kExprGetLocal: {
2265 Push(GetStackValue(frames_.back().sp + imm.index));
2266 len = 1 + imm.length;
2269 case kExprSetLocal: {
2272 SetStackValue(frames_.back().sp + imm.index, val);
2273 len = 1 + imm.length;
2276 case kExprTeeLocal: {
2279 SetStackValue(frames_.back().sp + imm.index, val);
2281 len = 1 + imm.length;
2288 case kExprCallFunction: {
2292 if (target->function->imported) {
2294 ExternalCallResult result =
2295 CallImportedFunction(target->function->func_index);
2296 switch (result.type) {
2297 case ExternalCallResult::INTERNAL:
2299 target = result.interpreter_code;
2300 DCHECK(!target->function->imported);
2302 case ExternalCallResult::INVALID_FUNC:
2303 case ExternalCallResult::SIGNATURE_MISMATCH:
2306 case ExternalCallResult::EXTERNAL_RETURNED:
2307 PAUSE_IF_BREAK_FLAG(AfterCall);
2308 len = 1 + imm.length;
2310 case ExternalCallResult::EXTERNAL_UNWOUND:
2313 if (result.type != ExternalCallResult::INTERNAL)
break;
2316 if (!DoCall(&decoder, target, &pc, &limit))
return;
2318 PAUSE_IF_BREAK_FLAG(AfterCall);
2321 case kExprCallIndirect: {
2326 DCHECK_LE(module()->tables.size(), 1u);
2328 ExternalCallResult result =
2329 CallIndirectFunction(0, entry_index, imm.sig_index);
2330 switch (result.type) {
2331 case ExternalCallResult::INTERNAL:
2333 if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
2335 code = result.interpreter_code;
2336 PAUSE_IF_BREAK_FLAG(AfterCall);
2338 case ExternalCallResult::INVALID_FUNC:
2339 return DoTrap(kTrapFuncInvalid, pc);
2340 case ExternalCallResult::SIGNATURE_MISMATCH:
2341 return DoTrap(kTrapFuncSigMismatch, pc);
2342 case ExternalCallResult::EXTERNAL_RETURNED:
2343 PAUSE_IF_BREAK_FLAG(AfterCall);
2344 len = 1 + imm.length;
2346 case ExternalCallResult::EXTERNAL_UNWOUND:
2350 case kExprGetGlobal: {
2353 const WasmGlobal* global = &module()->globals[imm.index];
2354 byte* ptr = GetGlobalPtr(global);
2356 switch (global->type) {
2357 #define CASE_TYPE(wasm, ctype) \ 2360 ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \ 2362 WASM_CTYPES(CASE_TYPE)
2368 len = 1 + imm.length;
2371 case kExprSetGlobal: {
2374 const WasmGlobal* global = &module()->globals[imm.index];
2375 byte* ptr = GetGlobalPtr(global);
2377 switch (global->type) {
2378 #define CASE_TYPE(wasm, ctype) \ 2380 WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \ 2383 WASM_CTYPES(CASE_TYPE)
2388 len = 1 + imm.length;
2392 #define LOAD_CASE(name, ctype, mtype, rep) \ 2393 case kExpr##name: { \ 2394 if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len, \ 2395 MachineRepresentation::rep)) \ 2400 LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
2401 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
2402 LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
2403 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
2404 LOAD_CASE(I64LoadMem8S,
int64_t, int8_t, kWord8);
2405 LOAD_CASE(I64LoadMem8U,
int64_t, uint8_t, kWord16);
2406 LOAD_CASE(I64LoadMem16S,
int64_t, int16_t, kWord16);
2407 LOAD_CASE(I64LoadMem16U,
int64_t, uint16_t, kWord16);
2408 LOAD_CASE(I64LoadMem32S,
int64_t, int32_t, kWord32);
2410 LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
2413 LOAD_CASE(F64LoadMem,
Float64, uint64_t, kFloat64);
2416 #define STORE_CASE(name, ctype, mtype, rep) \ 2417 case kExpr##name: { \ 2418 if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len, \ 2419 MachineRepresentation::rep)) \ 2424 STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
2425 STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
2426 STORE_CASE(I64StoreMem8,
int64_t, int8_t, kWord8);
2427 STORE_CASE(I64StoreMem16,
int64_t, int16_t, kWord16);
2428 STORE_CASE(I64StoreMem32,
int64_t, int32_t, kWord32);
2429 STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
2432 STORE_CASE(F64StoreMem,
Float64, uint64_t, kFloat64);
2435 #define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \ 2436 case kExpr##name: { \ 2437 uint32_t index = Pop().to<uint32_t>(); \ 2439 Address addr = BoundsCheckMem<mtype>(0, index); \ 2444 result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \ 2446 Push(WasmValue(result)); \ 2449 ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
2450 ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
2451 ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
2452 ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
2453 ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
2454 ASMJS_LOAD_CASE(F32AsmjsLoadMem,
float,
float,
2455 std::numeric_limits<float>::quiet_NaN());
2456 ASMJS_LOAD_CASE(F64AsmjsLoadMem,
double,
double,
2457 std::numeric_limits<double>::quiet_NaN());
2458 #undef ASMJS_LOAD_CASE 2460 #define ASMJS_STORE_CASE(name, ctype, mtype) \ 2461 case kExpr##name: { \ 2462 WasmValue val = Pop(); \ 2463 uint32_t index = Pop().to<uint32_t>(); \ 2464 Address addr = BoundsCheckMem<mtype>(0, index); \ 2466 *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \ 2472 ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
2473 ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
2474 ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
2475 ASMJS_STORE_CASE(F32AsmjsStoreMem,
float,
float);
2476 ASMJS_STORE_CASE(F64AsmjsStoreMem,
double,
double);
2477 #undef ASMJS_STORE_CASE 2478 case kExprMemoryGrow: {
2483 instance_object_->GetIsolate());
2484 Isolate* isolate = memory->GetIsolate();
2485 int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
2487 len = 1 + imm.length;
2490 if (max > 0) max = std::max(0, max - 1000);
2493 case kExprMemorySize: {
2496 Push(
WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
2498 len = 1 + imm.length;
2504 case kExprI32ReinterpretF32: {
2506 Push(
WasmValue(ExecuteI32ReinterpretF32(val)));
2509 case kExprI64ReinterpretF64: {
2511 Push(
WasmValue(ExecuteI64ReinterpretF64(val)));
2514 #define SIGN_EXTENSION_CASE(name, wtype, ntype) \ 2515 case kExpr##name: { \ 2516 ntype val = static_cast<ntype>(Pop().to<wtype>()); \ 2517 Push(WasmValue(static_cast<wtype>(val))); \ 2520 SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
2521 SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
2522 SIGN_EXTENSION_CASE(I64SExtendI8,
int64_t, int8_t);
2523 SIGN_EXTENSION_CASE(I64SExtendI16,
int64_t, int16_t);
2524 SIGN_EXTENSION_CASE(I64SExtendI32,
int64_t, int32_t);
2525 #undef SIGN_EXTENSION_CASE 2526 case kNumericPrefix: {
2528 if (!ExecuteNumericOp(opcode, &decoder, code, pc, len))
return;
2531 case kAtomicPrefix: {
2532 if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len))
return;
2537 if (!ExecuteSimdOp(opcode, &decoder, code, pc, len))
return;
2541 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \ 2542 case kExpr##name: { \ 2543 WasmValue rval = Pop(); \ 2544 WasmValue lval = Pop(); \ 2545 auto result = lval.to<ctype>() op rval.to<ctype>(); \ 2546 possible_nondeterminism_ |= has_nondeterminism(result); \ 2547 Push(WasmValue(result)); \ 2550 FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
2551 #undef EXECUTE_SIMPLE_BINOP 2553 #define EXECUTE_OTHER_BINOP(name, ctype) \ 2554 case kExpr##name: { \ 2555 TrapReason trap = kTrapCount; \ 2556 ctype rval = Pop().to<ctype>(); \ 2557 ctype lval = Pop().to<ctype>(); \ 2558 auto result = Execute##name(lval, rval, &trap); \ 2559 possible_nondeterminism_ |= has_nondeterminism(result); \ 2560 if (trap != kTrapCount) return DoTrap(trap, pc); \ 2561 Push(WasmValue(result)); \ 2564 FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
2565 #undef EXECUTE_OTHER_BINOP 2567 #define EXECUTE_UNOP(name, ctype, exec_fn) \ 2568 case kExpr##name: { \ 2569 TrapReason trap = kTrapCount; \ 2570 ctype val = Pop().to<ctype>(); \ 2571 auto result = exec_fn(val, &trap); \ 2572 possible_nondeterminism_ |= has_nondeterminism(result); \ 2573 if (trap != kTrapCount) return DoTrap(trap, pc); \ 2574 Push(WasmValue(result)); \ 2578 #define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name) 2579 FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
2580 #undef EXECUTE_OTHER_UNOP 2582 #define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \ 2583 EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>) 2584 FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
2585 #undef EXECUTE_I32CONV_FLOATOP 2589 FATAL(
"Unknown or unimplemented opcode #%d:%s", code->start[pc],
2590 OpcodeName(code->start[pc]));
2595 if (!WasmOpcodes::IsControlOpcode(opcode)) {
2596 DCHECK_EQ(expected_new_stack_height, StackHeight());
2603 TRACE(
"@%-3zu: ImplicitReturn\n", pc);
2604 if (!DoReturn(&decoder, &code, &pc, &limit,
2605 code->function->sig->return_count()))
2607 PAUSE_IF_BREAK_FLAG(AfterReturn);
2609 #undef PAUSE_IF_BREAK_FLAG 2612 state_ = WasmInterpreter::PAUSED;
2613 break_pc_ = hit_break ? pc : kInvalidPc;
2618 DCHECK_GT(frames_.size(), 0);
2619 DCHECK_GT(StackHeight(), frames_.back().llimit());
2624 DCHECK_GE(StackHeight(), n);
2625 DCHECK_GT(frames_.size(), 0);
2627 DCHECK_GE(StackHeight() - n, frames_.back().llimit());
2638 DCHECK_NE(kWasmStmt, val.type());
2639 DCHECK_LE(1, stack_limit_ - sp_);
2643 void Push(
WasmValue* vals,
size_t arity) {
2644 DCHECK_LE(arity, stack_limit_ - sp_);
2645 for (
WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
2646 DCHECK_NE(kWasmStmt, val->type());
2648 memcpy(sp_, vals, arity *
sizeof(*sp_));
2652 void EnsureStackSpace(
size_t size) {
2653 if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size))
return;
2654 size_t old_size = stack_limit_ - stack_.get();
2655 size_t requested_size =
2656 base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
2657 size_t new_size = Max(
size_t{8}, Max(2 * old_size, requested_size));
2658 std::unique_ptr<WasmValue[]> new_stack(
new WasmValue[new_size]);
2659 memcpy(new_stack.get(), stack_.get(), old_size *
sizeof(*sp_));
2660 sp_ = new_stack.get() + (sp_ - stack_.get());
2661 stack_ = std::move(new_stack);
2662 stack_limit_ = stack_.get() + new_size;
  // Number of values currently on the interpreter's value stack.
  sp_t StackHeight() { return sp_ - stack_.get(); }
2667 void TraceValueStack() {
2669 if (!FLAG_trace_wasm_interpreter)
return;
2670 Frame* top = frames_.size() > 0 ? &frames_.back() :
nullptr;
2671 sp_t sp = top ? top->sp : 0;
2672 sp_t plimit = top ? top->plimit() : 0;
2673 sp_t llimit = top ? top->llimit() : 0;
2674 for (
size_t i = sp;
i < StackHeight(); ++
i) {
2676 PrintF(
" p%zu:",
i);
2677 else if (
i < llimit)
2678 PrintF(
" l%zu:",
i);
2680 PrintF(
" s%zu:",
i);
2682 switch (val.type()) {
2684 PrintF(
"i32:%d", val.to<int32_t>());
2687 PrintF(
"i64:%" PRId64
"", val.to<
int64_t>());
2690 PrintF(
"f32:%f", val.to<
float>());
2693 PrintF(
"f64:%lf", val.to<
double>());
2706 ExternalCallResult TryHandleException(
Isolate* isolate) {
2707 if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
2708 return {ExternalCallResult::EXTERNAL_UNWOUND};
2710 return {ExternalCallResult::EXTERNAL_RETURNED};
2713 ExternalCallResult CallExternalWasmFunction(
Isolate* isolate,
2717 if (code->kind() == WasmCode::kWasmToJsWrapper &&
2718 !IsJSCompatibleSignature(sig)) {
2719 isolate->Throw(*isolate->factory()->NewTypeError(
2720 MessageTemplate::kWasmTrapTypeError));
2721 return TryHandleException(isolate);
2726 WasmDebugInfo::GetCWasmEntry(debug_info, sig);
2728 TRACE(
" => Calling external wasm function\n");
2733 int num_args =
static_cast<int>(sig->parameter_count());
2734 std::vector<uint8_t> arg_buffer(num_args * 8);
2737 for (
int i = 0;
i < num_args; ++
i) {
2738 int param_size = ValueTypes::ElementSizeInBytes(sig->GetParam(
i));
2739 if (arg_buffer.size() < offset + param_size) {
2740 arg_buffer.resize(std::max(2 * arg_buffer.size(), offset + param_size));
2742 Address address =
reinterpret_cast<Address>(arg_buffer.data()) + offset;
2743 switch (sig->GetParam(
i)) {
2745 WriteUnalignedValue(address, wasm_args[
i].to<uint32_t>());
2748 WriteUnalignedValue(address, wasm_args[
i].to<uint64_t>());
2751 WriteUnalignedValue(address, wasm_args[
i].to<float>());
2754 WriteUnalignedValue(address, wasm_args[
i].to<double>());
2759 offset += param_size;
2764 size_t return_size = 0;
2765 for (ValueType t : sig->returns()) {
2766 return_size += ValueTypes::ElementSizeInBytes(t);
2768 if (arg_buffer.size() < return_size) {
2769 arg_buffer.resize(return_size);
2774 Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
2776 DCHECK(!arg_buffer_obj->IsHeapObject());
2778 reinterpret_cast<Object*>(code->instruction_start()), isolate);
2779 DCHECK(!code_entry_obj->IsHeapObject());
2781 static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
2782 "code below needs adaption");
2783 Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
2784 args[compiler::CWasmEntryParameters::kCodeEntry] = code_entry_obj;
2785 args[compiler::CWasmEntryParameters::kObjectRef] = object_ref;
2786 args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
2789 trap_handler::SetThreadInWasm();
2791 Execution::Call(isolate, wasm_entry, receiver, arraysize(args), args);
2792 TRACE(
" => External wasm function returned%s\n",
2793 maybe_retval.is_null() ?
" with exception" :
"");
2795 if (maybe_retval.is_null()) {
2799 if (trap_handler::IsThreadInWasm()) {
2800 trap_handler::ClearThreadInWasm();
2802 return TryHandleException(isolate);
2805 trap_handler::ClearThreadInWasm();
2810 if (sig->return_count() > 0) {
2812 DCHECK_EQ(1, sig->return_count());
2814 switch (sig->GetReturn()) {
2816 Push(
WasmValue(ReadUnalignedValue<uint32_t>(address)));
2819 Push(
WasmValue(ReadUnalignedValue<uint64_t>(address)));
2822 Push(
WasmValue(ReadUnalignedValue<float>(address)));
2825 Push(
WasmValue(ReadUnalignedValue<double>(address)));
2831 return {ExternalCallResult::EXTERNAL_RETURNED};
2836 NativeModule* native_module = code_manager->LookupNativeModule(target);
2837 if (native_module->is_jump_table_slot(target)) {
2839 native_module->GetFunctionIndexFromJumpTableSlot(target);
2840 return native_module->code(func_index);
2842 WasmCode* code = native_module->Lookup(target);
2843 DCHECK_EQ(code->instruction_start(), target);
2847 ExternalCallResult CallImportedFunction(
uint32_t function_index) {
2848 DCHECK_GT(module()->num_imported_functions, function_index);
2851 Isolate* isolate = instance_object_->GetIsolate();
2857 GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
2858 FunctionSig* sig = module()->functions[function_index].sig;
2859 return CallExternalWasmFunction(isolate, object_ref, code, sig);
2862 ExternalCallResult CallIndirectFunction(
uint32_t table_index,
2865 if (codemap()->call_indirect_through_module()) {
2868 codemap()->GetIndirectCode(table_index, entry_index);
2869 if (!code)
return {ExternalCallResult::INVALID_FUNC};
2870 if (code->function->sig_index != sig_index) {
2872 int function_canonical_id =
2873 module()->signature_ids[code->function->sig_index];
2874 int expected_canonical_id = module()->signature_ids[sig_index];
2875 DCHECK_EQ(function_canonical_id,
2876 module()->signature_map.Find(*code->function->sig));
2877 if (function_canonical_id != expected_canonical_id) {
2878 return {ExternalCallResult::SIGNATURE_MISMATCH};
2881 return {ExternalCallResult::INTERNAL, code};
2884 Isolate* isolate = instance_object_->GetIsolate();
2885 uint32_t expected_sig_id = module()->signature_ids[sig_index];
2886 DCHECK_EQ(expected_sig_id,
2887 module()->signature_map.Find(*module()->signatures[sig_index]));
2891 CHECK_EQ(0, table_index);
2893 if (entry_index >= instance_object_->indirect_function_table_size()) {
2894 return {ExternalCallResult::INVALID_FUNC};
2899 if (entry.sig_id() !=
static_cast<int32_t
>(expected_sig_id)) {
2900 return {ExternalCallResult::SIGNATURE_MISMATCH};
2904 FunctionSig* signature = module()->signatures[sig_index];
2907 GetTargetCode(isolate->wasm_engine()->code_manager(), entry.target());
2909 if (!object_ref->IsWasmInstanceObject() ||
2910 !instance_object_.is_identical_to(object_ref) ) {
2911 return CallExternalWasmFunction(isolate, object_ref, code, signature);
2914 DCHECK(code->kind() == WasmCode::kInterpreterEntry ||
2915 code->kind() == WasmCode::kFunction);
2916 return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
2919 inline Activation current_activation() {
2920 return activations_.empty() ? Activation(0, 0) : activations_.back();
2927 : thread_(thread), index_(index) {
2928 DCHECK_LE(0, index);
  // The wasm function executed by this frame.
  const WasmFunction* function() const { return frame()->code->function; }
2934 DCHECK_LE(0, frame()->pc);
2935 DCHECK_GE(kMaxInt, frame()->pc);
2936 return static_cast<int>(frame()->pc);
2939 int GetParameterCount()
const {
2940 DCHECK_GE(kMaxInt,
function()->sig->parameter_count());
2941 return static_cast<int>(
function()->sig->parameter_count());
2944 int GetLocalCount()
const {
2945 size_t num_locals =
function()->sig->parameter_count() +
2946 frame()->code->locals.type_list.size();
2947 DCHECK_GE(kMaxInt, num_locals);
2948 return static_cast<int>(num_locals);
2951 int GetStackHeight()
const {
2953 static_cast<size_t>(index_) + 1 == thread_->frames_.size();
2954 size_t stack_limit =
2955 is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
2956 DCHECK_LE(frame()->sp, stack_limit);
2957 size_t frame_size = stack_limit - frame()->sp;
2958 DCHECK_LE(GetLocalCount(), frame_size);
2959 return static_cast<int>(frame_size) - GetLocalCount();
2962 WasmValue GetLocalValue(
int index)
const {
2963 DCHECK_LE(0, index);
2964 DCHECK_GT(GetLocalCount(), index);
2965 return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
2968 WasmValue GetStackValue(
int index)
const {
2969 DCHECK_LE(0, index);
2971 DCHECK_GT(GetStackHeight(), index);
2972 return thread_->GetStackValue(static_cast<int>(frame()->sp) +
2973 GetLocalCount() + index);
  // The underlying interpreter frame; {index_} must be a valid frame index.
  ThreadImpl::Frame* frame() const {
    DCHECK_GT(thread_->frames_.size(), index_);
    return &thread_->frames_[index_];
  }
// Converts the public WasmInterpreter::Thread handle into its private
// implementation object via reinterpret_cast (the two are layout-punned).
ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
  return reinterpret_cast<ThreadImpl*>(thread);
}

// Converters between the public InterpretedFrame and its implementation,
// using the same reinterpret_cast punning as above.
InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
  return reinterpret_cast<InterpretedFrame*>(impl);
}
const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
  return reinterpret_cast<const InterpretedFrameImpl*>(frame);
}
3015 WasmInterpreter::State WasmInterpreter::Thread::state() {
3016 return ToImpl(
this)->state();
3018 void WasmInterpreter::Thread::InitFrame(
const WasmFunction*
function,
3020 ToImpl(
this)->InitFrame(
function, args);
3022 WasmInterpreter::State WasmInterpreter::Thread::Run(
int num_steps) {
3023 return ToImpl(
this)->Run(num_steps);
3025 void WasmInterpreter::Thread::Pause() {
return ToImpl(
this)->Pause(); }
3026 void WasmInterpreter::Thread::Reset() {
return ToImpl(
this)->Reset(); }
3027 WasmInterpreter::Thread::ExceptionHandlingResult
3028 WasmInterpreter::Thread::HandleException(Isolate* isolate) {
3029 return ToImpl(
this)->HandleException(isolate);
3031 pc_t WasmInterpreter::Thread::GetBreakpointPc() {
3032 return ToImpl(
this)->GetBreakpointPc();
3034 int WasmInterpreter::Thread::GetFrameCount() {
3035 return ToImpl(
this)->GetFrameCount();
// Thread API forwarders, continued: each method delegates to the matching
// ThreadImpl method via ToImpl().
WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
  DCHECK_LE(0, index);
  DCHECK_GT(GetFrameCount(), index);
  // The frame wrapper is heap-allocated; FramePtr's deleter frees it.
  return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
  return ToImpl(this)->GetReturnValue(index);
}
TrapReason WasmInterpreter::Thread::GetTrapReason() {
  return ToImpl(this)->GetTrapReason();
}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
  return ToImpl(this)->PossibleNondeterminism();
}
uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
  return ToImpl(this)->NumInterpretedCalls();
}
void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
  ToImpl(this)->AddBreakFlags(flags);
}
void WasmInterpreter::Thread::ClearBreakFlags() {
  ToImpl(this)->ClearBreakFlags();
}
uint32_t WasmInterpreter::Thread::NumActivations() {
  return ToImpl(this)->NumActivations();
}
uint32_t WasmInterpreter::Thread::StartActivation() {
  return ToImpl(this)->StartActivation();
}
void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
  ToImpl(this)->FinishActivation(id);
}
3070 return ToImpl(
this)->ActivationFrameBase(
id);
3087 : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
3088 codemap_(module, module_bytes_.data(), zone),
3090 threads_.emplace_back(zone, &codemap_, instance_object);
3096 Address* global_handle_location =
3097 reinterpret_cast<Address*
>(data.GetParameter());
3098 GlobalHandles::Destroy(global_handle_location);
3105 Address* global_handle_location = weak_instance.location();
3106 GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
3107 &NopFinalizer, v8::WeakCallbackType::kParameter);
3108 return weak_instance;
// The internals object is placement-new'ed into {zone_}; the zone owns the
// memory, so the destructor only runs ~WasmInterpreterInternals explicitly.
// The instance object is wrapped in a weak global handle via MakeWeak.
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      internals_(new (&zone_) WasmInterpreterInternals(
          &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
// {internals_} lives in {zone_} (placement new), so only its destructor runs;
// the zone releases the storage.
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }

// Convenience wrappers operating on the first interpreter thread.
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
3128 bool WasmInterpreter::SetBreakpoint(
const WasmFunction*
function, pc_t pc,
3130 InterpreterCode* code = internals_->codemap_.GetCode(
function);
3131 size_t size =
static_cast<size_t>(code->end - code->start);
3133 if (pc < code->locals.encoded_size || pc >= size)
return false;
3135 if (enabled && code->orig_start == code->start) {
3136 code->start =
reinterpret_cast<byte*
>(zone_.New(size));
3137 memcpy(code->start, code->orig_start, size);
3138 code->end = code->start + size;
3140 bool prev = code->start[pc] == kInternalBreakpoint;
3142 code->start[pc] = kInternalBreakpoint;
3144 code->start[pc] = code->orig_start[pc];
3149 bool WasmInterpreter::GetBreakpoint(
const WasmFunction*
function, pc_t pc) {
3150 InterpreterCode* code = internals_->codemap_.GetCode(
function);
3151 size_t size =
static_cast<size_t>(code->end - code->start);
3153 if (pc < code->locals.encoded_size || pc >= size)
return false;
3155 return code->start[pc] == kInternalBreakpoint;
3158 bool WasmInterpreter::SetTracing(
const WasmFunction*
function,
bool enabled) {
3163 int WasmInterpreter::GetThreadCount() {
// Returns the public handle for thread {id}.
// NOTE(review): {id} indexes {threads_} unchecked here; presumably only a
// single thread (id 0) exists — confirm a bounds check was not dropped.
WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
  return ToThread(&internals_->threads_[id]);
}
// Testing only: register {function} with the code map without attaching any
// code bytes (start/end are nullptr).
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  internals_->codemap_.AddFunction(function, nullptr, nullptr);
}
3176 void WasmInterpreter::SetFunctionCodeForTesting(
const WasmFunction*
function,
3179 internals_->codemap_.SetFunctionCode(
function, start, end);
// Testing only: make call_indirect resolve through the module's code map.
void WasmInterpreter::SetCallIndirectTestMode() {
  internals_->codemap_.set_call_indirect_through_module(true);
}
// Testing only: compute the control-transfer side table for the raw function
// body [start, end), wrapping it in dummy signature/function structures so
// the side-table builder has something to refer to.
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
  // Empty signature (0 returns, 0 params) and a zero-initialized function.
  FunctionSig sig(0, 0, nullptr);
  WasmFunction function{&sig, 0, 0, {0, 0}, false, false};
  InterpreterCode code{
      &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
  // Build the side table; its map is returned by value.
  SideTable side_table(zone, module, &code);
  return side_table.map_;
}
//============================================================================
// Public API of InterpretedFrame: every accessor forwards to the
// InterpretedFrameImpl through ToImpl().
//============================================================================
const WasmFunction* InterpretedFrame::function() const {
  return ToImpl(this)->function();
}
int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
int InterpretedFrame::GetParameterCount() const {
  return ToImpl(this)->GetParameterCount();
}
int InterpretedFrame::GetLocalCount() const {
  return ToImpl(this)->GetLocalCount();
}
int InterpretedFrame::GetStackHeight() const {
  return ToImpl(this)->GetStackHeight();
}
WasmValue InterpretedFrame::GetLocalValue(int index) const {
  return ToImpl(this)->GetLocalValue(index);
}
WasmValue InterpretedFrame::GetStackValue(int index) const {
  return ToImpl(this)->GetStackValue(index);
}
3222 void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
3228 #undef FOREACH_INTERNAL_OPCODE 3230 #undef FOREACH_SIMPLE_BINOP 3231 #undef FOREACH_OTHER_BINOP 3232 #undef FOREACH_I32CONV_FLOATOP 3233 #undef FOREACH_OTHER_UNOP