#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_

#include "src/base/bits.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"

namespace v8 {
namespace internal {
namespace wasm {

class LiftoffAssembler : public TurboAssembler {
 public:
  // Each slot in our stack frame currently has exactly 8 bytes.
  static constexpr uint32_t kStackSlotSize = 8;

  static constexpr ValueType kWasmIntPtr =
      kPointerSize == 8 ? kWasmI64 : kWasmI32;
  class VarState {
   public:
    enum Location : uint8_t { kStack, kRegister, KIntConst };

    explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
    explicit VarState(ValueType type, LiftoffRegister r)
        : loc_(kRegister), type_(type), reg_(r) {
      DCHECK_EQ(r.reg_class(), reg_class_for(type));
    }
    explicit VarState(ValueType type, int32_t i32_const)
        : loc_(KIntConst), type_(type), i32_const_(i32_const) {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
    }
    bool operator==(const VarState& other) const {
      if (loc_ != other.loc_) return false;
      if (type_ != other.type_) return false;
      switch (loc_) {
        case kStack:
          return true;
        case kRegister:
          return reg_ == other.reg_;
        case KIntConst:
          return i32_const_ == other.i32_const_;
      }
      UNREACHABLE();
    }
    bool is_stack() const { return loc_ == kStack; }
    bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
    bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
    bool is_reg() const { return loc_ == kRegister; }
    bool is_const() const { return loc_ == KIntConst; }
    ValueType type() const { return type_; }

    Location loc() const { return loc_; }
    int32_t i32_const() const {
      DCHECK_EQ(loc_, KIntConst);
      return i32_const_;
    }

    // The constant is stored as a 32-bit value; for kWasmI64 it represents
    // the sign-extended 64-bit constant.
    WasmValue constant() const {
      DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
      DCHECK_EQ(loc_, KIntConst);
      return type_ == kWasmI32 ? WasmValue(i32_const_)
                               : WasmValue(int64_t{i32_const_});
    }
    Register gp_reg() const { return reg().gp(); }
    DoubleRegister fp_reg() const { return reg().fp(); }
    LiftoffRegister reg() const {
      DCHECK_EQ(loc_, kRegister);
      return reg_;
    }
    RegClass reg_class() const { return reg().reg_class(); }

    void MakeStack() { loc_ = kStack; }

   private:
    Location loc_;
    ValueType type_;

    union {
      LiftoffRegister reg_;  // used if loc_ == kRegister
      int32_t i32_const_;    // used if loc_ == KIntConst
    };
  };
  ASSERT_TRIVIALLY_COPYABLE(VarState);
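  // Illustrative sketch (not from the original header): a {VarState} records
  // where one wasm stack value currently lives, without emitting any code.
  // A constant and a register-cached value could be built as:
  //
  //   VarState c(kWasmI32, int32_t{42});       // loc() == KIntConst
  //   VarState r(kWasmI32, some_gp_register);  // loc() == kRegister
  //   DCHECK(c.is_const() && r.is_gp_reg());
  //
  // {some_gp_register} is a hypothetical LiftoffRegister of class kGpReg.
  // Keeping the register/constant payload in a union keeps the class small
  // and trivially copyable, as asserted above.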
  struct CacheState {
    // Allow default construction and move construction/assignment.
    CacheState() = default;
    CacheState(CacheState&&) = default;
    CacheState& operator=(CacheState&&) = default;

    std::vector<VarState> stack_state;
    LiftoffRegList used_registers;
    uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
    LiftoffRegList last_spilled_regs;
    bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        // An i64 register pair needs two free gp registers.
        LiftoffRegList available_regs =
            kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
        return available_regs.GetNumRegsSet() >= 2;
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return has_unused_register(candidates, pinned);
    }
    bool has_unused_register(LiftoffRegList candidates,
                             LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs =
          candidates.MaskOut(used_registers).MaskOut(pinned);
      return !available_regs.is_empty();
    }
    LiftoffRegister unused_register(RegClass rc,
                                    LiftoffRegList pinned = {}) const {
      if (kNeedI64RegPair && rc == kGpRegPair) {
        // Pin the low half so that the high half is a different register.
        Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
        Register high = unused_register(kGpReg, pinned).gp();
        return LiftoffRegister::ForPair(low, high);
      }
      DCHECK(rc == kGpReg || rc == kFpReg);
      LiftoffRegList candidates = GetCacheRegList(rc);
      return unused_register(candidates, pinned);
    }
    LiftoffRegister unused_register(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) const {
      LiftoffRegList available_regs =
          candidates.MaskOut(used_registers).MaskOut(pinned);
      return available_regs.GetFirstRegSet();
    }
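    // Example (sketch, hypothetical registers): with {used_registers} = {r0}
    // and {pinned} = {r1}, unused_register({r0, r1, r2}) masks out both sets
    // and returns {r2}, the first remaining candidate. No spilling happens
    // here; spilling is handled by LiftoffAssembler::GetUnusedRegister below.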
    void inc_used(LiftoffRegister reg) {
      if (reg.is_pair()) {
        inc_used(reg.low());
        inc_used(reg.high());
        return;
      }
      used_registers.set(reg);
      DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
      ++register_use_count[reg.liftoff_code()];
    }
    void dec_used(LiftoffRegister reg) {
      DCHECK(is_used(reg));
      if (reg.is_pair()) {
        dec_used(reg.low());
        dec_used(reg.high());
        return;
      }
      int code = reg.liftoff_code();
      DCHECK_LT(0, register_use_count[code]);
      if (--register_use_count[code] == 0) used_registers.clear(reg);
    }
    bool is_used(LiftoffRegister reg) const {
      if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
      bool used = used_registers.has(reg);
      DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
      return used;
    }
    uint32_t get_use_count(LiftoffRegister reg) const {
      if (reg.is_pair()) {
        // Both halves of a pair are kept at the same use count.
        DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
                  register_use_count[reg.high().liftoff_code()]);
        reg = reg.low();
      }
      DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
      return register_use_count[reg.liftoff_code()];
    }
    void clear_used(LiftoffRegister reg) {
      register_use_count[reg.liftoff_code()] = 0;
      used_registers.clear(reg);
    }

    bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
    void reset_used_registers() {
      used_registers = {};
      memset(register_use_count, 0, sizeof(register_use_count));
    }
    LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
      LiftoffRegList unpinned = candidates.MaskOut(pinned);
      DCHECK(!unpinned.is_empty());
      // This method should only be called if none of the candidates is free.
      DCHECK(unpinned.MaskOut(used_registers).is_empty());
      LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
      if (unspilled.is_empty()) {
        unspilled = unpinned;
        last_spilled_regs = {};
      }
      LiftoffRegister reg = unspilled.GetFirstRegSet();
      last_spilled_regs.set(reg);
      return reg;
    }
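    // Note: {last_spilled_regs} implements a simple round-robin heuristic.
    // E.g. with two unpinned candidates {r0, r1} (hypothetical), successive
    // calls spill r0, then r1, then reset the list and start over with r0,
    // instead of always evicting the same register.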
    uint32_t stack_height() const {
      return static_cast<uint32_t>(stack_state.size());
    }
  };
  void PushRegister(ValueType type, LiftoffRegister reg) {
    DCHECK_EQ(reg_class_for(type), reg.reg_class());
    cache_state_.inc_used(reg);
    cache_state_.stack_state.emplace_back(type, reg);
  }
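  // Typical use (sketch): after emitting a binary operation, push its result
  // without generating any code; only the cache state changes:
  //
  //   LiftoffRegister dst = GetUnusedRegister(kGpReg, pinned);
  //   emit_i32_add(dst.gp(), lhs.gp(), rhs.gp());
  //   PushRegister(kWasmI32, dst);
  //
  // {pinned}, {lhs} and {rhs} are assumed to be set up by the caller.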
  void SpillRegister(LiftoffRegister);
  uint32_t GetNumUses(LiftoffRegister reg) {
    return cache_state_.get_use_count(reg);
  }
  // Get an unused register for class {rc}, reusing one of {try_first} if
  // possible.
  LiftoffRegister GetUnusedRegister(
      RegClass rc, std::initializer_list<LiftoffRegister> try_first,
      LiftoffRegList pinned = {}) {
    for (LiftoffRegister reg : try_first) {
      DCHECK_EQ(reg.reg_class(), rc);
      if (cache_state_.is_free(reg)) return reg;
    }
    return GetUnusedRegister(rc, pinned);
  }
  // Get an unused register for class {rc}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
    if (kNeedI64RegPair && rc == kGpRegPair) {
      LiftoffRegList candidates = kGpCacheRegList;
      Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
      Register high = GetUnusedRegister(candidates, pinned).gp();
      return LiftoffRegister::ForPair(low, high);
    }
    DCHECK(rc == kGpReg || rc == kFpReg);
    LiftoffRegList candidates = GetCacheRegList(rc);
    return GetUnusedRegister(candidates, pinned);
  }
  // Get an unused register of {candidates}, potentially spilling to free one.
  LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
                                    LiftoffRegList pinned = {}) {
    if (cache_state_.has_unused_register(candidates, pinned)) {
      return cache_state_.unused_register(candidates, pinned);
    }
    return SpillOneRegister(candidates, pinned);
  }
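  // The three {GetUnusedRegister} overloads form a funnel: the class-based
  // overloads resolve {rc} to a candidate list, and the list-based overload
  // reuses a free register when possible and otherwise spills one. E.g.
  // (sketch): GetUnusedRegister(kFpReg, {}) never fails, but may emit spill
  // code for a floating point register that currently caches a stack value.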
  void MergeFullStackWith(CacheState&);
  void MergeStackWith(CacheState&, uint32_t arity);
  void SpillAllRegisters();
  // Call this method whenever spilling something, so that the number of used
  // spill slots is tracked and the stack frame is allocated large enough.
  void RecordUsedSpillSlot(uint32_t index) {
    if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
  }
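  // Example: after RecordUsedSpillSlot(3), {num_used_spill_slots_} is at
  // least 4, so GetTotalFrameSlotCount() below reserves frame space for
  // slots 0..3 in addition to the locals.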
  // Load parameters into the right registers / stack slots for the call.
  // Move {*target} into another register if needed and update {*target} to
  // that register, or {no_reg} if the target was spilled to the stack.
  void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
                   Register* target = nullptr,
                   Register* target_instance = nullptr);
  // Process return values of the call.
  void FinishCall(FunctionSig*, compiler::CallDescriptor*);
  // Move {src} into {dst}. {src} and {dst} must be different.
  void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
  // Parallel register move: For a list of tuples <dst, src, type>, move the
  // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
  // that tuple.
  struct ParallelRegisterMoveTuple {
    LiftoffRegister dst;
    LiftoffRegister src;
    ValueType type;
    template <typename Dst, typename Src>
    ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
        : dst(dst), src(src), type(type) {}
  };
  void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
#ifdef ENABLE_SLOW_DCHECKS
  // Validate that the register use counts reflect the state of the cache.
  bool ValidateCacheState() const;
#endif
  ////////////////////////////////////
  // Platform-specific part.        //
  ////////////////////////////////////

  // This function emits machine code to prepare the stack frame, before the
  // size of the stack frame is known. It returns an offset in the machine
  // code which can later be patched (via {PatchPrepareStackFrame}) when the
  // size of the frame is known.
  inline int PrepareStackFrame();
  inline void PatchPrepareStackFrame(int offset, uint32_t stack_slots);
  inline void FinishCode();
  inline void AbortCompilation();
  inline void LoadConstant(LiftoffRegister, WasmValue,
                           RelocInfo::Mode rmode = RelocInfo::NONE);
  inline void SpillInstance(Register instance);
  inline void FillInstanceInto(Register dst);
  inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                   uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
                   uint32_t* protected_load_pc = nullptr,
                   bool is_load_mem = false);
  inline void Store(Register dst_addr, Register offset_reg,
                    uint32_t offset_imm, LiftoffRegister src, StoreType type,
                    LiftoffRegList pinned,
                    uint32_t* protected_store_pc = nullptr,
                    bool is_store_mem = false);
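  // Sketch of a guarded memory load (register names hypothetical): load an
  // i32 from {mem_start + index + 8} and record the pc of the potentially
  // trapping instruction for the out-of-bounds trap handler:
  //
  //   uint32_t protected_pc = 0;
  //   Load(dst, mem_start_reg, index_reg, /*offset_imm=*/8,
  //        LoadType::kI32Load, pinned, &protected_pc, /*is_load_mem=*/true);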
  inline void MoveStackValue(uint32_t dst_index, uint32_t src_index,
                             ValueType);
  // i32 binops.
  inline void emit_i32_add(Register dst, Register lhs, Register rhs);
  inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
  inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
  inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline void emit_i32_divu(Register dst, Register lhs, Register rhs,
                            Label* trap_div_by_zero);
  inline void emit_i32_rems(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
                            Label* trap_rem_by_zero);
  inline void emit_i32_and(Register dst, Register lhs, Register rhs);
  inline void emit_i32_or(Register dst, Register lhs, Register rhs);
  inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
  inline void emit_i32_shl(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_sar(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, Register amount,
                           LiftoffRegList pinned = {});
  inline void emit_i32_shr(Register dst, Register src, int amount);
  // i32 unops.
  inline bool emit_i32_clz(Register dst, Register src);
  inline bool emit_i32_ctz(Register dst, Register src);
  inline bool emit_i32_popcnt(Register dst, Register src);
  // i64 binops.
  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero,
                            Label* trap_div_unrepresentable);
  inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_div_by_zero);
  inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                            LiftoffRegister rhs, Label* trap_rem_by_zero);
  inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                          LiftoffRegister rhs);
  inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                           LiftoffRegister rhs);
  inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           Register amount, LiftoffRegList pinned = {});
  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                           int amount);
  inline void emit_i32_to_intptr(Register dst, Register src);
  inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
    if (kPointerSize == 8) {
      emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_add(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
    if (kPointerSize == 8) {
      emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_sub(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
    if (kPointerSize == 8) {
      emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
                   LiftoffRegister(rhs));
    } else {
      emit_i32_and(dst, lhs, rhs);
    }
  }
  inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
    if (kPointerSize == 8) {
      emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
    } else {
      emit_i32_shr(dst, src, amount);
    }
  }
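  // These {emit_ptrsize_*} helpers let platform-independent code perform
  // pointer-sized arithmetic without branching on the word size at every
  // call site. E.g. computing the end address of a memory region (sketch,
  // hypothetical registers):
  //
  //   emit_ptrsize_add(end_reg, start_reg, size_reg);
  //
  // expands to an i64 add on 64-bit targets and an i32 add on 32-bit ones.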
  // f32 binops.
  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
                                DoubleRegister rhs);
  // f32 unops.
  inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);
  // f64 binops.
  inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                           DoubleRegister rhs);
  inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
                                DoubleRegister rhs);
  // f64 unops.
  inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
  inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
  inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);
  inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
                                   LiftoffRegister src, Label* trap = nullptr);
  inline void emit_i32_signextend_i8(Register dst, Register src);
  inline void emit_i32_signextend_i16(Register dst, Register src);
  inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src);
  inline void emit_jump(Label*);
  inline void emit_jump(Register);

  inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
                             Register rhs = no_reg);
  // Set {dst} to 1 if the condition holds, otherwise to 0.
  inline void emit_i32_eqz(Register dst, Register src);
  inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
                                Register rhs);
  inline void emit_i64_eqz(Register dst, LiftoffRegister src);
  inline void emit_i64_set_cond(Condition condition, Register dst,
                                LiftoffRegister lhs, LiftoffRegister rhs);
  inline void emit_f32_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);
  inline void emit_f64_set_cond(Condition condition, Register dst,
                                DoubleRegister lhs, DoubleRegister rhs);
  inline void StackCheck(Label* ool_code, Register limit_address);

  inline void CallTrapCallbackForTesting();

  inline void AssertUnreachable(AbortReason reason);

  inline void PushRegisters(LiftoffRegList);
  inline void PopRegisters(LiftoffRegList);

  inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
  inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
                    const LiftoffRegister* rets, ValueType out_argument_type,
                    int stack_bytes, ExternalReference ext_ref);
  inline void CallNativeWasmCode(Address addr);
  inline void CallIndirect(FunctionSig* sig,
                           compiler::CallDescriptor* call_descriptor,
                           Register target);
  inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
  // Reserve space in the current frame, store the address to the space in
  // {addr}.
  inline void AllocateStackSlot(Register addr, uint32_t size);
  inline void DeallocateStackSlot(uint32_t size);
  ////////////////////////////////////
  // End of platform-specific part. //
  ////////////////////////////////////

  uint32_t num_locals() const { return num_locals_; }
  void set_num_locals(uint32_t num_locals);
  uint32_t GetTotalFrameSlotCount() const {
    return num_locals_ + num_used_spill_slots_;
  }
  ValueType local_type(uint32_t index) {
    DCHECK_GT(num_locals_, index);
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    return locals[index];
  }
  void set_local_type(uint32_t index, ValueType type) {
    ValueType* locals =
        num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
    locals[index] = type;
  }
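  // Example: with {kInlineLocalTypes} == 8, a function with at most 8 locals
  // keeps its local types in the inline {local_types_} array; functions with
  // more locals store them behind the {more_local_types_} pointer instead.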
  CacheState* cache_state() { return &cache_state_; }
  const CacheState* cache_state() const { return &cache_state_; }
  bool did_bailout() { return bailout_reason_ != nullptr; }
  const char* bailout_reason() const { return bailout_reason_; }
  void bailout(const char* reason) {
    if (bailout_reason_ != nullptr) return;
    AbortCompilation();
    bailout_reason_ = reason;
  }
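  // Typical use (sketch): a platform backend that cannot emit an operation
  // reports it once and lets compilation fall back to the optimizing
  // compiler:
  //
  //   bailout("unsupported opcode");  // hypothetical reason string
  //
  // Only the first reason is kept; subsequent calls are ignored.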
 private:
  uint32_t num_locals_ = 0;
  static constexpr uint32_t kInlineLocalTypes = 8;
  union {
    ValueType local_types_[kInlineLocalTypes];
    ValueType* more_local_types_;
  };
  static_assert(sizeof(ValueType) == 1,
                "Reconsider this inlining if ValueType gets bigger");
  CacheState cache_state_;
  uint32_t num_used_spill_slots_ = 0;
  const char* bailout_reason_ = nullptr;

  LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
                                   LiftoffRegList pinned);
};
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
#ifdef V8_TARGET_ARCH_32_BIT
namespace liftoff {
// Emit an i64 operation on a 32-bit platform by applying the given i32
// emitter to the low and high words independently, taking care of register
// aliasing between {dst}, {lhs} and {rhs}.
template <void (LiftoffAssembler::*op)(Register, Register, Register)>
void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
                                     LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  // If {dst.low()} does not alias the high word of an input, compute the low
  // word first.
  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    return;
  }
  // If {dst.high()} does not alias the low word of an input, compute the
  // high word first.
  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    return;
  }
  // Otherwise, compute the low word into a temporary register first.
  Register tmp =
      assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
  assm->Move(dst.low_gp(), tmp, kWasmI32);
}
}  // namespace liftoff
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                                   LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
      this, dst, lhs, rhs);
}
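// Adding another pairwise i64 bitwise operation on 32-bit targets would only
// require the corresponding i32 emitter, e.g. (hypothetical op):
//
//   void LiftoffAssembler::emit_i64_nand(LiftoffRegister dst,
//                                        LiftoffRegister lhs,
//                                        LiftoffRegister rhs) {
//     liftoff::EmitI64IndependentHalfOperation<
//         &LiftoffAssembler::emit_i32_nand>(this, dst, lhs, rhs);
//   }
//
// This only works for ops whose result halves depend on the corresponding
// input halves independently (and/or/xor), not for add/sub, which carry
// across the halves.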
#endif  // V8_TARGET_ARCH_32_BIT

// Helper class to gather the stack slots for a call and construct them in
// one go (used by the platform-specific {PrepareCall} implementations).
class LiftoffStackSlots {
 public:
  explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}

  void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
           RegPairHalf half) {
    slots_.emplace_back(src, src_index, half);
  }
  void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }

  inline void Construct();

 private:
  struct Slot {
    // Allow move construction.
    Slot(Slot&&) = default;
    Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
         RegPairHalf half)
        : src_(src), src_index_(src_index), half_(half) {}
    explicit Slot(const LiftoffAssembler::VarState& src)
        : src_(src), half_(kLowWord) {}

    const LiftoffAssembler::VarState src_;
    uint32_t src_index_ = 0;
    RegPairHalf half_;
  };

  std::vector<Slot> slots_;
  LiftoffAssembler* const asm_;
};

}  // namespace wasm
}  // namespace internal
}  // namespace v8
// Include platform specific implementation.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#else
#error Unsupported architecture.
#endif

#endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_