#ifndef V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_
#define V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_

#include "src/wasm/baseline/liftoff-assembler.h"

#include "src/assembler.h"
#include "src/wasm/value-type.h"

#define REQUIRE_CPU_FEATURE(name, ...)   \
  if (!CpuFeatures::IsSupported(name)) { \
    bailout("no " #name);                \
    return __VA_ARGS__;                  \
  }                                      \
  CpuFeatureScope feature(this, name);

// ebp-8 holds the instance parameter; spilled values start below the constant
// stack space.
constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
    kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline Operand GetStackSlot(uint32_t index) {
  int32_t offset = index * LiftoffAssembler::kStackSlotSize;
  return Operand(ebp, -kFirstStackSlotOffset - offset);
}
inline Operand GetHalfStackSlot(uint32_t half_index) {
  int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
  return Operand(ebp, -kFirstStackSlotOffset - offset);
}
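// Note on slot indexing (illustrative, assuming kStackSlotSize == 8): slot
// {index} starts at ebp - kFirstStackSlotOffset - 8 * index, and half slot
// {half_index} addresses the 4-byte half at
// ebp - kFirstStackSlotOffset - 4 * half_index. An i64 spilled to slot 3 thus
// has its low word at GetStackSlot(3) == GetHalfStackSlot(6) and its high word
// four bytes above, at GetHalfStackSlot(5); this is why {Spill} and {Fill}
// below address the high word with {2 * index - 1}.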
inline Operand GetInstanceOperand() { return Operand(ebp, -8); }
static constexpr LiftoffRegList kByteRegs =
    LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx>()>();
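// Only registers with an 8-bit form (eax, ebx, ecx, edx on ia32) can be used
// with byte moves like {mov_b}. {kByteRegs} is the subset of those registers
// that this code hands out as temporaries for byte operations, e.g. via
// GetUnusedRegister(liftoff::kByteRegs, pinned) in {Store} and
// {GetTmpByteRegister} below.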
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
                 int32_t offset, ValueType type) {
  Operand src(base, offset);
  switch (type) {
    case kWasmI32:
      assm->mov(dst.gp(), src);
      break;
    case kWasmI64:
      assm->mov(dst.low_gp(), src);
      assm->mov(dst.high_gp(), Operand(base, offset + 4));
      break;
    case kWasmF32:
      assm->movss(dst.fp(), src);
      break;
    case kWasmF64:
      assm->movsd(dst.fp(), src);
      break;
    default:
      UNREACHABLE();
  }
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
                  LiftoffRegister src, ValueType type) {
  Operand dst(base, offset);
  switch (type) {
    case kWasmI32:
      assm->mov(dst, src.gp());
      break;
    case kWasmI64:
      assm->mov(dst, src.low_gp());
      assm->mov(Operand(base, offset + 4), src.high_gp());
      break;
    case kWasmF32:
      assm->movss(dst, src.fp());
      break;
    case kWasmF64:
      assm->movsd(dst, src.fp());
      break;
    default:
      UNREACHABLE();
  }
}
inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
  switch (type) {
    case kWasmI32:
      assm->push(reg.gp());
      break;
    case kWasmI64:
      assm->push(reg.high_gp());
      assm->push(reg.low_gp());
      break;
    case kWasmF32:
      assm->sub(esp, Immediate(sizeof(float)));
      assm->movss(Operand(esp, 0), reg.fp());
      break;
    case kWasmF64:
      assm->sub(esp, Immediate(sizeof(double)));
      assm->movsd(Operand(esp, 0), reg.fp());
      break;
    default:
      UNREACHABLE();
  }
}
template <typename... Regs>
inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
  for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
    if (assm->cache_state()->is_used(r)) assm->SpillRegister(r);
  }
}
inline void SignExtendI32ToI64(Assembler* assm, LiftoffRegister reg) {
  assm->mov(reg.high_gp(), reg.low_gp());
  assm->sar(reg.high_gp(), 31);
}
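// Copying the low word into the high word and arithmetically shifting by 31
// fills the high word with copies of the low word's sign bit, e.g.
// low = 0x00000001 -> high = 0x00000000, low = 0xFFFFFFFE (-2) -> high =
// 0xFFFFFFFF. This is the i32 -> i64 sign extension used by the I64Load*S
// cases and the sign-extension opcodes below.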
constexpr DoubleRegister kScratchDoubleReg = xmm7;

constexpr int kSubSpSize = 6;  // 6 bytes for "sub esp, <imm32>".
int LiftoffAssembler::PrepareStackFrame() {
  int offset = pc_offset();
  sub_sp_32(0);
  DCHECK_EQ(liftoff::kSubSpSize, pc_offset() - offset);
  return offset;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
                                              uint32_t stack_slots) {
  uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
  DCHECK_LE(bytes, kMaxInt);
  // We can't run out of space; just pass a buffer size big enough so that the
  // assembler does not try to grow the buffer.
  constexpr int kAvailableSpace = 64;
  Assembler patching_assembler(AssemblerOptions{}, buffer_ + offset,
                               kAvailableSpace);
  constexpr int kPageSize = 4 * 1024;
  if (bytes > kPageSize) {
    // Frames bigger than a page need out-of-line code for the stack setup.
    // Patch a jump to that code (emitted below, at the current end of the
    // function) into the placeholder at the function start.
    int ool_offset = pc_offset() - offset;
    patching_assembler.jmp_rel(ool_offset);
    DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
    patching_assembler.Nop(liftoff::kSubSpSize -
                           patching_assembler.pc_offset());

    AllocateStackFrame(edi);

    // Jump back to right after the frame-setup placeholder at the function
    // start.
    int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
    jmp_rel(func_start_offset);
    return;
  }

  patching_assembler.sub_sp_32(bytes);
  DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
}
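// How the frame-setup patching works (summary of the two functions above):
// {PrepareStackFrame} only reserves {kSubSpSize} bytes of code space at the
// function start. Once the total number of spill slots is known,
// {PatchPrepareStackFrame} overwrites that placeholder, normally with a single
// "sub esp, <frame size>" of exactly kSubSpSize bytes. For example, 10 stack
// slots give bytes = 8 + 8 * 10 = 88, assuming kStackSlotSize == 8. Frames
// larger than a page are instead set up by out-of-line code at the end of the
// function, reached via the patched-in jump.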
void LiftoffAssembler::FinishCode() {}

void LiftoffAssembler::AbortCompilation() {}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                    RelocInfo::Mode rmode) {
  switch (value.type()) {
    case kWasmI32:
      TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
      break;
    case kWasmI64: {
      DCHECK(RelocInfo::IsNone(rmode));
      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
      TurboAssembler::Move(reg.low_gp(), Immediate(low_word));
      TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
      break;
    }
    case kWasmF32:
      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
      break;
    case kWasmF64:
      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
                                        int size) {
  DCHECK_LE(offset, kMaxInt);
  mov(dst, liftoff::GetInstanceOperand());
  DCHECK_EQ(4, size);
  mov(dst, Operand(dst, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
  mov(liftoff::GetInstanceOperand(), instance);
}

void LiftoffAssembler::FillInstanceInto(Register dst) {
  mov(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                            Register offset_reg, uint32_t offset_imm,
                            LoadType type, LiftoffRegList pinned,
                            uint32_t* protected_load_pc, bool is_load_mem) {
  DCHECK_EQ(type.value_type() == kWasmI64, dst.is_pair());
  // Wasm memory is limited to less than 2GB, so the offset always fits into a
  // signed 31-bit value and the bit_cast below is safe.
  Operand src_op = offset_reg == no_reg
                       ? Operand(src_addr, bit_cast<int32_t>(offset_imm))
                       : Operand(src_addr, offset_reg, times_1, offset_imm);
  if (protected_load_pc) *protected_load_pc = pc_offset();

  switch (type.value()) {
    case LoadType::kI32Load8U:
      movzx_b(dst.gp(), src_op);
      break;
    case LoadType::kI32Load8S:
      movsx_b(dst.gp(), src_op);
      break;
    case LoadType::kI64Load8U:
      movzx_b(dst.low_gp(), src_op);
      xor_(dst.high_gp(), dst.high_gp());
      break;
    case LoadType::kI64Load8S:
      movsx_b(dst.low_gp(), src_op);
      liftoff::SignExtendI32ToI64(this, dst);
      break;
    case LoadType::kI32Load16U:
      movzx_w(dst.gp(), src_op);
      break;
    case LoadType::kI32Load16S:
      movsx_w(dst.gp(), src_op);
      break;
    case LoadType::kI64Load16U:
      movzx_w(dst.low_gp(), src_op);
      xor_(dst.high_gp(), dst.high_gp());
      break;
    case LoadType::kI64Load16S:
      movsx_w(dst.low_gp(), src_op);
      liftoff::SignExtendI32ToI64(this, dst);
      break;
    case LoadType::kI32Load:
      mov(dst.gp(), src_op);
      break;
    case LoadType::kI64Load32U:
      mov(dst.low_gp(), src_op);
      xor_(dst.high_gp(), dst.high_gp());
      break;
    case LoadType::kI64Load32S:
      mov(dst.low_gp(), src_op);
      liftoff::SignExtendI32ToI64(this, dst);
      break;
    case LoadType::kI64Load: {
      // Compute the operand for the load of the upper half.
      Operand upper_src_op =
          offset_reg == no_reg
              ? Operand(src_addr, bit_cast<int32_t>(offset_imm + 4))
              : Operand(src_addr, offset_reg, times_1, offset_imm + 4);
      // Load the high word first: it is the access that can be out of bounds,
      // so {protected_load_pc} (recorded above) must point at it.
      mov(dst.high_gp(), upper_src_op);
      mov(dst.low_gp(), src_op);
      break;
    }
    case LoadType::kF32Load:
      movss(dst.fp(), src_op);
      break;
    case LoadType::kF64Load:
      movsd(dst.fp(), src_op);
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                             uint32_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem) {
  DCHECK_EQ(type.value_type() == kWasmI64, src.is_pair());
  // As in {Load}, the offset is known to fit into a signed 31-bit value.
  Operand dst_op = offset_reg == no_reg
                       ? Operand(dst_addr, bit_cast<int32_t>(offset_imm))
                       : Operand(dst_addr, offset_reg, times_1, offset_imm);
  if (protected_store_pc) *protected_store_pc = pc_offset();

  switch (type.value()) {
    case StoreType::kI64Store8:
      src = src.low();
      V8_FALLTHROUGH;
    case StoreType::kI32Store8:
      // Only the lower four registers can be addressed as 8-bit registers.
      if (src.gp().is_byte_register()) {
        mov_b(dst_op, src.gp());
      } else {
        // Copy the value to a byte register first; {dst_addr} and
        // {offset_reg} must stay usable for the address computation.
        LiftoffRegList pinned_byte = pinned | LiftoffRegList::ForRegs(dst_addr);
        if (offset_reg != no_reg) pinned_byte.set(offset_reg);
        Register byte_src =
            GetUnusedRegister(liftoff::kByteRegs, pinned_byte).gp();
        mov(byte_src, src.gp());
        mov_b(dst_op, byte_src);
      }
      break;
    case StoreType::kI64Store16:
      src = src.low();
      V8_FALLTHROUGH;
    case StoreType::kI32Store16:
      mov_w(dst_op, src.gp());
      break;
    case StoreType::kI64Store32:
      src = src.low();
      V8_FALLTHROUGH;
    case StoreType::kI32Store:
      mov(dst_op, src.gp());
      break;
    case StoreType::kI64Store: {
      // Compute the operand for the store of the upper half.
      Operand upper_dst_op =
          offset_reg == no_reg
              ? Operand(dst_addr, bit_cast<int32_t>(offset_imm + 4))
              : Operand(dst_addr, offset_reg, times_1, offset_imm + 4);
      // Store the high word first, so that {protected_store_pc} points at the
      // access that can trap.
      mov(upper_dst_op, src.high_gp());
      mov(dst_op, src.low_gp());
      break;
    }
    case StoreType::kF32Store:
      movss(dst_op, src.fp());
      break;
    case StoreType::kF64Store:
      movsd(dst_op, src.fp());
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                           uint32_t caller_slot_idx,
                                           ValueType type) {
  liftoff::Load(this, dst, ebp, kPointerSize * (caller_slot_idx + 1), type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
                                      ValueType type) {
  DCHECK_NE(dst_index, src_index);
  if (cache_state_.has_unused_register(kGpReg)) {
    LiftoffRegister reg = GetUnusedRegister(kGpReg);
    Fill(reg, src_index, type);
    Spill(dst_index, reg, type);
  } else {
    push(liftoff::GetStackSlot(src_index));
    pop(liftoff::GetStackSlot(dst_index));
  }
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
  DCHECK_NE(dst, src);
  DCHECK_EQ(kWasmI32, type);
  mov(dst, src);
}

void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                            ValueType type) {
  DCHECK_NE(dst, src);
  if (type == kWasmF32) {
    movss(dst, src);
  } else {
    DCHECK_EQ(kWasmF64, type);
    movsd(dst, src);
  }
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
                             ValueType type) {
  RecordUsedSpillSlot(index);
  Operand dst = liftoff::GetStackSlot(index);
  switch (type) {
    case kWasmI32:
      mov(dst, reg.gp());
      break;
    case kWasmI64:
      mov(dst, reg.low_gp());
      mov(liftoff::GetHalfStackSlot(2 * index - 1), reg.high_gp());
      break;
    case kWasmF32:
      movss(dst, reg.fp());
      break;
    case kWasmF64:
      movsd(dst, reg.fp());
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
  RecordUsedSpillSlot(index);
  Operand dst = liftoff::GetStackSlot(index);
  switch (value.type()) {
    case kWasmI32:
      mov(dst, Immediate(value.to_i32()));
      break;
    case kWasmI64: {
      int32_t low_word = value.to_i64();
      int32_t high_word = value.to_i64() >> 32;
      mov(dst, Immediate(low_word));
      mov(liftoff::GetHalfStackSlot(2 * index - 1), Immediate(high_word));
      break;
    }
    default:
      // We do not track f32 and f64 constants, hence they are unreachable.
      UNREACHABLE();
  }
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
                            ValueType type) {
  Operand src = liftoff::GetStackSlot(index);
  switch (type) {
    case kWasmI32:
      mov(reg.gp(), src);
      break;
    case kWasmI64:
      mov(reg.low_gp(), src);
      mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index - 1));
      break;
    case kWasmF32:
      movss(reg.fp(), src);
      break;
    case kWasmF64:
      movsd(reg.fp(), src);
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
  mov(reg, liftoff::GetHalfStackSlot(half_index));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
  if (lhs != dst) {
    lea(dst, Operand(lhs, rhs, times_1, 0));
  } else {
    add(dst, rhs);
  }
}

void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
  if (dst == rhs) {
    neg(dst);
    add(dst, lhs);
  } else {
    if (dst != lhs) mov(dst, lhs);
    sub(dst, rhs);
  }
}
template <void (Assembler::*op)(Register, Register)>
void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
                          Register rhs) {
  if (dst == rhs) {
    (assm->*op)(dst, lhs);
  } else {
    if (dst != lhs) assm->mov(dst, lhs);
    (assm->*op)(dst, rhs);
  }
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
  liftoff::EmitCommutativeBinOp<&Assembler::imul>(this, dst, lhs, rhs);
}
enum class DivOrRem : uint8_t { kDiv, kRem };
template <bool is_signed, DivOrRem div_or_rem>
void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
                       Register rhs, Label* trap_div_by_zero,
                       Label* trap_div_unrepresentable) {
  constexpr bool needs_unrepresentable_check =
      is_signed && div_or_rem == DivOrRem::kDiv;
  constexpr bool special_case_minus_1 =
      is_signed && div_or_rem == DivOrRem::kRem;
  DCHECK_EQ(needs_unrepresentable_check, trap_div_unrepresentable != nullptr);

  // The dividend is always taken from {edx:eax}, so make sure these registers
  // hold no other values. If {rhs} is stored in one of them, move it to
  // another temporary register first.
  liftoff::SpillRegisters(assm, eax, edx);
  if (rhs == eax || rhs == edx) {
    LiftoffRegList unavailable = LiftoffRegList::ForRegs(eax, edx, lhs);
    Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
    assm->mov(tmp, rhs);
    rhs = tmp;
  }

  // Check for division by zero.
  assm->test(rhs, rhs);
  assm->j(zero, trap_div_by_zero);

  Label done;
  if (needs_unrepresentable_check) {
    // {kMinInt / -1} is unrepresentable; trap before executing the division.
    Label do_div;
    assm->cmp(rhs, -1);
    assm->j(not_equal, &do_div);
    assm->cmp(lhs, kMinInt);
    assm->j(equal, trap_div_unrepresentable);
    assm->bind(&do_div);
  } else if (special_case_minus_1) {
    // {lhs % -1} is always 0; special-case it to avoid the {kMinInt / -1}
    // overflow inside idiv.
    Label do_rem;
    assm->cmp(rhs, -1);
    assm->j(not_equal, &do_rem);
    assm->xor_(dst, dst);
    assm->jmp(&done);
    assm->bind(&do_rem);
  }

  // Move {lhs} into {eax}, extend it into {edx}, then divide.
  if (lhs != eax) assm->mov(eax, lhs);
  if (is_signed) {
    assm->cdq();
    assm->idiv(rhs);
  } else {
    assm->xor_(edx, edx);
    assm->div(rhs);
  }

  // Move the result (quotient in {eax}, remainder in {edx}) into {dst}.
  constexpr Register kResultReg = div_or_rem == DivOrRem::kDiv ? eax : edx;
  if (dst != kResultReg) assm->mov(dst, kResultReg);
  if (special_case_minus_1) assm->bind(&done);
}
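// Background on the helper above: on ia32, idiv/div take the dividend in
// edx:eax and leave the quotient in eax and the remainder in edx, which is why
// eax and edx are spilled unconditionally. Two special cases follow from wasm
// semantics: kMinInt / -1 (== 2^31) does not fit into an i32 and must trap for
// signed division, and kMinInt % -1 is defined to be 0, so signed remainder
// short-circuits a divisor of -1 to avoid the idiv overflow fault.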
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  liftoff::EmitInt32DivOrRem<true, liftoff::DivOrRem::kDiv>(
      this, dst, lhs, rhs, trap_div_by_zero, trap_div_unrepresentable);
}

void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  liftoff::EmitInt32DivOrRem<false, liftoff::DivOrRem::kDiv>(
      this, dst, lhs, rhs, trap_div_by_zero, nullptr);
}

void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  liftoff::EmitInt32DivOrRem<true, liftoff::DivOrRem::kRem>(
      this, dst, lhs, rhs, trap_div_by_zero, nullptr);
}

void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  liftoff::EmitInt32DivOrRem<false, liftoff::DivOrRem::kRem>(
      this, dst, lhs, rhs, trap_div_by_zero, nullptr);
}
void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
  liftoff::EmitCommutativeBinOp<&Assembler::and_>(this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
  liftoff::EmitCommutativeBinOp<&Assembler::or_>(this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
  liftoff::EmitCommutativeBinOp<&Assembler::xor_>(this, dst, lhs, rhs);
}
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
                               Register src, Register amount,
                               void (Assembler::*emit_shift)(Register),
                               LiftoffRegList pinned) {
  pinned.set(dst);
  pinned.set(src);
  pinned.set(amount);
  // If {dst} is ecx, compute into a temporary register first, then move the
  // result into ecx.
  if (dst == ecx) {
    Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
    assm->mov(tmp, src);
    if (amount != ecx) assm->mov(ecx, amount);
    (assm->*emit_shift)(tmp);
    assm->mov(ecx, tmp);
    return;
  }

  // Move the shift amount into ecx. If ecx is in use, move its content into a
  // temporary register first (and point {src} at it if {src} was ecx).
  Register tmp_reg = no_reg;
  if (amount != ecx) {
    if (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
        pinned.has(LiftoffRegister(ecx))) {
      tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
      assm->mov(tmp_reg, ecx);
      if (src == ecx) src = tmp_reg;
    }
    assm->mov(ecx, amount);
  }

  // Do the actual shift.
  if (dst != src) assm->mov(dst, src);
  (assm->*emit_shift)(dst);

  // Restore ecx if needed.
  if (tmp_reg.is_valid()) assm->mov(ecx, tmp_reg);
}
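// The register juggling above exists because variable shift instructions on
// ia32 (shl, sar, shr with a register count) only accept the count in cl. The
// helper therefore moves {amount} into ecx, saving and restoring ecx (or
// computing into a temporary when {dst} is ecx) so that cached register
// contents stay valid.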
void LiftoffAssembler::emit_i32_shl(Register dst, Register src, Register amount,
                                    LiftoffRegList pinned) {
  liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::shl_cl,
                              pinned);
}

void LiftoffAssembler::emit_i32_sar(Register dst, Register src, Register amount,
                                    LiftoffRegList pinned) {
  liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::sar_cl,
                              pinned);
}

void LiftoffAssembler::emit_i32_shr(Register dst, Register src, Register amount,
                                    LiftoffRegList pinned) {
  liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::shr_cl,
                              pinned);
}
void LiftoffAssembler::emit_i32_shr(Register dst, Register src, int amount) {
  if (dst != src) mov(dst, src);
  DCHECK(is_uint5(amount));
  shr(dst, amount);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
  Label nonzero_input;
  Label continuation;
  test(src, src);
  j(not_zero, &nonzero_input, Label::kNear);
  mov(dst, Immediate(32));
  jmp(&continuation, Label::kNear);

  bind(&nonzero_input);
  // Get the position of the most significant bit set; clz is 31 minus that
  // position, which equals (position ^ 31).
  bsr(dst, src);
  xor_(dst, 31);

  bind(&continuation);
  return true;
}
bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
  Label nonzero_input;
  Label continuation;
  test(src, src);
  j(not_zero, &nonzero_input, Label::kNear);
  mov(dst, Immediate(32));
  jmp(&continuation, Label::kNear);

  bind(&nonzero_input);
  // The position of the least significant bit set equals the number of
  // trailing zeros.
  bsf(dst, src);

  bind(&continuation);
  return true;
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
  if (!CpuFeatures::IsSupported(POPCNT)) return false;
  CpuFeatureScope scope(this, POPCNT);
  popcnt(dst, src);
  return true;
}
template <void (Assembler::*op)(Register, Register),
          void (Assembler::*op_with_carry)(Register, Register)>
inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
                        LiftoffRegister lhs, LiftoffRegister rhs) {
  // First, compute the low half of the result. Use a temporary register if the
  // low half of {dst} overlaps a register still needed for the high half.
  LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp(), rhs);
  Register dst_low = keep_alive.has(dst.low_gp())
                         ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
                         : dst.low_gp();

  if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
  (assm->*op)(dst_low, rhs.low_gp());

  // Now compute the high half, keeping alive the low half just computed.
  keep_alive = LiftoffRegList::ForRegs(dst_low, rhs.high_gp());
  Register dst_high = keep_alive.has(dst.high_gp())
                          ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
                          : dst.high_gp();

  if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
  (assm->*op_with_carry)(dst_high, rhs.high_gp());

  // If necessary, move the result into the expected register pair.
  LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
  if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
}
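// Two-word arithmetic: the low halves are combined with the plain op (add/sub)
// and the high halves with the carry-using op (adc/sbb), so the carry/borrow
// of the low half propagates into the high half. Example for i64.add:
// 0x00000000FFFFFFFF + 1 -> low: 0xFFFFFFFF + 1 = 0 with carry set, high:
// 0 + 0 + carry = 1, i.e. the result 0x0000000100000000.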
void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::OpWithCarry<&Assembler::sub, &Assembler::sbb>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  // Fixed register assignment for the 32x32 -> 64 bit decomposition below.
  Register dst_hi = edx;
  Register dst_lo = eax;
  Register lhs_hi = ecx;
  Register lhs_lo = dst_lo;
  Register rhs_hi = dst_hi;
  Register rhs_lo = esi;

  // Spill all these registers if they are still holding other values.
  liftoff::SpillRegisters(this, dst_hi, dst_lo, lhs_hi, rhs_lo);

  // Move {lhs} and {rhs} into the fixed registers, resolving overlaps.
  ParallelRegisterMoveTuple reg_moves[]{
      {LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
      {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}};
  ParallelRegisterMove(ArrayVector(reg_moves));

  // First cross product: lhs_hi * rhs_lo.
  imul(lhs_hi, rhs_lo);
  // Second cross product: rhs_hi * lhs_lo.
  imul(rhs_hi, lhs_lo);
  // Sum of the cross products (only their low 32 bits matter).
  add(lhs_hi, rhs_hi);
  // Full 64-bit product of the low words: edx:eax = eax * esi.
  mul(rhs_lo);
  // Add the cross products into the high word of the result.
  add(dst_hi, lhs_hi);

  // Finally, move the temporary result into the actual dst register pair.
  LiftoffRegister dst_tmp = LiftoffRegister::ForPair(dst_lo, dst_hi);
  if (dst != dst_tmp) Move(dst, dst_tmp, kWasmI64);
}
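// The decomposition used above: with lhs = lhs_hi * 2^32 + lhs_lo and
// rhs = rhs_hi * 2^32 + rhs_lo, the low 64 bits of the product are
//   lhs_lo * rhs_lo + ((lhs_hi * rhs_lo + lhs_lo * rhs_hi) << 32),
// since lhs_hi * rhs_hi only affects bits >= 64. The single "mul" computes the
// full 64-bit lhs_lo * rhs_lo into edx:eax, and the two "imul"s provide the
// cross terms that are added into the high word.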
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  return false;
}

bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  return false;
}

bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  return false;
}

bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  return false;
}
inline bool PairContains(LiftoffRegister pair, Register reg) {
  return pair.low_gp() == reg || pair.high_gp() == reg;
}

inline LiftoffRegister ReplaceInPair(LiftoffRegister pair, Register old_reg,
                                     Register new_reg) {
  if (pair.low_gp() == old_reg) {
    return LiftoffRegister::ForPair(new_reg, pair.high_gp());
  }
  if (pair.high_gp() == old_reg) {
    return LiftoffRegister::ForPair(pair.low_gp(), new_reg);
  }
  return pair;
}
inline void Emit64BitShiftOperation(
    LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
    Register amount, void (TurboAssembler::*emit_shift)(Register, Register),
    LiftoffRegList pinned) {
  pinned.set(dst);
  pinned.set(src);
  pinned.set(amount);
  std::vector<LiftoffAssembler::ParallelRegisterMoveTuple> reg_moves;

  // If {dst} contains ecx, replace it by an unused register, which is then
  // moved to ecx in the end.
  Register ecx_replace = no_reg;
  if (PairContains(dst, ecx)) {
    ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
    dst = ReplaceInPair(dst, ecx, ecx_replace);
    // If {amount} needs to be moved to ecx, but ecx is in use (and not part of
    // {dst}), then save ecx in a free register and restore it after the shift.
  } else if (amount != ecx &&
             (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
              pinned.has(LiftoffRegister(ecx)))) {
    ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
    reg_moves.emplace_back(ecx_replace, ecx, kWasmI32);
  }

  reg_moves.emplace_back(dst, src, kWasmI64);
  reg_moves.emplace_back(ecx, amount, kWasmI32);
  assm->ParallelRegisterMove({reg_moves.data(), reg_moves.size()});

  // Do the actual shift.
  (assm->*emit_shift)(dst.high_gp(), dst.low_gp());

  // Restore ecx if needed.
  if (ecx_replace != no_reg) assm->mov(ecx, ecx_replace);
}
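// ShlPair_cl / SarPair_cl / ShrPair_cl shift a 64-bit value held in a register
// pair by the count in cl. The parallel register move resolves overlaps
// between {dst}, {src}, {amount} and ecx in one step, so no value is
// clobbered before it has been read.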
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
                                    Register amount, LiftoffRegList pinned) {
  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
                                   &TurboAssembler::ShlPair_cl, pinned);
}

void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
                                    Register amount, LiftoffRegList pinned) {
  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
                                   &TurboAssembler::SarPair_cl, pinned);
}

void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                                    Register amount, LiftoffRegList pinned) {
  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
                                   &TurboAssembler::ShrPair_cl, pinned);
}
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                                    int amount) {
  if (dst != src) Move(dst, src, kWasmI64);
  DCHECK(is_uint6(amount));
  ShrPair(dst.high_gp(), dst.low_gp(), amount);
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
  // This is a nop on ia32.
}
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vaddss(dst, lhs, rhs);
  } else if (dst == rhs) {
    addss(dst, lhs);
  } else {
    if (dst != lhs) movss(dst, lhs);
    addss(dst, rhs);
  }
}
void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vsubss(dst, lhs, rhs);
  } else if (dst == rhs) {
    movss(liftoff::kScratchDoubleReg, rhs);
    movss(dst, lhs);
    subss(dst, liftoff::kScratchDoubleReg);
  } else {
    if (dst != lhs) movss(dst, lhs);
    subss(dst, rhs);
  }
}
void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vmulss(dst, lhs, rhs);
  } else if (dst == rhs) {
    mulss(dst, lhs);
  } else {
    if (dst != lhs) movss(dst, lhs);
    mulss(dst, rhs);
  }
}
void LiftoffAssembler::emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vdivss(dst, lhs, rhs);
  } else if (dst == rhs) {
    movss(liftoff::kScratchDoubleReg, rhs);
    movss(dst, lhs);
    divss(dst, liftoff::kScratchDoubleReg);
  } else {
    if (dst != lhs) movss(dst, lhs);
    divss(dst, rhs);
  }
}
enum class MinOrMax : uint8_t { kMin, kMax };
template <typename type>
inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,
                              DoubleRegister lhs, DoubleRegister rhs,
                              MinOrMax min_or_max) {
  Label is_nan;
  Label lhs_below_rhs;
  Label lhs_above_rhs;
  Label done;

  // We need one tmp register to extract the sign bit. Get it right at the
  // beginning, such that the spilling code is not accidentally jumped over.
  Register tmp = assm->GetUnusedRegister(kGpReg).gp();

#define dop(name, ...)            \
  do {                            \
    if (sizeof(type) == 4) {      \
      assm->name##s(__VA_ARGS__); \
    } else {                      \
      assm->name##d(__VA_ARGS__); \
    }                             \
  } while (false)

  // Check the easy cases first: nan (unordered), smaller and greater.
  // NaN has to be checked first, because PF=1 implies CF=1.
  dop(ucomis, lhs, rhs);
  assm->j(parity_even, &is_nan, Label::kNear);   // PF=1
  assm->j(below, &lhs_below_rhs, Label::kNear);  // CF=1
  assm->j(above, &lhs_above_rhs, Label::kNear);  // CF=0 && ZF=0

  // If we get here, either {lhs == rhs} or the operands are +0.0 and -0.0 in
  // some order. Check the sign bit of {rhs} to tell those apart.
  dop(movmskp, tmp, rhs);
  assm->test(tmp, Immediate(1));
  assm->j(zero, &lhs_below_rhs, Label::kNear);
  assm->jmp(&lhs_above_rhs, Label::kNear);

  assm->bind(&is_nan);
  // Create a NaN output.
  dop(xorp, dst, dst);
  dop(divs, dst, dst);
  assm->jmp(&done, Label::kNear);

  assm->bind(&lhs_below_rhs);
  DoubleRegister lhs_below_rhs_src = min_or_max == MinOrMax::kMin ? lhs : rhs;
  if (dst != lhs_below_rhs_src) dop(movs, dst, lhs_below_rhs_src);
  assm->jmp(&done, Label::kNear);

  assm->bind(&lhs_above_rhs);
  DoubleRegister lhs_above_rhs_src = min_or_max == MinOrMax::kMin ? rhs : lhs;
  if (dst != lhs_above_rhs_src) dop(movs, dst, lhs_above_rhs_src);

  assm->bind(&done);
#undef dop
}
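// Why min/max needs this manual sequence: wasm requires f32.min/f32.max (and
// the f64 variants) to return NaN if either input is NaN and to treat -0.0 as
// smaller than +0.0, which minss/maxss alone do not guarantee. ucomis* sets PF
// for unordered (NaN) inputs; the equal case distinguishes -0.0 from +0.0 by
// testing the sign bit of {rhs} via movmskp*, and the NaN result is
// materialized as 0.0 / 0.0.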
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  liftoff::EmitFloatMinOrMax<float>(this, dst, lhs, rhs,
                                    liftoff::MinOrMax::kMin);
}

void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  liftoff::EmitFloatMinOrMax<float>(this, dst, lhs, rhs,
                                    liftoff::MinOrMax::kMax);
}
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
                                         DoubleRegister rhs) {
  static constexpr int kF32SignBit = 1 << 31;
  Register scratch = GetUnusedRegister(kGpReg).gp();
  Register scratch2 =
      GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
  Movd(scratch, lhs);                      // move {lhs} into {scratch}.
  and_(scratch, Immediate(~kF32SignBit));  // clear sign bit in {scratch}.
  Movd(scratch2, rhs);                     // move {rhs} into {scratch2}.
  and_(scratch2, Immediate(kF32SignBit));  // isolate sign bit in {scratch2}.
  or_(scratch, scratch2);                  // combine {scratch2} into {scratch}.
  Movd(dst, scratch);                      // move the result into {dst}.
}
void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
  static constexpr uint32_t kSignBit = uint32_t{1} << 31;
  if (dst == src) {
    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
    Andps(dst, liftoff::kScratchDoubleReg);
  } else {
    TurboAssembler::Move(dst, kSignBit - 1);
    Andps(dst, src);
  }
}

void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
  static constexpr uint32_t kSignBit = uint32_t{1} << 31;
  if (dst == src) {
    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
    Xorps(dst, liftoff::kScratchDoubleReg);
  } else {
    TurboAssembler::Move(dst, kSignBit);
    Xorps(dst, src);
  }
}
bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope feature(this, SSE4_1);
    roundss(dst, src, kRoundUp);
    return true;
  }
  return false;
}

bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope feature(this, SSE4_1);
    roundss(dst, src, kRoundDown);
    return true;
  }
  return false;
}

bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope feature(this, SSE4_1);
    roundss(dst, src, kRoundToZero);
    return true;
  }
  return false;
}

bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
                                            DoubleRegister src) {
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope feature(this, SSE4_1);
    roundss(dst, src, kRoundToNearest);
    return true;
  }
  return false;
}
void LiftoffAssembler::emit_f32_sqrt(DoubleRegister dst, DoubleRegister src) {
  Sqrtss(dst, src);
}
void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vaddsd(dst, lhs, rhs);
  } else if (dst == rhs) {
    addsd(dst, lhs);
  } else {
    if (dst != lhs) movsd(dst, lhs);
    addsd(dst, rhs);
  }
}

void LiftoffAssembler::emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vsubsd(dst, lhs, rhs);
  } else if (dst == rhs) {
    movsd(liftoff::kScratchDoubleReg, rhs);
    movsd(dst, lhs);
    subsd(dst, liftoff::kScratchDoubleReg);
  } else {
    if (dst != lhs) movsd(dst, lhs);
    subsd(dst, rhs);
  }
}

void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vmulsd(dst, lhs, rhs);
  } else if (dst == rhs) {
    mulsd(dst, lhs);
  } else {
    if (dst != lhs) movsd(dst, lhs);
    mulsd(dst, rhs);
  }
}

void LiftoffAssembler::emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vdivsd(dst, lhs, rhs);
  } else if (dst == rhs) {
    movsd(liftoff::kScratchDoubleReg, rhs);
    movsd(dst, lhs);
    divsd(dst, liftoff::kScratchDoubleReg);
  } else {
    if (dst != lhs) movsd(dst, lhs);
    divsd(dst, rhs);
  }
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  liftoff::EmitFloatMinOrMax<double>(this, dst, lhs, rhs,
                                     liftoff::MinOrMax::kMin);
}
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
                                         DoubleRegister rhs) {
  // The f64 sign bit sits in the upper 32-bit word, so the same bit mask as
  // for f32 works; we only touch the upper half of each value.
  static constexpr int kF32SignBit = 1 << 31;
  Register scratch = GetUnusedRegister(kGpReg).gp();
  Register scratch2 =
      GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();

  Pextrd(scratch, lhs, 1);                 // move {lhs[63:32]} into {scratch}.
  and_(scratch, Immediate(~kF32SignBit));  // clear sign bit in {scratch}.
  Pextrd(scratch2, rhs, 1);                // move {rhs[63:32]} into {scratch2}.
  and_(scratch2, Immediate(kF32SignBit));  // isolate sign bit in {scratch2}.
  or_(scratch, scratch2);                  // combine {scratch2} into {scratch}.
  if (dst != lhs) movsd(dst, lhs);         // move {lhs} into {dst}.
  Pinsrd(dst, scratch, 1);                 // insert {scratch} into {dst[63:32]}.
}
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
                                    DoubleRegister rhs) {
  liftoff::EmitFloatMinOrMax<double>(this, dst, lhs, rhs,
                                     liftoff::MinOrMax::kMax);
}
void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
  static constexpr uint64_t kSignBit = uint64_t{1} << 63;
  if (dst == src) {
    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1);
    Andpd(dst, liftoff::kScratchDoubleReg);
  } else {
    TurboAssembler::Move(dst, kSignBit - 1);
    Andpd(dst, src);
  }
}

void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
  static constexpr uint64_t kSignBit = uint64_t{1} << 63;
  if (dst == src) {
    TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit);
    Xorpd(dst, liftoff::kScratchDoubleReg);
  } else {
    TurboAssembler::Move(dst, kSignBit);
    Xorpd(dst, src);
  }
}
bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
  REQUIRE_CPU_FEATURE(SSE4_1, true);
  roundsd(dst, src, kRoundUp);
  return true;
}

bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
  REQUIRE_CPU_FEATURE(SSE4_1, true);
  roundsd(dst, src, kRoundDown);
  return true;
}

bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
  REQUIRE_CPU_FEATURE(SSE4_1, true);
  roundsd(dst, src, kRoundToZero);
  return true;
}

bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
                                            DoubleRegister src) {
  REQUIRE_CPU_FEATURE(SSE4_1, true);
  roundsd(dst, src, kRoundToNearest);
  return true;
}
void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
  Sqrtsd(dst, src);
}
template <typename dst_type, typename src_type>
inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
                                     DoubleRegister src,
                                     DoubleRegister converted_back,
                                     LiftoffRegList pinned) {
  if (std::is_same<double, src_type>::value) {  // f64
    if (std::is_signed<dst_type>::value) {      // f64 -> i32
      assm->cvttsd2si(dst, src);
      assm->Cvtsi2sd(converted_back, dst);
    } else {  // f64 -> u32
      assm->Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
      assm->Cvtui2sd(converted_back, dst,
                     assm->GetUnusedRegister(kGpReg, pinned).gp());
    }
  } else {                                  // f32
    if (std::is_signed<dst_type>::value) {  // f32 -> i32
      assm->cvttss2si(dst, src);
      assm->Cvtsi2ss(converted_back, dst);
    } else {  // f32 -> u32
      assm->Cvttss2ui(dst, src, liftoff::kScratchDoubleReg);
      assm->Cvtui2ss(converted_back, dst,
                     assm->GetUnusedRegister(kGpReg, pinned).gp());
    }
  }
}
template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
                                   DoubleRegister src, Label* trap) {
  if (!CpuFeatures::IsSupported(SSE4_1)) {
    assm->bailout("no SSE4.1");
    return true;
  }
  CpuFeatureScope feature(assm, SSE4_1);

  LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
  DoubleRegister rounded =
      pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
  DoubleRegister converted_back =
      pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();

  if (std::is_same<double, src_type>::value) {  // f64
    assm->roundsd(rounded, src, kRoundToZero);
  } else {  // f32
    assm->roundss(rounded, src, kRoundToZero);
  }
  ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
                                               converted_back, pinned);
  if (std::is_same<double, src_type>::value) {  // f64
    assm->ucomisd(converted_back, rounded);
  } else {  // f32
    assm->ucomiss(converted_back, rounded);
  }

  // Jump to trap if the values are unordered (NaN) or not equal.
  assm->j(parity_even, trap);
  assm->j(not_equal, trap);
  return true;
}
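// The trap check above works by a round trip: truncate toward zero in the
// float domain, convert to integer, convert back, and compare the two float
// values with ucomis*. If the source was NaN or out of range for the target
// type, the comparison is unordered (PF set) or unequal and execution jumps to
// {trap}; otherwise the integer left in {dst} is exact.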
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
  switch (opcode) {
    case kExprI32ConvertI64:
      if (dst.gp() != src.low_gp()) mov(dst.gp(), src.low_gp());
      return true;
    case kExprI32SConvertF32:
      return liftoff::EmitTruncateFloatToInt<int32_t, float>(this, dst.gp(),
                                                             src.fp(), trap);
    case kExprI32UConvertF32:
      return liftoff::EmitTruncateFloatToInt<uint32_t, float>(this, dst.gp(),
                                                              src.fp(), trap);
    case kExprI32SConvertF64:
      return liftoff::EmitTruncateFloatToInt<int32_t, double>(this, dst.gp(),
                                                              src.fp(), trap);
    case kExprI32UConvertF64:
      return liftoff::EmitTruncateFloatToInt<uint32_t, double>(this, dst.gp(),
                                                               src.fp(), trap);
    case kExprI32ReinterpretF32:
      Movd(dst.gp(), src.fp());
      return true;
    case kExprI64SConvertI32:
      if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
      if (dst.high_gp() != src.gp()) mov(dst.high_gp(), src.gp());
      sar(dst.high_gp(), 31);
      return true;
    case kExprI64UConvertI32:
      if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
      xor_(dst.high_gp(), dst.high_gp());
      return true;
    case kExprI64ReinterpretF64:
      // Push the f64 value to the stack, then pop the two words into the
      // destination register pair.
      sub(esp, Immediate(8));
      movsd(Operand(esp, 0), src.fp());
      pop(dst.low_gp());
      pop(dst.high_gp());
      return true;
    case kExprF32SConvertI32:
      cvtsi2ss(dst.fp(), src.gp());
      return true;
    case kExprF32UConvertI32: {
      LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
      Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
      Cvtui2ss(dst.fp(), src.gp(), scratch);
      return true;
    }
    case kExprF32ConvertF64:
      cvtsd2ss(dst.fp(), src.fp());
      return true;
    case kExprF32ReinterpretI32:
      Movd(dst.fp(), src.gp());
      return true;
    case kExprF64SConvertI32:
      Cvtsi2sd(dst.fp(), src.gp());
      return true;
    case kExprF64UConvertI32: {
      LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
      Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
      Cvtui2sd(dst.fp(), src.gp(), scratch);
      return true;
    }
    case kExprF64ConvertF32:
      cvtss2sd(dst.fp(), src.fp());
      return true;
    case kExprF64ReinterpretI64:
      // Push the register pair to the stack, then load the f64 value from
      // there.
      push(src.high_gp());
      push(src.low_gp());
      movsd(dst.fp(), Operand(esp, 0));
      add(esp, Immediate(8));
      return true;
    default:
      return false;
  }
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
  movsx_b(dst, src);
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
  movsx_w(dst, src);
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
                                              LiftoffRegister src) {
  movsx_b(dst.low_gp(), src.low_gp());
  liftoff::SignExtendI32ToI64(this, dst);
}

void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
                                               LiftoffRegister src) {
  movsx_w(dst.low_gp(), src.low_gp());
  liftoff::SignExtendI32ToI64(this, dst);
}

void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
                                               LiftoffRegister src) {
  if (dst.low_gp() != src.low_gp()) mov(dst.low_gp(), src.low_gp());
  liftoff::SignExtendI32ToI64(this, dst);
}
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }

void LiftoffAssembler::emit_jump(Register target) { jmp(target); }

void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
                                      ValueType type, Register lhs,
                                      Register rhs) {
  if (rhs != no_reg) {
    switch (type) {
      case kWasmI32:
        cmp(lhs, rhs);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    DCHECK_EQ(type, kWasmI32);
    test(lhs, lhs);
  }

  j(cond, label);
}
// Get a temporary byte register, using {candidate} if possible.
// Might spill, but always keeps the status flags intact.
inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
  if (candidate.is_byte_register()) return candidate;
  LiftoffRegList pinned = LiftoffRegList::ForRegs(candidate);
  // {GetUnusedRegister()} may insert move instructions to spill registers to
  // the stack. This is OK because {mov} does not change the status flags.
  return assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
}
// Setcc into {dst}, given a scratch byte register (which is clobbered). Never
// spills.
inline void setcc_32_no_spill(LiftoffAssembler* assm, Condition cond,
                              Register dst, Register tmp_byte_reg) {
  assm->setcc(cond, tmp_byte_reg);
  assm->movzx_b(dst, tmp_byte_reg);
}

// Setcc into {dst} (no constraints). Might spill.
inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
  Register tmp_byte_reg = GetTmpByteRegister(assm, dst);
  setcc_32_no_spill(assm, cond, dst, tmp_byte_reg);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
  test(src, src);
  liftoff::setcc_32(this, equal, dst);
}

void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
                                         Register lhs, Register rhs) {
  cmp(lhs, rhs);
  liftoff::setcc_32(this, cond, dst);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
  // Compute the OR of both halves of {src}, using {dst} as scratch register,
  // then check whether the result is zero.
  if (src.low_gp() == dst) {
    or_(dst, src.high_gp());
  } else {
    if (src.high_gp() != dst) mov(dst, src.high_gp());
    or_(dst, src.low_gp());
  }
  liftoff::setcc_32(this, equal, dst);
}
inline Condition cond_make_unsigned(Condition cond) {
  switch (cond) {
    case kSignedLessThan:
      return kUnsignedLessThan;
    case kSignedLessEqual:
      return kUnsignedLessEqual;
    case kSignedGreaterThan:
      return kUnsignedGreaterThan;
    case kSignedGreaterEqual:
      return kUnsignedGreaterEqual;
    default:
      return cond;
  }
}
void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  // Get the tmp byte register out here, such that we do not conditionally
  // spill (that could not be reflected in the cache state).
  Register tmp_byte_reg = liftoff::GetTmpByteRegister(this, dst);

  // For signed i64 comparisons, the low word must still be compared unsigned;
  // only the MSB of the high word carries the sign.
  Condition unsigned_cond = liftoff::cond_make_unsigned(cond);

  Label setcc;
  Label cont;
  // Compare the high words. If they differ, they decide the result. If they
  // are equal, compare the low words and use that for setcc.
  cmp(lhs.high_gp(), rhs.high_gp());
  j(not_equal, &setcc, Label::kNear);
  cmp(lhs.low_gp(), rhs.low_gp());
  if (unsigned_cond != cond) {
    // The condition for the low word differs from the high-word condition, so
    // emit a separate setcc sequence for the low-word comparison.
    liftoff::setcc_32_no_spill(this, unsigned_cond, dst, tmp_byte_reg);
    jmp(&cont);
  }
  bind(&setcc);
  liftoff::setcc_32_no_spill(this, cond, dst, tmp_byte_reg);
  bind(&cont);
}
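// 64-bit comparisons on a 32-bit target: the high words are compared first;
// if they differ, they decide the result under the original (possibly signed)
// condition. If they are equal, the low words decide, but always under the
// unsigned variant of the condition, because only the MSB of the high word
// carries sign information. Example: -1 (0xFFFFFFFF:FFFFFFFF) < 1
// (0x00000000:00000001) already follows from the signed high-word compare.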
template <void (Assembler::*cmp_op)(DoubleRegister, DoubleRegister)>
void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
                      DoubleRegister lhs, DoubleRegister rhs) {
  Label cont;
  Label not_nan;

  // Get the tmp byte register out here, such that we do not conditionally
  // spill (that could not be reflected in the cache state).
  Register tmp_byte_reg = GetTmpByteRegister(assm, dst);

  (assm->*cmp_op)(lhs, rhs);
  // If PF is one, one of the operands was NaN. This needs special handling.
  assm->j(parity_odd, &not_nan, Label::kNear);
  // Return 1 for "ne", 0 for all other conditions.
  if (cond == not_equal) {
    assm->mov(dst, Immediate(1));
  } else {
    assm->xor_(dst, dst);
  }
  assm->jmp(&cont, Label::kNear);
  assm->bind(&not_nan);

  setcc_32_no_spill(assm, cond, dst, tmp_byte_reg);
  assm->bind(&cont);
}
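// Float compares and NaN: ucomiss/ucomisd report an unordered result (PF set)
// when either operand is NaN. Per wasm semantics, every comparison with a NaN
// input is false except "ne", which is true; that is exactly the value
// materialized on the parity branch above before the normal setcc path.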
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  liftoff::EmitFloatSetCond<&Assembler::ucomiss>(this, cond, dst, lhs, rhs);
}

void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  liftoff::EmitFloatSetCond<&Assembler::ucomisd>(this, cond, dst, lhs, rhs);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
  cmp(esp, Operand(limit_address, 0));
  j(below_equal, ool_code);
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
  TurboAssembler::AssertUnreachable(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
  LiftoffRegList gp_regs = regs & kGpCacheRegList;
  while (!gp_regs.is_empty()) {
    LiftoffRegister reg = gp_regs.GetFirstRegSet();
    push(reg.gp());
    gp_regs.clear(reg);
  }
  LiftoffRegList fp_regs = regs & kFpCacheRegList;
  unsigned num_fp_regs = fp_regs.GetNumRegsSet();
  if (num_fp_regs) {
    sub(esp, Immediate(num_fp_regs * kStackSlotSize));
    unsigned offset = 0;
    while (!fp_regs.is_empty()) {
      LiftoffRegister reg = fp_regs.GetFirstRegSet();
      movsd(Operand(esp, offset), reg.fp());
      fp_regs.clear(reg);
      offset += sizeof(double);
    }
    DCHECK_EQ(offset, num_fp_regs * sizeof(double));
  }
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
  LiftoffRegList fp_regs = regs & kFpCacheRegList;
  unsigned fp_offset = 0;
  while (!fp_regs.is_empty()) {
    LiftoffRegister reg = fp_regs.GetFirstRegSet();
    movsd(reg.fp(), Operand(esp, fp_offset));
    fp_regs.clear(reg);
    fp_offset += sizeof(double);
  }
  if (fp_offset) add(esp, Immediate(fp_offset));
  LiftoffRegList gp_regs = regs & kGpCacheRegList;
  while (!gp_regs.is_empty()) {
    LiftoffRegister reg = gp_regs.GetLastRegSet();
    pop(reg.gp());
    gp_regs.clear(reg);
  }
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
  // The {ret n} instruction only takes a 16-bit immediate.
  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize);
  ret(static_cast<int>(num_stack_slots * kPointerSize));
}
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
                             const LiftoffRegister* args,
                             const LiftoffRegister* rets,
                             ValueType out_argument_type, int stack_bytes,
                             ExternalReference ext_ref) {
  sub(esp, Immediate(stack_bytes));

  int arg_bytes = 0;
  for (ValueType param_type : sig->parameters()) {
    liftoff::Store(this, esp, arg_bytes, *args++, param_type);
    arg_bytes += ValueTypes::MemSize(param_type);
  }
  DCHECK_LE(arg_bytes, stack_bytes);

  constexpr Register kScratch = eax;
  constexpr Register kArgumentBuffer = ecx;
  constexpr int kNumCCallArgs = 1;
  mov(kArgumentBuffer, esp);
  PrepareCallCFunction(kNumCCallArgs, kScratch);

  // Pass a pointer to the argument buffer as the only C argument. ia32 passes
  // C arguments on the stack.
  mov(Operand(esp, 0), kArgumentBuffer);

  // Now call the C function.
  CallCFunction(ext_ref, kNumCCallArgs);

  // Move the return value to the right register.
  const LiftoffRegister* next_result_reg = rets;
  if (sig->return_count() > 0) {
    DCHECK_EQ(1, sig->return_count());
    constexpr Register kReturnReg = eax;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
    }
    ++next_result_reg;
  }

  // Load the potential output value from the buffer on the stack.
  if (out_argument_type != kWasmStmt) {
    liftoff::Load(this, *next_result_reg, esp, 0, out_argument_type);
  }

  add(esp, Immediate(stack_bytes));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
  wasm_call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register target) {
  // Since we have more cache registers than parameter registers, the
  // {LiftoffCompiler} should always be able to place {target} in a register.
  DCHECK(target.is_valid());
  if (FLAG_untrusted_code_mitigations) {
    RetpolineCall(target);
  } else {
    call(target);
  }
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
  // A direct call to a wasm runtime stub defined in this module. Just encode
  // the stub index; this will be patched at relocation.
  wasm_call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
  sub(esp, Immediate(size));
  mov(addr, esp);
}

void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
  add(esp, Immediate(size));
}
void LiftoffStackSlots::Construct() {
  for (auto& slot : slots_) {
    const LiftoffAssembler::VarState& src = slot.src_;
    switch (src.loc()) {
      case LiftoffAssembler::VarState::kStack:
        if (src.type() == kWasmF64) {
          // For f64 values, push the upper half first.
          DCHECK_EQ(kLowWord, slot.half_);
          asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
        }
        asm_->push(liftoff::GetHalfStackSlot(2 * slot.src_index_ -
                                             (slot.half_ == kLowWord ? 0 : 1)));
        break;
      case LiftoffAssembler::VarState::kRegister:
        if (src.type() == kWasmI64) {
          liftoff::push(
              asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
              kWasmI32);
        } else {
          liftoff::push(asm_, src.reg(), src.type());
        }
        break;
      case LiftoffAssembler::VarState::KIntConst:
        // The high word is the sign extension of the low word.
        asm_->push(Immediate(slot.half_ == kLowWord ? src.i32_const()
                                                    : src.i32_const() >> 31));
        break;
    }
  }
}
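// For the KIntConst case above: an i64 constant is tracked via its 32-bit low
// word, and the pushed high word is that word's sign extension,
// {src.i32_const() >> 31}, i.e. 0 for non-negative constants and -1
// (0xFFFFFFFF) for negative ones. Example: the constant -5 pushes low word
// 0xFFFFFFFB and high word 0xFFFFFFFF.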
#undef REQUIRE_CPU_FEATURE

#endif  // V8_WASM_BASELINE_IA32_LIFTOFF_ASSEMBLER_IA32_H_