#ifndef V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
#define V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_

#include "src/wasm/baseline/liftoff-assembler.h"

#define BAILOUT(reason) bailout("arm64 " reason)

namespace liftoff {

constexpr int32_t kInstanceOffset = 2 * kPointerSize;
constexpr int32_t kFirstStackSlotOffset = kInstanceOffset + kPointerSize;
constexpr int32_t kConstantStackSpace = 0;

inline MemOperand GetStackSlot(uint32_t index) {
  int32_t offset =
      kFirstStackSlotOffset + index * LiftoffAssembler::kStackSlotSize;
  return MemOperand(fp, -offset);
}

inline MemOperand GetInstanceOperand() {
  return MemOperand(fp, -kInstanceOffset);
}
inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
  switch (type) {
    case kWasmI32: return reg.gp().W();
    case kWasmI64: return reg.gp().X();
    case kWasmF32: return reg.fp().S();
    case kWasmF64: return reg.fp().D();
    default: UNREACHABLE();
  }
}
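// The arm64 stack pointer must stay 16-byte aligned, and PushCPURegList /
// PopCPURegList transfer pairs of registers. If a list holds an odd number of
// registers, it is padded with one extra (otherwise unused) register so the
// count is even.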
inline CPURegList PadRegList(RegList list) {
  if ((base::bits::CountPopulation(list) & 1) != 0) list |= padreg.bit();
  return CPURegList(CPURegister::kRegister, kXRegSizeInBits, list);
}

inline CPURegList PadVRegList(RegList list) {
  if ((base::bits::CountPopulation(list) & 1) != 0) list |= fp_scratch.bit();
  return CPURegList(CPURegister::kVRegister, kDRegSizeInBits, list);
}
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
                                 ValueType type) {
  switch (type) {
    case kWasmI32: return temps->AcquireW();
    case kWasmI64: return temps->AcquireX();
    case kWasmF32: return temps->AcquireS();
    case kWasmF64: return temps->AcquireD();
    default: UNREACHABLE();
  }
}
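// Builds a memory operand from a base register, an optional 32-bit index
// register and a 32-bit immediate offset. When both an index and a non-zero
// immediate are present, the immediate is first folded into a scratch
// register, since an arm64 addressing mode cannot encode both at once.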
inline MemOperand GetMemOp(LiftoffAssembler* assm,
                           UseScratchRegisterScope* temps, Register addr,
                           Register offset, uint32_t offset_imm) {
  DCHECK(is_uint31(offset_imm));
  if (offset.IsValid()) {
    if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
    Register tmp = temps->AcquireW();
    assm->Add(tmp, offset.W(), offset_imm);
    return MemOperand(addr.X(), tmp, UXTW);
  }
  return MemOperand(addr.X(), offset_imm);
}

}  // namespace liftoff
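// Frame setup happens in two steps: PrepareStackFrame emits a single
// placeholder "sub sp" instruction and returns its pc offset, and
// PatchPrepareStackFrame later patches that instruction with the real frame
// size once the number of spill slots is known.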
int LiftoffAssembler::PrepareStackFrame() {
  int offset = pc_offset();
  InstructionAccurateScope scope(this, 1);
  // Placeholder instruction, patched in PatchPrepareStackFrame.
  sub(sp, sp, 0);
  return offset;
}
void LiftoffAssembler::PatchPrepareStackFrame(int offset,
                                              uint32_t stack_slots) {
  static_assert(kStackSlotSize == kXRegSize,
                "kStackSlotSize must equal kXRegSize");
  uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
  // The stack pointer is required to be quad-word aligned.
  bytes = RoundUp(bytes, kQuadWordSizeInBytes);
  if (!IsImmAddSub(bytes)) {
    // Round up to a whole page so the frame size fits an add/sub immediate.
    bytes = RoundUp(bytes, 0x1000);
    if (!IsImmAddSub(bytes)) {
      BAILOUT("Stack too big");
      return;
    }
  }
  if (bytes > KB / 2) {
    BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
    return;
  }
  PatchingAssembler patching_assembler(AssemblerOptions{}, buffer_ + offset, 1);
  patching_assembler.PatchSubSp(bytes);
}
void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }

void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
                                    RelocInfo::Mode rmode) {
  switch (value.type()) {
    case kWasmI32:
      Mov(reg.gp().W(), Immediate(value.to_i32(), rmode));
      break;
    case kWasmI64:
      Mov(reg.gp().X(), Immediate(value.to_i64(), rmode));
      break;
    case kWasmF32:
      Fmov(reg.fp().S(), value.to_f32_boxed().get_scalar());
      break;
    case kWasmF64:
      Fmov(reg.fp().D(), value.to_f64_boxed().get_scalar());
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
                                        int size) {
  DCHECK_LE(offset, kMaxInt);
  Ldr(dst, liftoff::GetInstanceOperand());
  DCHECK(size == 4 || size == 8);
  if (size == 4) {
    Ldr(dst.W(), MemOperand(dst, offset));
  } else {
    Ldr(dst, MemOperand(dst, offset));
  }
}
void LiftoffAssembler::SpillInstance(Register instance) {
  Str(instance, liftoff::GetInstanceOperand());
}

void LiftoffAssembler::FillInstanceInto(Register dst) {
  Ldr(dst, liftoff::GetInstanceOperand());
}
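// Wasm memory loads and stores: the memory operand is built via
// liftoff::GetMemOp, and the pc of the memory instruction is recorded in
// *protected_load_pc / *protected_store_pc (when requested) so a faulting
// access can be identified later.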
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                            Register offset_reg, uint32_t offset_imm,
                            LoadType type, LiftoffRegList pinned,
                            uint32_t* protected_load_pc, bool is_load_mem) {
  UseScratchRegisterScope temps(this);
  MemOperand src_op =
      liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
  if (protected_load_pc) *protected_load_pc = pc_offset();
  switch (type.value()) {
    case LoadType::kI32Load8U:
    case LoadType::kI64Load8U:
      Ldrb(dst.gp().W(), src_op);
      break;
    case LoadType::kI32Load8S:
      Ldrsb(dst.gp().W(), src_op);
      break;
    case LoadType::kI64Load8S:
      Ldrsb(dst.gp().X(), src_op);
      break;
    case LoadType::kI32Load16U:
    case LoadType::kI64Load16U:
      Ldrh(dst.gp().W(), src_op);
      break;
    case LoadType::kI32Load16S:
      Ldrsh(dst.gp().W(), src_op);
      break;
    case LoadType::kI64Load16S:
      Ldrsh(dst.gp().X(), src_op);
      break;
    case LoadType::kI32Load:
    case LoadType::kI64Load32U:
      Ldr(dst.gp().W(), src_op);
      break;
    case LoadType::kI64Load32S:
      Ldrsw(dst.gp().X(), src_op);
      break;
    case LoadType::kI64Load:
      Ldr(dst.gp().X(), src_op);
      break;
    case LoadType::kF32Load:
      Ldr(dst.fp().S(), src_op);
      break;
    case LoadType::kF64Load:
      Ldr(dst.fp().D(), src_op);
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
                             uint32_t offset_imm, LiftoffRegister src,
                             StoreType type, LiftoffRegList pinned,
                             uint32_t* protected_store_pc, bool is_store_mem) {
  UseScratchRegisterScope temps(this);
  MemOperand dst_op =
      liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
  if (protected_store_pc) *protected_store_pc = pc_offset();
  switch (type.value()) {
    case StoreType::kI32Store8:
    case StoreType::kI64Store8:
      Strb(src.gp().W(), dst_op);
      break;
    case StoreType::kI32Store16:
    case StoreType::kI64Store16:
      Strh(src.gp().W(), dst_op);
      break;
    case StoreType::kI32Store:
    case StoreType::kI64Store32:
      Str(src.gp().W(), dst_op);
      break;
    case StoreType::kI64Store:
      Str(src.gp().X(), dst_op);
      break;
    case StoreType::kF32Store:
      Str(src.fp().S(), dst_op);
      break;
    case StoreType::kF64Store:
      Str(src.fp().D(), dst_op);
      break;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                           uint32_t caller_slot_idx,
                                           ValueType type) {
  int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
  Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
                                      ValueType type) {
  UseScratchRegisterScope temps(this);
  CPURegister scratch = liftoff::AcquireByType(&temps, type);
  Ldr(scratch, liftoff::GetStackSlot(src_index));
  Str(scratch, liftoff::GetStackSlot(dst_index));
}
void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
  if (type == kWasmI32) {
    Mov(dst.W(), src.W());
  } else {
    DCHECK_EQ(kWasmI64, type);
    Mov(dst.X(), src.X());
  }
}

void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
                            ValueType type) {
  if (type == kWasmF32) {
    Fmov(dst.S(), src.S());
  } else {
    DCHECK_EQ(kWasmF64, type);
    Fmov(dst.D(), src.D());
  }
}
void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
                             ValueType type) {
  RecordUsedSpillSlot(index);
  MemOperand dst = liftoff::GetStackSlot(index);
  Str(liftoff::GetRegFromType(reg, type), dst);
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
  RecordUsedSpillSlot(index);
  MemOperand dst = liftoff::GetStackSlot(index);
  UseScratchRegisterScope temps(this);
  CPURegister src = CPURegister::no_reg();
  switch (value.type()) {
    case kWasmI32:
      src = temps.AcquireW();
      Mov(src.W(), value.to_i32());
      break;
    case kWasmI64:
      src = temps.AcquireX();
      Mov(src.X(), value.to_i64());
      break;
    default:
      // f32 and f64 constants are not tracked, hence unreachable here.
      UNREACHABLE();
  }
  Str(src, dst);
}
void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
                            ValueType type) {
  MemOperand src = liftoff::GetStackSlot(index);
  Ldr(liftoff::GetRegFromType(reg, type), src);
}

void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
  // On arm64, i64 values are never split into two half registers.
  UNREACHABLE();
}
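// The following macros generate the straight-line emit_* bindings for integer
// and floating-point binary operations, floating-point unary operations and
// shifts, each of which maps directly onto a single arm64 instruction.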
#define I32_BINOP(name, instruction)                              \
  void LiftoffAssembler::emit_##name(Register dst, Register lhs,  \
                                     Register rhs) {              \
    instruction(dst.W(), lhs.W(), rhs.W());                       \
  }
#define I64_BINOP(name, instruction)                                           \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
                                     LiftoffRegister rhs) {                    \
    instruction(dst.gp().X(), lhs.gp().X(), rhs.gp().X());                     \
  }
#define FP32_BINOP(name, instruction)                                         \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs,  \
                                     DoubleRegister rhs) {                    \
    instruction(dst.S(), lhs.S(), rhs.S());                                   \
  }
#define FP32_UNOP(name, instruction)                                           \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst.S(), src.S());                                             \
  }
#define FP32_UNOP_RETURN_TRUE(name, instruction)                               \
  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst.S(), src.S());                                             \
    return true;                                                               \
  }
#define FP64_BINOP(name, instruction)                                         \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs,  \
                                     DoubleRegister rhs) {                    \
    instruction(dst.D(), lhs.D(), rhs.D());                                   \
  }
#define FP64_UNOP(name, instruction)                                           \
  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst.D(), src.D());                                             \
  }
#define FP64_UNOP_RETURN_TRUE(name, instruction)                               \
  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
    instruction(dst.D(), src.D());                                             \
    return true;                                                               \
  }
#define I32_SHIFTOP(name, instruction)                                         \
  void LiftoffAssembler::emit_##name(Register dst, Register src,               \
                                     Register amount, LiftoffRegList pinned) { \
    instruction(dst.W(), src.W(), amount.W());                                 \
  }
#define I32_SHIFTOP_I(name, instruction)                                       \
  I32_SHIFTOP(name, instruction)                                               \
  void LiftoffAssembler::emit_##name(Register dst, Register src, int amount) { \
    DCHECK(is_uint5(amount));                                                  \
    instruction(dst.W(), src.W(), amount);                                     \
  }
#define I64_SHIFTOP(name, instruction)                                         \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
                                     Register amount, LiftoffRegList pinned) { \
    instruction(dst.gp().X(), src.gp().X(), amount.X());                       \
  }
#define I64_SHIFTOP_I(name, instruction)                                       \
  I64_SHIFTOP(name, instruction)                                               \
  void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
                                     int amount) {                             \
    DCHECK(is_uint6(amount));                                                  \
    instruction(dst.gp().X(), src.gp().X(), amount);                           \
  }

I32_BINOP(i32_add, Add)
I32_BINOP(i32_sub, Sub)
I32_BINOP(i32_mul, Mul)
I32_BINOP(i32_and, And)
I32_BINOP(i32_or, Orr)
I32_BINOP(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
I32_SHIFTOP_I(i32_shr, Lsr)
I64_BINOP(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
I64_BINOP(i64_and, And)
I64_BINOP(i64_or, Orr)
I64_BINOP(i64_xor, Eor)
I64_SHIFTOP(i64_shl, Lsl)
I64_SHIFTOP(i64_sar, Asr)
I64_SHIFTOP_I(i64_shr, Lsr)
FP32_BINOP(f32_add, Fadd)
FP32_BINOP(f32_sub, Fsub)
FP32_BINOP(f32_mul, Fmul)
FP32_BINOP(f32_div, Fdiv)
FP32_BINOP(f32_min, Fmin)
FP32_BINOP(f32_max, Fmax)
FP32_UNOP(f32_abs, Fabs)
FP32_UNOP(f32_neg, Fneg)
FP32_UNOP_RETURN_TRUE(f32_ceil, Frintp)
FP32_UNOP_RETURN_TRUE(f32_floor, Frintm)
FP32_UNOP_RETURN_TRUE(f32_trunc, Frintz)
FP32_UNOP_RETURN_TRUE(f32_nearest_int, Frintn)
FP32_UNOP(f32_sqrt, Fsqrt)
FP64_BINOP(f64_add, Fadd)
FP64_BINOP(f64_sub, Fsub)
FP64_BINOP(f64_mul, Fmul)
FP64_BINOP(f64_div, Fdiv)
FP64_BINOP(f64_min, Fmin)
FP64_BINOP(f64_max, Fmax)
FP64_UNOP(f64_abs, Fabs)
FP64_UNOP(f64_neg, Fneg)
FP64_UNOP_RETURN_TRUE(f64_ceil, Frintp)
FP64_UNOP_RETURN_TRUE(f64_floor, Frintm)
FP64_UNOP_RETURN_TRUE(f64_trunc, Frintz)
FP64_UNOP_RETURN_TRUE(f64_nearest_int, Frintn)
FP64_UNOP(f64_sqrt, Fsqrt)
#undef FP64_UNOP_RETURN_TRUE

bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
  Clz(dst.W(), src.W());
  return true;
}

bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
  Rbit(dst.W(), src.W());
  Clz(dst.W(), dst.W());
  return true;
}
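// There is no scalar popcount instruction on arm64, so the value is moved to
// a NEON register, Cnt computes per-byte popcounts, and Addv sums the bytes.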
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
  UseScratchRegisterScope temps(this);
  VRegister scratch = temps.AcquireV(kFormat8B);
  Fmov(scratch.S(), src.W());
  Cnt(scratch, scratch);
  Addv(scratch.B(), scratch);
  Fmov(dst.W(), scratch.S());
  return true;
}
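// Signed division traps on division by zero and on kMinInt / -1. When dst
// does not alias an input, the division is emitted before the checks; the
// Cmp/Ccmp pair sets the overflow flag exactly for the kMinInt / -1 case.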
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  Register dst_w = dst.W();
  Register lhs_w = lhs.W();
  Register rhs_w = rhs.W();
  bool can_use_dst = !dst_w.Aliases(lhs_w) && !dst_w.Aliases(rhs_w);
  if (can_use_dst) {
    // Do the division early, before the checks.
    Sdiv(dst_w, lhs_w, rhs_w);
  }
  // Check for division by zero.
  Cbz(rhs_w, trap_div_by_zero);
  // Check for kMinInt / -1, which is unrepresentable.
  Cmp(rhs_w, -1);
  Ccmp(lhs_w, 1, NoFlag, eq);
  B(trap_div_unrepresentable, vs);
  if (!can_use_dst) {
    Sdiv(dst_w, lhs_w, rhs_w);
  }
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  // Check for division by zero.
  Cbz(rhs.W(), trap_div_by_zero);
  Udiv(dst.W(), lhs.W(), rhs.W());
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  Register dst_w = dst.W();
  Register lhs_w = lhs.W();
  Register rhs_w = rhs.W();
  // No check for kMinInt / -1 is needed: Sdiv yields kMinInt, and the Msub
  // below then produces a remainder of 0, which is the correct result.
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireW();
  Sdiv(scratch, lhs_w, rhs_w);
  // Check for division by zero.
  Cbz(rhs_w, trap_div_by_zero);
  // Compute the remainder.
  Msub(dst_w, scratch, rhs_w, lhs_w);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
                                     Label* trap_div_by_zero) {
  Register dst_w = dst.W();
  Register lhs_w = lhs.W();
  Register rhs_w = rhs.W();
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireW();
  Udiv(scratch, lhs_w, rhs_w);
  // Check for division by zero.
  Cbz(rhs_w, trap_div_by_zero);
  // Compute the remainder.
  Msub(dst_w, scratch, rhs_w, lhs_w);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero,
                                     Label* trap_div_unrepresentable) {
  Register dst_x = dst.gp().X();
  Register lhs_x = lhs.gp().X();
  Register rhs_x = rhs.gp().X();
  bool can_use_dst = !dst_x.Aliases(lhs_x) && !dst_x.Aliases(rhs_x);
  if (can_use_dst) {
    // Do the division early, before the checks.
    Sdiv(dst_x, lhs_x, rhs_x);
  }
  // Check for division by zero.
  Cbz(rhs_x, trap_div_by_zero);
  // Check for kMinInt / -1, which is unrepresentable.
  Cmp(rhs_x, -1);
  Ccmp(lhs_x, 1, NoFlag, eq);
  B(trap_div_unrepresentable, vs);
  if (!can_use_dst) {
    Sdiv(dst_x, lhs_x, rhs_x);
  }
  return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  // Check for division by zero.
  Cbz(rhs.gp().X(), trap_div_by_zero);
  Udiv(dst.gp().X(), lhs.gp().X(), rhs.gp().X());
  return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  Register dst_x = dst.gp().X();
  Register lhs_x = lhs.gp().X();
  Register rhs_x = rhs.gp().X();
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX();
  Sdiv(scratch, lhs_x, rhs_x);
  // Check for division by zero.
  Cbz(rhs_x, trap_div_by_zero);
  // Compute the remainder.
  Msub(dst_x, scratch, rhs_x, lhs_x);
  return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs,
                                     Label* trap_div_by_zero) {
  Register dst_x = dst.gp().X();
  Register lhs_x = lhs.gp().X();
  Register rhs_x = rhs.gp().X();
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX();
  Udiv(scratch, lhs_x, rhs_x);
  // Check for division by zero.
  Cbz(rhs_x, trap_div_by_zero);
  // Compute the remainder.
  Msub(dst_x, scratch, rhs_x, lhs_x);
  return true;
}
void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
  Sxtw(dst, src);
}
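// Copysign: Ushr isolates the sign bit of rhs in a scratch register, then Sli
// (shift left and insert) deposits that bit into the sign position of dst.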
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
                                         DoubleRegister rhs) {
  UseScratchRegisterScope temps(this);
  DoubleRegister scratch = temps.AcquireD();
  Ushr(scratch.V2S(), rhs.V2S(), 31);
  if (dst != lhs) {
    Fmov(dst.S(), lhs.S());
  }
  Sli(dst.V2S(), scratch.V2S(), 31);
}
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
                                         DoubleRegister rhs) {
  UseScratchRegisterScope temps(this);
  DoubleRegister scratch = temps.AcquireD();
  Ushr(scratch.V1D(), rhs.V1D(), 63);
  if (dst != lhs) {
    Fmov(dst.D(), lhs.D());
  }
  Sli(dst.V1D(), scratch.V1D(), 63);
}
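// Trapping float-to-integer conversions convert with Fcvtzs/Fcvtzu and then
// detect NaN or out-of-range inputs: either with an Fcmp/Ccmp sequence on the
// saturated result, or (for the f64 -> i32 cases) by rounding the input with
// Frintz, converting the integer result back, and comparing the two.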
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                            LiftoffRegister dst,
                                            LiftoffRegister src, Label* trap) {
  switch (opcode) {
    case kExprI32ConvertI64:
      if (src != dst) Mov(dst.gp().W(), src.gp().W());
      return true;
    case kExprI32SConvertF32:
      Fcvtzs(dst.gp().W(), src.fp().S());  // f32 -> i32, round towards zero.
      // Check underflow and NaN.
      Fcmp(src.fp().S(), static_cast<float>(INT32_MIN));
      // Check overflow.
      Ccmp(dst.gp().W(), -1, VFlag, ge);
      B(trap, vs);
      return true;
    case kExprI32UConvertF32:
      Fcvtzu(dst.gp().W(), src.fp().S());  // f32 -> u32, round towards zero.
      // Check underflow and NaN.
      Fcmp(src.fp().S(), -1.0);
      // Check overflow.
      Ccmp(dst.gp().W(), -1, ZFlag, gt);
      B(trap, eq);
      return true;
    case kExprI32SConvertF64: {
      // Check the result by converting back and comparing, since INT32_MIN
      // and INT32_MAX are themselves valid results.
      UseScratchRegisterScope temps(this);
      VRegister fp_ref = temps.AcquireD();
      VRegister fp_cmp = temps.AcquireD();
      Fcvtzs(dst.gp().W(), src.fp().D());  // f64 -> i32, round towards zero.
      Frintz(fp_ref, src.fp().D());        // f64 -> f64, round towards zero.
      Scvtf(fp_cmp, dst.gp().W());         // i32 -> f64.
      // If the comparison fails, we have an overflow or a NaN.
      Fcmp(fp_cmp, fp_ref);
      B(trap, ne);
      return true;
    }
    case kExprI32UConvertF64: {
      // Same round-trip check as for kExprI32SConvertF64.
      UseScratchRegisterScope temps(this);
      VRegister fp_ref = temps.AcquireD();
      VRegister fp_cmp = temps.AcquireD();
      Fcvtzu(dst.gp().W(), src.fp().D());  // f64 -> u32, round towards zero.
      Frintz(fp_ref, src.fp().D());        // f64 -> f64, round towards zero.
      Ucvtf(fp_cmp, dst.gp().W());         // u32 -> f64.
      // If the comparison fails, we have an overflow or a NaN.
      Fcmp(fp_cmp, fp_ref);
      B(trap, ne);
      return true;
    }
    case kExprI32ReinterpretF32:
      Fmov(dst.gp().W(), src.fp().S());
      return true;
    case kExprI64SConvertI32:
      Sxtw(dst.gp().X(), src.gp().W());
      return true;
    case kExprI64SConvertF32:
      Fcvtzs(dst.gp().X(), src.fp().S());  // f32 -> i64, round towards zero.
      // Check underflow and NaN.
      Fcmp(src.fp().S(), static_cast<float>(INT64_MIN));
      // Check overflow.
      Ccmp(dst.gp().X(), -1, VFlag, ge);
      B(trap, vs);
      return true;
    case kExprI64UConvertF32:
      Fcvtzu(dst.gp().X(), src.fp().S());  // f32 -> u64, round towards zero.
      // Check underflow and NaN.
      Fcmp(src.fp().S(), -1.0);
      // Check overflow.
      Ccmp(dst.gp().X(), -1, ZFlag, gt);
      B(trap, eq);
      return true;
    case kExprI64SConvertF64:
      Fcvtzs(dst.gp().X(), src.fp().D());  // f64 -> i64, round towards zero.
      // Check underflow and NaN.
      Fcmp(src.fp().D(), static_cast<float>(INT64_MIN));
      // Check overflow.
      Ccmp(dst.gp().X(), -1, VFlag, ge);
      B(trap, vs);
      return true;
    case kExprI64UConvertF64:
      Fcvtzu(dst.gp().X(), src.fp().D());  // f64 -> u64, round towards zero.
      // Check underflow and NaN.
      Fcmp(src.fp().D(), -1.0);
      // Check overflow.
      Ccmp(dst.gp().X(), -1, ZFlag, gt);
      B(trap, eq);
      return true;
    case kExprI64UConvertI32:
      Mov(dst.gp().W(), src.gp().W());
      return true;
    case kExprI64ReinterpretF64:
      Fmov(dst.gp().X(), src.fp().D());
      return true;
    case kExprF32SConvertI32:
      Scvtf(dst.fp().S(), src.gp().W());
      return true;
    case kExprF32UConvertI32:
      Ucvtf(dst.fp().S(), src.gp().W());
      return true;
    case kExprF32SConvertI64:
      Scvtf(dst.fp().S(), src.gp().X());
      return true;
    case kExprF32UConvertI64:
      Ucvtf(dst.fp().S(), src.gp().X());
      return true;
    case kExprF32ConvertF64:
      Fcvt(dst.fp().S(), src.fp().D());
      return true;
    case kExprF32ReinterpretI32:
      Fmov(dst.fp().S(), src.gp().W());
      return true;
    case kExprF64SConvertI32:
      Scvtf(dst.fp().D(), src.gp().W());
      return true;
    case kExprF64UConvertI32:
      Ucvtf(dst.fp().D(), src.gp().W());
      return true;
    case kExprF64SConvertI64:
      Scvtf(dst.fp().D(), src.gp().X());
      return true;
    case kExprF64UConvertI64:
      Ucvtf(dst.fp().D(), src.gp().X());
      return true;
    case kExprF64ConvertF32:
      Fcvt(dst.fp().D(), src.fp().S());
      return true;
    case kExprF64ReinterpretI64:
      Fmov(dst.fp().D(), src.gp().X());
      return true;
    default:
      UNREACHABLE();
  }
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
  sxtb(dst.W(), src.W());
}

void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
  sxth(dst.W(), src.W());
}

void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
                                              LiftoffRegister src) {
  sxtb(dst.gp(), src.gp());
}

void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
                                               LiftoffRegister src) {
  sxth(dst.gp(), src.gp());
}

void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
                                               LiftoffRegister src) {
  sxtw(dst.gp(), src.gp());
}
void LiftoffAssembler::emit_jump(Label* label) { B(label); }

void LiftoffAssembler::emit_jump(Register target) { Br(target); }

void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
                                      ValueType type, Register lhs,
                                      Register rhs) {
  switch (type) {
    case kWasmI32:
      if (rhs.IsValid()) {
        Cmp(lhs.W(), rhs.W());
      } else {
        Cmp(lhs.W(), wzr);
      }
      break;
    case kWasmI64:
      if (rhs.IsValid()) {
        Cmp(lhs.X(), rhs.X());
      } else {
        Cmp(lhs.X(), xzr);
      }
      break;
    default:
      UNREACHABLE();
  }
  B(label, cond);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
  Cmp(src.W(), wzr);
  Cset(dst.W(), eq);
}
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
                                         Register lhs, Register rhs) {
  Cmp(lhs.W(), rhs.W());
  Cset(dst.W(), cond);
}

void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
  Cmp(src.gp().X(), xzr);
  Cset(dst.W(), eq);
}

void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
                                         LiftoffRegister lhs,
                                         LiftoffRegister rhs) {
  Cmp(lhs.gp().X(), rhs.gp().X());
  Cset(dst.W(), cond);
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Fcmp(lhs.S(), rhs.S());
  Cset(dst.W(), cond);
  if (cond != ne) {
    // If the V flag is set, at least one argument was a NaN -> false.
    Csel(dst.W(), wzr, dst.W(), vs);
  }
}

void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
                                         DoubleRegister lhs,
                                         DoubleRegister rhs) {
  Fcmp(lhs.D(), rhs.D());
  Cset(dst.W(), cond);
  if (cond != ne) {
    // If the V flag is set, at least one argument was a NaN -> false.
    Csel(dst.W(), wzr, dst.W(), vs);
  }
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
  Ldr(limit_address, MemOperand(limit_address));
  Cmp(sp, limit_address);
  B(ool_code, ls);
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
  TurboAssembler::AssertUnreachable(reason);
}
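// Pushed register lists are padded to an even register count (see PadRegList
// and PadVRegList above) so that sp stays 16-byte aligned.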
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
  PushCPURegList(liftoff::PadRegList(regs.GetGpList()));
  PushCPURegList(liftoff::PadVRegList(regs.GetFpList()));
}

void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
  PopCPURegList(liftoff::PadVRegList(regs.GetFpList()));
  PopCPURegList(liftoff::PadRegList(regs.GetGpList()));
}

void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
  DropSlots(num_stack_slots);
  Ret();
}
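// C calls: the arguments are poked into a freshly claimed, quad-word aligned
// stack area, and the C function receives a single pointer to that buffer.
// Results come back in x0 and, for out arguments, through the same buffer.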
void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
                             const LiftoffRegister* args,
                             const LiftoffRegister* rets,
                             ValueType out_argument_type, int stack_bytes,
                             ExternalReference ext_ref) {
  // The stack pointer is required to be quad-word aligned.
  int total_size = RoundUp(stack_bytes, kQuadWordSizeInBytes);
  // Reserve space on the stack.
  Claim(total_size, 1);

  // Poke the arguments into the reserved area.
  int arg_bytes = 0;
  for (ValueType param_type : sig->parameters()) {
    Poke(liftoff::GetRegFromType(*args++, param_type), arg_bytes);
    arg_bytes += ValueTypes::MemSize(param_type);
  }
  DCHECK_LE(arg_bytes, stack_bytes);

  // Pass a pointer to the argument buffer as the only C argument.
  Mov(x0, sp);

  // Now call the C function.
  constexpr int kNumCCallArgs = 1;
  CallCFunction(ext_ref, kNumCCallArgs);

  // Move the return value to the right register.
  const LiftoffRegister* next_result_reg = rets;
  if (sig->return_count() > 0) {
    DCHECK_EQ(1, sig->return_count());
    constexpr Register kReturnReg = x0;
    if (kReturnReg != next_result_reg->gp()) {
      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
    }
    ++next_result_reg;
  }

  // Load a potential output value from the buffer on the stack.
  if (out_argument_type != kWasmStmt) {
    Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_type), 0);
  }

  // Release the reserved stack area.
  Drop(total_size, 1);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
  Call(addr, RelocInfo::WASM_CALL);
}

void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
                                    compiler::CallDescriptor* call_descriptor,
                                    Register target) {
  // On arm64 the target of an indirect call is always held in a register.
  DCHECK(target.IsValid());
  Call(target);
}

void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
  // Encode the stub index as the call target; it is patched at relocation.
  Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
  // The stack pointer is required to be quad-word aligned.
  size = RoundUp(size, kQuadWordSizeInBytes);
  Claim(size, 1);
  Mov(addr, sp);
}

void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
  // The stack pointer is required to be quad-word aligned.
  size = RoundUp(size, kQuadWordSizeInBytes);
  Drop(size, 1);
}
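// Builds the stack part of a call frame: claims an even number of slots to
// keep sp aligned, then pokes each value (from a stack slot, a register, or
// an i32/i64 constant) at its offset from the new stack pointer.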
void LiftoffStackSlots::Construct() {
  size_t slot_count = slots_.size();
  // The stack pointer is required to be quad-word aligned.
  asm_->Claim(RoundUp(slot_count, 2));
  size_t slot_index = 0;
  for (auto& slot : slots_) {
    size_t poke_offset = (slot_count - slot_index - 1) * kXRegSize;
    switch (slot.src_.loc()) {
      case LiftoffAssembler::VarState::kStack: {
        UseScratchRegisterScope temps(asm_);
        CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
        asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_index_));
        asm_->Poke(scratch, poke_offset);
        break;
      }
      case LiftoffAssembler::VarState::kRegister:
        asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
                   poke_offset);
        break;
      case LiftoffAssembler::VarState::KIntConst:
        DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
        if (slot.src_.i32_const() == 0) {
          Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
          asm_->Poke(zero_reg, poke_offset);
        } else {
          UseScratchRegisterScope temps(asm_);
          Register scratch = slot.src_.type() == kWasmI32 ? temps.AcquireW()
                                                          : temps.AcquireX();
          asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
          asm_->Poke(scratch, poke_offset);
        }
        break;
    }
    slot_index++;
  }
}

#endif  // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_