V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
liftoff-assembler-mips64.h
1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
6 #define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
7 
8 #include "src/wasm/baseline/liftoff-assembler.h"
9 
10 #define BAILOUT(reason) bailout("mips64 " reason)
11 
12 namespace v8 {
13 namespace internal {
14 namespace wasm {
15 
16 namespace liftoff {
17 
18 // fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
19 // slot is located at fp-24.
20 constexpr int32_t kConstantStackSpace = 16;
21 constexpr int32_t kFirstStackSlotOffset =
22  kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
23 
24 inline MemOperand GetStackSlot(uint32_t index) {
25  int32_t offset = index * LiftoffAssembler::kStackSlotSize;
26  return MemOperand(fp, -kFirstStackSlotOffset - offset);
27 }
28 
29 inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
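// Worked example (assuming LiftoffAssembler::kStackSlotSize == 8, as implied
// by the "first stack slot is located at fp-24" comment above):
// kFirstStackSlotOffset == 16 + 8 == 24, so GetStackSlot(0) resolves to
// MemOperand(fp, -24) and GetStackSlot(1) to MemOperand(fp, -32); each further
// slot sits another kStackSlotSize bytes below fp.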
30 
31 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
32  ValueType type) {
33  switch (type) {
34  case kWasmI32:
35  assm->lw(dst.gp(), src);
36  break;
37  case kWasmI64:
38  assm->ld(dst.gp(), src);
39  break;
40  case kWasmF32:
41  assm->lwc1(dst.fp(), src);
42  break;
43  case kWasmF64:
44  assm->Ldc1(dst.fp(), src);
45  break;
46  default:
47  UNREACHABLE();
48  }
49 }
50 
51 inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
52  LiftoffRegister src, ValueType type) {
53  MemOperand dst(base, offset);
54  switch (type) {
55  case kWasmI32:
56  assm->Usw(src.gp(), dst);
57  break;
58  case kWasmI64:
59  assm->Usd(src.gp(), dst);
60  break;
61  case kWasmF32:
62  assm->Uswc1(src.fp(), dst, t8);
63  break;
64  case kWasmF64:
65  assm->Usdc1(src.fp(), dst, t8);
66  break;
67  default:
68  UNREACHABLE();
69  }
70 }
71 
72 inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
73  switch (type) {
74  case kWasmI32:
75  assm->daddiu(sp, sp, -kPointerSize);
76  assm->sw(reg.gp(), MemOperand(sp, 0));
77  break;
78  case kWasmI64:
79  assm->push(reg.gp());
80  break;
81  case kWasmF32:
82  assm->daddiu(sp, sp, -kPointerSize);
83  assm->swc1(reg.fp(), MemOperand(sp, 0));
84  break;
85  case kWasmF64:
86  assm->daddiu(sp, sp, -kPointerSize);
87  assm->Sdc1(reg.fp(), MemOperand(sp, 0));
88  break;
89  default:
90  UNREACHABLE();
91  }
92 }
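// Note: every case above moves sp by a full kPointerSize (the kWasmI64 case
// via assm->push), so pushed values occupy uniformly sized stack slots
// regardless of their value type.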
93 
94 #if defined(V8_TARGET_BIG_ENDIAN)
95 inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
96  LoadType type, LiftoffRegList pinned) {
97  bool is_float = false;
98  LiftoffRegister tmp = dst;
99  switch (type.value()) {
100  case LoadType::kI64Load8U:
101  case LoadType::kI64Load8S:
102  case LoadType::kI32Load8U:
103  case LoadType::kI32Load8S:
104  // No need to change endianness for byte size.
105  return;
106  case LoadType::kF32Load:
107  is_float = true;
108  tmp = assm->GetUnusedRegister(kGpReg, pinned);
109  assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
110  V8_FALLTHROUGH;
111  case LoadType::kI64Load32U:
112  assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
113  break;
114  case LoadType::kI32Load:
115  case LoadType::kI64Load32S:
116  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
117  break;
118  case LoadType::kI32Load16S:
119  case LoadType::kI64Load16S:
120  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
121  break;
122  case LoadType::kI32Load16U:
123  case LoadType::kI64Load16U:
124  assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
125  break;
126  case LoadType::kF64Load:
127  is_float = true;
128  tmp = assm->GetUnusedRegister(kGpReg, pinned);
129  assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
130  V8_FALLTHROUGH;
131  case LoadType::kI64Load:
132  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
133  break;
134  default:
135  UNREACHABLE();
136  }
137 
138  if (is_float) {
139  switch (type.value()) {
140  case LoadType::kF32Load:
141  assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
142  break;
143  case LoadType::kF64Load:
144  assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
145  break;
146  default:
147  UNREACHABLE();
148  }
149  }
150 }
151 
152 inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
153  StoreType type, LiftoffRegList pinned) {
154  bool is_float = false;
155  LiftoffRegister tmp = src;
156  switch (type.value()) {
157  case StoreType::kI64Store8:
158  case StoreType::kI32Store8:
159  // No need to change endianness for byte size.
160  return;
161  case StoreType::kF32Store:
162  is_float = true;
163  tmp = assm->GetUnusedRegister(kGpReg, pinned);
164  assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
165  V8_FALLTHROUGH;
166  case StoreType::kI32Store:
167  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
168  break;
169  case StoreType::kI32Store16:
170  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
171  break;
172  case StoreType::kF64Store:
173  is_float = true;
174  tmp = assm->GetUnusedRegister(kGpReg, pinned);
175  assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
176  V8_FALLTHROUGH;
177  case StoreType::kI64Store:
178  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
179  break;
180  case StoreType::kI64Store32:
181  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
182  break;
183  case StoreType::kI64Store16:
184  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
185  break;
186  default:
187  UNREACHABLE();
188  }
189 
190  if (is_float) {
191  switch (type.value()) {
192  case StoreType::kF32Store:
193  assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
194  break;
195  case StoreType::kF64Store:
196  assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
197  break;
198  default:
199  UNREACHABLE();
200  }
201  }
202 }
203 #endif // V8_TARGET_BIG_ENDIAN
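// The two helpers above are only needed on big-endian MIPS64 targets:
// WebAssembly memory is little-endian, so values loaded from or stored to wasm
// memory must be byte-swapped. Floating-point values take a detour through a
// general-purpose register via the reinterpret conversions before and after
// the swap.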
204 
205 } // namespace liftoff
206 
207 int LiftoffAssembler::PrepareStackFrame() {
208  int offset = pc_offset();
 209  // When the constant that represents the size of the stack frame can't be
 210  // represented as a 16-bit value, we need three instructions to add it to sp,
 211  // so we reserve space for this case.
212  daddiu(sp, sp, 0);
213  nop();
214  nop();
215  return offset;
216 }
217 
218 void LiftoffAssembler::PatchPrepareStackFrame(int offset,
219  uint32_t stack_slots) {
220  uint64_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
221  DCHECK_LE(bytes, kMaxInt);
222  // We can't run out of space, just pass anything big enough to not cause the
223  // assembler to try to grow the buffer.
224  constexpr int kAvailableSpace = 256;
225  TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
226  buffer_ + offset, kAvailableSpace,
227  CodeObjectRequired::kNo);
 228  // If bytes can be represented as a 16-bit value, a daddiu is generated and
 229  // the two nops stay untouched. Otherwise, a lui-ori sequence loads the value
 230  // into a register and a daddu is generated as the third instruction.
231  patching_assembler.Daddu(sp, sp, Operand(-bytes));
232 }
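// Worked example (assuming kStackSlotSize == 8): for stack_slots == 4,
// bytes == 16 + 8 * 4 == 48, which fits in a signed 16-bit immediate, so the
// patched code is a single "daddiu sp, sp, -48" and the two reserved nops
// remain in place.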
233 
234 void LiftoffAssembler::FinishCode() {}
235 
236 void LiftoffAssembler::AbortCompilation() {}
237 
238 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
239  RelocInfo::Mode rmode) {
240  switch (value.type()) {
241  case kWasmI32:
242  TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
243  break;
244  case kWasmI64:
245  TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
246  break;
247  case kWasmF32:
248  TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
249  break;
250  case kWasmF64:
251  TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
252  break;
253  default:
254  UNREACHABLE();
255  }
256 }
257 
258 void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
259  int size) {
260  DCHECK_LE(offset, kMaxInt);
261  ld(dst, liftoff::GetInstanceOperand());
262  DCHECK(size == 4 || size == 8);
263  if (size == 4) {
264  lw(dst, MemOperand(dst, offset));
265  } else {
266  ld(dst, MemOperand(dst, offset));
267  }
268 }
269 
270 void LiftoffAssembler::SpillInstance(Register instance) {
271  sd(instance, liftoff::GetInstanceOperand());
272 }
273 
274 void LiftoffAssembler::FillInstanceInto(Register dst) {
275  ld(dst, liftoff::GetInstanceOperand());
276 }
277 
278 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
279  Register offset_reg, uint32_t offset_imm,
280  LoadType type, LiftoffRegList pinned,
281  uint32_t* protected_load_pc, bool is_load_mem) {
282  Register src = no_reg;
283  if (offset_reg != no_reg) {
284  src = GetUnusedRegister(kGpReg, pinned).gp();
285  emit_ptrsize_add(src, src_addr, offset_reg);
286  }
287  MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
288  : MemOperand(src_addr, offset_imm);
289 
290  if (protected_load_pc) *protected_load_pc = pc_offset();
291  switch (type.value()) {
292  case LoadType::kI32Load8U:
293  case LoadType::kI64Load8U:
294  lbu(dst.gp(), src_op);
295  break;
296  case LoadType::kI32Load8S:
297  case LoadType::kI64Load8S:
298  lb(dst.gp(), src_op);
299  break;
300  case LoadType::kI32Load16U:
301  case LoadType::kI64Load16U:
302  TurboAssembler::Ulhu(dst.gp(), src_op);
303  break;
304  case LoadType::kI32Load16S:
305  case LoadType::kI64Load16S:
306  TurboAssembler::Ulh(dst.gp(), src_op);
307  break;
308  case LoadType::kI64Load32U:
309  TurboAssembler::Ulwu(dst.gp(), src_op);
310  break;
311  case LoadType::kI32Load:
312  case LoadType::kI64Load32S:
313  TurboAssembler::Ulw(dst.gp(), src_op);
314  break;
315  case LoadType::kI64Load:
316  TurboAssembler::Uld(dst.gp(), src_op);
317  break;
318  case LoadType::kF32Load:
319  TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
320  break;
321  case LoadType::kF64Load:
322  TurboAssembler::Uldc1(dst.fp(), src_op, t8);
323  break;
324  default:
325  UNREACHABLE();
326  }
327 
328 #if defined(V8_TARGET_BIG_ENDIAN)
329  if (is_load_mem) {
330  pinned.set(src_op.rm());
331  liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
332  }
333 #endif
334 }
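// The U-prefixed TurboAssembler helpers used above (Ulhu, Ulh, Ulw, Uld,
// Ulwc1, Uldc1) tolerate unaligned addresses, which wasm memory accesses may
// produce; t8 serves as the scratch register for the floating-point variants.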
335 
336 void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
337  uint32_t offset_imm, LiftoffRegister src,
338  StoreType type, LiftoffRegList pinned,
339  uint32_t* protected_store_pc, bool is_store_mem) {
340  Register dst = no_reg;
341  MemOperand dst_op = MemOperand(dst_addr, offset_imm);
342  if (offset_reg != no_reg) {
343  if (is_store_mem) {
344  pinned.set(src);
345  }
346  dst = GetUnusedRegister(kGpReg, pinned).gp();
347  emit_ptrsize_add(dst, dst_addr, offset_reg);
348  dst_op = MemOperand(dst, offset_imm);
349  }
350 
351 #if defined(V8_TARGET_BIG_ENDIAN)
352  if (is_store_mem) {
353  pinned.set(dst_op.rm());
354  LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
355  // Save original value.
356  Move(tmp, src, type.value_type());
357 
358  src = tmp;
359  pinned.set(tmp);
360  liftoff::ChangeEndiannessStore(this, src, type, pinned);
361  }
362 #endif
363 
364  if (protected_store_pc) *protected_store_pc = pc_offset();
365  switch (type.value()) {
366  case StoreType::kI32Store8:
367  case StoreType::kI64Store8:
368  sb(src.gp(), dst_op);
369  break;
370  case StoreType::kI32Store16:
371  case StoreType::kI64Store16:
372  TurboAssembler::Ush(src.gp(), dst_op, t8);
373  break;
374  case StoreType::kI32Store:
375  case StoreType::kI64Store32:
376  TurboAssembler::Usw(src.gp(), dst_op);
377  break;
378  case StoreType::kI64Store:
379  TurboAssembler::Usd(src.gp(), dst_op);
380  break;
381  case StoreType::kF32Store:
382  TurboAssembler::Uswc1(src.fp(), dst_op, t8);
383  break;
384  case StoreType::kF64Store:
385  TurboAssembler::Usdc1(src.fp(), dst_op, t8);
386  break;
387  default:
388  UNREACHABLE();
389  }
390 }
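// On big-endian targets the value to store is first copied into a temporary
// register (see "Save original value" above) so that the byte swap for
// little-endian wasm memory does not clobber the original source register,
// whose value may still be needed elsewhere.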
391 
392 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
393  uint32_t caller_slot_idx,
394  ValueType type) {
395  MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
396  liftoff::Load(this, dst, src, type);
397 }
398 
399 void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
400  ValueType type) {
401  DCHECK_NE(dst_index, src_index);
402  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
403  Fill(reg, src_index, type);
404  Spill(dst_index, reg, type);
405 }
406 
407 void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
408  DCHECK_NE(dst, src);
409  // TODO(ksreten): Handle different sizes here.
410  TurboAssembler::Move(dst, src);
411 }
412 
413 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
414  ValueType type) {
415  DCHECK_NE(dst, src);
416  TurboAssembler::Move(dst, src);
417 }
418 
419 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
420  ValueType type) {
421  RecordUsedSpillSlot(index);
422  MemOperand dst = liftoff::GetStackSlot(index);
423  switch (type) {
424  case kWasmI32:
425  sw(reg.gp(), dst);
426  break;
427  case kWasmI64:
428  sd(reg.gp(), dst);
429  break;
430  case kWasmF32:
431  swc1(reg.fp(), dst);
432  break;
433  case kWasmF64:
434  TurboAssembler::Sdc1(reg.fp(), dst);
435  break;
436  default:
437  UNREACHABLE();
438  }
439 }
440 
441 void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
442  RecordUsedSpillSlot(index);
443  MemOperand dst = liftoff::GetStackSlot(index);
444  switch (value.type()) {
445  case kWasmI32: {
446  LiftoffRegister tmp = GetUnusedRegister(kGpReg);
447  TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
448  sw(tmp.gp(), dst);
449  break;
450  }
451  case kWasmI64: {
452  LiftoffRegister tmp = GetUnusedRegister(kGpReg);
453  TurboAssembler::li(tmp.gp(), value.to_i64());
454  sd(tmp.gp(), dst);
455  break;
456  }
457  default:
458  // kWasmF32 and kWasmF64 are unreachable, since those
459  // constants are not tracked.
460  UNREACHABLE();
461  }
462 }
463 
464 void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
465  ValueType type) {
466  MemOperand src = liftoff::GetStackSlot(index);
467  switch (type) {
468  case kWasmI32:
469  lw(reg.gp(), src);
470  break;
471  case kWasmI64:
472  ld(reg.gp(), src);
473  break;
474  case kWasmF32:
475  lwc1(reg.fp(), src);
476  break;
477  case kWasmF64:
478  TurboAssembler::Ldc1(reg.fp(), src);
479  break;
480  default:
481  UNREACHABLE();
482  }
483 }
484 
485 void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
486  UNREACHABLE();
487 }
488 
489 void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
490  TurboAssembler::Mul(dst, lhs, rhs);
491 }
492 
493 void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
494  Label* trap_div_by_zero,
495  Label* trap_div_unrepresentable) {
496  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
497 
498  // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
499  TurboAssembler::li(kScratchReg, 1);
500  TurboAssembler::li(kScratchReg2, 1);
501  TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
502  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
503  daddu(kScratchReg, kScratchReg, kScratchReg2);
504  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
505  Operand(zero_reg));
506 
507  TurboAssembler::Div(dst, lhs, rhs);
508 }
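// How the unrepresentable-result check above works: both scratch registers
// start at 1; LoadZeroOnCondition clears kScratchReg only if lhs == kMinInt
// and clears kScratchReg2 only if rhs == -1. Their sum is therefore zero
// exactly when both conditions hold, i.e. for kMinInt / -1, and only then do
// we branch to trap_div_unrepresentable.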
509 
510 void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
511  Label* trap_div_by_zero) {
512  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
513  TurboAssembler::Divu(dst, lhs, rhs);
514 }
515 
516 void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
517  Label* trap_div_by_zero) {
518  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
519  TurboAssembler::Mod(dst, lhs, rhs);
520 }
521 
522 void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
523  Label* trap_div_by_zero) {
524  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
525  TurboAssembler::Modu(dst, lhs, rhs);
526 }
527 
528 #define I32_BINOP(name, instruction) \
529  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
530  Register rhs) { \
531  instruction(dst, lhs, rhs); \
532  }
533 
534 // clang-format off
535 I32_BINOP(add, addu)
536 I32_BINOP(sub, subu)
537 I32_BINOP(and, and_)
538 I32_BINOP(or, or_)
539 I32_BINOP(xor, xor_)
540 // clang-format on
541 
542 #undef I32_BINOP
543 
544 bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
545  TurboAssembler::Clz(dst, src);
546  return true;
547 }
548 
549 bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
550  TurboAssembler::Ctz(dst, src);
551  return true;
552 }
553 
554 bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
555  TurboAssembler::Popcnt(dst, src);
556  return true;
557 }
558 
559 #define I32_SHIFTOP(name, instruction) \
560  void LiftoffAssembler::emit_i32_##name( \
561  Register dst, Register src, Register amount, LiftoffRegList pinned) { \
562  instruction(dst, src, amount); \
563  }
564 #define I32_SHIFTOP_I(name, instruction) \
565  I32_SHIFTOP(name, instruction##v) \
566  void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
567  int amount) { \
568  DCHECK(is_uint5(amount)); \
569  instruction(dst, src, amount); \
570  }
571 
572 I32_SHIFTOP(shl, sllv)
573 I32_SHIFTOP(sar, srav)
574 I32_SHIFTOP_I(shr, srl)
575 
576 #undef I32_SHIFTOP
577 #undef I32_SHIFTOP_I
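// For example, I32_SHIFTOP_I(shr, srl) expands into two overloads of
// emit_i32_shr: one taking the shift amount in a register (using srlv) and one
// taking a constant amount (using srl, with a DCHECK that the amount fits in
// 5 bits).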
578 
579 void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
580  LiftoffRegister rhs) {
581  TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
582 }
583 
584 bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
585  LiftoffRegister rhs,
586  Label* trap_div_by_zero,
587  Label* trap_div_unrepresentable) {
588  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
589 
590  // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
591  TurboAssembler::li(kScratchReg, 1);
592  TurboAssembler::li(kScratchReg2, 1);
593  TurboAssembler::LoadZeroOnCondition(
594  kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
595  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
596  daddu(kScratchReg, kScratchReg, kScratchReg2);
597  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
598  Operand(zero_reg));
599 
600  TurboAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp());
601  return true;
602 }
603 
604 bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
605  LiftoffRegister rhs,
606  Label* trap_div_by_zero) {
607  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
608  TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp());
609  return true;
610 }
611 
612 bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
613  LiftoffRegister rhs,
614  Label* trap_div_by_zero) {
615  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
616  TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp());
617  return true;
618 }
619 
620 bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
621  LiftoffRegister rhs,
622  Label* trap_div_by_zero) {
623  TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
624  TurboAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp());
625  return true;
626 }
627 
628 #define I64_BINOP(name, instruction) \
629  void LiftoffAssembler::emit_i64_##name( \
630  LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
631  instruction(dst.gp(), lhs.gp(), rhs.gp()); \
632  }
633 
634 // clang-format off
635 I64_BINOP(add, daddu)
636 I64_BINOP(sub, dsubu)
637 I64_BINOP(and, and_)
638 I64_BINOP(or, or_)
639 I64_BINOP(xor, xor_)
640 // clang-format on
641 
642 #undef I64_BINOP
643 
644 #define I64_SHIFTOP(name, instruction) \
645  void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
646  LiftoffRegister src, Register amount, \
647  LiftoffRegList pinned) { \
648  instruction(dst.gp(), src.gp(), amount); \
649  }
650 #define I64_SHIFTOP_I(name, instruction) \
651  I64_SHIFTOP(name, instruction##v) \
652  void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
653  LiftoffRegister src, int amount) { \
654  DCHECK(is_uint6(amount)); \
655  instruction(dst.gp(), src.gp(), amount); \
656  }
657 
658 I64_SHIFTOP(shl, dsllv)
659 I64_SHIFTOP(sar, dsrav)
660 I64_SHIFTOP_I(shr, dsrl)
661 
662 #undef I64_SHIFTOP
663 #undef I64_SHIFTOP_I
664 
665 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
666  addu(dst, src, zero_reg);
667 }
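// On MIPS64, addu produces a sign-extended 32-bit result, so this effectively
// sign-extends the i32 in src into the 64-bit dst, the form in which 32-bit
// values are kept in 64-bit registers on this port.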
668 
669 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
670  TurboAssembler::Neg_s(dst, src);
671 }
672 
673 void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
674  TurboAssembler::Neg_d(dst, src);
675 }
676 
677 void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
678  DoubleRegister rhs) {
679  Label ool, done;
680  TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
681  Branch(&done);
682 
683  bind(&ool);
684  TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
685  bind(&done);
686 }
687 
688 void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
689  DoubleRegister rhs) {
690  Label ool, done;
691  TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
692  Branch(&done);
693 
694  bind(&ool);
695  TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
696  bind(&done);
697 }
698 
699 void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
700  DoubleRegister rhs) {
701  BAILOUT("f32_copysign");
702 }
703 
704 void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
705  DoubleRegister rhs) {
706  Label ool, done;
707  TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
708  Branch(&done);
709 
710  bind(&ool);
711  TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
712  bind(&done);
713 }
714 
715 void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
716  DoubleRegister rhs) {
717  Label ool, done;
718  TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
719  Branch(&done);
720 
721  bind(&ool);
722  TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
723  bind(&done);
724 }
725 
726 void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
727  DoubleRegister rhs) {
728  BAILOUT("f64_copysign");
729 }
730 
731 #define FP_BINOP(name, instruction) \
732  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
733  DoubleRegister rhs) { \
734  instruction(dst, lhs, rhs); \
735  }
736 #define FP_UNOP(name, instruction) \
737  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
738  instruction(dst, src); \
739  }
740 #define FP_UNOP_RETURN_TRUE(name, instruction) \
741  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
742  instruction(dst, src); \
743  return true; \
744  }
745 
746 FP_BINOP(f32_add, add_s)
747 FP_BINOP(f32_sub, sub_s)
748 FP_BINOP(f32_mul, mul_s)
749 FP_BINOP(f32_div, div_s)
750 FP_UNOP(f32_abs, abs_s)
751 FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
752 FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
753 FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
754 FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
755 FP_UNOP(f32_sqrt, sqrt_s)
756 FP_BINOP(f64_add, add_d)
757 FP_BINOP(f64_sub, sub_d)
758 FP_BINOP(f64_mul, mul_d)
759 FP_BINOP(f64_div, div_d)
760 FP_UNOP(f64_abs, abs_d)
761 FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
762 FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
763 FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
764 FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
765 FP_UNOP(f64_sqrt, sqrt_d)
766 
767 #undef FP_BINOP
768 #undef FP_UNOP
769 #undef FP_UNOP_RETURN_TRUE
770 
771 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
772  LiftoffRegister dst,
773  LiftoffRegister src, Label* trap) {
774  switch (opcode) {
775  case kExprI32ConvertI64:
776  TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32);
777  return true;
778  case kExprI32SConvertF32: {
779  LiftoffRegister rounded =
780  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
781  LiftoffRegister converted_back =
782  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
783 
784  // Real conversion.
785  TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
786  trunc_w_s(kScratchDoubleReg, rounded.fp());
787  mfc1(dst.gp(), kScratchDoubleReg);
788  // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
789  // because INT32_MIN allows easier out-of-bounds detection.
790  TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
791  TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
792  TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
793 
794  // Checking if trap.
795  mtc1(dst.gp(), kScratchDoubleReg);
796  cvt_s_w(converted_back.fp(), kScratchDoubleReg);
797  TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
798  TurboAssembler::BranchFalseF(trap);
799  return true;
800  }
801  case kExprI32UConvertF32: {
802  LiftoffRegister rounded =
803  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
804  LiftoffRegister converted_back =
805  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
806 
807  // Real conversion.
808  TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
809  TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
810  // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
811  // because 0 allows easier out-of-bounds detection.
812  TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
813  TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
814 
815  // Checking if trap.
816  TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
817  cvt_s_d(converted_back.fp(), converted_back.fp());
818  TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
819  TurboAssembler::BranchFalseF(trap);
820  return true;
821  }
822  case kExprI32SConvertF64: {
823  LiftoffRegister rounded =
824  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
825  LiftoffRegister converted_back =
826  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
827 
828  // Real conversion.
829  TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
830  trunc_w_d(kScratchDoubleReg, rounded.fp());
831  mfc1(dst.gp(), kScratchDoubleReg);
832 
833  // Checking if trap.
834  cvt_d_w(converted_back.fp(), kScratchDoubleReg);
835  TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
836  TurboAssembler::BranchFalseF(trap);
837  return true;
838  }
839  case kExprI32UConvertF64: {
840  LiftoffRegister rounded =
841  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
842  LiftoffRegister converted_back =
843  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
844 
845  // Real conversion.
846  TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
847  TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
848 
849  // Checking if trap.
850  TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp());
851  TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
852  TurboAssembler::BranchFalseF(trap);
853  return true;
854  }
855  case kExprI32ReinterpretF32:
856  TurboAssembler::FmoveLow(dst.gp(), src.fp());
857  return true;
858  case kExprI64SConvertI32:
859  sll(dst.gp(), src.gp(), 0);
860  return true;
861  case kExprI64UConvertI32:
862  TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32);
863  return true;
864  case kExprI64SConvertF32: {
865  LiftoffRegister rounded =
866  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
867  LiftoffRegister converted_back =
868  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
869 
870  // Real conversion.
871  TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
872  trunc_l_s(kScratchDoubleReg, rounded.fp());
873  dmfc1(dst.gp(), kScratchDoubleReg);
874  // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
875  // because INT64_MIN allows easier out-of-bounds detection.
876  TurboAssembler::Daddu(kScratchReg, dst.gp(), 1);
877  TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
878  TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
879 
880  // Checking if trap.
881  dmtc1(dst.gp(), kScratchDoubleReg);
882  cvt_s_l(converted_back.fp(), kScratchDoubleReg);
883  TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
884  TurboAssembler::BranchFalseF(trap);
885  return true;
886  }
887  case kExprI64UConvertF32: {
888  // Real conversion.
889  TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
890  kScratchReg);
891 
892  // Checking if trap.
893  TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
894  return true;
895  }
896  case kExprI64SConvertF64: {
897  LiftoffRegister rounded =
898  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
899  LiftoffRegister converted_back =
900  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
901 
902  // Real conversion.
903  TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
904  trunc_l_d(kScratchDoubleReg, rounded.fp());
905  dmfc1(dst.gp(), kScratchDoubleReg);
906  // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
907  // because INT64_MIN allows easier out-of-bounds detection.
908  TurboAssembler::Daddu(kScratchReg, dst.gp(), 1);
909  TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
910  TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
911 
912  // Checking if trap.
913  dmtc1(dst.gp(), kScratchDoubleReg);
914  cvt_d_l(converted_back.fp(), kScratchDoubleReg);
915  TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
916  TurboAssembler::BranchFalseF(trap);
917  return true;
918  }
919  case kExprI64UConvertF64: {
920  // Real conversion.
921  TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
922  kScratchReg);
923 
924  // Checking if trap.
925  TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
926  return true;
927  }
928  case kExprI64ReinterpretF64:
929  dmfc1(dst.gp(), src.fp());
930  return true;
931  case kExprF32SConvertI32: {
932  LiftoffRegister scratch =
933  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
934  mtc1(src.gp(), scratch.fp());
935  cvt_s_w(dst.fp(), scratch.fp());
936  return true;
937  }
938  case kExprF32UConvertI32:
939  TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
940  return true;
941  case kExprF32ConvertF64:
942  cvt_s_d(dst.fp(), src.fp());
943  return true;
944  case kExprF32ReinterpretI32:
945  TurboAssembler::FmoveLow(dst.fp(), src.gp());
946  return true;
947  case kExprF64SConvertI32: {
948  LiftoffRegister scratch =
949  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
950  mtc1(src.gp(), scratch.fp());
951  cvt_d_w(dst.fp(), scratch.fp());
952  return true;
953  }
954  case kExprF64UConvertI32:
955  TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
956  return true;
957  case kExprF64ConvertF32:
958  cvt_d_s(dst.fp(), src.fp());
959  return true;
960  case kExprF64ReinterpretI64:
961  dmtc1(src.gp(), dst.fp());
962  return true;
963  default:
964  return false;
965  }
966 }
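// Most of the checked float-to-integer conversions above follow one pattern:
// truncate the input toward zero, convert the integer result back to the
// source floating-point type, and compare it with the truncated input; a
// mismatch (including NaN or an out-of-range value) branches to the trap
// label.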
967 
968 void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
969  BAILOUT("emit_i32_signextend_i8");
970 }
971 
972 void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
973  BAILOUT("emit_i32_signextend_i16");
974 }
975 
976 void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
977  LiftoffRegister src) {
978  BAILOUT("emit_i64_signextend_i8");
979 }
980 
981 void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
982  LiftoffRegister src) {
983  BAILOUT("emit_i64_signextend_i16");
984 }
985 
986 void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
987  LiftoffRegister src) {
988  BAILOUT("emit_i64_signextend_i32");
989 }
990 
991 void LiftoffAssembler::emit_jump(Label* label) {
992  TurboAssembler::Branch(label);
993 }
994 
995 void LiftoffAssembler::emit_jump(Register target) {
996  TurboAssembler::Jump(target);
997 }
998 
999 void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
1000  ValueType type, Register lhs,
1001  Register rhs) {
1002  if (rhs != no_reg) {
1003  TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
1004  } else {
1005  TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
1006  }
1007 }
1008 
1009 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1010  sltiu(dst, src, 1);
1011 }
1012 
1013 void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1014  Register lhs, Register rhs) {
1015  Register tmp = dst;
1016  if (dst == lhs || dst == rhs) {
1017  tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
1018  }
1019  // Write 1 as result.
1020  TurboAssembler::li(tmp, 1);
1021 
1022  // If negative condition is true, write 0 as result.
1023  Condition neg_cond = NegateCondition(cond);
1024  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
1025 
1026  // If tmp != dst, result will be moved.
1027  TurboAssembler::Move(dst, tmp);
1028 }
1029 
1030 void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1031  sltiu(dst, src.gp(), 1);
1032 }
1033 
1034 void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1035  LiftoffRegister lhs,
1036  LiftoffRegister rhs) {
1037  Register tmp = dst;
1038  if (dst == lhs.gp() || dst == rhs.gp()) {
1039  tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
1040  }
1041  // Write 1 as result.
1042  TurboAssembler::li(tmp, 1);
1043 
1044  // If negative condition is true, write 0 as result.
1045  Condition neg_cond = NegateCondition(cond);
1046  TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
1047  neg_cond);
1048 
1049  // If tmp != dst, result will be moved.
1050  TurboAssembler::Move(dst, tmp);
1051 }
1052 
1053 namespace liftoff {
1054 
1055 inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
1056  Condition condition) {
1057  switch (condition) {
1058  case kEqual:
1059  predicate = true;
1060  return EQ;
1061  case kUnequal:
1062  predicate = false;
1063  return EQ;
1064  case kUnsignedLessThan:
1065  predicate = true;
1066  return OLT;
1067  case kUnsignedGreaterEqual:
1068  predicate = false;
1069  return OLT;
1070  case kUnsignedLessEqual:
1071  predicate = true;
1072  return OLE;
1073  case kUnsignedGreaterThan:
1074  predicate = false;
1075  return OLE;
1076  default:
1077  predicate = true;
1078  break;
1079  }
1080  UNREACHABLE();
1081 }
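// The mapping above encodes each Liftoff condition as an FPU compare plus a
// "predicate" flag: when predicate is false the caller tests the negated FPU
// condition, e.g. kUnequal becomes "not EQ" and kUnsignedGreaterEqual becomes
// "not OLT".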
1082 
 1083 }  // namespace liftoff
1084 
1085 void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
1086  DoubleRegister lhs,
1087  DoubleRegister rhs) {
1088  Label not_nan, cont;
1089  TurboAssembler::CompareIsNanF32(lhs, rhs);
1090  TurboAssembler::BranchFalseF(&not_nan);
1091  // If one of the operands is NaN, return 1 for f32.ne, else 0.
1092  if (cond == ne) {
1093  TurboAssembler::li(dst, 1);
1094  } else {
1095  TurboAssembler::Move(dst, zero_reg);
1096  }
1097  TurboAssembler::Branch(&cont);
1098 
1099  bind(&not_nan);
1100 
1101  TurboAssembler::li(dst, 1);
1102  bool predicate;
1103  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
1104  TurboAssembler::CompareF32(fcond, lhs, rhs);
1105  if (predicate) {
1106  TurboAssembler::LoadZeroIfNotFPUCondition(dst);
1107  } else {
1108  TurboAssembler::LoadZeroIfFPUCondition(dst);
1109  }
1110 
1111  bind(&cont);
1112 }
1113 
1114 void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
1115  DoubleRegister lhs,
1116  DoubleRegister rhs) {
1117  Label not_nan, cont;
1118  TurboAssembler::CompareIsNanF64(lhs, rhs);
1119  TurboAssembler::BranchFalseF(&not_nan);
1120  // If one of the operands is NaN, return 1 for f64.ne, else 0.
1121  if (cond == ne) {
1122  TurboAssembler::li(dst, 1);
1123  } else {
1124  TurboAssembler::Move(dst, zero_reg);
1125  }
1126  TurboAssembler::Branch(&cont);
1127 
1128  bind(&not_nan);
1129 
1130  TurboAssembler::li(dst, 1);
1131  bool predicate;
1132  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
1133  TurboAssembler::CompareF64(fcond, lhs, rhs);
1134  if (predicate) {
1135  TurboAssembler::LoadZeroIfNotFPUCondition(dst);
1136  } else {
1137  TurboAssembler::LoadZeroIfFPUCondition(dst);
1138  }
1139 
1140  bind(&cont);
1141 }
1142 
1143 void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
1144  TurboAssembler::Uld(limit_address, MemOperand(limit_address));
1145  TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
1146 }
1147 
1148 void LiftoffAssembler::CallTrapCallbackForTesting() {
1149  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
1150  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
1151 }
1152 
1153 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
1154  if (emit_debug_code()) Abort(reason);
1155 }
1156 
1157 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
1158  LiftoffRegList gp_regs = regs & kGpCacheRegList;
1159  unsigned num_gp_regs = gp_regs.GetNumRegsSet();
1160  if (num_gp_regs) {
1161  unsigned offset = num_gp_regs * kPointerSize;
1162  daddiu(sp, sp, -offset);
1163  while (!gp_regs.is_empty()) {
1164  LiftoffRegister reg = gp_regs.GetFirstRegSet();
1165  offset -= kPointerSize;
1166  sd(reg.gp(), MemOperand(sp, offset));
1167  gp_regs.clear(reg);
1168  }
1169  DCHECK_EQ(offset, 0);
1170  }
1171  LiftoffRegList fp_regs = regs & kFpCacheRegList;
1172  unsigned num_fp_regs = fp_regs.GetNumRegsSet();
1173  if (num_fp_regs) {
1174  daddiu(sp, sp, -(num_fp_regs * kStackSlotSize));
1175  unsigned offset = 0;
1176  while (!fp_regs.is_empty()) {
1177  LiftoffRegister reg = fp_regs.GetFirstRegSet();
1178  TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
1179  fp_regs.clear(reg);
1180  offset += sizeof(double);
1181  }
1182  DCHECK_EQ(offset, num_fp_regs * sizeof(double));
1183  }
1184 }
1185 
1186 void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
1187  LiftoffRegList fp_regs = regs & kFpCacheRegList;
1188  unsigned fp_offset = 0;
1189  while (!fp_regs.is_empty()) {
1190  LiftoffRegister reg = fp_regs.GetFirstRegSet();
1191  TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
1192  fp_regs.clear(reg);
1193  fp_offset += sizeof(double);
1194  }
1195  if (fp_offset) daddiu(sp, sp, fp_offset);
1196  LiftoffRegList gp_regs = regs & kGpCacheRegList;
1197  unsigned gp_offset = 0;
1198  while (!gp_regs.is_empty()) {
1199  LiftoffRegister reg = gp_regs.GetLastRegSet();
1200  ld(reg.gp(), MemOperand(sp, gp_offset));
1201  gp_regs.clear(reg);
1202  gp_offset += kPointerSize;
1203  }
1204  daddiu(sp, sp, gp_offset);
1205 }
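// PopRegisters restores registers in the mirror image of PushRegisters: the
// fp registers are reloaded first from the lowest addresses of the save area,
// then the gp registers, iterating from the last register set so that each
// register is read back from the same slot it was pushed to.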
1206 
1207 void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
1208  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
1209  TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
1210 }
1211 
1212 void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
1213  const LiftoffRegister* args,
1214  const LiftoffRegister* rets,
1215  ValueType out_argument_type, int stack_bytes,
1216  ExternalReference ext_ref) {
1217  daddiu(sp, sp, -stack_bytes);
1218 
1219  int arg_bytes = 0;
1220  for (ValueType param_type : sig->parameters()) {
1221  liftoff::Store(this, sp, arg_bytes, *args++, param_type);
1222  arg_bytes += ValueTypes::MemSize(param_type);
1223  }
1224  DCHECK_LE(arg_bytes, stack_bytes);
1225 
1226  // Pass a pointer to the buffer with the arguments to the C function.
1227  // On mips, the first argument is passed in {a0}.
1228  constexpr Register kFirstArgReg = a0;
1229  mov(kFirstArgReg, sp);
1230 
1231  // Now call the C function.
1232  constexpr int kNumCCallArgs = 1;
1233  PrepareCallCFunction(kNumCCallArgs, kScratchReg);
1234  CallCFunction(ext_ref, kNumCCallArgs);
1235 
1236  // Move return value to the right register.
1237  const LiftoffRegister* next_result_reg = rets;
1238  if (sig->return_count() > 0) {
1239  DCHECK_EQ(1, sig->return_count());
1240  constexpr Register kReturnReg = v0;
1241  if (kReturnReg != next_result_reg->gp()) {
1242  Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
1243  }
1244  ++next_result_reg;
1245  }
1246 
1247  // Load potential output value from the buffer on the stack.
1248  if (out_argument_type != kWasmStmt) {
1249  liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
1250  }
1251 
1252  daddiu(sp, sp, stack_bytes);
1253 }
1254 
1255 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
1256  Call(addr, RelocInfo::WASM_CALL);
1257 }
1258 
1259 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
1260  compiler::CallDescriptor* call_descriptor,
1261  Register target) {
1262  if (target == no_reg) {
1263  pop(kScratchReg);
1264  Call(kScratchReg);
1265  } else {
1266  Call(target);
1267  }
1268 }
1269 
1270 void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
1271  // A direct call to a wasm runtime stub defined in this module.
1272  // Just encode the stub index. This will be patched at relocation.
1273  Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
1274 }
1275 
1276 void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
1277  daddiu(sp, sp, -size);
1278  TurboAssembler::Move(addr, sp);
1279 }
1280 
1281 void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
1282  daddiu(sp, sp, size);
1283 }
1284 
1285 void LiftoffStackSlots::Construct() {
1286  for (auto& slot : slots_) {
1287  const LiftoffAssembler::VarState& src = slot.src_;
1288  switch (src.loc()) {
1289  case LiftoffAssembler::VarState::kStack:
1290  asm_->ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_));
1291  asm_->push(kScratchReg);
1292  break;
1293  case LiftoffAssembler::VarState::kRegister:
1294  liftoff::push(asm_, src.reg(), src.type());
1295  break;
1296  case LiftoffAssembler::VarState::KIntConst: {
1297  asm_->li(kScratchReg, Operand(src.i32_const()));
1298  asm_->push(kScratchReg);
1299  break;
1300  }
1301  }
1302  }
1303 }
1304 
1305 } // namespace wasm
1306 } // namespace internal
1307 } // namespace v8
1308 
1309 #undef BAILOUT
1310 
1311 #endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_