V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
liftoff-assembler-mips.h
1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
6 #define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
7 
8 #include "src/wasm/baseline/liftoff-assembler.h"
9 
10 #define BAILOUT(reason) bailout("mips " reason)
11 
12 namespace v8 {
13 namespace internal {
14 namespace wasm {
15 
16 namespace liftoff {
17 
18 #if defined(V8_TARGET_BIG_ENDIAN)
19 constexpr int32_t kLowWordOffset = 4;
20 constexpr int32_t kHighWordOffset = 0;
21 #else
22 constexpr int32_t kLowWordOffset = 0;
23 constexpr int32_t kHighWordOffset = 4;
24 #endif
25 
26 // fp-4 holds the stack marker, fp-8 is the instance parameter, and the first
27 // stack slot is located at fp-16.
28 constexpr int32_t kConstantStackSpace = 8;
29 constexpr int32_t kFirstStackSlotOffset =
30  kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
31 
32 inline MemOperand GetStackSlot(uint32_t index) {
33  int32_t offset = index * LiftoffAssembler::kStackSlotSize;
34  return MemOperand(fp, -kFirstStackSlotOffset - offset);
35 }
36 
37 inline MemOperand GetHalfStackSlot(uint32_t half_index) {
38  int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
39  return MemOperand(fp, -kFirstStackSlotOffset - offset);
40 }
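// Note: a worked example of the slot arithmetic above, assuming
// LiftoffAssembler::kStackSlotSize is 8 bytes:
//   GetStackSlot(0)     -> MemOperand(fp, -16)  (the first stack slot)
//   GetStackSlot(1)     -> MemOperand(fp, -24)
//   GetHalfStackSlot(3) -> MemOperand(fp, -28)  (high word of slot 1, cf. Spill)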
41 
42 inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
43 
44 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
45  int32_t offset, ValueType type) {
46  MemOperand src(base, offset);
47  switch (type) {
48  case kWasmI32:
49  assm->lw(dst.gp(), src);
50  break;
51  case kWasmI64:
52  assm->lw(dst.low_gp(),
53  MemOperand(base, offset + liftoff::kLowWordOffset));
54  assm->lw(dst.high_gp(),
55  MemOperand(base, offset + liftoff::kHighWordOffset));
56  break;
57  case kWasmF32:
58  assm->lwc1(dst.fp(), src);
59  break;
60  case kWasmF64:
61  assm->Ldc1(dst.fp(), src);
62  break;
63  default:
64  UNREACHABLE();
65  }
66 }
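// Note: on mips32 a kWasmI64 value lives in a GP register pair, so the helper
// above issues two lw instructions; kLowWordOffset/kHighWordOffset (defined at
// the top of this file) swap with the target endianness so that low_gp()
// always receives the low word of the value.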
67 
68 inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
69  LiftoffRegister src, ValueType type) {
70  MemOperand dst(base, offset);
71  switch (type) {
72  case kWasmI32:
73  assm->Usw(src.gp(), dst);
74  break;
75  case kWasmI64:
76  assm->Usw(src.low_gp(),
77  MemOperand(base, offset + liftoff::kLowWordOffset));
78  assm->Usw(src.high_gp(),
79  MemOperand(base, offset + liftoff::kHighWordOffset));
80  break;
81  case kWasmF32:
82  assm->Uswc1(src.fp(), dst, t8);
83  break;
84  case kWasmF64:
85  assm->Usdc1(src.fp(), dst, t8);
86  break;
87  default:
88  UNREACHABLE();
89  }
90 }
91 
92 inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
93  switch (type) {
94  case kWasmI32:
95  assm->push(reg.gp());
96  break;
97  case kWasmI64:
98  assm->Push(reg.high_gp(), reg.low_gp());
99  break;
100  case kWasmF32:
101  assm->addiu(sp, sp, -sizeof(float));
102  assm->swc1(reg.fp(), MemOperand(sp, 0));
103  break;
104  case kWasmF64:
105  assm->addiu(sp, sp, -sizeof(double));
106  assm->Sdc1(reg.fp(), MemOperand(sp, 0));
107  break;
108  default:
109  UNREACHABLE();
110  }
111 }
112 
113 #if defined(V8_TARGET_BIG_ENDIAN)
114 inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
115  LoadType type, LiftoffRegList pinned) {
116  bool is_float = false;
117  LiftoffRegister tmp = dst;
118  switch (type.value()) {
119  case LoadType::kI64Load8U:
120  case LoadType::kI64Load8S:
121  case LoadType::kI32Load8U:
122  case LoadType::kI32Load8S:
123  // No need to change endianness for byte size.
124  return;
125  case LoadType::kF32Load:
126  is_float = true;
127  tmp = assm->GetUnusedRegister(kGpReg, pinned);
128  assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
129  V8_FALLTHROUGH;
130  case LoadType::kI32Load:
131  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
132  break;
133  case LoadType::kI32Load16S:
134  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
135  break;
136  case LoadType::kI32Load16U:
137  assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
138  break;
139  case LoadType::kF64Load:
140  is_float = true;
141  tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
142  assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
143  V8_FALLTHROUGH;
144  case LoadType::kI64Load:
145  assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
146  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
147  assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
148  break;
149  case LoadType::kI64Load16U:
150  assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.low_gp(), 2);
151  assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
152  break;
153  case LoadType::kI64Load16S:
154  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2);
155  assm->sra(tmp.high_gp(), tmp.low_gp(), 31);
156  break;
157  case LoadType::kI64Load32U:
158  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
159  assm->TurboAssembler::Move(tmp.high_gp(), zero_reg);
160  break;
161  case LoadType::kI64Load32S:
162  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
163  assm->sra(tmp.high_gp(), tmp.low_gp(), 31);
164  break;
165  default:
166  UNREACHABLE();
167  }
168 
169  if (is_float) {
170  switch (type.value()) {
171  case LoadType::kF32Load:
172  assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
173  break;
174  case LoadType::kF64Load:
175  assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
176  break;
177  default:
178  UNREACHABLE();
179  }
180  }
181 }
182 
183 inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
184  StoreType type, LiftoffRegList pinned) {
185  bool is_float = false;
186  LiftoffRegister tmp = src;
187  switch (type.value()) {
188  case StoreType::kI64Store8:
189  case StoreType::kI32Store8:
190  // No need to change endianness for byte size.
191  return;
192  case StoreType::kF32Store:
193  is_float = true;
194  tmp = assm->GetUnusedRegister(kGpReg, pinned);
195  assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
196  V8_FALLTHROUGH;
197  case StoreType::kI32Store:
198  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
199  break;
200  case StoreType::kI32Store16:
201  assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
202  break;
203  case StoreType::kF64Store:
204  is_float = true;
205  tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
206  assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
207  V8_FALLTHROUGH;
208  case StoreType::kI64Store:
209  assm->TurboAssembler::Move(kScratchReg, tmp.low_gp());
210  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4);
211  assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4);
212  break;
213  case StoreType::kI64Store32:
214  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4);
215  break;
216  case StoreType::kI64Store16:
217  assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2);
218  break;
219  default:
220  UNREACHABLE();
221  }
222 
223  if (is_float) {
224  switch (type.value()) {
225  case StoreType::kF32Store:
226  assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
227  break;
228  case StoreType::kF64Store:
229  assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
230  break;
231  default:
232  UNREACHABLE();
233  }
234  }
235 }
236 #endif // V8_TARGET_BIG_ENDIAN
237 
238 } // namespace liftoff
239 
240 int LiftoffAssembler::PrepareStackFrame() {
241  int offset = pc_offset();
242  // When the constant representing the size of the stack frame can't be
243  // represented as a 16-bit immediate, we need three instructions to add it
244  // to sp, so we reserve space for this case.
245  addiu(sp, sp, 0);
246  nop();
247  nop();
248  return offset;
249 }
250 
251 void LiftoffAssembler::PatchPrepareStackFrame(int offset,
252  uint32_t stack_slots) {
253  uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
254  DCHECK_LE(bytes, kMaxInt);
255  // We can't run out of space, just pass anything big enough to not cause the
256  // assembler to try to grow the buffer.
257  constexpr int kAvailableSpace = 256;
258  TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
259  buffer_ + offset, kAvailableSpace,
260  CodeObjectRequired::kNo);
261  // If {bytes} fits in a 16-bit immediate, a single addiu is generated and the
262  // two nops stay untouched. Otherwise, a lui/ori sequence loads the value into
263  // a register and an addu is generated as the third instruction.
264  patching_assembler.Addu(sp, sp, Operand(-bytes));
265 }
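// Note: an illustrative size calculation for the patch above, assuming
// kStackSlotSize is 8: with stack_slots == 10, bytes = 8 + 8 * 10 = 88, which
// fits in a 16-bit immediate, so Addu emits a single addiu and the two nops
// reserved by PrepareStackFrame stay in place. Only frames larger than 32 KiB
// need the three-instruction lui/ori/addu sequence.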
266 
267 void LiftoffAssembler::FinishCode() {}
268 
269 void LiftoffAssembler::AbortCompilation() {}
270 
271 void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
272  RelocInfo::Mode rmode) {
273  switch (value.type()) {
274  case kWasmI32:
275  TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
276  break;
277  case kWasmI64: {
278  DCHECK(RelocInfo::IsNone(rmode));
279  int32_t low_word = value.to_i64();
280  int32_t high_word = value.to_i64() >> 32;
281  TurboAssembler::li(reg.low_gp(), Operand(low_word));
282  TurboAssembler::li(reg.high_gp(), Operand(high_word));
283  break;
284  }
285  case kWasmF32:
286  TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
287  break;
288  case kWasmF64:
289  TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
290  break;
291  default:
292  UNREACHABLE();
293  }
294 }
295 
296 void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
297  int size) {
298  DCHECK_LE(offset, kMaxInt);
299  lw(dst, liftoff::GetInstanceOperand());
300  DCHECK_EQ(4, size);
301  lw(dst, MemOperand(dst, offset));
302 }
303 
304 void LiftoffAssembler::SpillInstance(Register instance) {
305  sw(instance, liftoff::GetInstanceOperand());
306 }
307 
308 void LiftoffAssembler::FillInstanceInto(Register dst) {
309  lw(dst, liftoff::GetInstanceOperand());
310 }
311 
312 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
313  Register offset_reg, uint32_t offset_imm,
314  LoadType type, LiftoffRegList pinned,
315  uint32_t* protected_load_pc, bool is_load_mem) {
316  Register src = no_reg;
317  if (offset_reg != no_reg) {
318  src = GetUnusedRegister(kGpReg, pinned).gp();
319  emit_ptrsize_add(src, src_addr, offset_reg);
320  }
321  MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm)
322  : MemOperand(src_addr, offset_imm);
323 
324  if (protected_load_pc) *protected_load_pc = pc_offset();
325  switch (type.value()) {
326  case LoadType::kI32Load8U:
327  lbu(dst.gp(), src_op);
328  break;
329  case LoadType::kI64Load8U:
330  lbu(dst.low_gp(), src_op);
331  xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
332  break;
333  case LoadType::kI32Load8S:
334  lb(dst.gp(), src_op);
335  break;
336  case LoadType::kI64Load8S:
337  lb(dst.low_gp(), src_op);
338  TurboAssembler::Move(dst.high_gp(), dst.low_gp());
339  sra(dst.high_gp(), dst.high_gp(), 31);
340  break;
341  case LoadType::kI32Load16U:
342  TurboAssembler::Ulhu(dst.gp(), src_op);
343  break;
344  case LoadType::kI64Load16U:
345  TurboAssembler::Ulhu(dst.low_gp(), src_op);
346  xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
347  break;
348  case LoadType::kI32Load16S:
349  TurboAssembler::Ulh(dst.gp(), src_op);
350  break;
351  case LoadType::kI64Load16S:
352  TurboAssembler::Ulh(dst.low_gp(), src_op);
353  TurboAssembler::Move(dst.high_gp(), dst.low_gp());
354  sra(dst.high_gp(), dst.high_gp(), 31);
355  break;
356  case LoadType::kI32Load:
357  TurboAssembler::Ulw(dst.gp(), src_op);
358  break;
359  case LoadType::kI64Load32U:
360  TurboAssembler::Ulw(dst.low_gp(), src_op);
361  xor_(dst.high_gp(), dst.high_gp(), dst.high_gp());
362  break;
363  case LoadType::kI64Load32S:
364  TurboAssembler::Ulw(dst.low_gp(), src_op);
365  TurboAssembler::Move(dst.high_gp(), dst.low_gp());
366  sra(dst.high_gp(), dst.high_gp(), 31);
367  break;
368  case LoadType::kI64Load: {
369  MemOperand src_op =
370  (offset_reg != no_reg)
371  ? MemOperand(src, offset_imm + liftoff::kLowWordOffset)
372  : MemOperand(src_addr, offset_imm + liftoff::kLowWordOffset);
373  MemOperand src_op_upper =
374  (offset_reg != no_reg)
375  ? MemOperand(src, offset_imm + liftoff::kHighWordOffset)
376  : MemOperand(src_addr, offset_imm + liftoff::kHighWordOffset);
377  TurboAssembler::Ulw(dst.low_gp(), src_op);
378  TurboAssembler::Ulw(dst.high_gp(), src_op_upper);
379  break;
380  }
381  case LoadType::kF32Load:
382  TurboAssembler::Ulwc1(dst.fp(), src_op, t8);
383  break;
384  case LoadType::kF64Load:
385  TurboAssembler::Uldc1(dst.fp(), src_op, t8);
386  break;
387  default:
388  UNREACHABLE();
389  }
390 
391 #if defined(V8_TARGET_BIG_ENDIAN)
392  if (is_load_mem) {
393  pinned.set(src_op.rm());
394  liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
395  }
396 #endif
397 }
398 
399 void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
400  uint32_t offset_imm, LiftoffRegister src,
401  StoreType type, LiftoffRegList pinned,
402  uint32_t* protected_store_pc, bool is_store_mem) {
403  Register dst = no_reg;
404  MemOperand dst_op = MemOperand(dst_addr, offset_imm);
405  if (offset_reg != no_reg) {
406  if (is_store_mem) {
407  pinned.set(src);
408  }
409  dst = GetUnusedRegister(kGpReg, pinned).gp();
410  emit_ptrsize_add(dst, dst_addr, offset_reg);
411  dst_op = MemOperand(dst, offset_imm);
412  }
413 
414 #if defined(V8_TARGET_BIG_ENDIAN)
415  if (is_store_mem) {
416  pinned = pinned | LiftoffRegList::ForRegs(dst_op.rm(), src);
417  LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
418  // Save original value.
419  Move(tmp, src, type.value_type());
420 
421  src = tmp;
422  pinned.set(tmp);
423  liftoff::ChangeEndiannessStore(this, src, type, pinned);
424  }
425 #endif
426 
427  if (protected_store_pc) *protected_store_pc = pc_offset();
428  switch (type.value()) {
429  case StoreType::kI64Store8:
430  src = src.low();
431  V8_FALLTHROUGH;
432  case StoreType::kI32Store8:
433  sb(src.gp(), dst_op);
434  break;
435  case StoreType::kI64Store16:
436  src = src.low();
437  V8_FALLTHROUGH;
438  case StoreType::kI32Store16:
439  TurboAssembler::Ush(src.gp(), dst_op, t8);
440  break;
441  case StoreType::kI64Store32:
442  src = src.low();
443  V8_FALLTHROUGH;
444  case StoreType::kI32Store:
445  TurboAssembler::Usw(src.gp(), dst_op);
446  break;
447  case StoreType::kI64Store: {
448  MemOperand dst_op_lower(dst_op.rm(),
449  offset_imm + liftoff::kLowWordOffset);
450  MemOperand dst_op_upper(dst_op.rm(),
451  offset_imm + liftoff::kHighWordOffset);
452  TurboAssembler::Usw(src.low_gp(), dst_op_lower);
453  TurboAssembler::Usw(src.high_gp(), dst_op_upper);
454  break;
455  }
456  case StoreType::kF32Store:
457  TurboAssembler::Uswc1(src.fp(), dst_op, t8);
458  break;
459  case StoreType::kF64Store:
460  TurboAssembler::Usdc1(src.fp(), dst_op, t8);
461  break;
462  default:
463  UNREACHABLE();
464  }
465 }
466 
467 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
468  uint32_t caller_slot_idx,
469  ValueType type) {
470  int32_t offset = kPointerSize * (caller_slot_idx + 1);
471  liftoff::Load(this, dst, fp, offset, type);
472 }
473 
474 void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
475  ValueType type) {
476  DCHECK_NE(dst_index, src_index);
477  LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
478  Fill(reg, src_index, type);
479  Spill(dst_index, reg, type);
480 }
481 
482 void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
483  DCHECK_NE(dst, src);
484  TurboAssembler::mov(dst, src);
485 }
486 
487 void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
488  ValueType type) {
489  DCHECK_NE(dst, src);
490  TurboAssembler::Move(dst, src);
491 }
492 
493 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
494  ValueType type) {
495  RecordUsedSpillSlot(index);
496  MemOperand dst = liftoff::GetStackSlot(index);
497  switch (type) {
498  case kWasmI32:
499  sw(reg.gp(), dst);
500  break;
501  case kWasmI64:
502  sw(reg.low_gp(), dst);
503  sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
504  break;
505  case kWasmF32:
506  swc1(reg.fp(), dst);
507  break;
508  case kWasmF64:
509  TurboAssembler::Sdc1(reg.fp(), dst);
510  break;
511  default:
512  UNREACHABLE();
513  }
514 }
515 
516 void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
517  RecordUsedSpillSlot(index);
518  MemOperand dst = liftoff::GetStackSlot(index);
519  switch (value.type()) {
520  case kWasmI32: {
521  LiftoffRegister tmp = GetUnusedRegister(kGpReg);
522  TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
523  sw(tmp.gp(), dst);
524  break;
525  }
526  case kWasmI64: {
527  LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
528 
529  int32_t low_word = value.to_i64();
530  int32_t high_word = value.to_i64() >> 32;
531  TurboAssembler::li(tmp.low_gp(), Operand(low_word));
532  TurboAssembler::li(tmp.high_gp(), Operand(high_word));
533 
534  sw(tmp.low_gp(), dst);
535  sw(tmp.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
536  break;
537  }
538  default:
539  // kWasmF32 and kWasmF64 are unreachable, since those
540  // constants are not tracked.
541  UNREACHABLE();
542  }
543 }
544 
545 void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
546  ValueType type) {
547  MemOperand src = liftoff::GetStackSlot(index);
548  switch (type) {
549  case kWasmI32:
550  lw(reg.gp(), src);
551  break;
552  case kWasmI64:
553  lw(reg.low_gp(), src);
554  lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
555  break;
556  case kWasmF32:
557  lwc1(reg.fp(), src);
558  break;
559  case kWasmF64:
560  TurboAssembler::Ldc1(reg.fp(), src);
561  break;
562  default:
563  UNREACHABLE();
564  }
565 }
566 
567 void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
568  lw(reg, liftoff::GetHalfStackSlot(half_index));
569 }
570 
571 void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
572  TurboAssembler::Mul(dst, lhs, rhs);
573 }
574 
575 void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
576  Label* trap_div_by_zero,
577  Label* trap_div_unrepresentable) {
578  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
579 
580  // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
581  TurboAssembler::li(kScratchReg, 1);
582  TurboAssembler::li(kScratchReg2, 1);
583  TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
584  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
585  addu(kScratchReg, kScratchReg, kScratchReg2);
586  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
587  Operand(zero_reg));
588 
589  TurboAssembler::Div(dst, lhs, rhs);
590 }
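// Note on the unrepresentable-result check above: LoadZeroOnCondition clears
// kScratchReg only if lhs == kMinInt and clears kScratchReg2 only if
// rhs == -1, so their sum is zero exactly when both hold, i.e. for
// kMinInt / -1, whose quotient does not fit in an i32.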
591 
592 void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
593  Label* trap_div_by_zero) {
594  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
595  TurboAssembler::Divu(dst, lhs, rhs);
596 }
597 
598 void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
599  Label* trap_div_by_zero) {
600  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
601  TurboAssembler::Mod(dst, lhs, rhs);
602 }
603 
604 void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
605  Label* trap_div_by_zero) {
606  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
607  TurboAssembler::Modu(dst, lhs, rhs);
608 }
609 
610 #define I32_BINOP(name, instruction) \
611  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
612  Register rhs) { \
613  instruction(dst, lhs, rhs); \
614  }
615 
616 // clang-format off
617 I32_BINOP(add, addu)
618 I32_BINOP(sub, subu)
619 I32_BINOP(and, and_)
620 I32_BINOP(or, or_)
621 I32_BINOP(xor, xor_)
622 // clang-format on
623 
624 #undef I32_BINOP
625 
626 bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
627  TurboAssembler::Clz(dst, src);
628  return true;
629 }
630 
631 bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
632  TurboAssembler::Ctz(dst, src);
633  return true;
634 }
635 
636 bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
637  TurboAssembler::Popcnt(dst, src);
638  return true;
639 }
640 
641 #define I32_SHIFTOP(name, instruction) \
642  void LiftoffAssembler::emit_i32_##name( \
643  Register dst, Register src, Register amount, LiftoffRegList pinned) { \
644  instruction(dst, src, amount); \
645  }
646 #define I32_SHIFTOP_I(name, instruction) \
647  I32_SHIFTOP(name, instruction##v) \
648  void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
649  int amount) { \
650  DCHECK(is_uint5(amount)); \
651  instruction(dst, src, amount); \
652  }
653 
654 I32_SHIFTOP(shl, sllv)
655 I32_SHIFTOP(sar, srav)
656 I32_SHIFTOP_I(shr, srl)
657 
658 #undef I32_SHIFTOP
659 #undef I32_SHIFTOP_I
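// Note: I32_SHIFTOP_I(shr, srl) defines both variants of emit_i32_shr: the
// register-amount form via srlv and the immediate form via srl, whereas shl
// and sar only get the register-amount form here.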
660 
661 void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
662  LiftoffRegister rhs) {
663  TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
664  lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
665  kScratchReg, kScratchReg2);
666 }
667 
668 bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
669  LiftoffRegister rhs,
670  Label* trap_div_by_zero,
671  Label* trap_div_unrepresentable) {
672  return false;
673 }
674 
675 bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
676  LiftoffRegister rhs,
677  Label* trap_div_by_zero) {
678  return false;
679 }
680 
681 bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
682  LiftoffRegister rhs,
683  Label* trap_div_by_zero) {
684  return false;
685 }
686 
687 bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
688  LiftoffRegister rhs,
689  Label* trap_div_by_zero) {
690  return false;
691 }
692 
693 void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
694  LiftoffRegister rhs) {
695  TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
696  lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
697  kScratchReg, kScratchReg2);
698 }
699 
700 void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
701  LiftoffRegister rhs) {
702  TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
703  lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
704  kScratchReg, kScratchReg2);
705 }
706 
707 namespace liftoff {
708 
709 inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) {
710  DCHECK(pair.is_pair());
711  return pair.low_gp() == reg || pair.high_gp() == reg;
712 }
713 
714 inline void Emit64BitShiftOperation(
715  LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
716  Register amount,
717  void (TurboAssembler::*emit_shift)(Register, Register, Register, Register,
718  Register, Register, Register),
719  LiftoffRegList pinned) {
720  Label move, done;
721  pinned.set(dst);
722  pinned.set(src);
723  pinned.set(amount);
724 
725  // If any of the destination registers is in use, get another, unused pair
726  // so that we don't overwrite input registers while shifting. Do this
727  // before any branch so that the cache state will be correct for all
728  // conditions.
729  LiftoffRegister tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
730 
731  // If shift amount is 0, don't do the shifting.
732  assm->TurboAssembler::Branch(&move, eq, amount, Operand(zero_reg));
733 
734  if (liftoff::IsRegInRegPair(dst, amount) || dst.overlaps(src)) {
735  // Do the actual shift.
736  (assm->*emit_shift)(tmp.low_gp(), tmp.high_gp(), src.low_gp(),
737  src.high_gp(), amount, kScratchReg, kScratchReg2);
738 
739  // Place result in destination register.
740  assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp());
741  assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp());
742  } else {
743  (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
744  src.high_gp(), amount, kScratchReg, kScratchReg2);
745  }
746  assm->TurboAssembler::Branch(&done);
747 
748  // If shift amount is 0, move src to dst.
749  assm->bind(&move);
750  assm->TurboAssembler::Move(dst.high_gp(), src.high_gp());
751  assm->TurboAssembler::Move(dst.low_gp(), src.low_gp());
752 
753  assm->bind(&done);
754 }
755 } // namespace liftoff
756 
757 void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
758  Register amount, LiftoffRegList pinned) {
759  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
760  &TurboAssembler::ShlPair, pinned);
761 }
762 
763 void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
764  Register amount, LiftoffRegList pinned) {
765  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
766  &TurboAssembler::SarPair, pinned);
767 }
768 
769 void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
770  Register amount, LiftoffRegList pinned) {
771  liftoff::Emit64BitShiftOperation(this, dst, src, amount,
772  &TurboAssembler::ShrPair, pinned);
773 }
774 
775 void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
776  int amount) {
777  DCHECK(is_uint6(amount));
778  ShrPair(dst.high_gp(), dst.low_gp(), src.high_gp(), src.low_gp(), amount,
779  kScratchReg);
780 }
781 
782 void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) {
783  // This is a nop on mips32.
784 }
785 
786 void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
787  TurboAssembler::Neg_s(dst, src);
788 }
789 
790 void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
791  TurboAssembler::Neg_d(dst, src);
792 }
793 
794 void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
795  DoubleRegister rhs) {
796  Label ool, done;
797  TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
798  Branch(&done);
799 
800  bind(&ool);
801  TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
802  bind(&done);
803 }
804 
805 void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
806  DoubleRegister rhs) {
807  Label ool, done;
808  TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
809  Branch(&done);
810 
811  bind(&ool);
812  TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
813  bind(&done);
814 }
815 
816 void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
817  DoubleRegister rhs) {
818  BAILOUT("f32_copysign");
819 }
820 
821 void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
822  DoubleRegister rhs) {
823  Label ool, done;
824  TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
825  Branch(&done);
826 
827  bind(&ool);
828  TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
829  bind(&done);
830 }
831 
832 void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
833  DoubleRegister rhs) {
834  Label ool, done;
835  TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
836  Branch(&done);
837 
838  bind(&ool);
839  TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
840  bind(&done);
841 }
842 
843 void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
844  DoubleRegister rhs) {
845  BAILOUT("f64_copysign");
846 }
847 
848 #define FP_BINOP(name, instruction) \
849  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
850  DoubleRegister rhs) { \
851  instruction(dst, lhs, rhs); \
852  }
853 #define FP_UNOP(name, instruction) \
854  void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
855  instruction(dst, src); \
856  }
857 #define FP_UNOP_RETURN_TRUE(name, instruction) \
858  bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
859  instruction(dst, src); \
860  return true; \
861  }
862 
863 FP_BINOP(f32_add, add_s)
864 FP_BINOP(f32_sub, sub_s)
865 FP_BINOP(f32_mul, mul_s)
866 FP_BINOP(f32_div, div_s)
867 FP_UNOP(f32_abs, abs_s)
868 FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
869 FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
870 FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
871 FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
872 FP_UNOP(f32_sqrt, sqrt_s)
873 FP_BINOP(f64_add, add_d)
874 FP_BINOP(f64_sub, sub_d)
875 FP_BINOP(f64_mul, mul_d)
876 FP_BINOP(f64_div, div_d)
877 FP_UNOP(f64_abs, abs_d)
878 FP_UNOP(f64_sqrt, sqrt_d)
879 
880 #undef FP_BINOP
881 #undef FP_UNOP
882 
883 bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
884  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
885  IsFp64Mode()) {
886  Ceil_d_d(dst, src);
887  return true;
888  }
889  return false;
890 }
891 
892 bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
893  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
894  IsFp64Mode()) {
895  Floor_d_d(dst, src);
896  return true;
897  }
898  return false;
899 }
900 
901 bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
902  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
903  IsFp64Mode()) {
904  Trunc_d_d(dst, src);
905  return true;
906  }
907  return false;
908 }
909 
910 bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
911  DoubleRegister src) {
912  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
913  IsFp64Mode()) {
914  Round_d_d(dst, src);
915  return true;
916  }
917  return false;
918 }
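// Note: the four f64 rounding helpers above only succeed on MIPS32r2/r6 with
// a 64-bit FPU (FP64 mode), where Ceil_d_d, Floor_d_d, Trunc_d_d and Round_d_d
// are usable; returning false tells the platform-independent Liftoff code to
// use a fallback implementation instead.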
919 
920 bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
921  LiftoffRegister dst,
922  LiftoffRegister src, Label* trap) {
923  switch (opcode) {
924  case kExprI32ConvertI64:
925  TurboAssembler::Move(dst.gp(), src.low_gp());
926  return true;
927  case kExprI32SConvertF32: {
928  LiftoffRegister rounded =
929  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
930  LiftoffRegister converted_back =
931  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
932 
933  // Real conversion.
934  TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
935  trunc_w_s(kScratchDoubleReg, rounded.fp());
936  mfc1(dst.gp(), kScratchDoubleReg);
937  // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
938  // because INT32_MIN allows easier out-of-bounds detection.
939  TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
940  TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
941  TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
942 
943  // Checking if trap.
944  mtc1(dst.gp(), kScratchDoubleReg);
945  cvt_s_w(converted_back.fp(), kScratchDoubleReg);
946  TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
947  TurboAssembler::BranchFalseF(trap);
948  return true;
949  }
950  case kExprI32UConvertF32: {
951  LiftoffRegister rounded =
952  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
953  LiftoffRegister converted_back =
954  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
955 
956  // Real conversion.
957  TurboAssembler::Trunc_s_s(rounded.fp(), src.fp());
958  TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
959  // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
960  // because 0 allows easier out-of-bounds detection.
961  TurboAssembler::Addu(kScratchReg, dst.gp(), 1);
962  TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
963 
964  // Checking if trap.
965  TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp(),
966  kScratchDoubleReg);
967  cvt_s_d(converted_back.fp(), converted_back.fp());
968  TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp());
969  TurboAssembler::BranchFalseF(trap);
970  return true;
971  }
972  case kExprI32SConvertF64: {
973  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
974  IsFp64Mode()) {
975  LiftoffRegister rounded =
976  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
977  LiftoffRegister converted_back =
978  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
979 
980  // Real conversion.
981  TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
982  TurboAssembler::Trunc_w_d(kScratchDoubleReg, rounded.fp());
983  mfc1(dst.gp(), kScratchDoubleReg);
984 
985  // Checking if trap.
986  cvt_d_w(converted_back.fp(), kScratchDoubleReg);
987  TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
988  TurboAssembler::BranchFalseF(trap);
989  return true;
990  } else {
991  BAILOUT("emit_type_conversion kExprI32SConvertF64");
992  return true;
993  }
994  }
995  case kExprI32UConvertF64: {
996  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
997  IsFp64Mode()) {
998  LiftoffRegister rounded =
999  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
1000  LiftoffRegister converted_back =
1001  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
1002 
1003  // Real conversion.
1004  TurboAssembler::Trunc_d_d(rounded.fp(), src.fp());
1005  TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
1006 
1007  // Checking if trap.
1008  TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp(),
1009  kScratchDoubleReg);
1010  TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
1011  TurboAssembler::BranchFalseF(trap);
1012  return true;
1013  } else {
1014  BAILOUT("emit_type_conversion kExprI32UConvertF64");
1015  return true;
1016  }
1017  }
1018  case kExprI32ReinterpretF32:
1019  mfc1(dst.gp(), src.fp());
1020  return true;
1021  case kExprI64SConvertI32:
1022  TurboAssembler::Move(dst.low_gp(), src.gp());
1023  TurboAssembler::Move(dst.high_gp(), src.gp());
1024  sra(dst.high_gp(), dst.high_gp(), 31);
1025  return true;
1026  case kExprI64UConvertI32:
1027  TurboAssembler::Move(dst.low_gp(), src.gp());
1028  TurboAssembler::Move(dst.high_gp(), zero_reg);
1029  return true;
1030  case kExprI64ReinterpretF64:
1031  mfc1(dst.low_gp(), src.fp());
1032  TurboAssembler::Mfhc1(dst.high_gp(), src.fp());
1033  return true;
1034  case kExprF32SConvertI32: {
1035  LiftoffRegister scratch =
1036  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
1037  mtc1(src.gp(), scratch.fp());
1038  cvt_s_w(dst.fp(), scratch.fp());
1039  return true;
1040  }
1041  case kExprF32UConvertI32: {
1042  LiftoffRegister scratch =
1043  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
1044  TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
1045  cvt_s_d(dst.fp(), dst.fp());
1046  return true;
1047  }
1048  case kExprF32ConvertF64:
1049  cvt_s_d(dst.fp(), src.fp());
1050  return true;
1051  case kExprF32ReinterpretI32:
1052  TurboAssembler::FmoveLow(dst.fp(), src.gp());
1053  return true;
1054  case kExprF64SConvertI32: {
1055  LiftoffRegister scratch =
1056  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
1057  mtc1(src.gp(), scratch.fp());
1058  cvt_d_w(dst.fp(), scratch.fp());
1059  return true;
1060  }
1061  case kExprF64UConvertI32: {
1062  LiftoffRegister scratch =
1063  GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
1064  TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
1065  return true;
1066  }
1067  case kExprF64ConvertF32:
1068  cvt_d_s(dst.fp(), src.fp());
1069  return true;
1070  case kExprF64ReinterpretI64:
1071  mtc1(src.low_gp(), dst.fp());
1072  TurboAssembler::Mthc1(src.high_gp(), dst.fp());
1073  return true;
1074  default:
1075  return false;
1076  }
1077 }
1078 
1079 void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
1080  BAILOUT("emit_i32_signextend_i8");
1081 }
1082 
1083 void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
1084  BAILOUT("emit_i32_signextend_i16");
1085 }
1086 
1087 void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
1088  LiftoffRegister src) {
1089  BAILOUT("emit_i64_signextend_i8");
1090 }
1091 
1092 void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
1093  LiftoffRegister src) {
1094  BAILOUT("emit_i64_signextend_i16");
1095 }
1096 
1097 void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
1098  LiftoffRegister src) {
1099  BAILOUT("emit_i64_signextend_i32");
1100 }
1101 
1102 void LiftoffAssembler::emit_jump(Label* label) {
1103  TurboAssembler::Branch(label);
1104 }
1105 
1106 void LiftoffAssembler::emit_jump(Register target) {
1107  TurboAssembler::Jump(target);
1108 }
1109 
1110 void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
1111  ValueType type, Register lhs,
1112  Register rhs) {
1113  if (rhs != no_reg) {
1114  TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
1115  } else {
1116  TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
1117  }
1118 }
1119 
1120 void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
1121  sltiu(dst, src, 1);
1122 }
1123 
1124 void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
1125  Register lhs, Register rhs) {
1126  Register tmp = dst;
1127  if (dst == lhs || dst == rhs) {
1128  tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
1129  }
1130  // Write 1 as result.
1131  TurboAssembler::li(tmp, 1);
1132 
1133  // If the negated condition is true, write 0 as result.
1134  Condition neg_cond = NegateCondition(cond);
1135  TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
1136 
1137  // If tmp != dst, result will be moved.
1138  TurboAssembler::Move(dst, tmp);
1139 }
1140 
1141 void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
1142  Register tmp =
1143  GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(src, dst)).gp();
1144  sltiu(tmp, src.low_gp(), 1);
1145  sltiu(dst, src.high_gp(), 1);
1146  and_(dst, dst, tmp);
1147 }
1148 
1149 namespace liftoff {
1150 inline Condition cond_make_unsigned(Condition cond) {
1151  switch (cond) {
1152  case kSignedLessThan:
1153  return kUnsignedLessThan;
1154  case kSignedLessEqual:
1155  return kUnsignedLessEqual;
1156  case kSignedGreaterThan:
1157  return kUnsignedGreaterThan;
1158  case kSignedGreaterEqual:
1159  return kUnsignedGreaterEqual;
1160  default:
1161  return cond;
1162  }
1163 }
1164 } // namespace liftoff
1165 
1166 void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
1167  LiftoffRegister lhs,
1168  LiftoffRegister rhs) {
1169  Label low, cont;
1170 
1171  // For signed i64 comparisons, we still need to use unsigned comparison for
1172  // the low word (the only bit carrying signedness information is the MSB in
1173  // the high word).
1174  Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
1175 
1176  Register tmp = dst;
1177  if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
1178  tmp =
1179  GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, lhs, rhs)).gp();
1180  }
1181 
1182  // Write 1 initially in tmp register.
1183  TurboAssembler::li(tmp, 1);
1184 
1185  // If high words are equal, then compare low words, else compare high.
1186  Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));
1187 
1188  TurboAssembler::LoadZeroOnCondition(
1189  tmp, lhs.high_gp(), Operand(rhs.high_gp()), NegateCondition(cond));
1190  Branch(&cont);
1191 
1192  bind(&low);
1193  TurboAssembler::LoadZeroOnCondition(tmp, lhs.low_gp(), Operand(rhs.low_gp()),
1194  NegateCondition(unsigned_cond));
1195 
1196  bind(&cont);
1197  // Move result to dst register if needed.
1198  TurboAssembler::Move(dst, tmp);
1199 }
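// Note: the comparison above is lexicographic: the high words decide the
// result using the original (signed or unsigned) condition unless they are
// equal, in which case the low words are compared with the unsigned variant
// of the condition (see cond_make_unsigned above).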
1200 
1201 namespace liftoff {
1202 
1203 inline FPUCondition ConditionToConditionCmpFPU(bool& predicate,
1204  Condition condition) {
1205  switch (condition) {
1206  case kEqual:
1207  predicate = true;
1208  return EQ;
1209  case kUnequal:
1210  predicate = false;
1211  return EQ;
1212  case kUnsignedLessThan:
1213  predicate = true;
1214  return OLT;
1215  case kUnsignedGreaterEqual:
1216  predicate = false;
1217  return OLT;
1218  case kUnsignedLessEqual:
1219  predicate = true;
1220  return OLE;
1221  case kUnsignedGreaterThan:
1222  predicate = false;
1223  return OLE;
1224  default:
1225  predicate = true;
1226  break;
1227  }
1228  UNREACHABLE();
1229 }
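// Note: the mapping above encodes the remaining conditions by negation, e.g.
// kUnsignedGreaterEqual becomes NOT(OLT) and kUnequal becomes NOT(EQ); the
// returned {predicate} tells the caller whether to test the FPU condition
// flag directly or inverted (see emit_f32_set_cond / emit_f64_set_cond below).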
1230 
1231 } // namespace liftoff
1232 
1233 void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
1234  DoubleRegister lhs,
1235  DoubleRegister rhs) {
1236  Label not_nan, cont;
1237  TurboAssembler::CompareIsNanF32(lhs, rhs);
1238  TurboAssembler::BranchFalseF(&not_nan);
1239  // If one of the operands is NaN, return 1 for f32.ne, else 0.
1240  if (cond == ne) {
1241  TurboAssembler::li(dst, 1);
1242  } else {
1243  TurboAssembler::Move(dst, zero_reg);
1244  }
1245  TurboAssembler::Branch(&cont);
1246 
1247  bind(&not_nan);
1248 
1249  TurboAssembler::li(dst, 1);
1250  bool predicate;
1251  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
1252  TurboAssembler::CompareF32(fcond, lhs, rhs);
1253  if (predicate) {
1254  TurboAssembler::LoadZeroIfNotFPUCondition(dst);
1255  } else {
1256  TurboAssembler::LoadZeroIfFPUCondition(dst);
1257  }
1258 
1259  bind(&cont);
1260 }
1261 
1262 void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
1263  DoubleRegister lhs,
1264  DoubleRegister rhs) {
1265  Label not_nan, cont;
1266  TurboAssembler::CompareIsNanF64(lhs, rhs);
1267  TurboAssembler::BranchFalseF(&not_nan);
1268  // If one of the operands is NaN, return 1 for f64.ne, else 0.
1269  if (cond == ne) {
1270  TurboAssembler::li(dst, 1);
1271  } else {
1272  TurboAssembler::Move(dst, zero_reg);
1273  }
1274  TurboAssembler::Branch(&cont);
1275 
1276  bind(&not_nan);
1277 
1278  TurboAssembler::li(dst, 1);
1279  bool predicate;
1280  FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(predicate, cond);
1281  TurboAssembler::CompareF64(fcond, lhs, rhs);
1282  if (predicate) {
1283  TurboAssembler::LoadZeroIfNotFPUCondition(dst);
1284  } else {
1285  TurboAssembler::LoadZeroIfFPUCondition(dst);
1286  }
1287 
1288  bind(&cont);
1289 }
1290 
1291 void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
1292  TurboAssembler::Ulw(limit_address, MemOperand(limit_address));
1293  TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
1294 }
1295 
1296 void LiftoffAssembler::CallTrapCallbackForTesting() {
1297  PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
1298  CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
1299 }
1300 
1301 void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
1302  if (emit_debug_code()) Abort(reason);
1303 }
1304 
1305 void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
1306  LiftoffRegList gp_regs = regs & kGpCacheRegList;
1307  unsigned num_gp_regs = gp_regs.GetNumRegsSet();
1308  if (num_gp_regs) {
1309  unsigned offset = num_gp_regs * kPointerSize;
1310  addiu(sp, sp, -offset);
1311  while (!gp_regs.is_empty()) {
1312  LiftoffRegister reg = gp_regs.GetFirstRegSet();
1313  offset -= kPointerSize;
1314  sw(reg.gp(), MemOperand(sp, offset));
1315  gp_regs.clear(reg);
1316  }
1317  DCHECK_EQ(offset, 0);
1318  }
1319  LiftoffRegList fp_regs = regs & kFpCacheRegList;
1320  unsigned num_fp_regs = fp_regs.GetNumRegsSet();
1321  if (num_fp_regs) {
1322  addiu(sp, sp, -(num_fp_regs * kStackSlotSize));
1323  unsigned offset = 0;
1324  while (!fp_regs.is_empty()) {
1325  LiftoffRegister reg = fp_regs.GetFirstRegSet();
1326  TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
1327  fp_regs.clear(reg);
1328  offset += sizeof(double);
1329  }
1330  DCHECK_EQ(offset, num_fp_regs * sizeof(double));
1331  }
1332 }
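// Note: the GP loop above stores registers from the highest offset downwards
// (iterating GetFirstRegSet), so the register with the highest code ends up
// at MemOperand(sp, 0); PopRegisters below mirrors this by iterating
// GetLastRegSet upwards from offset 0.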
1333 
1334 void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
1335  LiftoffRegList fp_regs = regs & kFpCacheRegList;
1336  unsigned fp_offset = 0;
1337  while (!fp_regs.is_empty()) {
1338  LiftoffRegister reg = fp_regs.GetFirstRegSet();
1339  TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
1340  fp_regs.clear(reg);
1341  fp_offset += sizeof(double);
1342  }
1343  if (fp_offset) addiu(sp, sp, fp_offset);
1344  LiftoffRegList gp_regs = regs & kGpCacheRegList;
1345  unsigned gp_offset = 0;
1346  while (!gp_regs.is_empty()) {
1347  LiftoffRegister reg = gp_regs.GetLastRegSet();
1348  lw(reg.gp(), MemOperand(sp, gp_offset));
1349  gp_regs.clear(reg);
1350  gp_offset += kPointerSize;
1351  }
1352  addiu(sp, sp, gp_offset);
1353 }
1354 
1355 void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
1356  DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
1357  TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
1358 }
1359 
1360 void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
1361  const LiftoffRegister* args,
1362  const LiftoffRegister* rets,
1363  ValueType out_argument_type, int stack_bytes,
1364  ExternalReference ext_ref) {
1365  addiu(sp, sp, -stack_bytes);
1366 
1367  int arg_bytes = 0;
1368  for (ValueType param_type : sig->parameters()) {
1369  liftoff::Store(this, sp, arg_bytes, *args++, param_type);
1370  arg_bytes += ValueTypes::MemSize(param_type);
1371  }
1372  DCHECK_LE(arg_bytes, stack_bytes);
1373 
1374  // Pass a pointer to the buffer with the arguments to the C function.
1375  // On mips, the first argument is passed in {a0}.
1376  constexpr Register kFirstArgReg = a0;
1377  mov(kFirstArgReg, sp);
1378 
1379  // Now call the C function.
1380  constexpr int kNumCCallArgs = 1;
1381  PrepareCallCFunction(kNumCCallArgs, kScratchReg);
1382  CallCFunction(ext_ref, kNumCCallArgs);
1383 
1384  // Move return value to the right register.
1385  const LiftoffRegister* next_result_reg = rets;
1386  if (sig->return_count() > 0) {
1387  DCHECK_EQ(1, sig->return_count());
1388  constexpr Register kReturnReg = v0;
1389  if (kReturnReg != next_result_reg->gp()) {
1390  Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
1391  }
1392  ++next_result_reg;
1393  }
1394 
1395  // Load potential output value from the buffer on the stack.
1396  if (out_argument_type != kWasmStmt) {
1397  liftoff::Load(this, *next_result_reg, sp, 0, out_argument_type);
1398  }
1399 
1400  addiu(sp, sp, stack_bytes);
1401 }
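// Note on the convention used by CallC above: all wasm arguments are first
// spilled to a stack buffer, a0 receives a pointer to that buffer (the single
// C argument), the C result comes back in v0, and an optional out-argument is
// read back from the start of the buffer afterwards.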
1402 
1403 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
1404  Call(addr, RelocInfo::WASM_CALL);
1405 }
1406 
1407 void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
1408  compiler::CallDescriptor* call_descriptor,
1409  Register target) {
1410  if (target == no_reg) {
1411  pop(kScratchReg);
1412  Call(kScratchReg);
1413  } else {
1414  Call(target);
1415  }
1416 }
1417 
1418 void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
1419  // A direct call to a wasm runtime stub defined in this module.
1420  // Just encode the stub index. This will be patched at relocation.
1421  Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
1422 }
1423 
1424 void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
1425  addiu(sp, sp, -size);
1426  TurboAssembler::Move(addr, sp);
1427 }
1428 
1429 void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
1430  addiu(sp, sp, size);
1431 }
1432 
1433 void LiftoffStackSlots::Construct() {
1434  for (auto& slot : slots_) {
1435  const LiftoffAssembler::VarState& src = slot.src_;
1436  switch (src.loc()) {
1437  case LiftoffAssembler::VarState::kStack: {
1438  if (src.type() == kWasmF64) {
1439  DCHECK_EQ(kLowWord, slot.half_);
1440  asm_->lw(kScratchReg,
1441  liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
1442  asm_->push(kScratchReg);
1443  }
1444  asm_->lw(kScratchReg,
1445  liftoff::GetHalfStackSlot(2 * slot.src_index_ +
1446  (slot.half_ == kLowWord ? 0 : 1)));
1447  asm_->push(kScratchReg);
1448  break;
1449  }
1450  case LiftoffAssembler::VarState::kRegister:
1451  if (src.type() == kWasmI64) {
1452  liftoff::push(
1453  asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
1454  kWasmI32);
1455  } else {
1456  liftoff::push(asm_, src.reg(), src.type());
1457  }
1458  break;
1459  case LiftoffAssembler::VarState::KIntConst: {
1460  // The high word is the sign extension of the low word.
1461  asm_->li(kScratchReg,
1462  Operand(slot.half_ == kLowWord ? src.i32_const()
1463  : src.i32_const() >> 31));
1464  asm_->push(kScratchReg);
1465  break;
1466  }
1467  }
1468  }
1469 }
1470 
1471 } // namespace wasm
1472 } // namespace internal
1473 } // namespace v8
1474 
1475 #undef BAILOUT
1476 
1477 #endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_