// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// code-generator-mips.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/assembler-inl.h"
6 #include "src/callable.h"
7 #include "src/compiler/backend/code-generator-impl.h"
8 #include "src/compiler/backend/code-generator.h"
9 #include "src/compiler/backend/gap-resolver.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/osr.h"
12 #include "src/heap/heap-inl.h" // crbug.com/v8/8499
13 #include "src/macro-assembler.h"
14 #include "src/optimized-compilation-info.h"
15 #include "src/wasm/wasm-code-manager.h"
16 
17 namespace v8 {
18 namespace internal {
19 namespace compiler {
20 
21 #define __ tasm()->
22 
23 // TODO(plind): consider renaming these macros.
24 #define TRACE_MSG(msg) \
25  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
26  __LINE__)
27 
28 #define TRACE_UNIMPL() \
29  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
30  __LINE__)
31 
32 // Adds Mips-specific methods to convert InstructionOperands.
34  public:
36  : InstructionOperandConverter(gen, instr) {}
37 
38  FloatRegister OutputSingleRegister(size_t index = 0) {
39  return ToSingleRegister(instr_->OutputAt(index));
40  }
41 
42  FloatRegister InputSingleRegister(size_t index) {
43  return ToSingleRegister(instr_->InputAt(index));
44  }
45 
46  FloatRegister ToSingleRegister(InstructionOperand* op) {
47  // Single (Float) and Double register namespace is same on MIPS,
48  // both are typedefs of FPURegister.
49  return ToDoubleRegister(op);
50  }
51 
52  Register InputOrZeroRegister(size_t index) {
53  if (instr_->InputAt(index)->IsImmediate()) {
54  DCHECK_EQ(0, InputInt32(index));
55  return zero_reg;
56  }
57  return InputRegister(index);
58  }
59 
60  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
61  if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
62 
63  return InputDoubleRegister(index);
64  }
65 
66  DoubleRegister InputOrZeroSingleRegister(size_t index) {
67  if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
68 
69  return InputSingleRegister(index);
70  }
71 
72  Operand InputImmediate(size_t index) {
73  Constant constant = ToConstant(instr_->InputAt(index));
74  switch (constant.type()) {
75  case Constant::kInt32:
76  return Operand(constant.ToInt32());
77  case Constant::kFloat32:
78  return Operand::EmbeddedNumber(constant.ToFloat32());
79  case Constant::kFloat64:
80  return Operand::EmbeddedNumber(constant.ToFloat64().value());
81  case Constant::kInt64:
82  case Constant::kExternalReference:
83  case Constant::kHeapObject:
84  // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
85  // maybe not done on arm due to const pool ??
86  break;
87  case Constant::kDelayedStringConstant:
88  return Operand::EmbeddedStringConstant(
89  constant.ToDelayedStringConstant());
90  case Constant::kRpoNumber:
91  UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
92  break;
93  }
94  UNREACHABLE();
95  }
96 
97  Operand InputOperand(size_t index) {
98  InstructionOperand* op = instr_->InputAt(index);
99  if (op->IsRegister()) {
100  return Operand(ToRegister(op));
101  }
102  return InputImmediate(index);
103  }
104 
105  MemOperand MemoryOperand(size_t* first_index) {
106  const size_t index = *first_index;
107  switch (AddressingModeField::decode(instr_->opcode())) {
108  case kMode_None:
109  break;
110  case kMode_MRI:
111  *first_index += 2;
112  return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
113  case kMode_MRR:
114  // TODO(plind): r6 address mode, to be implemented ...
115  UNREACHABLE();
116  }
117  UNREACHABLE();
118  }
119 
120  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
121 
122  MemOperand ToMemOperand(InstructionOperand* op) const {
123  DCHECK_NOT_NULL(op);
124  DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
125  return SlotToMemOperand(AllocatedOperand::cast(op)->index());
126  }
127 
128  MemOperand SlotToMemOperand(int slot) const {
129  FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
130  return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
131  }
132 };
133 
134 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
135  return instr->InputAt(index)->IsRegister();
136 }
137 
138 namespace {
139 
// Out-of-line code for stores that may require a GC write barrier: jumped to
// when the fast-path page-flag check in the main instruction stream fails,
// it filters out cases that need no barrier and otherwise calls the
// RecordWrite stub.
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        // ra must be preserved manually when no frame was built.
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  void Generate() final {
    // Smis are not heap pointers, so no barrier is needed for them.
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    // Skip the stub call if the page holding |value_| is not interested in
    // incoming pointers.
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    // scratch1_ = address of the slot that was written (object + index).
    __ Addu(scratch1_, object_, index_);
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }

    if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
    } else {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode);
    }
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  StubCallMode const stub_mode_;
  bool must_save_lr_;
  Zone* zone_;
};
200 
// Stamps out a small OutOfLineCode subclass whose Generate() emits a single
// three-operand TurboAssembler macro (tasm_ool_name) on registers of type T.
// Used below for the slow paths of float32/float64 min/max.
#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
                                                                     \
    void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); }  \
                                                                     \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  }

CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);

#undef CREATE_OOL_CLASS
221 
222 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
223  switch (condition) {
224  case kEqual:
225  return eq;
226  case kNotEqual:
227  return ne;
228  case kSignedLessThan:
229  return lt;
230  case kSignedGreaterThanOrEqual:
231  return ge;
232  case kSignedLessThanOrEqual:
233  return le;
234  case kSignedGreaterThan:
235  return gt;
236  case kUnsignedLessThan:
237  return lo;
238  case kUnsignedGreaterThanOrEqual:
239  return hs;
240  case kUnsignedLessThanOrEqual:
241  return ls;
242  case kUnsignedGreaterThan:
243  return hi;
244  case kUnorderedEqual:
245  case kUnorderedNotEqual:
246  break;
247  default:
248  break;
249  }
250  UNREACHABLE();
251 }
252 
253 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
254  switch (condition) {
255  case kNotEqual:
256  return ne;
257  case kEqual:
258  return eq;
259  default:
260  break;
261  }
262  UNREACHABLE();
263 }
264 
// Maps a FlagsCondition onto a MIPS FPU compare condition. Since the FPU
// only provides one polarity per predicate, negated conditions reuse the
// same FPUCondition and clear |predicate|; the caller branches on the
// inverted compare result in that case. Conditions with no FPU equivalent
// abort.
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      // "not equal" = NOT(EQ); signalled via predicate = false.
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return OLT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return OLE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
}
296 
// Reports an opcode/condition combination that has no MIPS lowering, then
// aborts via UNIMPLEMENTED().
#define UNSUPPORTED_COND(opcode, condition)                                    \
  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
                 << "\"";                                                      \
  UNIMPLEMENTED();
301 
302 void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
303  InstructionCode opcode, Instruction* instr,
304  MipsOperandConverter& i) {
305  const MemoryAccessMode access_mode =
306  static_cast<MemoryAccessMode>(MiscField::decode(opcode));
307  if (access_mode == kMemoryAccessPoisoned) {
308  Register value = i.OutputRegister();
309  codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
310  }
311 }
312 
313 } // namespace
314 
// Atomic load: plain load followed by a full memory barrier (sync).
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

// Atomic store: barriers on both sides of the plain store.
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)
327 
// Word-sized atomic read-modify-write via an LL/SC retry loop: Ll loads the
// old value (left in the output register), bin_instr combines it with input
// 2 into a temp, and Sc attempts the conditional store; Sc writes 0 to its
// register on failure, in which case the loop retries.
#define ASSEMBLE_ATOMIC_BINOP(bin_instr)                                \
  do {                                                                  \
    Label binop;                                                        \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ sync();                                                          \
    __ bind(&binop);                                                    \
    __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));       \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0),                \
                 Operand(i.InputRegister(2)));                          \
    __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));         \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));   \
    __ sync();                                                          \
  } while (0)
341 
// 64-bit atomic logic operation. On MIPS32r6 a doubleword LL/SC pair
// (ll/llx, sc/scx) is retried until the paired store-conditional succeeds;
// when outputs are elided, temps stand in for the old-value registers. On
// older arch variants it falls back to calling the |external| C runtime
// function, preserving caller-saved registers around the call (v0/v1 are
// excluded from the save set — presumably they carry the result back;
// confirm against PushCallerSaved's contract).
#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external)                     \
  do {                                                                         \
    if (IsMipsArchVariant(kMips32r6)) {                                        \
      Label binop;                                                             \
      Register oldval_low =                                                    \
          instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
      Register oldval_high =                                                   \
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
      __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));      \
      __ sync();                                                               \
      __ bind(&binop);                                                         \
      __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));                   \
      __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));                     \
      __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low,           \
                   oldval_high, i.InputRegister(2), i.InputRegister(3));       \
      __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4));             \
      __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));              \
      __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));        \
      __ sync();                                                               \
    } else {                                                                   \
      FrameScope scope(tasm(), StackFrame::MANUAL);                            \
      __ Addu(a0, i.InputRegister(0), i.InputRegister(1));                     \
      __ PushCallerSaved(kDontSaveFPRegs, v0, v1);                             \
      __ PrepareCallCFunction(3, 0, kScratchReg);                              \
      __ CallCFunction(ExternalReference::external(), 3, 0);                   \
      __ PopCallerSaved(kDontSaveFPRegs, v0, v1);                              \
    }                                                                          \
  } while (0)
370 
// 64-bit atomic arithmetic operation; identical structure to
// ASSEMBLE_ATOMIC64_LOGIC_BINOP above, except bin_instr additionally takes
// two scratch registers (needed for carry propagation across the 32-bit
// halves).
#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external)                     \
  do {                                                                         \
    if (IsMipsArchVariant(kMips32r6)) {                                        \
      Label binop;                                                             \
      Register oldval_low =                                                    \
          instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \
      Register oldval_high =                                                   \
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \
      __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));      \
      __ sync();                                                               \
      __ bind(&binop);                                                         \
      __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));                   \
      __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));                     \
      __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low,           \
                   oldval_high, i.InputRegister(2), i.InputRegister(3),        \
                   kScratchReg, kScratchReg2);                                 \
      __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4));             \
      __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));              \
      __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));        \
      __ sync();                                                               \
    } else {                                                                   \
      FrameScope scope(tasm(), StackFrame::MANUAL);                            \
      __ Addu(a0, i.InputRegister(0), i.InputRegister(1));                     \
      __ PushCallerSaved(kDontSaveFPRegs, v0, v1);                             \
      __ PrepareCallCFunction(3, 0, kScratchReg);                              \
      __ CallCFunction(ExternalReference::external(), 3, 0);                   \
      __ PopCallerSaved(kDontSaveFPRegs, v0, v1);                              \
    }                                                                          \
  } while (0)
400 
// Subword (byte/halfword) atomic read-modify-write: aligns the address down
// to its containing word (clearing the low 2 address bits), converts the
// byte offset into a bit shift (x8), then LL/SC-loops on the whole word,
// extracting/re-inserting only |size| bits at that shift.
#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr)                \
  do {                                                                         \
    Label binop;                                                               \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    __ andi(i.TempRegister(3), i.TempRegister(0), 0x3);                        \
    __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(3))); \
    __ sll(i.TempRegister(3), i.TempRegister(3), 3);                           \
    __ sync();                                                                 \
    __ bind(&binop);                                                           \
    __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));                \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3),  \
                   size, sign_extend);                                         \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0),                       \
                 Operand(i.InputRegister(2)));                                 \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3),     \
                  size);                                                       \
    __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));                \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));          \
    __ sync();                                                                 \
  } while (0)
421 
// Word-sized atomic exchange via LL/SC retry loop; the previous memory value
// is returned in the output register.
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER()                               \
  do {                                                                   \
    Label exchange;                                                      \
    __ sync();                                                           \
    __ bind(&exchange);                                                  \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));  \
    __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));        \
    __ mov(i.TempRegister(1), i.InputRegister(2));                       \
    __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0));          \
    __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
    __ sync();                                                           \
  } while (0)
434 
// Subword atomic exchange: same word-alignment and bit-offset computation as
// ASSEMBLE_ATOMIC_BINOP_EXT, but the new subword value is inserted directly
// instead of being combined with the old one.
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size)                \
  do {                                                                         \
    Label exchange;                                                            \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                        \
    __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&exchange);                                                        \
    __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1),    \
                  size);                                                       \
    __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg));       \
    __ sync();                                                                 \
  } while (0)
453 
// Word-sized compare-and-swap: loads the old value into the output register,
// exits early when it differs from the expected value (input 2), otherwise
// attempts to store the new value (input 3), retrying until Sc succeeds.
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER()                      \
  do {                                                                  \
    Label compareExchange;                                              \
    Label exit;                                                         \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ sync();                                                          \
    __ bind(&compareExchange);                                          \
    __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0));       \
    __ BranchShort(&exit, ne, i.InputRegister(2),                       \
                   Operand(i.OutputRegister(0)));                       \
    __ mov(i.TempRegister(2), i.InputRegister(3));                      \
    __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));         \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),             \
                   Operand(zero_reg));                                  \
    __ bind(&exit);                                                     \
    __ sync();                                                          \
  } while (0)
471 
// Subword compare-and-swap: combines the word-alignment/bit-offset scheme of
// the _EXT macros with the early-exit-on-mismatch structure of the word-sized
// compare-exchange above.
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size)        \
  do {                                                                         \
    Label compareExchange;                                                     \
    Label exit;                                                                \
    __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));        \
    __ andi(i.TempRegister(1), i.TempRegister(0), 0x3);                        \
    __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3);                           \
    __ sync();                                                                 \
    __ bind(&compareExchange);                                                 \
    __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1),  \
                   size, sign_extend);                                         \
    __ BranchShort(&exit, ne, i.InputRegister(2),                              \
                   Operand(i.OutputRegister(0)));                              \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1),    \
                  size);                                                       \
    __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0));                \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2),                    \
                   Operand(zero_reg));                                         \
    __ bind(&exit);                                                            \
    __ sync();                                                                 \
  } while (0)
495 
// Calls the two-argument ieee754 C runtime function for `name`, marshalling
// the double operands into and the result out of the C calling convention.
#define ASSEMBLE_IEEE754_BINOP(name)                                        \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)

// Single-argument variant of ASSEMBLE_IEEE754_BINOP.
#define ASSEMBLE_IEEE754_UNOP(name)                                         \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 1, kScratchReg);                             \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                       \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
  } while (0)
516 
// Tears down the current frame: restores sp from fp, then pops the return
// address and the caller's frame pointer.
void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}
521 
// Restores ra and fp from the current frame (when one exists) so that the
// tail-called code observes the caller's frame; from here on, frame slots
// are addressed relative to sp.
void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}
529 
// When the current frame is an arguments-adaptor frame, drops it (and the
// extra arguments it holds) before a tail call; otherwise does nothing.
// |args_reg| holds the callee's formal argument count.
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch1,
            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ lw(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}
554 
555 namespace {
556 
557 void AdjustStackPointerForTailCall(TurboAssembler* tasm,
558  FrameAccessState* state,
559  int new_slot_above_sp,
560  bool allow_shrinkage = true) {
561  int current_sp_offset = state->GetSPToFPSlotCount() +
562  StandardFrameConstants::kFixedSlotCountAboveFp;
563  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
564  if (stack_slot_delta > 0) {
565  tasm->Subu(sp, sp, stack_slot_delta * kPointerSize);
566  state->IncreaseSPDelta(stack_slot_delta);
567  } else if (allow_shrinkage && stack_slot_delta < 0) {
568  tasm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
569  state->IncreaseSPDelta(stack_slot_delta);
570  }
571 }
572 
573 } // namespace
574 
// Adjusts sp before the tail call's gap moves; shrinkage is disallowed here
// (allow_shrinkage = false) so the moves can safely write into new slots.
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot, false);
}
580 
// Performs the final sp adjustment (growing or shrinking) after the tail
// call's gap moves have been emitted.
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot);
}
586 
// Check that {kJavaScriptCallCodeStartRegister} is correct.
// Debug-only sanity check: recomputes the code start address and asserts it
// matches the value callers passed in the dedicated register.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
593 
594 // Check if the code object is marked for deoptimization. If it is, then it
595 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
596 // to:
597 // 1. read from memory the word that contains that bit, which can be found in
598 // the flags in the referenced {CodeDataContainer} object;
599 // 2. test kMarkedForDeoptimizationBit in those flags; and
600 // 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  // kJavaScriptCallCodeStartRegister points at the first instruction, so
  // Code header fields live at negative offsets from it.
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ lw(kScratchReg,
        FieldMemOperand(kScratchReg,
                        CodeDataContainer::kKindSpecificFlagsOffset));
  __ And(kScratchReg, kScratchReg,
         Operand(1 << Code::kMarkedForDeoptimizationBit));
  // Ensure we're not serializing (otherwise we'd need to use an indirection to
  // access the builtin below).
  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
  Handle<Code> code = isolate()->builtins()->builtin_handle(
      Builtins::kCompileLazyDeoptimizedCode);
  // Jump to the lazy-deoptimization builtin iff the bit was set.
  __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
616 
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  // Calculate a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  //    difference = (current - expected) | (expected - current)
  //    poison = ~(difference >> (kBitsPerSystemPointer - 1))
  __ ComputeCodeStartAddress(kScratchReg);
  __ Move(kSpeculationPoisonRegister, kScratchReg);
  __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
          kJavaScriptCallCodeStartRegister);
  __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
          kScratchReg);
  __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  // Arithmetic shift smears the sign bit (set iff the addresses differed)
  // across the whole word; nor with itself then inverts it into the mask.
  __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kBitsPerSystemPointer - 1);
  __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kSpeculationPoisonRegister);
}
635 
// Masks the JS function register, context register, and sp with the
// speculation poison so misspeculated code cannot operate on real values.
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ And(sp, sp, kSpeculationPoisonRegister);
}
641 
642 // Assembles an instruction after register allocation, producing machine code.
643 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
644  Instruction* instr) {
645  MipsOperandConverter i(this, instr);
646  InstructionCode opcode = instr->opcode();
647  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
648  switch (arch_opcode) {
649  case kArchCallCodeObject: {
650  if (instr->InputAt(0)->IsImmediate()) {
651  __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
652  } else {
653  Register reg = i.InputRegister(0);
654  DCHECK_IMPLIES(
655  HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
656  reg == kJavaScriptCallCodeStartRegister);
657  __ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
658  }
659  RecordCallPosition(instr);
660  frame_access_state()->ClearSPDelta();
661  break;
662  }
663  case kArchCallWasmFunction: {
664  if (instr->InputAt(0)->IsImmediate()) {
665  Constant constant = i.ToConstant(instr->InputAt(0));
666  Address wasm_code = static_cast<Address>(constant.ToInt32());
667  __ Call(wasm_code, constant.rmode());
668  } else {
669  __ Call(i.InputRegister(0));
670  }
671  RecordCallPosition(instr);
672  frame_access_state()->ClearSPDelta();
673  break;
674  }
675  case kArchTailCallCodeObjectFromJSFunction:
676  case kArchTailCallCodeObject: {
677  if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
678  AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
679  i.TempRegister(0), i.TempRegister(1),
680  i.TempRegister(2));
681  }
682  if (instr->InputAt(0)->IsImmediate()) {
683  __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
684  } else {
685  Register reg = i.InputRegister(0);
686  DCHECK_IMPLIES(
687  HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
688  reg == kJavaScriptCallCodeStartRegister);
689  __ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
690  __ Jump(reg);
691  }
692  frame_access_state()->ClearSPDelta();
693  frame_access_state()->SetFrameAccessToDefault();
694  break;
695  }
696  case kArchTailCallWasm: {
697  if (instr->InputAt(0)->IsImmediate()) {
698  Constant constant = i.ToConstant(instr->InputAt(0));
699  Address wasm_code = static_cast<Address>(constant.ToInt32());
700  __ Jump(wasm_code, constant.rmode());
701  } else {
702  __ Jump(i.InputRegister(0));
703  }
704  frame_access_state()->ClearSPDelta();
705  frame_access_state()->SetFrameAccessToDefault();
706  break;
707  }
708  case kArchTailCallAddress: {
709  CHECK(!instr->InputAt(0)->IsImmediate());
710  Register reg = i.InputRegister(0);
711  DCHECK_IMPLIES(
712  HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
713  reg == kJavaScriptCallCodeStartRegister);
714  __ Jump(reg);
715  frame_access_state()->ClearSPDelta();
716  frame_access_state()->SetFrameAccessToDefault();
717  break;
718  }
719  case kArchCallJSFunction: {
720  Register func = i.InputRegister(0);
721  if (FLAG_debug_code) {
722  // Check the function's context matches the context argument.
723  __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
724  __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
725  Operand(kScratchReg));
726  }
727  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
728  __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
729  __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
730  __ Call(a2);
731  RecordCallPosition(instr);
732  frame_access_state()->ClearSPDelta();
733  frame_access_state()->SetFrameAccessToDefault();
734  break;
735  }
736  case kArchPrepareCallCFunction: {
737  int const num_parameters = MiscField::decode(instr->opcode());
738  __ PrepareCallCFunction(num_parameters, kScratchReg);
739  // Frame alignment requires using FP-relative frame addressing.
740  frame_access_state()->SetFrameAccessToFP();
741  break;
742  }
743  case kArchSaveCallerRegisters: {
744  fp_mode_ =
745  static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
746  DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
747  // kReturnRegister0 should have been saved before entering the stub.
748  int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
749  DCHECK_EQ(0, bytes % kPointerSize);
750  DCHECK_EQ(0, frame_access_state()->sp_delta());
751  frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
752  DCHECK(!caller_registers_saved_);
753  caller_registers_saved_ = true;
754  break;
755  }
756  case kArchRestoreCallerRegisters: {
757  DCHECK(fp_mode_ ==
758  static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
759  DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
760  // Don't overwrite the returned value.
761  int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
762  frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
763  DCHECK_EQ(0, frame_access_state()->sp_delta());
764  DCHECK(caller_registers_saved_);
765  caller_registers_saved_ = false;
766  break;
767  }
768  case kArchPrepareTailCall:
769  AssemblePrepareTailCall();
770  break;
771  case kArchCallCFunction: {
772  int const num_parameters = MiscField::decode(instr->opcode());
773  if (instr->InputAt(0)->IsImmediate()) {
774  ExternalReference ref = i.InputExternalReference(0);
775  __ CallCFunction(ref, num_parameters);
776  } else {
777  Register func = i.InputRegister(0);
778  __ CallCFunction(func, num_parameters);
779  }
780  frame_access_state()->SetFrameAccessToDefault();
781  // Ideally, we should decrement SP delta to match the change of stack
782  // pointer in CallCFunction. However, for certain architectures (e.g.
783  // ARM), there may be more strict alignment requirement, causing old SP
784  // to be saved on the stack. In those cases, we can not calculate the SP
785  // delta statically.
786  frame_access_state()->ClearSPDelta();
787  if (caller_registers_saved_) {
788  // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
789  // Here, we assume the sequence to be:
790  // kArchSaveCallerRegisters;
791  // kArchCallCFunction;
792  // kArchRestoreCallerRegisters;
793  int bytes =
794  __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
795  frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
796  }
797  break;
798  }
799  case kArchJmp:
800  AssembleArchJump(i.InputRpo(0));
801  break;
802  case kArchBinarySearchSwitch:
803  AssembleArchBinarySearchSwitch(instr);
804  break;
805  case kArchLookupSwitch:
806  AssembleArchLookupSwitch(instr);
807  break;
808  case kArchTableSwitch:
809  AssembleArchTableSwitch(instr);
810  break;
811  case kArchDebugAbort:
812  DCHECK(i.InputRegister(0) == a0);
813  if (!frame_access_state()->has_frame()) {
814  // We don't actually want to generate a pile of code for this, so just
815  // claim there is a stack frame, without generating one.
816  FrameScope scope(tasm(), StackFrame::NONE);
817  __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
818  RelocInfo::CODE_TARGET);
819  } else {
820  __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
821  RelocInfo::CODE_TARGET);
822  }
823  __ stop("kArchDebugAbort");
824  break;
825  case kArchDebugBreak:
826  __ stop("kArchDebugBreak");
827  break;
828  case kArchComment:
829  __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
830  break;
831  case kArchNop:
832  case kArchThrowTerminator:
833  // don't emit code for nops.
834  break;
835  case kArchDeoptimize: {
836  int deopt_state_id =
837  BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
838  CodeGenResult result =
839  AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
840  if (result != kSuccess) return result;
841  break;
842  }
843  case kArchRet:
844  AssembleReturn(instr->InputAt(0));
845  break;
846  case kArchStackPointer:
847  __ mov(i.OutputRegister(), sp);
848  break;
849  case kArchFramePointer:
850  __ mov(i.OutputRegister(), fp);
851  break;
852  case kArchParentFramePointer:
853  if (frame_access_state()->has_frame()) {
854  __ lw(i.OutputRegister(), MemOperand(fp, 0));
855  } else {
856  __ mov(i.OutputRegister(), fp);
857  }
858  break;
859  case kArchTruncateDoubleToI:
860  __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
861  i.InputDoubleRegister(0), DetermineStubCallMode());
862  break;
  // Write-barriered store: store the value, then branch to the out-of-line
  // RecordWrite stub only if the page of `object` has the
  // "pointers-from-here-are-interesting" flag set.
863  case kArchStoreWithWriteBarrier: {
864  RecordWriteMode mode =
865  static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
866  Register object = i.InputRegister(0);
867  Register index = i.InputRegister(1);
868  Register value = i.InputRegister(2);
869  Register scratch0 = i.TempRegister(0);
870  Register scratch1 = i.TempRegister(1);
871  auto ool = new (zone())
872  OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
873  mode, DetermineStubCallMode());
874  __ Addu(kScratchReg, object, index);
875  __ sw(value, MemOperand(kScratchReg));
876  __ CheckPageFlag(object, scratch0,
877  MemoryChunk::kPointersFromHereAreInterestingMask, ne,
878  ool->entry());
879  __ bind(ool->exit());
880  break;
881  }
  // Materialize the address of a stack slot, optionally bumping it so the
  // result satisfies the requested alignment (used for over-aligned slots).
882  case kArchStackSlot: {
883  FrameOffset offset =
884  frame_access_state()->GetFrameOffset(i.InputInt32(0));
885  Register base_reg = offset.from_stack_pointer() ? sp : fp;
886  __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset()));
887  int alignment = i.InputInt32(1);
888  DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
889  alignment == 16);
890  if (FLAG_debug_code && alignment > 0) {
891  // Verify that the output_register is properly aligned
  // NOTE(review): the mask is kPointerSize - 1, so only word alignment is
  // asserted here even when a larger alignment was requested — presumably
  // intentional since the fix-up below handles the rest; confirm.
892  __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
893  __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
894  Operand(zero_reg));
895  }
896 
  // alignment == 8: the slot is at worst one word off; a single bump fixes it.
897  if (alignment == 2 * kPointerSize) {
898  Label done;
899  __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
900  __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
901  __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
902  __ Addu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
903  __ bind(&done);
  // alignment > 8: add (alignment - misalignment) to round up to the boundary.
904  } else if (alignment > 2 * kPointerSize) {
905  Label done;
906  __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
907  __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
908  __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
909  __ li(kScratchReg2, alignment);
910  __ Subu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
911  __ Addu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
912  __ bind(&done);
913  }
914  break;
915  }
  // Spectre mitigation: mask the value with the poison register so
  // mis-speculated loads are zeroed.
916  case kArchWordPoisonOnSpeculation:
917  __ And(i.OutputRegister(), i.InputRegister(0),
918  kSpeculationPoisonRegister);
919  break;
  // IEEE-754 transcendental operations: each expands (via the
  // ASSEMBLE_IEEE754_* macros) to a C-function call into the ieee754 runtime,
  // except Pow which calls the MathPowInternal builtin.
920  case kIeee754Float64Acos:
921  ASSEMBLE_IEEE754_UNOP(acos);
922  break;
923  case kIeee754Float64Acosh:
924  ASSEMBLE_IEEE754_UNOP(acosh);
925  break;
926  case kIeee754Float64Asin:
927  ASSEMBLE_IEEE754_UNOP(asin);
928  break;
929  case kIeee754Float64Asinh:
930  ASSEMBLE_IEEE754_UNOP(asinh);
931  break;
932  case kIeee754Float64Atan:
933  ASSEMBLE_IEEE754_UNOP(atan);
934  break;
935  case kIeee754Float64Atanh:
936  ASSEMBLE_IEEE754_UNOP(atanh);
937  break;
938  case kIeee754Float64Atan2:
939  ASSEMBLE_IEEE754_BINOP(atan2);
940  break;
941  case kIeee754Float64Cos:
942  ASSEMBLE_IEEE754_UNOP(cos);
943  break;
944  case kIeee754Float64Cosh:
945  ASSEMBLE_IEEE754_UNOP(cosh);
946  break;
947  case kIeee754Float64Cbrt:
948  ASSEMBLE_IEEE754_UNOP(cbrt);
949  break;
950  case kIeee754Float64Exp:
951  ASSEMBLE_IEEE754_UNOP(exp);
952  break;
953  case kIeee754Float64Expm1:
954  ASSEMBLE_IEEE754_UNOP(expm1);
955  break;
956  case kIeee754Float64Log:
957  ASSEMBLE_IEEE754_UNOP(log);
958  break;
959  case kIeee754Float64Log1p:
960  ASSEMBLE_IEEE754_UNOP(log1p);
961  break;
962  case kIeee754Float64Log10:
963  ASSEMBLE_IEEE754_UNOP(log10);
964  break;
965  case kIeee754Float64Log2:
966  ASSEMBLE_IEEE754_UNOP(log2);
967  break;
968  case kIeee754Float64Pow: {
969  __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
970  break;
971  }
972  case kIeee754Float64Sin:
973  ASSEMBLE_IEEE754_UNOP(sin);
974  break;
975  case kIeee754Float64Sinh:
976  ASSEMBLE_IEEE754_UNOP(sinh);
977  break;
978  case kIeee754Float64Tan:
979  ASSEMBLE_IEEE754_UNOP(tan);
980  break;
981  case kIeee754Float64Tanh:
982  ASSEMBLE_IEEE754_UNOP(tanh);
983  break;
  // 32-bit integer arithmetic. The *Ovf variants also compute the overflow
  // bit into kScratchReg for the following branch/flags consumer.
984  case kMipsAdd:
985  __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
986  break;
987  case kMipsAddOvf:
988  __ AddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
989  kScratchReg);
990  break;
991  case kMipsSub:
992  __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
993  break;
994  case kMipsSubOvf:
995  __ SubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
996  kScratchReg);
997  break;
998  case kMipsMul:
999  __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1000  break;
1001  case kMipsMulOvf:
1002  __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
1003  kScratchReg);
1004  break;
  // High 32 bits of the 64-bit product (signed / unsigned).
1005  case kMipsMulHigh:
1006  __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1007  break;
1008  case kMipsMulHighU:
1009  __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1010  break;
1011  case kMipsDiv:
1012  __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1013  if (IsMipsArchVariant(kMips32r6)) {
1014  __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1015  } else {
1016  __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
1017  }
1018  break;
1019  case kMipsDivU:
1020  __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1021  if (IsMipsArchVariant(kMips32r6)) {
1022  __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1023  } else {
1024  __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
1025  }
1026  break;
1027  case kMipsMod:
1028  __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1029  break;
1030  case kMipsModU:
1031  __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1032  break;
  // Bitwise logic; Nor with a zero immediate degenerates to bitwise NOT.
1033  case kMipsAnd:
1034  __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1035  break;
1036  case kMipsOr:
1037  __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1038  break;
1039  case kMipsNor:
1040  if (instr->InputAt(1)->IsRegister()) {
1041  __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1042  } else {
1043  DCHECK_EQ(0, i.InputOperand(1).immediate());
1044  __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
1045  }
1046  break;
1047  case kMipsXor:
1048  __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1049  break;
1050  case kMipsClz:
1051  __ Clz(i.OutputRegister(), i.InputRegister(0));
1052  break;
1053  case kMipsCtz: {
1054  Register src = i.InputRegister(0);
1055  Register dst = i.OutputRegister();
1056  __ Ctz(dst, src);
1057  } break;
1058  case kMipsPopcnt: {
1059  Register src = i.InputRegister(0);
1060  Register dst = i.OutputRegister();
1061  __ Popcnt(dst, src);
1062  } break;
  // Shifts: variable-shift instruction for a register amount, immediate
  // encoding otherwise.
1063  case kMipsShl:
1064  if (instr->InputAt(1)->IsRegister()) {
1065  __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1066  } else {
1067  int32_t imm = i.InputOperand(1).immediate();
1068  __ sll(i.OutputRegister(), i.InputRegister(0), imm);
1069  }
1070  break;
1071  case kMipsShr:
1072  if (instr->InputAt(1)->IsRegister()) {
1073  __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1074  } else {
1075  int32_t imm = i.InputOperand(1).immediate();
1076  __ srl(i.OutputRegister(), i.InputRegister(0), imm);
1077  }
1078  break;
1079  case kMipsSar:
1080  if (instr->InputAt(1)->IsRegister()) {
1081  __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1082  } else {
1083  int32_t imm = i.InputOperand(1).immediate();
1084  __ sra(i.OutputRegister(), i.InputRegister(0), imm);
1085  }
1086  break;
  // 64-bit shifts on register pairs (lo = input 0, hi = input 1). When the
  // high half of the result is unused the instruction selector allocates
  // only one output, so a temp stands in for the second output register.
1087  case kMipsShlPair: {
1088  Register second_output =
1089  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1090  if (instr->InputAt(2)->IsRegister()) {
1091  __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1092  i.InputRegister(1), i.InputRegister(2), kScratchReg,
1093  kScratchReg2);
1094  } else {
1095  uint32_t imm = i.InputOperand(2).immediate();
1096  __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1097  i.InputRegister(1), imm, kScratchReg);
1098  }
1099  } break;
1100  case kMipsShrPair: {
1101  Register second_output =
1102  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1103  if (instr->InputAt(2)->IsRegister()) {
1104  __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1105  i.InputRegister(1), i.InputRegister(2), kScratchReg,
1106  kScratchReg2);
1107  } else {
1108  uint32_t imm = i.InputOperand(2).immediate();
1109  __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1110  i.InputRegister(1), imm, kScratchReg);
1111  }
1112  } break;
1113  case kMipsSarPair: {
1114  Register second_output =
1115  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1116  if (instr->InputAt(2)->IsRegister()) {
1117  __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1118  i.InputRegister(1), i.InputRegister(2), kScratchReg,
1119  kScratchReg2);
1120  } else {
1121  uint32_t imm = i.InputOperand(2).immediate();
1122  __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1123  i.InputRegister(1), imm, kScratchReg);
1124  }
1125  } break;
  // Bit-field extract / insert (pos = input 1, size = input 2).
1126  case kMipsExt:
1127  __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
1128  i.InputInt8(2));
1129  break;
1130  case kMipsIns:
1131  if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
1132  __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
1133  } else {
1134  __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
1135  i.InputInt8(2));
1136  }
1137  break;
1138  case kMipsRor:
1139  __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
1140  break;
  // Tst materializes the AND result into kScratchReg; the following branch
  // instruction consumes it. Cmp emits nothing — compare+branch fuse later.
1141  case kMipsTst:
1142  __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
1143  break;
1144  case kMipsCmp:
1145  // Pseudo-instruction used for cmp/branch. No opcode emitted here.
1146  break;
1147  case kMipsMov:
1148  // TODO(plind): Should we combine mov/li like this, or use separate instr?
1149  // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
1150  if (HasRegisterInput(instr, 0)) {
1151  __ mov(i.OutputRegister(), i.InputRegister(0));
1152  } else {
1153  __ li(i.OutputRegister(), i.InputOperand(0));
1154  }
1155  break;
  // Lsa: out = input1 + (input0 << shift) — shift must be an immediate.
1156  case kMipsLsa:
1157  DCHECK(instr->InputAt(2)->IsImmediate());
1158  __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1159  i.InputInt8(2));
1160  break;
  // Float32 compare: sets the FPU condition flag consumed by the following
  // branch. Immediate-zero operands map to kDoubleRegZero, which is lazily
  // loaded with 0.0 on first use.
1161  case kMipsCmpS: {
1162  FPURegister left = i.InputOrZeroSingleRegister(0);
1163  FPURegister right = i.InputOrZeroSingleRegister(1);
1164  bool predicate;
1165  FPUCondition cc =
1166  FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
1167 
1168  if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1169  !__ IsDoubleZeroRegSet()) {
1170  __ Move(kDoubleRegZero, 0.0);
1171  }
1172 
1173  __ CompareF32(cc, left, right);
1174  } break;
1175  case kMipsAddS:
1176  // TODO(plind): add special case: combine mult & add.
1177  __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1178  i.InputDoubleRegister(1));
1179  break;
1180  case kMipsSubS:
1181  __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1182  i.InputDoubleRegister(1));
1183  break;
1184  case kMipsMulS:
1185  // TODO(plind): add special case: right op is -1.0, see arm port.
1186  __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1187  i.InputDoubleRegister(1));
1188  break;
1189  case kMipsDivS:
1190  __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1191  i.InputDoubleRegister(1));
1192  break;
  // Float32 modulus via the C runtime; arguments are converted through the
  // double parameter registers (no single-precision fmod helper exists).
1193  case kMipsModS: {
1194  // TODO(bmeurer): We should really get rid of this special instruction,
1195  // and generate a CallAddress instruction instead.
1196  FrameScope scope(tasm(), StackFrame::MANUAL);
1197  __ PrepareCallCFunction(0, 2, kScratchReg);
1198  __ MovToFloatParameters(i.InputDoubleRegister(0),
1199  i.InputDoubleRegister(1));
1200  // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
1201  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1202  // Move the result in the double result register.
1203  __ MovFromFloatResult(i.OutputSingleRegister());
1204  break;
1205  }
1206  case kMipsAbsS:
1207  __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1208  break;
1209  case kMipsSqrtS: {
1210  __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1211  break;
1212  }
1213  case kMipsMaxS:
1214  __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1215  i.InputDoubleRegister(1));
1216  break;
1217  case kMipsMinS:
1218  __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1219  i.InputDoubleRegister(1));
1220  break;
  // Float64 compare — same flag-setting pattern as kMipsCmpS above.
1221  case kMipsCmpD: {
1222  FPURegister left = i.InputOrZeroDoubleRegister(0);
1223  FPURegister right = i.InputOrZeroDoubleRegister(1);
1224  bool predicate;
1225  FPUCondition cc =
1226  FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
1227  if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1228  !__ IsDoubleZeroRegSet()) {
1229  __ Move(kDoubleRegZero, 0.0);
1230  }
1231  __ CompareF64(cc, left, right);
1232  } break;
  // 64-bit arithmetic on register pairs (lo/hi halves in separate GPRs).
1233  case kMipsAddPair:
1234  __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1235  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
1236  kScratchReg, kScratchReg2);
1237  break;
1238  case kMipsSubPair:
1239  __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1240  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
1241  kScratchReg, kScratchReg2);
1242  break;
1243  case kMipsMulPair: {
1244  __ MulPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
1245  i.InputRegister(1), i.InputRegister(2), i.InputRegister(3),
1246  kScratchReg, kScratchReg2);
1247  } break;
1248  case kMipsAddD:
1249  // TODO(plind): add special case: combine mult & add.
1250  __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1251  i.InputDoubleRegister(1));
1252  break;
1253  case kMipsSubD:
1254  __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1255  i.InputDoubleRegister(1));
1256  break;
  // Fused multiply-add / multiply-subtract (single and double).
1257  case kMipsMaddS:
1258  __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
1259  i.InputFloatRegister(1), i.InputFloatRegister(2),
1260  kScratchDoubleReg);
1261  break;
1262  case kMipsMaddD:
1263  __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1264  i.InputDoubleRegister(1), i.InputDoubleRegister(2),
1265  kScratchDoubleReg);
1266  break;
1267  case kMipsMsubS:
1268  __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
1269  i.InputFloatRegister(1), i.InputFloatRegister(2),
1270  kScratchDoubleReg);
1271  break;
1272  case kMipsMsubD:
1273  __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1274  i.InputDoubleRegister(1), i.InputDoubleRegister(2),
1275  kScratchDoubleReg);
1276  break;
1277  case kMipsMulD:
1278  // TODO(plind): add special case: right op is -1.0, see arm port.
1279  __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1280  i.InputDoubleRegister(1));
1281  break;
1282  case kMipsDivD:
1283  __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1284  i.InputDoubleRegister(1));
1285  break;
  // Float64 modulus via the C runtime fmod helper.
1286  case kMipsModD: {
1287  // TODO(bmeurer): We should really get rid of this special instruction,
1288  // and generate a CallAddress instruction instead.
1289  FrameScope scope(tasm(), StackFrame::MANUAL);
1290  __ PrepareCallCFunction(0, 2, kScratchReg);
1291  __ MovToFloatParameters(i.InputDoubleRegister(0),
1292  i.InputDoubleRegister(1));
1293  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1294  // Move the result in the double result register.
1295  __ MovFromFloatResult(i.OutputDoubleRegister());
1296  break;
1297  }
1298  case kMipsAbsD:
1299  __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1300  break;
1301  case kMipsNegS:
1302  __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1303  break;
1304  case kMipsNegD:
1305  __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1306  break;
1307  case kMipsSqrtD: {
1308  __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1309  break;
1310  }
1311  case kMipsMaxD:
1312  __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1313  i.InputDoubleRegister(1));
1314  break;
1315  case kMipsMinD:
1316  __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1317  i.InputDoubleRegister(1));
1318  break;
  // FP -> FP rounding with explicit mode: floor / trunc / ceil / ties-even.
1319  case kMipsFloat64RoundDown: {
1320  __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1321  break;
1322  }
1323  case kMipsFloat32RoundDown: {
1324  __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1325  break;
1326  }
1327  case kMipsFloat64RoundTruncate: {
1328  __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1329  break;
1330  }
1331  case kMipsFloat32RoundTruncate: {
1332  __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1333  break;
1334  }
1335  case kMipsFloat64RoundUp: {
1336  __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1337  break;
1338  }
1339  case kMipsFloat32RoundUp: {
1340  __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1341  break;
1342  }
1343  case kMipsFloat64RoundTiesEven: {
1344  __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1345  break;
1346  }
1347  case kMipsFloat32RoundTiesEven: {
1348  __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
1349  break;
1350  }
  // JS-semantics min/max: the fast inline path handles the common case and
  // branches to an out-of-line slow path (NaN / signed-zero handling).
1351  case kMipsFloat32Max: {
1352  FPURegister dst = i.OutputSingleRegister();
1353  FPURegister src1 = i.InputSingleRegister(0);
1354  FPURegister src2 = i.InputSingleRegister(1);
1355  auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
1356  __ Float32Max(dst, src1, src2, ool->entry());
1357  __ bind(ool->exit());
1358  break;
1359  }
1360  case kMipsFloat64Max: {
1361  DoubleRegister dst = i.OutputDoubleRegister();
1362  DoubleRegister src1 = i.InputDoubleRegister(0);
1363  DoubleRegister src2 = i.InputDoubleRegister(1);
1364  auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
1365  __ Float64Max(dst, src1, src2, ool->entry());
1366  __ bind(ool->exit());
1367  break;
1368  }
1369  case kMipsFloat32Min: {
1370  FPURegister dst = i.OutputSingleRegister();
1371  FPURegister src1 = i.InputSingleRegister(0);
1372  FPURegister src2 = i.InputSingleRegister(1);
1373  auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
1374  __ Float32Min(dst, src1, src2, ool->entry());
1375  __ bind(ool->exit());
1376  break;
1377  }
1378  case kMipsFloat64Min: {
1379  DoubleRegister dst = i.OutputDoubleRegister();
1380  DoubleRegister src1 = i.InputDoubleRegister(0);
1381  DoubleRegister src2 = i.InputDoubleRegister(1);
1382  auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
1383  __ Float64Min(dst, src1, src2, ool->entry());
1384  __ bind(ool->exit());
1385  break;
1386  }
  // Precision conversions. Integer sources are first moved into an FPU
  // scratch register with mtc1 before converting.
1387  case kMipsCvtSD: {
1388  __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
1389  break;
1390  }
1391  case kMipsCvtDS: {
1392  __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
1393  break;
1394  }
1395  case kMipsCvtDW: {
1396  FPURegister scratch = kScratchDoubleReg;
1397  __ mtc1(i.InputRegister(0), scratch);
1398  __ cvt_d_w(i.OutputDoubleRegister(), scratch);
1399  break;
1400  }
1401  case kMipsCvtSW: {
1402  FPURegister scratch = kScratchDoubleReg;
1403  __ mtc1(i.InputRegister(0), scratch);
1404  __ cvt_s_w(i.OutputDoubleRegister(), scratch);
1405  break;
1406  }
  // uint32 -> float32: go through double (exact) and then narrow.
1407  case kMipsCvtSUw: {
1408  FPURegister scratch = kScratchDoubleReg;
1409  __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
1410  __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
1411  break;
1412  }
1413  case kMipsCvtDUw: {
1414  FPURegister scratch = kScratchDoubleReg;
1415  __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
1416  break;
1417  }
  // double -> int32 with explicit rounding mode; result comes back through
  // the FPU scratch register via mfc1.
1418  case kMipsFloorWD: {
1419  FPURegister scratch = kScratchDoubleReg;
1420  __ Floor_w_d(scratch, i.InputDoubleRegister(0));
1421  __ mfc1(i.OutputRegister(), scratch);
1422  break;
1423  }
1424  case kMipsCeilWD: {
1425  FPURegister scratch = kScratchDoubleReg;
1426  __ Ceil_w_d(scratch, i.InputDoubleRegister(0));
1427  __ mfc1(i.OutputRegister(), scratch);
1428  break;
1429  }
1430  case kMipsRoundWD: {
1431  FPURegister scratch = kScratchDoubleReg;
1432  __ Round_w_d(scratch, i.InputDoubleRegister(0));
1433  __ mfc1(i.OutputRegister(), scratch);
1434  break;
1435  }
1436  case kMipsTruncWD: {
1437  FPURegister scratch = kScratchDoubleReg;
1438  // Other arches use round to zero here, so we follow.
1439  __ Trunc_w_d(scratch, i.InputDoubleRegister(0));
1440  __ mfc1(i.OutputRegister(), scratch);
1441  break;
1442  }
  // float32 -> int32 variants of the above.
1443  case kMipsFloorWS: {
1444  FPURegister scratch = kScratchDoubleReg;
1445  __ floor_w_s(scratch, i.InputDoubleRegister(0));
1446  __ mfc1(i.OutputRegister(), scratch);
1447  break;
1448  }
1449  case kMipsCeilWS: {
1450  FPURegister scratch = kScratchDoubleReg;
1451  __ ceil_w_s(scratch, i.InputDoubleRegister(0));
1452  __ mfc1(i.OutputRegister(), scratch);
1453  break;
1454  }
1455  case kMipsRoundWS: {
1456  FPURegister scratch = kScratchDoubleReg;
1457  __ round_w_s(scratch, i.InputDoubleRegister(0));
1458  __ mfc1(i.OutputRegister(), scratch);
1459  break;
1460  }
1461  case kMipsTruncWS: {
1462  FPURegister scratch = kScratchDoubleReg;
1463  __ trunc_w_s(scratch, i.InputDoubleRegister(0));
1464  __ mfc1(i.OutputRegister(), scratch);
1465  // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1466  // because INT32_MIN allows easier out-of-bounds detection.
  // out + 1 wraps to INT32_MIN only when out == INT32_MAX; Slt detects the
  // signed wrap, and Movn then rewrites the result to INT32_MIN.
1467  __ Addu(kScratchReg, i.OutputRegister(), 1);
1468  __ Slt(kScratchReg2, kScratchReg, i.OutputRegister());
1469  __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
1470  break;
1471  }
1472  case kMipsTruncUwD: {
1473  FPURegister scratch = kScratchDoubleReg;
1474  __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1475  break;
1476  }
1477  case kMipsTruncUwS: {
1478  FPURegister scratch = kScratchDoubleReg;
1479  __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1480  // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1481  // because 0 allows easier out-of-bounds detection.
1482  __ Addu(kScratchReg, i.OutputRegister(), 1);
1483  __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
1484  break;
1485  }
  // Raw bit moves between GPRs and the halves of a double register.
1486  case kMipsFloat64ExtractLowWord32:
1487  __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
1488  break;
1489  case kMipsFloat64ExtractHighWord32:
1490  __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
1491  break;
1492  case kMipsFloat64InsertLowWord32:
1493  __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
1494  break;
1495  case kMipsFloat64InsertHighWord32:
1496  __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
1497  break;
1498  case kMipsFloat64SilenceNaN:
1499  __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1500  break;
1501 
1502  // ... more basic instructions ...
  // Sign/zero extension of byte and halfword values in registers.
1503  case kMipsSeb:
1504  __ Seb(i.OutputRegister(), i.InputRegister(0));
1505  break;
1506  case kMipsSeh:
1507  __ Seh(i.OutputRegister(), i.InputRegister(0));
1508  break;
  // Memory loads. Each load is followed by the speculation-poison masking
  // hook; the U-prefixed variants handle unaligned addresses.
1509  case kMipsLbu:
1510  __ lbu(i.OutputRegister(), i.MemoryOperand());
1511  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1512  break;
1513  case kMipsLb:
1514  __ lb(i.OutputRegister(), i.MemoryOperand());
1515  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1516  break;
1517  case kMipsSb:
1518  __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
1519  break;
1520  case kMipsLhu:
1521  __ lhu(i.OutputRegister(), i.MemoryOperand());
1522  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1523  break;
1524  case kMipsUlhu:
1525  __ Ulhu(i.OutputRegister(), i.MemoryOperand());
1526  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1527  break;
1528  case kMipsLh:
1529  __ lh(i.OutputRegister(), i.MemoryOperand());
1530  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1531  break;
1532  case kMipsUlh:
1533  __ Ulh(i.OutputRegister(), i.MemoryOperand());
1534  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1535  break;
1536  case kMipsSh:
1537  __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
1538  break;
1539  case kMipsUsh:
1540  __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
1541  break;
1542  case kMipsLw:
1543  __ lw(i.OutputRegister(), i.MemoryOperand());
1544  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1545  break;
1546  case kMipsUlw:
1547  __ Ulw(i.OutputRegister(), i.MemoryOperand());
1548  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1549  break;
1550  case kMipsSw:
1551  __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
1552  break;
1553  case kMipsUsw:
1554  __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
1555  break;
  // FPU loads/stores. Stores of an immediate 0.0 go through kDoubleRegZero,
  // which is materialized lazily here just like in the compare cases.
1556  case kMipsLwc1: {
1557  __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
1558  break;
1559  }
1560  case kMipsUlwc1: {
1561  __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
1562  break;
1563  }
1564  case kMipsSwc1: {
1565  size_t index = 0;
1566  MemOperand operand = i.MemoryOperand(&index);
1567  FPURegister ft = i.InputOrZeroSingleRegister(index);
1568  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1569  __ Move(kDoubleRegZero, 0.0);
1570  }
1571  __ swc1(ft, operand);
1572  break;
1573  }
1574  case kMipsUswc1: {
1575  size_t index = 0;
1576  MemOperand operand = i.MemoryOperand(&index);
1577  FPURegister ft = i.InputOrZeroSingleRegister(index);
1578  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1579  __ Move(kDoubleRegZero, 0.0);
1580  }
1581  __ Uswc1(ft, operand, kScratchReg);
1582  break;
1583  }
1584  case kMipsLdc1:
1585  __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
1586  break;
1587  case kMipsUldc1:
1588  __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
1589  break;
1590  case kMipsSdc1: {
1591  FPURegister ft = i.InputOrZeroDoubleRegister(2);
1592  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1593  __ Move(kDoubleRegZero, 0.0);
1594  }
1595  __ Sdc1(ft, i.MemoryOperand());
1596  break;
1597  }
1598  case kMipsUsdc1: {
1599  FPURegister ft = i.InputOrZeroDoubleRegister(2);
1600  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1601  __ Move(kDoubleRegZero, 0.0);
1602  }
1603  __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
1604  break;
1605  }
  // Push an argument; FP values are stored below sp first, then sp is
  // decremented, and the frame's SP delta is tracked in pointer-size units.
1606  case kMipsPush:
1607  if (instr->InputAt(0)->IsFPRegister()) {
1608  LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1609  switch (op->representation()) {
1610  case MachineRepresentation::kFloat32:
1611  __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
1612  __ Subu(sp, sp, Operand(kFloatSize));
1613  frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
1614  break;
1615  case MachineRepresentation::kFloat64:
1616  __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
1617  __ Subu(sp, sp, Operand(kDoubleSize));
1618  frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1619  break;
1620  default: {
1621  UNREACHABLE();
1622  break;
1623  }
1624  }
1625  } else {
1626  __ Push(i.InputRegister(0));
1627  frame_access_state()->IncreaseSPDelta(1);
1628  }
1629  break;
  // Read a caller-pushed value back out of the frame by reverse slot index.
1630  case kMipsPeek: {
1631  // The incoming value is 0-based, but we need a 1-based value.
1632  int reverse_slot = i.InputInt32(0) + 1;
1633  int offset =
1634  FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1635  if (instr->OutputAt(0)->IsFPRegister()) {
1636  LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1637  if (op->representation() == MachineRepresentation::kFloat64) {
1638  __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
1639  } else {
1640  DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
1641  __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
1642  }
1643  } else {
1644  __ lw(i.OutputRegister(0), MemOperand(fp, offset));
1645  }
1646  break;
1647  }
  // Reserve stack space for outgoing arguments (byte count in input 0).
1648  case kMipsStackClaim: {
1649  __ Subu(sp, sp, Operand(i.InputInt32(0)));
1650  frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
1651  break;
1652  }
  // Store into previously claimed stack space at sp + offset (input 1).
1653  case kMipsStoreToStackSlot: {
1654  if (instr->InputAt(0)->IsFPRegister()) {
1655  LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1656  if (op->representation() == MachineRepresentation::kFloat64) {
1657  __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
1658  } else if (op->representation() == MachineRepresentation::kFloat32) {
1659  __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
1660  } else {
1661  DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
1662  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1663  __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
1664  }
1665  } else {
1666  __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
1667  }
1668  break;
1669  }
1670  case kMipsByteSwap32: {
1671  __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
1672  break;
1673  }
  // Word32 atomics: loads/stores expand to the plain instruction wrapped in
  // the required sync barriers; exchange / compare-exchange / binops expand
  // to ll/sc retry loops via the ASSEMBLE_ATOMIC_* macros. The _EXT variants
  // take (sign_extend, bit_width) for sub-word accesses.
1674  case kWord32AtomicLoadInt8:
1675  ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
1676  break;
1677  case kWord32AtomicLoadUint8:
1678  ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
1679  break;
1680  case kWord32AtomicLoadInt16:
1681  ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
1682  break;
1683  case kWord32AtomicLoadUint16:
1684  ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
1685  break;
1686  case kWord32AtomicLoadWord32:
1687  ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
1688  break;
1689  case kWord32AtomicStoreWord8:
1690  ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
1691  break;
1692  case kWord32AtomicStoreWord16:
1693  ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
1694  break;
1695  case kWord32AtomicStoreWord32:
1696  ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
1697  break;
1698  case kWord32AtomicExchangeInt8:
1699  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
1700  break;
1701  case kWord32AtomicExchangeUint8:
1702  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
1703  break;
1704  case kWord32AtomicExchangeInt16:
1705  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
1706  break;
1707  case kWord32AtomicExchangeUint16:
1708  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
1709  break;
1710  case kWord32AtomicExchangeWord32:
1711  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
1712  break;
1713  case kWord32AtomicCompareExchangeInt8:
1714  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
1715  break;
1716  case kWord32AtomicCompareExchangeUint8:
1717  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
1718  break;
1719  case kWord32AtomicCompareExchangeInt16:
1720  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
1721  break;
1722  case kWord32AtomicCompareExchangeUint16:
1723  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
1724  break;
1725  case kWord32AtomicCompareExchangeWord32:
1726  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
1727  break;
1728 #define ATOMIC_BINOP_CASE(op, inst) \
1729  case kWord32Atomic##op##Int8: \
1730  ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
1731  break; \
1732  case kWord32Atomic##op##Uint8: \
1733  ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
1734  break; \
1735  case kWord32Atomic##op##Int16: \
1736  ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
1737  break; \
1738  case kWord32Atomic##op##Uint16: \
1739  ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
1740  break; \
1741  case kWord32Atomic##op##Word32: \
1742  ASSEMBLE_ATOMIC_BINOP(inst); \
1743  break;
1744  ATOMIC_BINOP_CASE(Add, Addu)
1745  ATOMIC_BINOP_CASE(Sub, Subu)
1746  ATOMIC_BINOP_CASE(And, And)
1747  ATOMIC_BINOP_CASE(Or, Or)
1748  ATOMIC_BINOP_CASE(Xor, Xor)
1749 #undef ATOMIC_BINOP_CASE
  // 64-bit (register-pair) atomic load.
1750  case kMipsWord32AtomicPairLoad: {
1751  if (IsMipsArchVariant(kMips32r6)) {
  // r6: read both words with the ll/llx pair (high word at offset 4,
  // low at offset 0), then fence. If the instruction has no outputs the
  // load is dead and nothing is emitted.
1752  if (instr->OutputCount() > 0) {
1753  Register second_output = instr->OutputCount() == 2
1754  ? i.OutputRegister(1)
1755  : i.TempRegister(1);
1756  __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1757  __ llx(second_output, MemOperand(a0, 4));
1758  __ ll(i.OutputRegister(0), MemOperand(a0, 0));
1759  __ sync();
1760  }
1761  } else {
  // Pre-r6: call the C runtime helper with the address in a0. v0/v1 are
  // excluded from the caller-saved set so the 64-bit result returned in
  // them survives PopCallerSaved.
1762  FrameScope scope(tasm(), StackFrame::MANUAL);
1763  __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1764  __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
1765  __ PrepareCallCFunction(1, 0, kScratchReg);
1766  __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0);
1767  __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
1768  }
1769  break;
1770  }
  // 64-bit (register-pair) atomic store.
1771  case kMipsWord32AtomicPairStore: {
1772  if (IsMipsArchVariant(kMips32r6)) {
1773  Label store;
1774  __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1775  __ sync();
1776  __ bind(&store);
  // The ll/llx results are intentionally discarded: on MIPS a sc/scx
  // only succeeds when paired with a preceding ll/llx to the same
  // location, so the loads exist solely to establish that link.
1777  __ llx(i.TempRegister(2), MemOperand(a0, 4));
1778  __ ll(i.TempRegister(1), MemOperand(a0, 0));
1779  __ Move(i.TempRegister(1), i.InputRegister(2));
1780  __ scx(i.InputRegister(3), MemOperand(a0, 4));
  // sc writes its success flag back into the source register; retry
  // while it reports failure (zero).
1781  __ sc(i.TempRegister(1), MemOperand(a0, 0));
1782  __ BranchShort(&store, eq, i.TempRegister(1), Operand(zero_reg));
1783  __ sync();
1784  } else {
  // Pre-r6: delegate to the C helper (address in a0; the value pair is
  // expected in the remaining argument registers per the call descriptor).
1785  FrameScope scope(tasm(), StackFrame::MANUAL);
1786  __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1787  __ PushCallerSaved(kDontSaveFPRegs);
1788  __ PrepareCallCFunction(3, 0, kScratchReg);
1789  __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0);
1790  __ PopCallerSaved(kDontSaveFPRegs);
1791  }
1792  break;
1793  }
  // 64-bit pair read-modify-write ops: arithmetic (Add/Sub) and logic
  // (And/Or/Xor) variants share a shape but use different assemble macros.
  // The 'external' argument names the C fallback used on pre-r6 cores.
  // No comments inside the macro bodies (would break the '\' continuations).
1794 #define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \
1795  case kMipsWord32AtomicPair##op: \
1796  ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external); \
1797  break;
1798  ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function)
1799  ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function)
1800 #undef ATOMIC64_BINOP_ARITH_CASE
1801 #define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \
1802  case kMipsWord32AtomicPair##op: \
1803  ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external); \
1804  break;
1805  ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function)
1806  ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function)
1807  ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function)
1808 #undef ATOMIC64_BINOP_LOGIC_CASE
  // 64-bit (register-pair) atomic exchange: returns the old value and
  // stores the new one.
1809  case kMipsWord32AtomicPairExchange:
1810  if (IsMipsArchVariant(kMips32r6)) {
1811  Label binop;
  // If an output register is unallocated, the old value still has to be
  // loaded somewhere; route it into a temp.
1812  Register oldval_low =
1813  instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1);
1814  Register oldval_high =
1815  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2);
1816  __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
1817  __ sync();
1818  __ bind(&binop);
  // ll/llx capture the old pair and establish the link for sc/scx.
1819  __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
1820  __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
  // Copy the new low word: sc clobbers its source register with the
  // success flag, so the input must not be stored directly.
1821  __ Move(i.TempRegister(1), i.InputRegister(2));
1822  __ scx(i.InputRegister(3), MemOperand(i.TempRegister(0), 4));
1823  __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0))
1824  __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg));
1825  __ sync();
1826  } else {
  // Pre-r6 fallback: C helper; v0/v1 excluded from the save set so the
  // returned old pair survives the pop.
1827  FrameScope scope(tasm(), StackFrame::MANUAL);
1828  __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
1829  __ PrepareCallCFunction(3, 0, kScratchReg);
1830  __ Addu(a0, i.InputRegister(0), i.InputRegister(1));
1831  __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3,
1832  0);
1833  __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
1834  }
1835  break;
  // 64-bit (register-pair) atomic compare-exchange.
  // Inputs: 0/1 = base+offset, 2/3 = expected lo/hi, 4/5 = new lo/hi.
1836  case kMipsWord32AtomicPairCompareExchange: {
1837  if (IsMipsArchVariant(kMips32r6)) {
1838  Label compareExchange, exit;
  // kScratchReg/kScratchReg2 stand in for unallocated outputs; they are
  // only read before kScratchReg is reused below, so this is safe.
1839  Register oldval_low =
1840  instr->OutputCount() >= 1 ? i.OutputRegister(0) : kScratchReg;
1841  Register oldval_high =
1842  instr->OutputCount() >= 2 ? i.OutputRegister(1) : kScratchReg2;
1843  __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
1844  __ sync();
1845  __ bind(&compareExchange);
1846  __ llx(oldval_high, MemOperand(i.TempRegister(0), 4));
1847  __ ll(oldval_low, MemOperand(i.TempRegister(0), 0));
  // Bail out (leaving the old value in the outputs) if either half
  // differs from the expected pair.
1848  __ BranchShort(&exit, ne, i.InputRegister(2), Operand(oldval_low));
1849  __ BranchShort(&exit, ne, i.InputRegister(3), Operand(oldval_high));
  // Copy new-low into a scratch: sc overwrites its source register with
  // the success flag, and the input must stay intact for retries.
1850  __ mov(kScratchReg, i.InputRegister(4));
1851  __ scx(i.InputRegister(5), MemOperand(i.TempRegister(0), 4));
1852  __ sc(kScratchReg, MemOperand(i.TempRegister(0), 0));
1853  __ BranchShort(&compareExchange, eq, kScratchReg, Operand(zero_reg));
1854  __ bind(&exit);
1855  __ sync();
1856  } else {
  // Pre-r6: C helper with 5 arguments; the 5th (new-high) goes to the
  // stack slot at sp+16 per the O32 calling convention.
1857  FrameScope scope(tasm(), StackFrame::MANUAL);
1858  __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
1859  __ PrepareCallCFunction(5, 0, kScratchReg);
1860  __ addu(a0, i.InputRegister(0), i.InputRegister(1));
1861  __ sw(i.InputRegister(5), MemOperand(sp, 16));
1862  __ CallCFunction(
1863  ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
1864  __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
1865  }
1866  break;
1867  }
  // ---- MSA SIMD cases. Every case opens a CpuFeatureScope so the emitted
  // MSA instructions are only reachable when MIPS_SIMD is supported. ----
  // S128Zero: xor a register with itself to produce all-zero lanes.
1868  case kMipsS128Zero: {
1869  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1870  __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
1871  i.OutputSimd128Register());
1872  break;
1873  }
  // Splat/ExtractLane/ReplaceLane for 32-bit integer lanes.
1874  case kMipsI32x4Splat: {
1875  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1876  __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
1877  break;
1878  }
1879  case kMipsI32x4ExtractLane: {
1880  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1881  __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
1882  i.InputInt8(1));
1883  break;
1884  }
1885  case kMipsI32x4ReplaceLane: {
1886  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1887  Simd128Register src = i.InputSimd128Register(0);
1888  Simd128Register dst = i.OutputSimd128Register();
  // insert_w modifies dst in place, so the source vector must be copied
  // first when the allocator gave us distinct registers.
1889  if (src != dst) {
1890  __ move_v(dst, src);
1891  }
1892  __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
1893  break;
1894  }
1895  case kMipsI32x4Add: {
1896  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1897  __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1898  i.InputSimd128Register(1));
1899  break;
1900  }
1901  case kMipsI32x4Sub: {
1902  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1903  __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1904  i.InputSimd128Register(1));
1905  break;
1906  }
  // F32x4 splat/extract/replace go through a GPR because FmoveLow is the
  // bridge between the FPU single registers and MSA lanes.
1907  case kMipsF32x4Splat: {
1908  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1909  __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
1910  __ fill_w(i.OutputSimd128Register(), kScratchReg);
1911  break;
1912  }
1913  case kMipsF32x4ExtractLane: {
1914  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
  // copy_u_w vs copy_s_w is immaterial here: the 32 bits are moved
  // verbatim into a 32-bit FPR half, no extension survives.
1915  __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
1916  __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
1917  break;
1918  }
1919  case kMipsF32x4ReplaceLane: {
1920  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1921  Simd128Register src = i.InputSimd128Register(0);
1922  Simd128Register dst = i.OutputSimd128Register();
1923  if (src != dst) {
1924  __ move_v(dst, src);
1925  }
1926  __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
1927  __ insert_w(dst, i.InputInt8(1), kScratchReg);
1928  break;
1929  }
  // Int -> float lane conversions (signed / unsigned).
1930  case kMipsF32x4SConvertI32x4: {
1931  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1932  __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
1933  break;
1934  }
1935  case kMipsF32x4UConvertI32x4: {
1936  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1937  __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
1938  break;
1939  }
1940  case kMipsI32x4Mul: {
1941  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1942  __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1943  i.InputSimd128Register(1));
1944  break;
1945  }
1946  case kMipsI32x4MaxS: {
1947  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1948  __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1949  i.InputSimd128Register(1));
1950  break;
1951  }
1952  case kMipsI32x4MinS: {
1953  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1954  __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1955  i.InputSimd128Register(1));
1956  break;
1957  }
1958  case kMipsI32x4Eq: {
1959  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1960  __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1961  i.InputSimd128Register(1));
1962  break;
1963  }
  // Ne has no direct MSA instruction: compute Eq then bitwise-invert
  // (nor with itself == not).
1964  case kMipsI32x4Ne: {
1965  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1966  Simd128Register dst = i.OutputSimd128Register();
1967  __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
1968  __ nor_v(dst, dst, dst);
1969  break;
1970  }
  // Immediate shifts; the shift amount is masked to the lane width by the
  // InputInt5 accessor (5 bits for 32-bit lanes).
1971  case kMipsI32x4Shl: {
1972  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1973  __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1974  i.InputInt5(1));
1975  break;
1976  }
1977  case kMipsI32x4ShrS: {
1978  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1979  __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1980  i.InputInt5(1));
1981  break;
1982  }
1983  case kMipsI32x4ShrU: {
1984  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1985  __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1986  i.InputInt5(1));
1987  break;
1988  }
1989  case kMipsI32x4MaxU: {
1990  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1991  __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1992  i.InputSimd128Register(1));
1993  break;
1994  }
1995  case kMipsI32x4MinU: {
1996  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1997  __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
1998  i.InputSimd128Register(1));
1999  break;
2000  }
  // Bitwise select: for each bit, dst = mask ? input1 : input2. bsel_v
  // uses dst as the mask, so the register allocator must have tied the
  // output to input 0 (the mask) — enforced by the DCHECK.
2001  case kMipsS128Select: {
2002  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2003  DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
2004  __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
2005  i.InputSimd128Register(1));
2006  break;
2007  }
  // F32x4 abs/neg are implemented as sign-bit manipulation: clear
  // (bclri) or flip (bnegi) bit 31 of every 32-bit lane.
2008  case kMipsF32x4Abs: {
2009  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2010  __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2011  break;
2012  }
2013  case kMipsF32x4Neg: {
2014  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2015  __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2016  break;
2017  }
  // Hardware approximations for 1/x and 1/sqrt(x); wasm's RecipApprox
  // semantics explicitly allow implementation-defined precision.
2018  case kMipsF32x4RecipApprox: {
2019  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2020  __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2021  break;
2022  }
2023  case kMipsF32x4RecipSqrtApprox: {
2024  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2025  __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2026  break;
2027  }
2028  case kMipsF32x4Add: {
2029  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2030  __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2031  i.InputSimd128Register(1));
2032  break;
2033  }
2034  case kMipsF32x4Sub: {
2035  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2036  __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2037  i.InputSimd128Register(1));
2038  break;
2039  }
2040  case kMipsF32x4Mul: {
2041  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2042  __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2043  i.InputSimd128Register(1));
2044  break;
2045  }
2046  case kMipsF32x4Max: {
2047  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2048  __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2049  i.InputSimd128Register(1));
2050  break;
2051  }
2052  case kMipsF32x4Min: {
2053  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2054  __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2055  i.InputSimd128Register(1));
2056  break;
2057  }
  // Float lane comparisons produce all-ones / all-zero lane masks.
2058  case kMipsF32x4Eq: {
2059  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2060  __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2061  i.InputSimd128Register(1));
2062  break;
2063  }
2064  case kMipsF32x4Ne: {
2065  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2066  __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2067  i.InputSimd128Register(1));
2068  break;
2069  }
2070  case kMipsF32x4Lt: {
2071  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2072  __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2073  i.InputSimd128Register(1));
2074  break;
2075  }
2076  case kMipsF32x4Le: {
2077  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2078  __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2079  i.InputSimd128Register(1));
2080  break;
2081  }
  // Float -> int lane conversions, truncating toward zero.
2082  case kMipsI32x4SConvertF32x4: {
2083  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2084  __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2085  break;
2086  }
2087  case kMipsI32x4UConvertF32x4: {
2088  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2089  __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2090  break;
2091  }
  // Integer negate: materialize zero then subtract (MSA has no vector neg).
2092  case kMipsI32x4Neg: {
2093  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2094  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2095  __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
2096  i.InputSimd128Register(0));
2097  break;
2098  }
  // Gt/Ge are synthesized from Lt/Le with the operands swapped:
  // a > b  <=>  b < a,  a >= b  <=>  b <= a.
2099  case kMipsI32x4GtS: {
2100  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2101  __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2102  i.InputSimd128Register(0));
2103  break;
2104  }
2105  case kMipsI32x4GeS: {
2106  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2107  __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2108  i.InputSimd128Register(0));
2109  break;
2110  }
2111  case kMipsI32x4GtU: {
2112  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2113  __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2114  i.InputSimd128Register(0));
2115  break;
2116  }
2117  case kMipsI32x4GeU: {
2118  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2119  __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2120  i.InputSimd128Register(0));
2121  break;
2122  }
  // ---- I16x8 lane ops: same patterns as the I32x4 cases above, using the
  // halfword (_h) MSA variants; shift immediates are 4 bits wide. ----
2123  case kMipsI16x8Splat: {
2124  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2125  __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
2126  break;
2127  }
2128  case kMipsI16x8ExtractLane: {
2129  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2130  __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
2131  i.InputInt8(1));
2132  break;
2133  }
2134  case kMipsI16x8ReplaceLane: {
2135  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2136  Simd128Register src = i.InputSimd128Register(0);
2137  Simd128Register dst = i.OutputSimd128Register();
  // insert_h modifies dst in place; copy src over first if distinct.
2138  if (src != dst) {
2139  __ move_v(dst, src);
2140  }
2141  __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
2142  break;
2143  }
  // Negate via 0 - x (no vector neg in MSA).
2144  case kMipsI16x8Neg: {
2145  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2146  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2147  __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
2148  i.InputSimd128Register(0));
2149  break;
2150  }
2151  case kMipsI16x8Shl: {
2152  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2153  __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2154  i.InputInt4(1));
2155  break;
2156  }
2157  case kMipsI16x8ShrS: {
2158  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2159  __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2160  i.InputInt4(1));
2161  break;
2162  }
2163  case kMipsI16x8ShrU: {
2164  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2165  __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2166  i.InputInt4(1));
2167  break;
2168  }
2169  case kMipsI16x8Add: {
2170  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2171  __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2172  i.InputSimd128Register(1));
2173  break;
2174  }
  // Saturating add/sub map directly to MSA adds_/subs_ instructions.
2175  case kMipsI16x8AddSaturateS: {
2176  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2177  __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2178  i.InputSimd128Register(1));
2179  break;
2180  }
2181  case kMipsI16x8Sub: {
2182  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2183  __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2184  i.InputSimd128Register(1));
2185  break;
2186  }
2187  case kMipsI16x8SubSaturateS: {
2188  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2189  __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2190  i.InputSimd128Register(1));
2191  break;
2192  }
2193  case kMipsI16x8Mul: {
2194  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2195  __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2196  i.InputSimd128Register(1));
2197  break;
2198  }
2199  case kMipsI16x8MaxS: {
2200  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2201  __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2202  i.InputSimd128Register(1));
2203  break;
2204  }
2205  case kMipsI16x8MinS: {
2206  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2207  __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2208  i.InputSimd128Register(1));
2209  break;
2210  }
2211  case kMipsI16x8Eq: {
2212  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2213  __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2214  i.InputSimd128Register(1));
2215  break;
2216  }
  // Ne = not(Eq); nor with itself is bitwise-not.
2217  case kMipsI16x8Ne: {
2218  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2219  Simd128Register dst = i.OutputSimd128Register();
2220  __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2221  __ nor_v(dst, dst, dst);
2222  break;
2223  }
  // Gt/Ge via swapped-operand Lt/Le, as for I32x4.
2224  case kMipsI16x8GtS: {
2225  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2226  __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2227  i.InputSimd128Register(0));
2228  break;
2229  }
2230  case kMipsI16x8GeS: {
2231  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2232  __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2233  i.InputSimd128Register(0));
2234  break;
2235  }
2236  case kMipsI16x8AddSaturateU: {
2237  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2238  __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2239  i.InputSimd128Register(1));
2240  break;
2241  }
2242  case kMipsI16x8SubSaturateU: {
2243  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2244  __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2245  i.InputSimd128Register(1));
2246  break;
2247  }
2248  case kMipsI16x8MaxU: {
2249  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2250  __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2251  i.InputSimd128Register(1));
2252  break;
2253  }
2254  case kMipsI16x8MinU: {
2255  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2256  __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2257  i.InputSimd128Register(1));
2258  break;
2259  }
2260  case kMipsI16x8GtU: {
2261  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2262  __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2263  i.InputSimd128Register(0));
2264  break;
2265  }
2266  case kMipsI16x8GeU: {
2267  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2268  __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2269  i.InputSimd128Register(0));
2270  break;
2271  }
  // ---- I8x16 lane ops: byte (_b) MSA variants of the patterns above;
  // shift immediates are 3 bits wide. ----
2272  case kMipsI8x16Splat: {
2273  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2274  __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
2275  break;
2276  }
2277  case kMipsI8x16ExtractLane: {
2278  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2279  __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
2280  i.InputInt8(1));
2281  break;
2282  }
2283  case kMipsI8x16ReplaceLane: {
2284  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2285  Simd128Register src = i.InputSimd128Register(0);
2286  Simd128Register dst = i.OutputSimd128Register();
  // insert_b modifies dst in place; copy src over first if distinct.
2287  if (src != dst) {
2288  __ move_v(dst, src);
2289  }
2290  __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
2291  break;
2292  }
  // Negate via 0 - x.
2293  case kMipsI8x16Neg: {
2294  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2295  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2296  __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
2297  i.InputSimd128Register(0));
2298  break;
2299  }
2300  case kMipsI8x16Shl: {
2301  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2302  __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2303  i.InputInt3(1));
2304  break;
2305  }
2306  case kMipsI8x16ShrS: {
2307  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2308  __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2309  i.InputInt3(1));
2310  break;
2311  }
2312  case kMipsI8x16Add: {
2313  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2314  __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2315  i.InputSimd128Register(1));
2316  break;
2317  }
2318  case kMipsI8x16AddSaturateS: {
2319  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2320  __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2321  i.InputSimd128Register(1));
2322  break;
2323  }
2324  case kMipsI8x16Sub: {
2325  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2326  __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2327  i.InputSimd128Register(1));
2328  break;
2329  }
2330  case kMipsI8x16SubSaturateS: {
2331  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2332  __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2333  i.InputSimd128Register(1));
2334  break;
2335  }
2336  case kMipsI8x16Mul: {
2337  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2338  __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2339  i.InputSimd128Register(1));
2340  break;
2341  }
2342  case kMipsI8x16MaxS: {
2343  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2344  __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2345  i.InputSimd128Register(1));
2346  break;
2347  }
2348  case kMipsI8x16MinS: {
2349  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2350  __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2351  i.InputSimd128Register(1));
2352  break;
2353  }
2354  case kMipsI8x16Eq: {
2355  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2356  __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2357  i.InputSimd128Register(1));
2358  break;
2359  }
  // Ne = not(Eq).
2360  case kMipsI8x16Ne: {
2361  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2362  Simd128Register dst = i.OutputSimd128Register();
2363  __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2364  __ nor_v(dst, dst, dst);
2365  break;
2366  }
  // Gt/Ge via swapped-operand Lt/Le.
2367  case kMipsI8x16GtS: {
2368  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2369  __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2370  i.InputSimd128Register(0));
2371  break;
2372  }
2373  case kMipsI8x16GeS: {
2374  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2375  __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2376  i.InputSimd128Register(0));
2377  break;
2378  }
2379  case kMipsI8x16ShrU: {
2380  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2381  __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2382  i.InputInt3(1));
2383  break;
2384  }
2385  case kMipsI8x16AddSaturateU: {
2386  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2387  __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2388  i.InputSimd128Register(1));
2389  break;
2390  }
2391  case kMipsI8x16SubSaturateU: {
2392  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2393  __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2394  i.InputSimd128Register(1));
2395  break;
2396  }
2397  case kMipsI8x16MaxU: {
2398  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2399  __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2400  i.InputSimd128Register(1));
2401  break;
2402  }
2403  case kMipsI8x16MinU: {
2404  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2405  __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2406  i.InputSimd128Register(1));
2407  break;
2408  }
2409  case kMipsI8x16GtU: {
2410  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2411  __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2412  i.InputSimd128Register(0));
2413  break;
2414  }
2415  case kMipsI8x16GeU: {
2416  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2417  __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2418  i.InputSimd128Register(0));
2419  break;
2420  }
  // Whole-vector bitwise ops; Not is nor of the input with itself.
2421  case kMipsS128And: {
2422  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2423  __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2424  i.InputSimd128Register(1));
2425  break;
2426  }
2427  case kMipsS128Or: {
2428  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2429  __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2430  i.InputSimd128Register(1));
2431  break;
2432  }
2433  case kMipsS128Xor: {
2434  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2435  __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2436  i.InputSimd128Register(1));
2437  break;
2438  }
2439  case kMipsS128Not: {
2440  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2441  __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2442  i.InputSimd128Register(0));
2443  break;
2444  }
  // AnyTrue: one shape fits all lane widths because BNZ.V tests the whole
  // 128-bit register. The branch-delay-slot li always executes, presetting
  // dst = 0 (the all-zero answer); the fall-through li overwrites it with
  // -1 only when the branch is not taken.
2445  case kMipsS1x4AnyTrue:
2446  case kMipsS1x8AnyTrue:
2447  case kMipsS1x16AnyTrue: {
2448  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2449  Register dst = i.OutputRegister();
2450  Label all_false;
2451 
2452  __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
2453  i.InputSimd128Register(0), USE_DELAY_SLOT);
2454  __ li(dst, 0); // branch delay slot
2455  __ li(dst, -1);
2456  __ bind(&all_false);
2457  break;
2458  }
  // AllTrue needs the lane-width-specific branch (W/H/B) so that "every
  // lane nonzero" is tested per element; same delay-slot preset trick,
  // with the polarity reversed (preset -1, overwrite with 0).
2459  case kMipsS1x4AllTrue: {
2460  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2461  Register dst = i.OutputRegister();
2462  Label all_true;
2463  __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
2464  i.InputSimd128Register(0), USE_DELAY_SLOT);
2465  __ li(dst, -1); // branch delay slot
2466  __ li(dst, 0);
2467  __ bind(&all_true);
2468  break;
2469  }
2470  case kMipsS1x8AllTrue: {
2471  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2472  Register dst = i.OutputRegister();
2473  Label all_true;
2474  __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
2475  i.InputSimd128Register(0), USE_DELAY_SLOT);
2476  __ li(dst, -1); // branch delay slot
2477  __ li(dst, 0);
2478  __ bind(&all_true);
2479  break;
2480  }
2481  case kMipsS1x16AllTrue: {
2482  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2483  Register dst = i.OutputRegister();
2484  Label all_true;
2485  __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
2486  i.InputSimd128Register(0), USE_DELAY_SLOT);
2487  __ li(dst, -1); // branch delay slot
2488  __ li(dst, 0);
2489  __ bind(&all_true);
2490  break;
2491  }
  // 128-bit vector load/store. ld_b/st_b (byte element width) are used
  // regardless of lane interpretation since the full register moves.
2492  case kMipsMsaLd: {
2493  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2494  __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
2495  break;
2496  }
2497  case kMipsMsaSt: {
2498  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
  // NOTE(review): the value to store is input 2 — inputs 0/1 presumably
  // form the memory operand consumed by i.MemoryOperand(); confirm
  // against the instruction selector.
2499  __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
2500  break;
2501  }
  // ---- S32x4 shuffle building blocks: interleave (ilvr/ilvl/ilvev/ilvod)
  // and pack (pckev/pckod). The inline lane diagrams document each result
  // in terms of combined lane indices [7..0] across src1:src0. ----
2502  case kMipsS32x4InterleaveRight: {
2503  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2504  Simd128Register dst = i.OutputSimd128Register(),
2505  src0 = i.InputSimd128Register(0),
2506  src1 = i.InputSimd128Register(1);
2507  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2508  // dst = [5, 1, 4, 0]
2509  __ ilvr_w(dst, src1, src0);
2510  break;
2511  }
2512  case kMipsS32x4InterleaveLeft: {
2513  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2514  Simd128Register dst = i.OutputSimd128Register(),
2515  src0 = i.InputSimd128Register(0),
2516  src1 = i.InputSimd128Register(1);
2517  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2518  // dst = [7, 3, 6, 2]
2519  __ ilvl_w(dst, src1, src0);
2520  break;
2521  }
2522  case kMipsS32x4PackEven: {
2523  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2524  Simd128Register dst = i.OutputSimd128Register(),
2525  src0 = i.InputSimd128Register(0),
2526  src1 = i.InputSimd128Register(1);
2527  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2528  // dst = [6, 4, 2, 0]
2529  __ pckev_w(dst, src1, src0);
2530  break;
2531  }
2532  case kMipsS32x4PackOdd: {
2533  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2534  Simd128Register dst = i.OutputSimd128Register(),
2535  src0 = i.InputSimd128Register(0),
2536  src1 = i.InputSimd128Register(1);
2537  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2538  // dst = [7, 5, 3, 1]
2539  __ pckod_w(dst, src1, src0);
2540  break;
2541  }
2542  case kMipsS32x4InterleaveEven: {
2543  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2544  Simd128Register dst = i.OutputSimd128Register(),
2545  src0 = i.InputSimd128Register(0),
2546  src1 = i.InputSimd128Register(1);
2547  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2548  // dst = [6, 2, 4, 0]
2549  __ ilvev_w(dst, src1, src0);
2550  break;
2551  }
2552  case kMipsS32x4InterleaveOdd: {
2553  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2554  Simd128Register dst = i.OutputSimd128Register(),
2555  src0 = i.InputSimd128Register(0),
2556  src1 = i.InputSimd128Register(1);
2557  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2558  // dst = [7, 3, 5, 1]
2559  __ ilvod_w(dst, src1, src0);
2560  break;
2561  }
  // General S32x4 shuffle. The 32-bit immediate packs four byte-sized lane
  // indices (0-7, where 4-7 select from src1).
2562  case kMipsS32x4Shuffle: {
2563  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2564  Simd128Register dst = i.OutputSimd128Register(),
2565  src0 = i.InputSimd128Register(0),
2566  src1 = i.InputSimd128Register(1);
2567 
2568  int32_t shuffle = i.InputInt32(2);
2569 
2570  if (src0 == src1) {
2571  // Unary S32x4 shuffles are handled with shf.w instruction
2572  unsigned lane = shuffle & 0xFF;
2573  if (FLAG_debug_code) {
2574  // range of all four lanes, for unary instruction,
2575  // should belong to the same range, which can be one of these:
2576  // [0, 3] or [4, 7]
2577  if (lane >= 4) {
2578  int32_t shuffle_helper = shuffle;
2579  for (int i = 0; i < 4; ++i) {
2580  lane = shuffle_helper & 0xFF;
2581  CHECK_GE(lane, 4);
2582  shuffle_helper >>= 8;
2583  }
2584  }
2585  }
  // Repack the four byte indices into shf.w's 8-bit immediate: 2 bits
  // per lane, normalized to [0,3] since both sources are the same.
2586  uint32_t i8 = 0;
2587  for (int i = 0; i < 4; i++) {
2588  lane = shuffle & 0xFF;
2589  if (lane >= 4) {
2590  lane -= 4;
2591  }
2592  DCHECK_GT(4, lane);
2593  i8 |= lane << (2 * i);
2594  shuffle >>= 8;
2595  }
2596  __ shf_w(dst, src0, i8);
2597  } else {
2598  // For binary shuffles use vshf.w instruction
  // vshf.w reads its control from dst, so a source aliasing dst must be
  // copied to the scratch vector first.
2599  if (dst == src0) {
2600  __ move_v(kSimd128ScratchReg, src0);
2601  src0 = kSimd128ScratchReg;
2602  } else if (dst == src1) {
2603  __ move_v(kSimd128ScratchReg, src1);
2604  src1 = kSimd128ScratchReg;
2605  }
2606 
  // Build the control vector: place the packed byte indices in lane 0,
  // then zero-extend each byte to a 32-bit element via two interleaves
  // with the zero vector.
2607  __ li(kScratchReg, i.InputInt32(2));
2608  __ insert_w(dst, 0, kScratchReg);
2609  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2610  __ ilvr_b(dst, kSimd128RegZero, dst);
2611  __ ilvr_h(dst, kSimd128RegZero, dst);
2612  __ vshf_w(dst, src1, src0);
2613  }
2614  break;
2615  }
  // ---- S16x8 shuffle building blocks: halfword interleave/pack, plus
  // within-vector reversals implemented with shf.h immediates. ----
2616  case kMipsS16x8InterleaveRight: {
2617  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2618  Simd128Register dst = i.OutputSimd128Register(),
2619  src0 = i.InputSimd128Register(0),
2620  src1 = i.InputSimd128Register(1);
2621  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2622  // dst = [11, 3, 10, 2, 9, 1, 8, 0]
2623  __ ilvr_h(dst, src1, src0);
2624  break;
2625  }
2626  case kMipsS16x8InterleaveLeft: {
2627  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2628  Simd128Register dst = i.OutputSimd128Register(),
2629  src0 = i.InputSimd128Register(0),
2630  src1 = i.InputSimd128Register(1);
2631  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2632  // dst = [15, 7, 14, 6, 13, 5, 12, 4]
2633  __ ilvl_h(dst, src1, src0);
2634  break;
2635  }
2636  case kMipsS16x8PackEven: {
2637  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2638  Simd128Register dst = i.OutputSimd128Register(),
2639  src0 = i.InputSimd128Register(0),
2640  src1 = i.InputSimd128Register(1);
2641  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2642  // dst = [14, 12, 10, 8, 6, 4, 2, 0]
2643  __ pckev_h(dst, src1, src0);
2644  break;
2645  }
2646  case kMipsS16x8PackOdd: {
2647  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2648  Simd128Register dst = i.OutputSimd128Register(),
2649  src0 = i.InputSimd128Register(0),
2650  src1 = i.InputSimd128Register(1);
2651  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2652  // dst = [15, 13, 11, 9, 7, 5, 3, 1]
2653  __ pckod_h(dst, src1, src0);
2654  break;
2655  }
2656  case kMipsS16x8InterleaveEven: {
2657  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2658  Simd128Register dst = i.OutputSimd128Register(),
2659  src0 = i.InputSimd128Register(0),
2660  src1 = i.InputSimd128Register(1);
2661  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2662  // dst = [14, 6, 12, 4, 10, 2, 8, 0]
2663  __ ilvev_h(dst, src1, src0);
2664  break;
2665  }
2666  case kMipsS16x8InterleaveOdd: {
2667  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2668  Simd128Register dst = i.OutputSimd128Register(),
2669  src0 = i.InputSimd128Register(0),
2670  src1 = i.InputSimd128Register(1);
2671  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2672  // dst = [15, 7, ... 11, 3, 9, 1]
2673  __ ilvod_h(dst, src1, src0);
2674  break;
2675  }
2676  case kMipsS16x4Reverse: {
2677  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2678  // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
2679  // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
2680  __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
2681  break;
2682  }
2683  case kMipsS16x2Reverse: {
2684  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2685  // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
2686  // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
2687  __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
2688  break;
2689  }
2690  case kMipsS8x16InterleaveRight: {
2691  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2692  Simd128Register dst = i.OutputSimd128Register(),
2693  src0 = i.InputSimd128Register(0),
2694  src1 = i.InputSimd128Register(1);
2695  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2696  // dst = [23, 7, ... 17, 1, 16, 0]
2697  __ ilvr_b(dst, src1, src0);
2698  break;
2699  }
2700  case kMipsS8x16InterleaveLeft: {
2701  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2702  Simd128Register dst = i.OutputSimd128Register(),
2703  src0 = i.InputSimd128Register(0),
2704  src1 = i.InputSimd128Register(1);
2705  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2706  // dst = [31, 15, ... 25, 9, 24, 8]
2707  __ ilvl_b(dst, src1, src0);
2708  break;
2709  }
2710  case kMipsS8x16PackEven: {
2711  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2712  Simd128Register dst = i.OutputSimd128Register(),
2713  src0 = i.InputSimd128Register(0),
2714  src1 = i.InputSimd128Register(1);
2715  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2716  // dst = [30, 28, ... 6, 4, 2, 0]
2717  __ pckev_b(dst, src1, src0);
2718  break;
2719  }
2720  case kMipsS8x16PackOdd: {
2721  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2722  Simd128Register dst = i.OutputSimd128Register(),
2723  src0 = i.InputSimd128Register(0),
2724  src1 = i.InputSimd128Register(1);
2725  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2726  // dst = [31, 29, ... 7, 5, 3, 1]
2727  __ pckod_b(dst, src1, src0);
2728  break;
2729  }
2730  case kMipsS8x16InterleaveEven: {
2731  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2732  Simd128Register dst = i.OutputSimd128Register(),
2733  src0 = i.InputSimd128Register(0),
2734  src1 = i.InputSimd128Register(1);
2735  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2736  // dst = [30, 14, ... 18, 2, 16, 0]
2737  __ ilvev_b(dst, src1, src0);
2738  break;
2739  }
2740  case kMipsS8x16InterleaveOdd: {
2741  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2742  Simd128Register dst = i.OutputSimd128Register(),
2743  src0 = i.InputSimd128Register(0),
2744  src1 = i.InputSimd128Register(1);
2745  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2746  // dst = [31, 15, ... 19, 3, 17, 1]
2747  __ ilvod_b(dst, src1, src0);
2748  break;
2749  }
2750  case kMipsS8x16Concat: {
2751  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2752  Simd128Register dst = i.OutputSimd128Register();
2753  DCHECK(dst == i.InputSimd128Register(0));
2754  __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
2755  break;
2756  }
2757  case kMipsS8x16Shuffle: {
2758  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2759  Simd128Register dst = i.OutputSimd128Register(),
2760  src0 = i.InputSimd128Register(0),
2761  src1 = i.InputSimd128Register(1);
2762 
2763  if (dst == src0) {
2764  __ move_v(kSimd128ScratchReg, src0);
2765  src0 = kSimd128ScratchReg;
2766  } else if (dst == src1) {
2767  __ move_v(kSimd128ScratchReg, src1);
2768  src1 = kSimd128ScratchReg;
2769  }
2770 
2771  __ li(kScratchReg, i.InputInt32(2));
2772  __ insert_w(dst, 0, kScratchReg);
2773  __ li(kScratchReg, i.InputInt32(3));
2774  __ insert_w(dst, 1, kScratchReg);
2775  __ li(kScratchReg, i.InputInt32(4));
2776  __ insert_w(dst, 2, kScratchReg);
2777  __ li(kScratchReg, i.InputInt32(5));
2778  __ insert_w(dst, 3, kScratchReg);
2779  __ vshf_b(dst, src1, src0);
2780  break;
2781  }
2782  case kMipsS8x8Reverse: {
2783  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2784  // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
2785  // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
2786  // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
2787  // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
2788  __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
2789  __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
2790  break;
2791  }
2792  case kMipsS8x4Reverse: {
2793  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2794  // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
2795  // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
2796  __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
2797  break;
2798  }
2799  case kMipsS8x2Reverse: {
2800  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2801  // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
2802  // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
2803  __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
2804  break;
2805  }
2806  case kMipsI32x4SConvertI16x8Low: {
2807  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2808  Simd128Register dst = i.OutputSimd128Register();
2809  Simd128Register src = i.InputSimd128Register(0);
2810  __ ilvr_h(kSimd128ScratchReg, src, src);
2811  __ slli_w(dst, kSimd128ScratchReg, 16);
2812  __ srai_w(dst, dst, 16);
2813  break;
2814  }
2815  case kMipsI32x4SConvertI16x8High: {
2816  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2817  Simd128Register dst = i.OutputSimd128Register();
2818  Simd128Register src = i.InputSimd128Register(0);
2819  __ ilvl_h(kSimd128ScratchReg, src, src);
2820  __ slli_w(dst, kSimd128ScratchReg, 16);
2821  __ srai_w(dst, dst, 16);
2822  break;
2823  }
2824  case kMipsI32x4UConvertI16x8Low: {
2825  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2826  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2827  __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
2828  i.InputSimd128Register(0));
2829  break;
2830  }
2831  case kMipsI32x4UConvertI16x8High: {
2832  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2833  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2834  __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
2835  i.InputSimd128Register(0));
2836  break;
2837  }
2838  case kMipsI16x8SConvertI8x16Low: {
2839  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2840  Simd128Register dst = i.OutputSimd128Register();
2841  Simd128Register src = i.InputSimd128Register(0);
2842  __ ilvr_b(kSimd128ScratchReg, src, src);
2843  __ slli_h(dst, kSimd128ScratchReg, 8);
2844  __ srai_h(dst, dst, 8);
2845  break;
2846  }
2847  case kMipsI16x8SConvertI8x16High: {
2848  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2849  Simd128Register dst = i.OutputSimd128Register();
2850  Simd128Register src = i.InputSimd128Register(0);
2851  __ ilvl_b(kSimd128ScratchReg, src, src);
2852  __ slli_h(dst, kSimd128ScratchReg, 8);
2853  __ srai_h(dst, dst, 8);
2854  break;
2855  }
2856  case kMipsI16x8SConvertI32x4: {
2857  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2858  Simd128Register dst = i.OutputSimd128Register();
2859  Simd128Register src0 = i.InputSimd128Register(0);
2860  Simd128Register src1 = i.InputSimd128Register(1);
2861  __ sat_s_w(kSimd128ScratchReg, src0, 15);
2862  __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
2863  __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2864  break;
2865  }
2866  case kMipsI16x8UConvertI32x4: {
2867  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2868  Simd128Register dst = i.OutputSimd128Register();
2869  Simd128Register src0 = i.InputSimd128Register(0);
2870  Simd128Register src1 = i.InputSimd128Register(1);
2871  __ sat_u_w(kSimd128ScratchReg, src0, 15);
2872  __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
2873  __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2874  break;
2875  }
2876  case kMipsI16x8UConvertI8x16Low: {
2877  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2878  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2879  __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
2880  i.InputSimd128Register(0));
2881  break;
2882  }
2883  case kMipsI16x8UConvertI8x16High: {
2884  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2885  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2886  __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
2887  i.InputSimd128Register(0));
2888  break;
2889  }
2890  case kMipsI8x16SConvertI16x8: {
2891  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2892  Simd128Register dst = i.OutputSimd128Register();
2893  Simd128Register src0 = i.InputSimd128Register(0);
2894  Simd128Register src1 = i.InputSimd128Register(1);
2895  __ sat_s_h(kSimd128ScratchReg, src0, 7);
2896  __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
2897  __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
2898  break;
2899  }
2900  case kMipsI8x16UConvertI16x8: {
2901  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2902  Simd128Register dst = i.OutputSimd128Register();
2903  Simd128Register src0 = i.InputSimd128Register(0);
2904  Simd128Register src1 = i.InputSimd128Register(1);
2905  __ sat_u_h(kSimd128ScratchReg, src0, 7);
2906  __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
2907  __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
2908  break;
2909  }
2910  case kMipsF32x4AddHoriz: {
2911  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2912  Simd128Register src0 = i.InputSimd128Register(0);
2913  Simd128Register src1 = i.InputSimd128Register(1);
2914  Simd128Register dst = i.OutputSimd128Register();
2915  __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
2916  __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
2917  __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
2918  __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
2919  __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
2920  break;
2921  }
2922  case kMipsI32x4AddHoriz: {
2923  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2924  Simd128Register src0 = i.InputSimd128Register(0);
2925  Simd128Register src1 = i.InputSimd128Register(1);
2926  Simd128Register dst = i.OutputSimd128Register();
2927  __ hadd_s_d(kSimd128ScratchReg, src0, src0);
2928  __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
2929  __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
2930  break;
2931  }
2932  case kMipsI16x8AddHoriz: {
2933  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2934  Simd128Register src0 = i.InputSimd128Register(0);
2935  Simd128Register src1 = i.InputSimd128Register(1);
2936  Simd128Register dst = i.OutputSimd128Register();
2937  __ hadd_s_w(kSimd128ScratchReg, src0, src0);
2938  __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
2939  __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2940  break;
2941  }
2942  }
2943  return kSuccess;
2944 } // NOLINT(readability/fn_size)
2945 
// Emits the compare-and-branch sequence realizing |condition| for the compare
// pseudo-instruction |instr|: branches to |tlabel| when the condition holds
// and, when |fallthru| is false, unconditionally to |flabel| otherwise.
// Shared by AssembleArchBranch and AssembleArchTrap, hence a free function
// taking the TurboAssembler explicitly.
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
                            Instruction* instr, FlagsCondition condition,
                            Label* tlabel, Label* flabel, bool fallthru) {
// Temporarily rebind the assembler shorthand to the |tasm| parameter; the
// file-level binding (tasm()) is restored at the end of this function.
#undef __
#define __ tasm->

  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on the other arch's. The compare operations
  // emit mips pseudo-instructions, which are handled here by branch
  // instructions that do the actual comparison. Essential that the input
  // registers to compare pseudo-op are not modified before this branch op, as
  // they are tested here.

  MipsOperandConverter i(gen, instr);
  if (instr->arch_opcode() == kMipsTst) {
    // kMipsTst leaves its result in kScratchReg; compare it against zero.
    cc = FlagsConditionToConditionTst(condition);
    __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMipsAddOvf ||
             instr->arch_opcode() == kMipsSubOvf) {
    // Overflow occurs if overflow register is negative
    switch (condition) {
      case kOverflow:
        __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
        break;
      case kNotOverflow:
        __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
        break;
      default:
        UNSUPPORTED_COND(instr->arch_opcode(), condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsMulOvf) {
    // Overflow occurs if overflow register is not zero
    switch (condition) {
      case kOverflow:
        __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
        break;
      case kNotOverflow:
        __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
        break;
      default:
        UNSUPPORTED_COND(kMipsMulOvf, condition);
        break;
    }
  } else if (instr->arch_opcode() == kMipsCmp) {
    // Integer compare: branch directly on the two original operands.
    cc = FlagsConditionToConditionCmp(condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kMipsCmpS ||
             instr->arch_opcode() == kMipsCmpD) {
    // FP compares set the FPU condition flag; branch on it directly.
    bool predicate;
    FlagsConditionToConditionCmpFPU(predicate, condition);
    if (predicate) {
      __ BranchTrueF(tlabel);
    } else {
      __ BranchFalseF(tlabel);
    }
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
// Restore the file-level assembler shorthand.
#undef __
#define __ tasm()->
}
3012 
3013 // Assembles branches after an instruction.
3014 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3015  Label* tlabel = branch->true_label;
3016  Label* flabel = branch->false_label;
3017  AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
3018  branch->fallthru);
3019 }
3020 
// Clears kSpeculationPoisonRegister on the path where |condition| does not
// hold, based on the state left behind by the compare pseudo-instruction
// |instr| (Spectre mitigation).
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                            Instruction* instr) {
  // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
    return;
  }

  MipsOperandConverter i(this, instr);
  // The register must be zeroed when the original condition FAILS, so the
  // zero-on-condition helpers below are driven by the negated condition.
  condition = NegateFlagsCondition(condition);

  switch (instr->arch_opcode()) {
    case kMipsCmp: {
      __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
                             i.InputOperand(1),
                             FlagsConditionToConditionCmp(condition));
    }
      return;
    case kMipsTst: {
      // kMipsTst leaves its result in kScratchReg.
      switch (condition) {
        case kEqual:
          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
          break;
        case kNotEqual:
          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
                                        kScratchReg);
          break;
        default:
          UNREACHABLE();
      }
    }
      return;
    case kMipsAddOvf:
    case kMipsSubOvf: {
      // Overflow occurs if overflow register is negative
      __ Slt(kScratchReg2, kScratchReg, zero_reg);
      switch (condition) {
        case kOverflow:
          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
                                        kScratchReg2);
          break;
        case kNotOverflow:
          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
          break;
        default:
          UNSUPPORTED_COND(instr->arch_opcode(), condition);
      }
    }
      return;
    case kMipsMulOvf: {
      // Overflow occurs if overflow register is not zero
      switch (condition) {
        case kOverflow:
          __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
                                        kScratchReg);
          break;
        case kNotOverflow:
          __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
          break;
        default:
          UNSUPPORTED_COND(instr->arch_opcode(), condition);
      }
    }
      return;
    case kMipsCmpS:
    case kMipsCmpD: {
      // FP compares set the FPU condition flag; consume it directly.
      bool predicate;
      FlagsConditionToConditionCmpFPU(predicate, condition);
      if (predicate) {
        __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
      } else {
        __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
      }
    }
      return;
    default:
      UNREACHABLE();
      break;
  }
}
3100 
// On MIPS a deoptimization branch is emitted exactly like a regular branch.
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}
3105 
3106 void CodeGenerator::AssembleArchJump(RpoNumber target) {
3107  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
3108 }
3109 
// Emits an out-of-line wasm trap sequence and a branch to it that is taken
// when |condition| holds for the compare pseudo-instruction |instr|.
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}

    void Generate() final {
      MipsOperandConverter i(gen_, instr_);
      // The trap id is encoded as the last input of the instruction.
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      if (trap_id == TrapId::kInvalid) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        // Drop this frame's stack parameters before returning to the caller.
        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
        int pop_count =
            static_cast<int>(call_descriptor->StackParameterCount());
        __ Drop(pop_count);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        // A direct call to a wasm runtime stub defined in this module.
        // Just encode the stub index. This will be patched when the code
        // is added to the native module and copied into wasm code space.
        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
        // Record an (empty) safepoint so the GC can handle this call site.
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                              Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
        }
      }
    }

    Instruction* instr_;
    CodeGenerator* gen_;
  };
  auto ool = new (zone()) OutOfLineTrap(this, instr);
  Label* tlabel = ool->entry();
  // Branch to the OOL trap when the condition holds; fall through otherwise.
  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
3163 
3164 // Assembles boolean materializations after an instruction.
3165 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3166  FlagsCondition condition) {
3167  MipsOperandConverter i(this, instr);
3168  Label done;
3169 
3170  // Materialize a full 32-bit 1 or 0 value. The result register is always the
3171  // last output of the instruction.
3172  Label false_value;
3173  DCHECK_NE(0u, instr->OutputCount());
3174  Register result = i.OutputRegister(instr->OutputCount() - 1);
3175  Condition cc = kNoCondition;
3176  // MIPS does not have condition code flags, so compare and branch are
3177  // implemented differently than on the other arch's. The compare operations
3178  // emit mips pseudo-instructions, which are checked and handled here.
3179 
3180  if (instr->arch_opcode() == kMipsTst) {
3181  cc = FlagsConditionToConditionTst(condition);
3182  if (cc == eq) {
3183  __ Sltu(result, kScratchReg, 1);
3184  } else {
3185  __ Sltu(result, zero_reg, kScratchReg);
3186  }
3187  return;
3188  } else if (instr->arch_opcode() == kMipsAddOvf ||
3189  instr->arch_opcode() == kMipsSubOvf) {
3190  // Overflow occurs if overflow register is negative
3191  __ slt(result, kScratchReg, zero_reg);
3192  } else if (instr->arch_opcode() == kMipsMulOvf) {
3193  // Overflow occurs if overflow register is not zero
3194  __ Sgtu(result, kScratchReg, zero_reg);
3195  } else if (instr->arch_opcode() == kMipsCmp) {
3196  cc = FlagsConditionToConditionCmp(condition);
3197  switch (cc) {
3198  case eq:
3199  case ne: {
3200  Register left = i.InputRegister(0);
3201  Operand right = i.InputOperand(1);
3202  if (instr->InputAt(1)->IsImmediate()) {
3203  if (is_int16(-right.immediate())) {
3204  if (right.immediate() == 0) {
3205  if (cc == eq) {
3206  __ Sltu(result, left, 1);
3207  } else {
3208  __ Sltu(result, zero_reg, left);
3209  }
3210  } else {
3211  __ Addu(result, left, -right.immediate());
3212  if (cc == eq) {
3213  __ Sltu(result, result, 1);
3214  } else {
3215  __ Sltu(result, zero_reg, result);
3216  }
3217  }
3218  } else {
3219  if (is_uint16(right.immediate())) {
3220  __ Xor(result, left, right);
3221  } else {
3222  __ li(kScratchReg, right);
3223  __ Xor(result, left, kScratchReg);
3224  }
3225  if (cc == eq) {
3226  __ Sltu(result, result, 1);
3227  } else {
3228  __ Sltu(result, zero_reg, result);
3229  }
3230  }
3231  } else {
3232  __ Xor(result, left, right);
3233  if (cc == eq) {
3234  __ Sltu(result, result, 1);
3235  } else {
3236  __ Sltu(result, zero_reg, result);
3237  }
3238  }
3239  } break;
3240  case lt:
3241  case ge: {
3242  Register left = i.InputRegister(0);
3243  Operand right = i.InputOperand(1);
3244  __ Slt(result, left, right);
3245  if (cc == ge) {
3246  __ xori(result, result, 1);
3247  }
3248  } break;
3249  case gt:
3250  case le: {
3251  Register left = i.InputRegister(1);
3252  Operand right = i.InputOperand(0);
3253  __ Slt(result, left, right);
3254  if (cc == le) {
3255  __ xori(result, result, 1);
3256  }
3257  } break;
3258  case lo:
3259  case hs: {
3260  Register left = i.InputRegister(0);
3261  Operand right = i.InputOperand(1);
3262  __ Sltu(result, left, right);
3263  if (cc == hs) {
3264  __ xori(result, result, 1);
3265  }
3266  } break;
3267  case hi:
3268  case ls: {
3269  Register left = i.InputRegister(1);
3270  Operand right = i.InputOperand(0);
3271  __ Sltu(result, left, right);
3272  if (cc == ls) {
3273  __ xori(result, result, 1);
3274  }
3275  } break;
3276  default:
3277  UNREACHABLE();
3278  }
3279  return;
3280  } else if (instr->arch_opcode() == kMipsCmpD ||
3281  instr->arch_opcode() == kMipsCmpS) {
3282  FPURegister left = i.InputOrZeroDoubleRegister(0);
3283  FPURegister right = i.InputOrZeroDoubleRegister(1);
3284  if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
3285  !__ IsDoubleZeroRegSet()) {
3286  __ Move(kDoubleRegZero, 0.0);
3287  }
3288  bool predicate;
3289  FlagsConditionToConditionCmpFPU(predicate, condition);
3290  if (!IsMipsArchVariant(kMips32r6)) {
3291  __ li(result, Operand(1));
3292  if (predicate) {
3293  __ Movf(result, zero_reg);
3294  } else {
3295  __ Movt(result, zero_reg);
3296  }
3297  } else {
3298  __ mfc1(result, kDoubleCompareReg);
3299  if (predicate) {
3300  __ And(result, result, 1); // cmp returns all 1's/0's, use only LSB.
3301  } else {
3302  __ Addu(result, result, 1); // Toggle result for not equal.
3303  }
3304  }
3305  return;
3306  } else {
3307  PrintF("AssembleArchBoolean Unimplemented arch_opcode is : %d\n",
3308  instr->arch_opcode());
3309  TRACE_UNIMPL();
3310  UNIMPLEMENTED();
3311  }
3312 }
3313 
3314 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
3315  MipsOperandConverter i(this, instr);
3316  Register input = i.InputRegister(0);
3317  std::vector<std::pair<int32_t, Label*>> cases;
3318  for (size_t index = 2; index < instr->InputCount(); index += 2) {
3319  cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
3320  }
3321  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
3322  cases.data() + cases.size());
3323 }
3324 
3325 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
3326  MipsOperandConverter i(this, instr);
3327  Register input = i.InputRegister(0);
3328  for (size_t index = 2; index < instr->InputCount(); index += 2) {
3329  __ li(kScratchReg, Operand(i.InputInt32(index + 0)));
3330  __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
3331  }
3332  AssembleArchJump(i.InputRpo(1));
3333 }
3334 
3335 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
3336  MipsOperandConverter i(this, instr);
3337  Register input = i.InputRegister(0);
3338  size_t const case_count = instr->InputCount() - 2;
3339  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
3340  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
3341  return GetLabel(i.InputRpo(index + 2));
3342  });
3343 }
3344 
3345 void CodeGenerator::FinishFrame(Frame* frame) {
3346  auto call_descriptor = linkage()->GetIncomingDescriptor();
3347 
3348  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3349  if (saves_fpu != 0) {
3350  frame->AlignSavedCalleeRegisterSlots();
3351  }
3352 
3353  if (saves_fpu != 0) {
3354  int count = base::bits::CountPopulation(saves_fpu);
3355  DCHECK_EQ(kNumCalleeSavedFPU, count);
3356  frame->AllocateSavedCalleeRegisterSlots(count *
3357  (kDoubleSize / kPointerSize));
3358  }
3359 
3360  const RegList saves = call_descriptor->CalleeSavedRegisters();
3361  if (saves != 0) {
3362  int count = base::bits::CountPopulation(saves);
3363  DCHECK_EQ(kNumCalleeSaved, count + 1);
3364  frame->AllocateSavedCalleeRegisterSlots(count);
3365  }
3366 }
3367 
// Emits the frame-construction prologue: frame pointer setup (per frame
// kind), allocation of spill/return slots, and saving of callee-saved
// registers.
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
      // C frame: save ra/fp and establish the frame pointer manually.
      __ Push(ra, fp);
      __ mov(fp, sp);
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
      if (call_descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
      if (call_descriptor->IsWasmFunctionCall()) {
        __ Push(kWasmInstanceRegister);
      } else if (call_descriptor->IsWasmImportWrapper()) {
        // WASM import wrappers are passed a tuple in the place of the instance.
        // Unpack the tuple into the instance and the target callable.
        // This must be done here in the codegen because it cannot be expressed
        // properly in the graph.
        __ lw(kJSFunctionRegister,
              FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
        __ lw(kWasmInstanceRegister,
              FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
        __ Push(kWasmInstanceRegister);
      }
    }
  }

  // Number of slots to allocate beyond the fixed part of the frame.
  int shrink_slots = frame()->GetTotalFrameSlotCount() -
                     call_descriptor->CalculateFixedFrameSize();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
    ResetSpeculationPoison();
  }

  const RegList saves = call_descriptor->CalleeSavedRegisters();
  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  const int returns = frame()->GetReturnSlotCount();

  // Skip callee-saved and return slots, which are pushed below.
  shrink_slots -= base::bits::CountPopulation(saves);
  // Each saved FPU register occupies two pointer-sized slots.
  shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
  shrink_slots -= returns;
  if (shrink_slots > 0) {
    __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
  }

  // Save callee-saved FPU registers.
  if (saves_fpu != 0) {
    __ MultiPushFPU(saves_fpu);
  }

  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
  }

  if (returns != 0) {
    // Create space for returns.
    __ Subu(sp, sp, Operand(returns * kPointerSize));
  }
}
3442 
// Emits the return sequence: frees return slots, restores callee-saved
// registers, deconstructs the frame, drops stack arguments (a fixed count
// plus |pop|, which is either an immediate or a register) and returns.
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());

  // Free the return slots reserved in AssembleConstructFrame.
  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    __ Addu(sp, sp, Operand(returns * kPointerSize));
  }

  // Restore GP registers.
  const RegList saves = call_descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  MipsOperandConverter g(this, nullptr);
  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have an variable
    // number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        // Reuse the already-emitted canonical return sequence.
        __ Branch(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    // Variable pop count: convert slot count to bytes and bump sp.
    Register pop_reg = g.ToRegister(pop);
    __ sll(pop_reg, pop_reg, kPointerSizeLog2);
    __ Addu(sp, sp, Operand(pop_reg));
  }
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}
3496 
// No post-processing of the generated code is required on MIPS.
void CodeGenerator::FinishCode() {}
3498 
3499 void CodeGenerator::AssembleMove(InstructionOperand* source,
3500  InstructionOperand* destination) {
3501  MipsOperandConverter g(this, nullptr);
3502  // Dispatch on the source and destination operand kinds. Not all
3503  // combinations are possible.
3504  if (source->IsRegister()) {
3505  DCHECK(destination->IsRegister() || destination->IsStackSlot());
3506  Register src = g.ToRegister(source);
3507  if (destination->IsRegister()) {
3508  __ mov(g.ToRegister(destination), src);
3509  } else {
3510  __ sw(src, g.ToMemOperand(destination));
3511  }
3512  } else if (source->IsStackSlot()) {
3513  DCHECK(destination->IsRegister() || destination->IsStackSlot());
3514  MemOperand src = g.ToMemOperand(source);
3515  if (destination->IsRegister()) {
3516  __ lw(g.ToRegister(destination), src);
3517  } else {
3518  Register temp = kScratchReg;
3519  __ lw(temp, src);
3520  __ sw(temp, g.ToMemOperand(destination));
3521  }
3522  } else if (source->IsConstant()) {
3523  Constant src = g.ToConstant(source);
3524  if (destination->IsRegister() || destination->IsStackSlot()) {
3525  Register dst =
3526  destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
3527  switch (src.type()) {
3528  case Constant::kInt32:
3529  if (RelocInfo::IsWasmReference(src.rmode())) {
3530  __ li(dst, Operand(src.ToInt32(), src.rmode()));
3531  } else {
3532  __ li(dst, Operand(src.ToInt32()));
3533  }
3534  break;
3535  case Constant::kFloat32:
3536  __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
3537  break;
3538  case Constant::kInt64:
3539  UNREACHABLE();
3540  break;
3541  case Constant::kFloat64:
3542  __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
3543  break;
3544  case Constant::kExternalReference:
3545  __ li(dst, src.ToExternalReference());
3546  break;
3547  case Constant::kDelayedStringConstant:
3548  __ li(dst, src.ToDelayedStringConstant());
3549  break;
3550  case Constant::kHeapObject: {
3551  Handle<HeapObject> src_object = src.ToHeapObject();
3552  RootIndex index;
3553  if (IsMaterializableFromRoot(src_object, &index)) {
3554  __ LoadRoot(dst, index);
3555  } else {
3556  __ li(dst, src_object);
3557  }
3558  break;
3559  }
3560  case Constant::kRpoNumber:
3561  UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
3562  break;
3563  }
3564  if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
3565  } else if (src.type() == Constant::kFloat32) {
3566  if (destination->IsFPStackSlot()) {
3567  MemOperand dst = g.ToMemOperand(destination);
3568  if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
3569  __ sw(zero_reg, dst);
3570  } else {
3571  __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
3572  __ sw(kScratchReg, dst);
3573  }
3574  } else {
3575  DCHECK(destination->IsFPRegister());
3576  FloatRegister dst = g.ToSingleRegister(destination);
3577  __ Move(dst, src.ToFloat32());
3578  }
3579  } else {
3580  DCHECK_EQ(Constant::kFloat64, src.type());
3581  DoubleRegister dst = destination->IsFPRegister()
3582  ? g.ToDoubleRegister(destination)
3583  : kScratchDoubleReg;
3584  __ Move(dst, src.ToFloat64().value());
3585  if (destination->IsFPStackSlot()) {
3586  __ Sdc1(dst, g.ToMemOperand(destination));
3587  }
3588  }
3589  } else if (source->IsFPRegister()) {
3590  MachineRepresentation rep = LocationOperand::cast(source)->representation();
3591  if (rep == MachineRepresentation::kSimd128) {
3592  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3593  MSARegister src = g.ToSimd128Register(source);
3594  if (destination->IsSimd128Register()) {
3595  MSARegister dst = g.ToSimd128Register(destination);
3596  __ move_v(dst, src);
3597  } else {
3598  DCHECK(destination->IsSimd128StackSlot());
3599  __ st_b(src, g.ToMemOperand(destination));
3600  }
3601  } else {
3602  FPURegister src = g.ToDoubleRegister(source);
3603  if (destination->IsFPRegister()) {
3604  FPURegister dst = g.ToDoubleRegister(destination);
3605  __ Move(dst, src);
3606  } else {
3607  DCHECK(destination->IsFPStackSlot());
3608  MachineRepresentation rep =
3609  LocationOperand::cast(source)->representation();
3610  if (rep == MachineRepresentation::kFloat64) {
3611  __ Sdc1(src, g.ToMemOperand(destination));
3612  } else if (rep == MachineRepresentation::kFloat32) {
3613  __ swc1(src, g.ToMemOperand(destination));
3614  } else {
3615  UNREACHABLE();
3616  }
3617  }
3618  }
3619  } else if (source->IsFPStackSlot()) {
3620  DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
3621  MemOperand src = g.ToMemOperand(source);
3622  MachineRepresentation rep = LocationOperand::cast(source)->representation();
3623  if (destination->IsFPRegister()) {
3624  if (rep == MachineRepresentation::kFloat64) {
3625  __ Ldc1(g.ToDoubleRegister(destination), src);
3626  } else if (rep == MachineRepresentation::kFloat32) {
3627  __ lwc1(g.ToDoubleRegister(destination), src);
3628  } else {
3629  DCHECK_EQ(MachineRepresentation::kSimd128, rep);
3630  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3631  __ ld_b(g.ToSimd128Register(destination), src);
3632  }
3633  } else {
3634  FPURegister temp = kScratchDoubleReg;
3635  if (rep == MachineRepresentation::kFloat64) {
3636  __ Ldc1(temp, src);
3637  __ Sdc1(temp, g.ToMemOperand(destination));
3638  } else if (rep == MachineRepresentation::kFloat32) {
3639  __ lwc1(temp, src);
3640  __ swc1(temp, g.ToMemOperand(destination));
3641  } else {
3642  DCHECK_EQ(MachineRepresentation::kSimd128, rep);
3643  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3644  MSARegister temp = kSimd128ScratchReg;
3645  __ ld_b(temp, src);
3646  __ st_b(temp, g.ToMemOperand(destination));
3647  }
3648  }
3649  } else {
3650  UNREACHABLE();
3651  }
3652 }
3653 
// Swaps the contents of |source| and |destination| in place. Uses the scratch
// registers (kScratchReg, kScratchReg2, kScratchDoubleReg,
// kSimd128ScratchReg) as temporaries, so none of them may hold a live value
// when this is called.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      // Three-move rotation through the scratch register.
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      // Register-stack: stash the register value, load the slot into the
      // register, then store the stashed value to the slot.
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ lw(src, dst);
      __ sw(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    // Stack-stack: load both slots into the two GP scratch registers, then
    // store them back crosswise.
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lw(temp_0, src);
    __ lw(temp_1, dst);
    __ sw(temp_0, dst);
    __ sw(temp_1, src);
  } else if (source->IsFPRegister()) {
    if (destination->IsFPRegister()) {
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kSimd128) {
        // 128-bit SIMD register swap via the MSA scratch register.
        CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
        MSARegister temp = kSimd128ScratchReg;
        MSARegister src = g.ToSimd128Register(source);
        MSARegister dst = g.ToSimd128Register(destination);
        __ move_v(temp, src);
        __ move_v(src, dst);
        __ move_v(dst, temp);
      } else {
        // Float32/float64 register swap (single and double share the FPU
        // register file on MIPS, so ToDoubleRegister works for both).
        FPURegister temp = kScratchDoubleReg;
        FPURegister src = g.ToDoubleRegister(source);
        FPURegister dst = g.ToDoubleRegister(destination);
        __ Move(temp, src);
        __ Move(src, dst);
        __ Move(dst, temp);
      }
    } else {
      DCHECK(destination->IsFPStackSlot());
      // FP register-stack swap, keyed on the representation width.
      MemOperand dst = g.ToMemOperand(destination);
      MachineRepresentation rep =
          LocationOperand::cast(source)->representation();
      if (rep == MachineRepresentation::kFloat64) {
        FPURegister temp = kScratchDoubleReg;
        FPURegister src = g.ToDoubleRegister(source);
        __ Move(temp, src);
        __ Ldc1(src, dst);
        __ Sdc1(temp, dst);
      } else if (rep == MachineRepresentation::kFloat32) {
        FPURegister temp = kScratchDoubleReg;
        FPURegister src = g.ToFloatRegister(source);
        __ Move(temp, src);
        __ lwc1(src, dst);
        __ swc1(temp, dst);
      } else {
        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
        CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
        MSARegister temp = kSimd128ScratchReg;
        MSARegister src = g.ToSimd128Register(source);
        __ move_v(temp, src);
        __ ld_b(src, dst);
        __ st_b(temp, dst);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    // Stack-stack FP swap: the destination is saved in the FP/MSA scratch
    // register while the source words are copied over one 32-bit word at a
    // time through the GP scratch register; the saved value is then written
    // back to the source slot.
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand dst0 = g.ToMemOperand(destination);
    MachineRepresentation rep = LocationOperand::cast(source)->representation();
    if (rep == MachineRepresentation::kFloat64) {
      // A float64 slot spans two 32-bit words.
      MemOperand src1(src0.rm(), src0.offset() + kIntSize);
      MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
      __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ lw(temp_0, src1);
      __ sw(temp_0, dst1);
      __ Sdc1(temp_1, src0);
    } else if (rep == MachineRepresentation::kFloat32) {
      __ lwc1(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ swc1(temp_1, src0);
    } else {
      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
      // A simd128 slot spans four 32-bit words.
      MemOperand src1(src0.rm(), src0.offset() + kIntSize);
      MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
      MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
      MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
      MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
      MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
      CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
      // Deliberately shadows the FPU temp_1 above: the 128-bit value needs
      // an MSA register.
      MSARegister temp_1 = kSimd128ScratchReg;
      __ ld_b(temp_1, dst0);  // Save destination in temp_1.
      __ lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
      __ sw(temp_0, dst0);
      __ lw(temp_0, src1);
      __ sw(temp_0, dst1);
      __ lw(temp_0, src2);
      __ sw(temp_0, dst2);
      __ lw(temp_0, src3);
      __ sw(temp_0, dst3);
      __ st_b(temp_1, src0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
3779 
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 32-bit MIPS we emit the jump tables inline, so this out-of-line hook
  // must never be reached.
  UNREACHABLE();
}
3784 
3785 #undef __
3786 
3787 } // namespace compiler
3788 } // namespace internal
3789 } // namespace v8
// Definition: libplatform.h:13