V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
code-generator-mips64.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/assembler-inl.h"
#include "src/callable.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"  // crbug.com/v8/8499
#include "src/macro-assembler.h"
#include "src/mips64/constants-mips64.h"
#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->

// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
         __LINE__)

// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // Single (Float) and Double register namespace is same on MIPS,
    // both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK_EQ(0, InputInt32(index));
      return zero_reg;
    }
    return InputRegister(index);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
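
// MemoryOperand(size_t* first_index) advances *first_index past the operands
// it consumes (two for kMode_MRI: the base register and the immediate
// offset), so a caller can keep decoding any instruction inputs that follow
// the address.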

static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}

namespace {

class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    __ Daddu(scratch1_, object_, index_);
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }
    if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
    } else {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode);
    }
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  StubCallMode const stub_mode_;
  bool must_save_lr_;
  Zone* zone_;
};
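
// OutOfLineRecordWrite is reached only from the slow path of the
// kArchStoreWithWriteBarrier case below: the inline fast path performs the
// store and uses CheckPageFlag to skip the barrier entirely when the written
// page is not interesting to the GC, and Generate() itself bails out early
// for Smis and for values on pages whose incoming pointers do not need
// recording.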

#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
  class ool_name final : public OutOfLineCode { \
   public: \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
 \
    void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
 \
   private: \
    T const dst_; \
    T const src1_; \
    T const src2_; \
  }

CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);

#undef CREATE_OOL_CLASS

Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
}

Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
}

Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
}

FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
                                             FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      predicate = true;
      return EQ;
    case kNotEqual:
      predicate = false;
      return EQ;
    case kUnsignedLessThan:
      predicate = true;
      return OLT;
    case kUnsignedGreaterThanOrEqual:
      predicate = false;
      return OLT;
    case kUnsignedLessThanOrEqual:
      predicate = true;
      return OLE;
    case kUnsignedGreaterThan:
      predicate = false;
      return OLE;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      predicate = true;
      break;
    default:
      predicate = true;
      break;
  }
  UNREACHABLE();
}

void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
                                   InstructionCode opcode, Instruction* instr,
                                   MipsOperandConverter& i) {
  const MemoryAccessMode access_mode =
      static_cast<MemoryAccessMode>(MiscField::decode(opcode));
  if (access_mode == kMemoryAccessPoisoned) {
    Register value = i.OutputRegister();
    codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
  }
}

}  // namespace

#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
  do { \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
  do { \
    __ sync(); \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
  do { \
    Label binop; \
    __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ sync(); \
    __ bind(&binop); \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
    __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
                 Operand(i.InputRegister(2))); \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
    __ sync(); \
  } while (0)
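
// ASSEMBLE_ATOMIC_BINOP expands to the classic load-linked/store-conditional
// retry loop. For the 64-bit instantiation the emitted shape is roughly (a
// sketch, not literal assembler output):
//
//   sync                     ; full barrier before the atomic region
// binop:
//   lld  out, 0(addr)        ; load-linked the old value
//   op   tmp, out, value     ; compute the updated value
//   scd  tmp, 0(addr)        ; store-conditional; tmp becomes 0 on failure
//   beq  tmp, zero, binop    ; retry if another agent touched the word
//   sync                     ; full barrier after the atomic region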

#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
                                  size, bin_instr, representation) \
  do { \
    Label binop; \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    if (representation == 32) { \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
    } else { \
      DCHECK_EQ(representation, 64); \
      __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \
    } \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
             Operand(i.TempRegister(3))); \
    __ sll(i.TempRegister(3), i.TempRegister(3), 3); \
    __ sync(); \
    __ bind(&binop); \
    __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
                   size, sign_extend); \
    __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
                 Operand(i.InputRegister(2))); \
    __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
                  size); \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
    __ sync(); \
  } while (0)
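
// The _EXT variants synthesize sub-word atomics from a full-word LL/SC loop:
// the address is aligned down to its containing word, the discarded low
// address bits (times 8) give the bit position of the narrow field, and
// ExtractBits/InsertBits splice the old and new values out of and into the
// loaded word. For example (illustrative), a 16-bit operation at address A
// with representation == 32 uses base = A & ~0x3 and bit offset
// = (A & 0x3) * 8.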

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
  do { \
    Label exchange; \
    __ sync(); \
    __ bind(&exchange); \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
    __ mov(i.TempRegister(1), i.InputRegister(2)); \
    __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
    load_linked, store_conditional, sign_extend, size, representation) \
  do { \
    Label exchange; \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    if (representation == 32) { \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
    } else { \
      DCHECK_EQ(representation, 64); \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
    } \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
             Operand(i.TempRegister(1))); \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
    __ sync(); \
    __ bind(&exchange); \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
                   size, sign_extend); \
    __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
                  size); \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
                                                 store_conditional) \
  do { \
    Label compareExchange; \
    Label exit; \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ sync(); \
    __ bind(&compareExchange); \
    __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&exit, ne, i.InputRegister(2), \
                   Operand(i.OutputRegister(0))); \
    __ mov(i.TempRegister(2), i.InputRegister(3)); \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
                   Operand(zero_reg)); \
    __ bind(&exit); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
    load_linked, store_conditional, sign_extend, size, representation) \
  do { \
    Label compareExchange; \
    Label exit; \
    __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    if (representation == 32) { \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
    } else { \
      DCHECK_EQ(representation, 64); \
      __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
    } \
    __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
             Operand(i.TempRegister(1))); \
    __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
    __ sync(); \
    __ bind(&compareExchange); \
    __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
                   size, sign_extend); \
    __ BranchShort(&exit, ne, i.InputRegister(2), \
                   Operand(i.OutputRegister(0))); \
    __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
                  size); \
    __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
    __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
                   Operand(zero_reg)); \
    __ bind(&exit); \
    __ sync(); \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name) \
  do { \
    FrameScope scope(tasm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2, kScratchReg); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
    /* Move the result in the double result register. */ \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name) \
  do { \
    FrameScope scope(tasm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 1, kScratchReg); \
    __ MovToFloatParameter(i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
    /* Move the result in the double result register. */ \
    __ MovFromFloatResult(i.OutputDoubleRegister()); \
  } while (0)
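
// Both IEEE754 helpers route through C library code: PrepareCallCFunction
// sets up the call, MovToFloatParameter/MovToFloatParameters place the
// argument(s) in FPU parameter registers, and MovFromFloatResult copies the
// C result back into the instruction's output double register.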

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch3,
            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ Ld(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

void AdjustStackPointerForTailCall(TurboAssembler* tasm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    tasm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    tasm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
//    1. read from memory the word that contains that bit, which can be found
//       in the flags in the referenced {CodeDataContainer} object;
//    2. test kMarkedForDeoptimizationBit in those flags; and
//    3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ Lw(kScratchReg,
        FieldMemOperand(kScratchReg,
                        CodeDataContainer::kKindSpecificFlagsOffset));
  __ And(kScratchReg, kScratchReg,
         Operand(1 << Code::kMarkedForDeoptimizationBit));
  // Ensure we're not serializing (otherwise we'd need to use an indirection to
  // access the builtin below).
  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
  Handle<Code> code = isolate()->builtins()->builtin_handle(
      Builtins::kCompileLazyDeoptimizedCode);
  __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}

void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  // Calculate a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  //    difference = (current - expected) | (expected - current)
  //    poison = ~(difference >> (kBitsPerSystemPointer - 1))
  __ ComputeCodeStartAddress(kScratchReg);
  __ Move(kSpeculationPoisonRegister, kScratchReg);
  __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
          kJavaScriptCallCodeStartRegister);
  __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
          kScratchReg);
  __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kBitsPerSystemPointer - 1);
  __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kSpeculationPoisonRegister);
}
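
// Worked through (illustrative): when the actual code start equals the
// expected one, both subtractions yield 0, the OR stays 0, the arithmetic
// shift by kBitsPerSystemPointer - 1 leaves 0, and nor(0, 0) produces all
// ones, so ANDing with the poison register is a no-op. When they differ,
// one of the two differences is negative, the sign bit survives the OR, the
// arithmetic shift smears it across the word, and nor collapses the mask to
// 0, which cancels any speculatively loaded value it is ANDed with.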

void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ And(sp, sp, kSpeculationPoisonRegister);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
        __ Call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallWasmFunction: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Call(wasm_code, constant.rmode());
      } else {
        __ daddiu(kScratchReg, i.InputRegister(0), 0);
        __ Call(kScratchReg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
        __ Jump(reg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallWasm: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Jump(wasm_code, constant.rmode());
      } else {
        __ daddiu(kScratchReg, i.InputRegister(0), 0);
        __ Jump(kScratchReg);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      Register reg = i.InputRegister(0);
      DCHECK_IMPLIES(
          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
      __ Jump(reg);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                  Operand(kScratchReg));
      }
      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
      __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
      __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(a2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchSaveCallerRegisters: {
      fp_mode_ =
          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
      DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
      // kReturnRegister0 should have been saved before entering the stub.
      int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
      DCHECK_EQ(0, bytes % kPointerSize);
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
      DCHECK(!caller_registers_saved_);
      caller_registers_saved_ = true;
      break;
    }
    case kArchRestoreCallerRegisters: {
      DCHECK(fp_mode_ ==
             static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
      DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
      // Don't overwrite the returned value.
      int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
      frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      DCHECK(caller_registers_saved_);
      caller_registers_saved_ = false;
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      // Ideally, we should decrement SP delta to match the change of stack
      // pointer in CallCFunction. However, for certain architectures (e.g.
      // ARM), there may be more strict alignment requirement, causing old SP
      // to be saved on the stack. In those cases, we can not calculate the SP
      // delta statically.
      frame_access_state()->ClearSPDelta();
      if (caller_registers_saved_) {
        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
        // Here, we assume the sequence to be:
        //   kArchSaveCallerRegisters;
        //   kArchCallCFunction;
        //   kArchRestoreCallerRegisters;
        int bytes =
            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
        frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchDebugAbort:
      DCHECK(i.InputRegister(0) == a0);
      if (!frame_access_state()->has_frame()) {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(tasm(), StackFrame::NONE);
        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
                RelocInfo::CODE_TARGET);
      } else {
        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
                RelocInfo::CODE_TARGET);
      }
      __ stop("kArchDebugAbort");
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchComment:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      CodeGenResult result =
          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ Ld(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone())
          OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
                               mode, DetermineStubCallMode());
      __ Daddu(kScratchReg, object, index);
      __ Sd(value, MemOperand(kScratchReg));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base_reg = offset.from_stack_pointer() ? sp : fp;
      __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
      int alignment = i.InputInt32(1);
      DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
             alignment == 16);
      if (FLAG_debug_code && alignment > 0) {
        // Verify that the output_register is properly aligned
        __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
        __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
                  Operand(zero_reg));
      }
      if (alignment == 2 * kPointerSize) {
        Label done;
        __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
        __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
        __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
        __ Daddu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
        __ bind(&done);
      } else if (alignment > 2 * kPointerSize) {
        Label done;
        __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
        __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
        __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
        __ li(kScratchReg2, alignment);
        __ Dsubu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
        __ Daddu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
        __ bind(&done);
      }

      break;
    }
    case kArchWordPoisonOnSpeculation:
      __ And(i.OutputRegister(), i.InputRegister(0),
             kSpeculationPoisonRegister);
      break;
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      __ DaddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      __ DsubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                      kScratchReg);
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulOvf:
      __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
                     kScratchReg);
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dlsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputInt8(2));
      break;
    case kMips64Lsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64And32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Nor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK_EQ(0, i.InputOperand(1).immediate());
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Ctz: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Ctz(dst, src);
    } break;
    case kMips64Dctz: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Dctz(dst, src);
    } break;
    case kMips64Popcnt: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Popcnt(dst, src);
    } break;
    case kMips64Dpopcnt: {
      Register src = i.InputRegister(0);
      Register dst = i.OutputRegister();
      __ Dpopcnt(dst, src);
    } break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
    case kMips64Dext: {
      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      break;
    }
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMips64CmpS: {
      FPURegister left = i.InputOrZeroSingleRegister(0);
      FPURegister right = i.InputOrZeroSingleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());

      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }

      __ CompareF32(cc, left, right);
    } break;
    case kMips64AddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(tasm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMips64AbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64NegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64SqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64CmpD: {
      FPURegister left = i.InputOrZeroDoubleRegister(0);
      FPURegister right = i.InputOrZeroDoubleRegister(1);
      bool predicate;
      FPUCondition cc =
          FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
      if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
          !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ CompareF64(cc, left, right);
    } break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(tasm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64AbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64NegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64Float64RoundDown: {
      __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundDown: {
      __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundTruncate: {
      __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundTruncate: {
      __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundUp: {
      __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundUp: {
      __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float64RoundTiesEven: {
      __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64Float32RoundTiesEven: {
      __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    }
    case kMips64Float32Max: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
      __ Float32Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Max: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
      __ Float64Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float32Min: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
      __ Float32Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Min: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
      __ Float64Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtSD:
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtDS:
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSUw: {
      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_s_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_d_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtDUl: {
      __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSUl: {
      __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64FloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64FloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      break;
    }
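    // Note on kMips64TruncWS above: the addiu/slt/Movn tail remaps a
    // saturated INT32_MAX result to INT32_MIN. out + 1 wraps to INT32_MIN
    // exactly when out == INT32_MAX, the signed slt detects that wrap, and
    // Movn then conditionally replaces the output with the wrapped value.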
1552  case kMips64TruncLS: {
1553  FPURegister scratch = kScratchDoubleReg;
1554  Register tmp_fcsr = kScratchReg;
1555  Register result = kScratchReg2;
1556 
1557  bool load_status = instr->OutputCount() > 1;
1558  if (load_status) {
1559  // Save FCSR.
1560  __ cfc1(tmp_fcsr, FCSR);
1561  // Clear FPU flags.
1562  __ ctc1(zero_reg, FCSR);
1563  }
1564  // Other arches use round to zero here, so we follow.
1565  __ trunc_l_s(scratch, i.InputDoubleRegister(0));
1566  __ dmfc1(i.OutputRegister(), scratch);
1567  if (load_status) {
1568  __ cfc1(result, FCSR);
1569  // Check for overflow and NaNs.
1570  __ andi(result, result,
1571  (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
1572  __ Slt(result, zero_reg, result);
1573  __ xori(result, result, 1);
1574  __ mov(i.OutputRegister(1), result);
1575  // Restore FCSR.
1576  __ ctc1(tmp_fcsr, FCSR);
1577  }
1578  break;
1579  }
1580  case kMips64TruncLD: {
1581  FPURegister scratch = kScratchDoubleReg;
1582  Register tmp_fcsr = kScratchReg;
1583  Register result = kScratchReg2;
1584 
1585  bool load_status = instr->OutputCount() > 1;
1586  if (load_status) {
1587  // Save FCSR.
1588  __ cfc1(tmp_fcsr, FCSR);
1589  // Clear FPU flags.
1590  __ ctc1(zero_reg, FCSR);
1591  }
1592  // Other arches use round to zero here, so we follow.
1593  __ trunc_l_d(scratch, i.InputDoubleRegister(0));
1594  __ dmfc1(i.OutputRegister(0), scratch);
1595  if (load_status) {
1596  __ cfc1(result, FCSR);
1597  // Check for overflow and NaNs.
1598  __ andi(result, result,
1599  (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
1600  __ Slt(result, zero_reg, result);
1601  __ xori(result, result, 1);
1602  __ mov(i.OutputRegister(1), result);
1603  // Restore FCSR.
1604  __ ctc1(tmp_fcsr, FCSR);
1605  }
1606  break;
1607  }
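  // In the two cases above the optional second output (a success flag) is
  // derived from the FPU status word. Scalar sketch of the flag computation
  // (illustrative only; TruncationSucceeded is a hypothetical helper):
  //
  //   bool TruncationSucceeded(uint32_t fcsr) {
  //     // An overflow or invalid-operation cause bit means the input was a
  //     // NaN or out of int64 range, so the integer result is not usable.
  //     uint32_t bad = fcsr & (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask);
  //     return bad == 0;  // the andi/Slt/xori sequence computes exactly this
  //   }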
1608  case kMips64TruncUwD: {
1609  FPURegister scratch = kScratchDoubleReg;
1610  __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1611  break;
1612  }
1613  case kMips64TruncUwS: {
1614  FPURegister scratch = kScratchDoubleReg;
1615  __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
1616  // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1617  // because 0 allows easier out-of-bounds detection.
1618  __ addiu(kScratchReg, i.OutputRegister(), 1);
1619  __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
1620  break;
1621  }
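  // Scalar sketch of the unsigned canonicalization above (illustrative
  // only; the helper name is hypothetical). Trunc_uw_s yields UINT32_MAX on
  // overflow/NaN, which the addiu + Movz pair rewrites into 0:
  //
  //   uint32_t CanonicalizeTruncOverflowU(uint32_t out) {
  //     uint32_t plus_one = out + 1;     // 0 exactly when out == UINT32_MAX
  //     return plus_one == 0 ? 0 : out;  // Movz selects zero_reg on wrap
  //   }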
1622  case kMips64TruncUlS: {
1623  FPURegister scratch = kScratchDoubleReg;
1624  Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
1625  __ Trunc_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
1626  result);
1627  break;
1628  }
1629  case kMips64TruncUlD: {
1630  FPURegister scratch = kScratchDoubleReg;
1631  Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
1632  __ Trunc_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
1633  result);
1634  break;
1635  }
1636  case kMips64BitcastDL:
1637  __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
1638  break;
1639  case kMips64BitcastLD:
1640  __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
1641  break;
1642  case kMips64Float64ExtractLowWord32:
1643  __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
1644  break;
1645  case kMips64Float64ExtractHighWord32:
1646  __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
1647  break;
1648  case kMips64Float64InsertLowWord32:
1649  __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
1650  break;
1651  case kMips64Float64InsertHighWord32:
1652  __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
1653  break;
1654  // ... more basic instructions ...
1655 
1656  case kMips64Seb:
1657  __ seb(i.OutputRegister(), i.InputRegister(0));
1658  break;
1659  case kMips64Seh:
1660  __ seh(i.OutputRegister(), i.InputRegister(0));
1661  break;
1662  case kMips64Lbu:
1663  __ Lbu(i.OutputRegister(), i.MemoryOperand());
1664  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1665  break;
1666  case kMips64Lb:
1667  __ Lb(i.OutputRegister(), i.MemoryOperand());
1668  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1669  break;
1670  case kMips64Sb:
1671  __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
1672  break;
1673  case kMips64Lhu:
1674  __ Lhu(i.OutputRegister(), i.MemoryOperand());
1675  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1676  break;
1677  case kMips64Ulhu:
1678  __ Ulhu(i.OutputRegister(), i.MemoryOperand());
1679  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1680  break;
1681  case kMips64Lh:
1682  __ Lh(i.OutputRegister(), i.MemoryOperand());
1683  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1684  break;
1685  case kMips64Ulh:
1686  __ Ulh(i.OutputRegister(), i.MemoryOperand());
1687  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1688  break;
1689  case kMips64Sh:
1690  __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
1691  break;
1692  case kMips64Ush:
1693  __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
1694  break;
1695  case kMips64Lw:
1696  __ Lw(i.OutputRegister(), i.MemoryOperand());
1697  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1698  break;
1699  case kMips64Ulw:
1700  __ Ulw(i.OutputRegister(), i.MemoryOperand());
1701  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1702  break;
1703  case kMips64Lwu:
1704  __ Lwu(i.OutputRegister(), i.MemoryOperand());
1705  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1706  break;
1707  case kMips64Ulwu:
1708  __ Ulwu(i.OutputRegister(), i.MemoryOperand());
1709  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1710  break;
1711  case kMips64Ld:
1712  __ Ld(i.OutputRegister(), i.MemoryOperand());
1713  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1714  break;
1715  case kMips64Uld:
1716  __ Uld(i.OutputRegister(), i.MemoryOperand());
1717  EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
1718  break;
1719  case kMips64Sw:
1720  __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
1721  break;
1722  case kMips64Usw:
1723  __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
1724  break;
1725  case kMips64Sd:
1726  __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
1727  break;
1728  case kMips64Usd:
1729  __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
1730  break;
1731  case kMips64Lwc1: {
1732  __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
1733  break;
1734  }
1735  case kMips64Ulwc1: {
1736  __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
1737  break;
1738  }
1739  case kMips64Swc1: {
1740  size_t index = 0;
1741  MemOperand operand = i.MemoryOperand(&index);
1742  FPURegister ft = i.InputOrZeroSingleRegister(index);
1743  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1744  __ Move(kDoubleRegZero, 0.0);
1745  }
1746  __ Swc1(ft, operand);
1747  break;
1748  }
1749  case kMips64Uswc1: {
1750  size_t index = 0;
1751  MemOperand operand = i.MemoryOperand(&index);
1752  FPURegister ft = i.InputOrZeroSingleRegister(index);
1753  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1754  __ Move(kDoubleRegZero, 0.0);
1755  }
1756  __ Uswc1(ft, operand, kScratchReg);
1757  break;
1758  }
1759  case kMips64Ldc1:
1760  __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
1761  break;
1762  case kMips64Uldc1:
1763  __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
1764  break;
1765  case kMips64Sdc1: {
1766  FPURegister ft = i.InputOrZeroDoubleRegister(2);
1767  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1768  __ Move(kDoubleRegZero, 0.0);
1769  }
1770  __ Sdc1(ft, i.MemoryOperand());
1771  break;
1772  }
1773  case kMips64Usdc1: {
1774  FPURegister ft = i.InputOrZeroDoubleRegister(2);
1775  if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1776  __ Move(kDoubleRegZero, 0.0);
1777  }
1778  __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
1779  break;
1780  }
1781  case kMips64Push:
1782  if (instr->InputAt(0)->IsFPRegister()) {
1783  __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
1784  __ Subu(sp, sp, Operand(kDoubleSize));
1785  frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1786  } else {
1787  __ Push(i.InputRegister(0));
1788  frame_access_state()->IncreaseSPDelta(1);
1789  }
1790  break;
1791  case kMips64Peek: {
1792  // The incoming value is 0-based, but we need a 1-based value.
1793  int reverse_slot = i.InputInt32(0) + 1;
1794  int offset =
1795  FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1796  if (instr->OutputAt(0)->IsFPRegister()) {
1797  LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1798  if (op->representation() == MachineRepresentation::kFloat64) {
1799  __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
1800  } else {
1801  DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
1802  __ lwc1(
1803  i.OutputSingleRegister(0),
1804  MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
1805  }
1806  } else {
1807  __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
1808  }
1809  break;
1810  }
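  // The reverse-slot arithmetic above reads naturally as a small helper
  // (illustrative sketch; PeekOffsetFromFP is hypothetical, while
  // FrameSlotToFPOffset is the real helper used above):
  //
  //   int PeekOffsetFromFP(int input, int total_frame_slots) {
  //     int reverse_slot = input + 1;  // incoming value is 0-based
  //     return FrameSlotToFPOffset(total_frame_slots - reverse_slot);
  //   }
  //
  // so input 0 addresses the highest-numbered frame slot.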
1811  case kMips64StackClaim: {
1812  __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
1813  frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
1814  break;
1815  }
1816  case kMips64StoreToStackSlot: {
1817  if (instr->InputAt(0)->IsFPRegister()) {
1818  if (instr->InputAt(0)->IsSimd128Register()) {
1819  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1820  __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1)));
1821  } else {
1822  __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
1823  }
1824  } else {
1825  __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
1826  }
1827  break;
1828  }
1829  case kMips64ByteSwap64: {
1830  __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
1831  break;
1832  }
1833  case kMips64ByteSwap32: {
1834  __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
1835  break;
1836  }
1837  case kWord32AtomicLoadInt8:
1838  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
1839  break;
1840  case kWord32AtomicLoadUint8:
1841  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
1842  break;
1843  case kWord32AtomicLoadInt16:
1844  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
1845  break;
1846  case kWord32AtomicLoadUint16:
1847  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
1848  break;
1849  case kWord32AtomicLoadWord32:
1850  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
1851  break;
1852  case kMips64Word64AtomicLoadUint8:
1853  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
1854  break;
1855  case kMips64Word64AtomicLoadUint16:
1856  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
1857  break;
1858  case kMips64Word64AtomicLoadUint32:
1859  ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
1860  break;
1861  case kMips64Word64AtomicLoadUint64:
1862  ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
1863  break;
1864  case kWord32AtomicStoreWord8:
1865  ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
1866  break;
1867  case kWord32AtomicStoreWord16:
1868  ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
1869  break;
1870  case kWord32AtomicStoreWord32:
1871  ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
1872  break;
1873  case kMips64Word64AtomicStoreWord8:
1874  ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
1875  break;
1876  case kMips64Word64AtomicStoreWord16:
1877  ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
1878  break;
1879  case kMips64Word64AtomicStoreWord32:
1880  ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
1881  break;
1882  case kMips64Word64AtomicStoreWord64:
1883  ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
1884  break;
1885  case kWord32AtomicExchangeInt8:
1886  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
1887  break;
1888  case kWord32AtomicExchangeUint8:
1889  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
1890  break;
1891  case kWord32AtomicExchangeInt16:
1892  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
1893  break;
1894  case kWord32AtomicExchangeUint16:
1895  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
1896  break;
1897  case kWord32AtomicExchangeWord32:
1898  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
1899  break;
1900  case kMips64Word64AtomicExchangeUint8:
1901  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
1902  break;
1903  case kMips64Word64AtomicExchangeUint16:
1904  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
1905  break;
1906  case kMips64Word64AtomicExchangeUint32:
1907  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
1908  break;
1909  case kMips64Word64AtomicExchangeUint64:
1910  ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
1911  break;
1912  case kWord32AtomicCompareExchangeInt8:
1913  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
1914  break;
1915  case kWord32AtomicCompareExchangeUint8:
1916  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
1917  break;
1918  case kWord32AtomicCompareExchangeInt16:
1919  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
1920  break;
1921  case kWord32AtomicCompareExchangeUint16:
1922  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
1923  break;
1924  case kWord32AtomicCompareExchangeWord32:
1925  __ sll(i.InputRegister(2), i.InputRegister(2), 0);
1926  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
1927  break;
1928  case kMips64Word64AtomicCompareExchangeUint8:
1929  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
1930  break;
1931  case kMips64Word64AtomicCompareExchangeUint16:
1932  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
1933  break;
1934  case kMips64Word64AtomicCompareExchangeUint32:
1935  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
1936  break;
1937  case kMips64Word64AtomicCompareExchangeUint64:
1938  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
1939  break;
1940 #define ATOMIC_BINOP_CASE(op, inst) \
1941  case kWord32Atomic##op##Int8: \
1942  ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
1943  break; \
1944  case kWord32Atomic##op##Uint8: \
1945  ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
1946  break; \
1947  case kWord32Atomic##op##Int16: \
1948  ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
1949  break; \
1950  case kWord32Atomic##op##Uint16: \
1951  ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
1952  break; \
1953  case kWord32Atomic##op##Word32: \
1954  ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
1955  break;
1956  ATOMIC_BINOP_CASE(Add, Addu)
1957  ATOMIC_BINOP_CASE(Sub, Subu)
1958  ATOMIC_BINOP_CASE(And, And)
1959  ATOMIC_BINOP_CASE(Or, Or)
1960  ATOMIC_BINOP_CASE(Xor, Xor)
1961 #undef ATOMIC_BINOP_CASE
1962 #define ATOMIC_BINOP_CASE(op, inst) \
1963  case kMips64Word64Atomic##op##Uint8: \
1964  ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
1965  break; \
1966  case kMips64Word64Atomic##op##Uint16: \
1967  ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
1968  break; \
1969  case kMips64Word64Atomic##op##Uint32: \
1970  ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
1971  break; \
1972  case kMips64Word64Atomic##op##Uint64: \
1973  ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
1974  break;
1975  ATOMIC_BINOP_CASE(Add, Daddu)
1976  ATOMIC_BINOP_CASE(Sub, Dsubu)
1977  ATOMIC_BINOP_CASE(And, And)
1978  ATOMIC_BINOP_CASE(Or, Or)
1979  ATOMIC_BINOP_CASE(Xor, Xor)
1980 #undef ATOMIC_BINOP_CASE
1981  case kMips64AssertEqual:
1982  __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
1983  i.InputRegister(0), Operand(i.InputRegister(1)));
1984  break;
1985  case kMips64S128Zero: {
1986  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1987  __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
1988  i.OutputSimd128Register());
1989  break;
1990  }
1991  case kMips64I32x4Splat: {
1992  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1993  __ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
1994  break;
1995  }
1996  case kMips64I32x4ExtractLane: {
1997  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1998  __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
1999  i.InputInt8(1));
2000  break;
2001  }
2002  case kMips64I32x4ReplaceLane: {
2003  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2004  Simd128Register src = i.InputSimd128Register(0);
2005  Simd128Register dst = i.OutputSimd128Register();
2006  if (src != dst) {
2007  __ move_v(dst, src);
2008  }
2009  __ insert_w(dst, i.InputInt8(1), i.InputRegister(2));
2010  break;
2011  }
2012  case kMips64I32x4Add: {
2013  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2014  __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2015  i.InputSimd128Register(1));
2016  break;
2017  }
2018  case kMips64I32x4Sub: {
2019  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2020  __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2021  i.InputSimd128Register(1));
2022  break;
2023  }
2024  case kMips64F32x4Splat: {
2025  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2026  __ FmoveLow(kScratchReg, i.InputSingleRegister(0));
2027  __ fill_w(i.OutputSimd128Register(), kScratchReg);
2028  break;
2029  }
2030  case kMips64F32x4ExtractLane: {
2031  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2032  __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
2033  __ FmoveLow(i.OutputSingleRegister(), kScratchReg);
2034  break;
2035  }
2036  case kMips64F32x4ReplaceLane: {
2037  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2038  Simd128Register src = i.InputSimd128Register(0);
2039  Simd128Register dst = i.OutputSimd128Register();
2040  if (src != dst) {
2041  __ move_v(dst, src);
2042  }
2043  __ FmoveLow(kScratchReg, i.InputSingleRegister(2));
2044  __ insert_w(dst, i.InputInt8(1), kScratchReg);
2045  break;
2046  }
2047  case kMips64F32x4SConvertI32x4: {
2048  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2049  __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2050  break;
2051  }
2052  case kMips64F32x4UConvertI32x4: {
2053  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2054  __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2055  break;
2056  }
2057  case kMips64I32x4Mul: {
2058  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2059  __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2060  i.InputSimd128Register(1));
2061  break;
2062  }
2063  case kMips64I32x4MaxS: {
2064  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2065  __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2066  i.InputSimd128Register(1));
2067  break;
2068  }
2069  case kMips64I32x4MinS: {
2070  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2071  __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2072  i.InputSimd128Register(1));
2073  break;
2074  }
2075  case kMips64I32x4Eq: {
2076  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2077  __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2078  i.InputSimd128Register(1));
2079  break;
2080  }
2081  case kMips64I32x4Ne: {
2082  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2083  Simd128Register dst = i.OutputSimd128Register();
2084  __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2085  __ nor_v(dst, dst, dst);
2086  break;
2087  }
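  // MSA has no integer "compare not-equal", so Ne is synthesized as the
  // bitwise complement of Eq; nor_v(dst, dst, dst) is a vector NOT. The
  // same pattern recurs below for I16x8Ne and I8x16Ne. Per-lane sketch
  // (illustrative only):
  //
  //   uint32_t LaneNe(int32_t a, int32_t b) {
  //     uint32_t eq = (a == b) ? 0xFFFFFFFFu : 0u;  // ceq_w
  //     return ~eq;                                 // nor_v(dst, dst, dst)
  //   }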
2088  case kMips64I32x4Shl: {
2089  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2090  __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2091  i.InputInt5(1));
2092  break;
2093  }
2094  case kMips64I32x4ShrS: {
2095  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2096  __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2097  i.InputInt5(1));
2098  break;
2099  }
2100  case kMips64I32x4ShrU: {
2101  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2102  __ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2103  i.InputInt5(1));
2104  break;
2105  }
2106  case kMips64I32x4MaxU: {
2107  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2108  __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2109  i.InputSimd128Register(1));
2110  break;
2111  }
2112  case kMips64I32x4MinU: {
2113  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2114  __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2115  i.InputSimd128Register(1));
2116  break;
2117  }
2118  case kMips64S128Select: {
2119  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2120  DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0));
2121  __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
2122  i.InputSimd128Register(1));
2123  break;
2124  }
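  // bsel_v reads the destination register itself as the bit mask, which is
  // why the instruction selector must place the mask (input 0) in dst (the
  // DCHECK above). Per-bit sketch of the selection (illustrative only;
  // Bitselect is a hypothetical helper):
  //
  //   uint64_t Bitselect(uint64_t mask, uint64_t if_set, uint64_t if_clear) {
  //     // bsel_v(dst /* mask */, input 2 /* if_clear */, input 1 /* if_set */)
  //     return (if_set & mask) | (if_clear & ~mask);
  //   }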
2125  case kMips64F32x4Abs: {
2126  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2127  __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2128  break;
2129  }
2130  case kMips64F32x4Neg: {
2131  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2132  __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
2133  break;
2134  }
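  // F32x4Abs and F32x4Neg never touch the FPU: IEEE-754 absolute value is
  // "clear bit 31" (bclri) and negation is "flip bit 31" (bnegi), applied
  // lane-wise to the raw bit patterns. Scalar sketch (illustrative only):
  //
  //   uint32_t F32AbsBits(uint32_t bits) { return bits & ~(1u << 31); }  // bclri
  //   uint32_t F32NegBits(uint32_t bits) { return bits ^ (1u << 31); }   // bnegi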
2135  case kMips64F32x4RecipApprox: {
2136  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2137  __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2138  break;
2139  }
2140  case kMips64F32x4RecipSqrtApprox: {
2141  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2142  __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2143  break;
2144  }
2145  case kMips64F32x4Add: {
2146  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2147  __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2148  i.InputSimd128Register(1));
2149  break;
2150  }
2151  case kMips64F32x4Sub: {
2152  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2153  __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2154  i.InputSimd128Register(1));
2155  break;
2156  }
2157  case kMips64F32x4Mul: {
2158  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2159  __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2160  i.InputSimd128Register(1));
2161  break;
2162  }
2163  case kMips64F32x4Max: {
2164  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2165  __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2166  i.InputSimd128Register(1));
2167  break;
2168  }
2169  case kMips64F32x4Min: {
2170  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2171  __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2172  i.InputSimd128Register(1));
2173  break;
2174  }
2175  case kMips64F32x4Eq: {
2176  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2177  __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2178  i.InputSimd128Register(1));
2179  break;
2180  }
2181  case kMips64F32x4Ne: {
2182  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2183  __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2184  i.InputSimd128Register(1));
2185  break;
2186  }
2187  case kMips64F32x4Lt: {
2188  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2189  __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2190  i.InputSimd128Register(1));
2191  break;
2192  }
2193  case kMips64F32x4Le: {
2194  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2195  __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
2196  i.InputSimd128Register(1));
2197  break;
2198  }
2199  case kMips64I32x4SConvertF32x4: {
2200  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2201  __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2202  break;
2203  }
2204  case kMips64I32x4UConvertF32x4: {
2205  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2206  __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
2207  break;
2208  }
2209  case kMips64I32x4Neg: {
2210  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2211  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2212  __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
2213  i.InputSimd128Register(0));
2214  break;
2215  }
2216  case kMips64I32x4GtS: {
2217  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2218  __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2219  i.InputSimd128Register(0));
2220  break;
2221  }
2222  case kMips64I32x4GeS: {
2223  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2224  __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2225  i.InputSimd128Register(0));
2226  break;
2227  }
2228  case kMips64I32x4GtU: {
2229  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2230  __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2231  i.InputSimd128Register(0));
2232  break;
2233  }
2234  case kMips64I32x4GeU: {
2235  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2236  __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
2237  i.InputSimd128Register(0));
2238  break;
2239  }
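  // MSA only provides "less than" and "less or equal" integer compares, so
  // the Gt/Ge cases here (and their I16x8/I8x16 counterparts below) swap
  // the operands instead: a > b is emitted as b < a, and a >= b as b <= a,
  // e.g.
  //
  //   clt_s_w(dst, rhs, lhs);  // per lane: dst = (lhs > rhs) ? ~0 : 0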
2240  case kMips64I16x8Splat: {
2241  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2242  __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
2243  break;
2244  }
2245  case kMips64I16x8ExtractLane: {
2246  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2247  __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
2248  i.InputInt8(1));
2249  break;
2250  }
2251  case kMips64I16x8ReplaceLane: {
2252  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2253  Simd128Register src = i.InputSimd128Register(0);
2254  Simd128Register dst = i.OutputSimd128Register();
2255  if (src != dst) {
2256  __ move_v(dst, src);
2257  }
2258  __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
2259  break;
2260  }
2261  case kMips64I16x8Neg: {
2262  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2263  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2264  __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
2265  i.InputSimd128Register(0));
2266  break;
2267  }
2268  case kMips64I16x8Shl: {
2269  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2270  __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2271  i.InputInt4(1));
2272  break;
2273  }
2274  case kMips64I16x8ShrS: {
2275  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2276  __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2277  i.InputInt4(1));
2278  break;
2279  }
2280  case kMips64I16x8ShrU: {
2281  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2282  __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2283  i.InputInt4(1));
2284  break;
2285  }
2286  case kMips64I16x8Add: {
2287  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2288  __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2289  i.InputSimd128Register(1));
2290  break;
2291  }
2292  case kMips64I16x8AddSaturateS: {
2293  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2294  __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2295  i.InputSimd128Register(1));
2296  break;
2297  }
2298  case kMips64I16x8Sub: {
2299  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2300  __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2301  i.InputSimd128Register(1));
2302  break;
2303  }
2304  case kMips64I16x8SubSaturateS: {
2305  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2306  __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2307  i.InputSimd128Register(1));
2308  break;
2309  }
2310  case kMips64I16x8Mul: {
2311  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2312  __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2313  i.InputSimd128Register(1));
2314  break;
2315  }
2316  case kMips64I16x8MaxS: {
2317  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2318  __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2319  i.InputSimd128Register(1));
2320  break;
2321  }
2322  case kMips64I16x8MinS: {
2323  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2324  __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2325  i.InputSimd128Register(1));
2326  break;
2327  }
2328  case kMips64I16x8Eq: {
2329  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2330  __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2331  i.InputSimd128Register(1));
2332  break;
2333  }
2334  case kMips64I16x8Ne: {
2335  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2336  Simd128Register dst = i.OutputSimd128Register();
2337  __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2338  __ nor_v(dst, dst, dst);
2339  break;
2340  }
2341  case kMips64I16x8GtS: {
2342  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2343  __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2344  i.InputSimd128Register(0));
2345  break;
2346  }
2347  case kMips64I16x8GeS: {
2348  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2349  __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2350  i.InputSimd128Register(0));
2351  break;
2352  }
2353  case kMips64I16x8AddSaturateU: {
2354  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2355  __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2356  i.InputSimd128Register(1));
2357  break;
2358  }
2359  case kMips64I16x8SubSaturateU: {
2360  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2361  __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2362  i.InputSimd128Register(1));
2363  break;
2364  }
2365  case kMips64I16x8MaxU: {
2366  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2367  __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2368  i.InputSimd128Register(1));
2369  break;
2370  }
2371  case kMips64I16x8MinU: {
2372  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2373  __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
2374  i.InputSimd128Register(1));
2375  break;
2376  }
2377  case kMips64I16x8GtU: {
2378  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2379  __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2380  i.InputSimd128Register(0));
2381  break;
2382  }
2383  case kMips64I16x8GeU: {
2384  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2385  __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
2386  i.InputSimd128Register(0));
2387  break;
2388  }
2389  case kMips64I8x16Splat: {
2390  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2391  __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
2392  break;
2393  }
2394  case kMips64I8x16ExtractLane: {
2395  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2396  __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
2397  i.InputInt8(1));
2398  break;
2399  }
2400  case kMips64I8x16ReplaceLane: {
2401  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2402  Simd128Register src = i.InputSimd128Register(0);
2403  Simd128Register dst = i.OutputSimd128Register();
2404  if (src != dst) {
2405  __ move_v(dst, src);
2406  }
2407  __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
2408  break;
2409  }
2410  case kMips64I8x16Neg: {
2411  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2412  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2413  __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
2414  i.InputSimd128Register(0));
2415  break;
2416  }
2417  case kMips64I8x16Shl: {
2418  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2419  __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2420  i.InputInt3(1));
2421  break;
2422  }
2423  case kMips64I8x16ShrS: {
2424  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2425  __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2426  i.InputInt3(1));
2427  break;
2428  }
2429  case kMips64I8x16Add: {
2430  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2431  __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2432  i.InputSimd128Register(1));
2433  break;
2434  }
2435  case kMips64I8x16AddSaturateS: {
2436  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2437  __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2438  i.InputSimd128Register(1));
2439  break;
2440  }
2441  case kMips64I8x16Sub: {
2442  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2443  __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2444  i.InputSimd128Register(1));
2445  break;
2446  }
2447  case kMips64I8x16SubSaturateS: {
2448  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2449  __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2450  i.InputSimd128Register(1));
2451  break;
2452  }
2453  case kMips64I8x16Mul: {
2454  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2455  __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2456  i.InputSimd128Register(1));
2457  break;
2458  }
2459  case kMips64I8x16MaxS: {
2460  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2461  __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2462  i.InputSimd128Register(1));
2463  break;
2464  }
2465  case kMips64I8x16MinS: {
2466  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2467  __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2468  i.InputSimd128Register(1));
2469  break;
2470  }
2471  case kMips64I8x16Eq: {
2472  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2473  __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2474  i.InputSimd128Register(1));
2475  break;
2476  }
2477  case kMips64I8x16Ne: {
2478  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2479  Simd128Register dst = i.OutputSimd128Register();
2480  __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
2481  __ nor_v(dst, dst, dst);
2482  break;
2483  }
2484  case kMips64I8x16GtS: {
2485  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2486  __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2487  i.InputSimd128Register(0));
2488  break;
2489  }
2490  case kMips64I8x16GeS: {
2491  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2492  __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2493  i.InputSimd128Register(0));
2494  break;
2495  }
2496  case kMips64I8x16ShrU: {
2497  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2498  __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2499  i.InputInt3(1));
2500  break;
2501  }
2502  case kMips64I8x16AddSaturateU: {
2503  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2504  __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2505  i.InputSimd128Register(1));
2506  break;
2507  }
2508  case kMips64I8x16SubSaturateU: {
2509  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2510  __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2511  i.InputSimd128Register(1));
2512  break;
2513  }
2514  case kMips64I8x16MaxU: {
2515  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2516  __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2517  i.InputSimd128Register(1));
2518  break;
2519  }
2520  case kMips64I8x16MinU: {
2521  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2522  __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
2523  i.InputSimd128Register(1));
2524  break;
2525  }
2526  case kMips64I8x16GtU: {
2527  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2528  __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2529  i.InputSimd128Register(0));
2530  break;
2531  }
2532  case kMips64I8x16GeU: {
2533  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2534  __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
2535  i.InputSimd128Register(0));
2536  break;
2537  }
2538  case kMips64S128And: {
2539  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2540  __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2541  i.InputSimd128Register(1));
2542  break;
2543  }
2544  case kMips64S128Or: {
2545  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2546  __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2547  i.InputSimd128Register(1));
2548  break;
2549  }
2550  case kMips64S128Xor: {
2551  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2552  __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2553  i.InputSimd128Register(1));
2554  break;
2555  }
2556  case kMips64S128Not: {
2557  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2558  __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
2559  i.InputSimd128Register(0));
2560  break;
2561  }
2562  case kMips64S1x4AnyTrue:
2563  case kMips64S1x8AnyTrue:
2564  case kMips64S1x16AnyTrue: {
2565  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2566  Register dst = i.OutputRegister();
2567  Label all_false;
2568  __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
2569  i.InputSimd128Register(0), USE_DELAY_SLOT);
2570  __ li(dst, 0l); // branch delay slot
2571  __ li(dst, -1);
2572  __ bind(&all_false);
2573  break;
2574  }
2575  case kMips64S1x4AllTrue: {
2576  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2577  Register dst = i.OutputRegister();
2578  Label all_true;
2579  __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
2580  i.InputSimd128Register(0), USE_DELAY_SLOT);
2581  __ li(dst, -1); // branch delay slot
2582  __ li(dst, 0l);
2583  __ bind(&all_true);
2584  break;
2585  }
2586  case kMips64S1x8AllTrue: {
2587  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2588  Register dst = i.OutputRegister();
2589  Label all_true;
2590  __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
2591  i.InputSimd128Register(0), USE_DELAY_SLOT);
2592  __ li(dst, -1); // branch delay slot
2593  __ li(dst, 0l);
2594  __ bind(&all_true);
2595  break;
2596  }
2597  case kMips64S1x16AllTrue: {
2598  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2599  Register dst = i.OutputRegister();
2600  Label all_true;
2601  __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
2602  i.InputSimd128Register(0), USE_DELAY_SLOT);
2603  __ li(dst, -1); // branch delay slot
2604  __ li(dst, 0l);
2605  __ bind(&all_true);
2606  break;
2607  }
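  // The AnyTrue/AllTrue cases above lean on the MIPS branch delay slot: the
  // li placed in the delay slot executes whether or not the branch is
  // taken, loading the branch-taken answer, and the following li only runs
  // on fall-through and overwrites it. Net effect for AllTrue (illustrative
  // sketch only):
  //
  //   int64_t AllTrue(bool all_lanes_nonzero) {
  //     return all_lanes_nonzero ? -1 : 0;  // taken: -1, fall-through: 0
  //   }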
2608  case kMips64MsaLd: {
2609  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2610  __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
2611  break;
2612  }
2613  case kMips64MsaSt: {
2614  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2615  __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
2616  break;
2617  }
2618  case kMips64S32x4InterleaveRight: {
2619  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2620  Simd128Register dst = i.OutputSimd128Register(),
2621  src0 = i.InputSimd128Register(0),
2622  src1 = i.InputSimd128Register(1);
2623  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2624  // dst = [5, 1, 4, 0]
2625  __ ilvr_w(dst, src1, src0);
2626  break;
2627  }
2628  case kMips64S32x4InterleaveLeft: {
2629  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2630  Simd128Register dst = i.OutputSimd128Register(),
2631  src0 = i.InputSimd128Register(0),
2632  src1 = i.InputSimd128Register(1);
2633  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2634  // dst = [7, 3, 6, 2]
2635  __ ilvl_w(dst, src1, src0);
2636  break;
2637  }
2638  case kMips64S32x4PackEven: {
2639  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2640  Simd128Register dst = i.OutputSimd128Register(),
2641  src0 = i.InputSimd128Register(0),
2642  src1 = i.InputSimd128Register(1);
2643  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2644  // dst = [6, 4, 2, 0]
2645  __ pckev_w(dst, src1, src0);
2646  break;
2647  }
2648  case kMips64S32x4PackOdd: {
2649  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2650  Simd128Register dst = i.OutputSimd128Register(),
2651  src0 = i.InputSimd128Register(0),
2652  src1 = i.InputSimd128Register(1);
2653  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2654  // dst = [7, 5, 3, 1]
2655  __ pckod_w(dst, src1, src0);
2656  break;
2657  }
2658  case kMips64S32x4InterleaveEven: {
2659  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2660  Simd128Register dst = i.OutputSimd128Register(),
2661  src0 = i.InputSimd128Register(0),
2662  src1 = i.InputSimd128Register(1);
2663  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2664  // dst = [6, 2, 4, 0]
2665  __ ilvev_w(dst, src1, src0);
2666  break;
2667  }
2668  case kMips64S32x4InterleaveOdd: {
2669  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2670  Simd128Register dst = i.OutputSimd128Register(),
2671  src0 = i.InputSimd128Register(0),
2672  src1 = i.InputSimd128Register(1);
2673  // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
2674  // dst = [7, 3, 5, 1]
2675  __ ilvod_w(dst, src1, src0);
2676  break;
2677  }
2678  case kMips64S32x4Shuffle: {
2679  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2680  Simd128Register dst = i.OutputSimd128Register(),
2681  src0 = i.InputSimd128Register(0),
2682  src1 = i.InputSimd128Register(1);
2683 
2684  int32_t shuffle = i.InputInt32(2);
2685 
2686  if (src0 == src1) {
2687  // Unary S32x4 shuffles are handled with the shf.w instruction.
2688  unsigned lane = shuffle & 0xFF;
2689  if (FLAG_debug_code) {
2690  // For a unary shuffle, the lane indices of all four lanes must
2691  // fall into the same range, which is one of these two:
2692  // [0, 3] or [4, 7]
2693  if (lane >= 4) {
2694  int32_t shuffle_helper = shuffle;
2695  for (int i = 0; i < 4; ++i) {
2696  lane = shuffle_helper & 0xFF;
2697  CHECK_GE(lane, 4);
2698  shuffle_helper >>= 8;
2699  }
2700  }
2701  }
2702  uint32_t i8 = 0;
2703  for (int i = 0; i < 4; i++) {
2704  lane = shuffle & 0xFF;
2705  if (lane >= 4) {
2706  lane -= 4;
2707  }
2708  DCHECK_GT(4, lane);
2709  i8 |= lane << (2 * i);
2710  shuffle >>= 8;
2711  }
2712  __ shf_w(dst, src0, i8);
2713  } else {
2714  // Binary shuffles are handled with the vshf.w instruction.
2715  if (dst == src0) {
2716  __ move_v(kSimd128ScratchReg, src0);
2717  src0 = kSimd128ScratchReg;
2718  } else if (dst == src1) {
2719  __ move_v(kSimd128ScratchReg, src1);
2720  src1 = kSimd128ScratchReg;
2721  }
2722 
2723  __ li(kScratchReg, i.InputInt32(2));
2724  __ insert_w(dst, 0, kScratchReg);
2725  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2726  __ ilvr_b(dst, kSimd128RegZero, dst);
2727  __ ilvr_h(dst, kSimd128RegZero, dst);
2728  __ vshf_w(dst, src1, src0);
2729  }
2730  break;
2731  }
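  // In the unary path above, the loop packs four 2-bit lane indices into
  // the 8-bit shf.w immediate, lane 0 in the low bits. Sketch (illustrative
  // only; PackShfImm is a hypothetical helper):
  //
  //   uint8_t PackShfImm(const uint8_t lanes[4]) {  // each lane in [0, 3]
  //     uint8_t imm = 0;
  //     for (int k = 0; k < 4; k++) imm |= lanes[k] << (2 * k);
  //     return imm;  // e.g. {3, 2, 1, 0} packs to 0x1B, a full reversal
  //   }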
2732  case kMips64S16x8InterleaveRight: {
2733  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2734  Simd128Register dst = i.OutputSimd128Register(),
2735  src0 = i.InputSimd128Register(0),
2736  src1 = i.InputSimd128Register(1);
2737  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2738  // dst = [11, 3, 10, 2, 9, 1, 8, 0]
2739  __ ilvr_h(dst, src1, src0);
2740  break;
2741  }
2742  case kMips64S16x8InterleaveLeft: {
2743  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2744  Simd128Register dst = i.OutputSimd128Register(),
2745  src0 = i.InputSimd128Register(0),
2746  src1 = i.InputSimd128Register(1);
2747  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2748  // dst = [15, 7, 14, 6, 13, 5, 12, 4]
2749  __ ilvl_h(dst, src1, src0);
2750  break;
2751  }
2752  case kMips64S16x8PackEven: {
2753  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2754  Simd128Register dst = i.OutputSimd128Register(),
2755  src0 = i.InputSimd128Register(0),
2756  src1 = i.InputSimd128Register(1);
2757  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2758  // dst = [14, 12, 10, 8, 6, 4, 2, 0]
2759  __ pckev_h(dst, src1, src0);
2760  break;
2761  }
2762  case kMips64S16x8PackOdd: {
2763  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2764  Simd128Register dst = i.OutputSimd128Register(),
2765  src0 = i.InputSimd128Register(0),
2766  src1 = i.InputSimd128Register(1);
2767  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2768  // dst = [15, 13, 11, 9, 7, 5, 3, 1]
2769  __ pckod_h(dst, src1, src0);
2770  break;
2771  }
2772  case kMips64S16x8InterleaveEven: {
2773  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2774  Simd128Register dst = i.OutputSimd128Register(),
2775  src0 = i.InputSimd128Register(0),
2776  src1 = i.InputSimd128Register(1);
2777  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2778  // dst = [14, 6, 12, 4, 10, 2, 8, 0]
2779  __ ilvev_h(dst, src1, src0);
2780  break;
2781  }
2782  case kMips64S16x8InterleaveOdd: {
2783  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2784  Simd128Register dst = i.OutputSimd128Register(),
2785  src0 = i.InputSimd128Register(0),
2786  src1 = i.InputSimd128Register(1);
2787  // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
2788  // dst = [15, 7, ... 11, 3, 9, 1]
2789  __ ilvod_h(dst, src1, src0);
2790  break;
2791  }
2792  case kMips64S16x4Reverse: {
2793  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2794  // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
2795  // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
2796  __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
2797  break;
2798  }
2799  case kMips64S16x2Reverse: {
2800  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2801  // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
2802  // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
2803  __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
2804  break;
2805  }
2806  case kMips64S8x16InterleaveRight: {
2807  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2808  Simd128Register dst = i.OutputSimd128Register(),
2809  src0 = i.InputSimd128Register(0),
2810  src1 = i.InputSimd128Register(1);
2811  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2812  // dst = [23, 7, ... 17, 1, 16, 0]
2813  __ ilvr_b(dst, src1, src0);
2814  break;
2815  }
2816  case kMips64S8x16InterleaveLeft: {
2817  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2818  Simd128Register dst = i.OutputSimd128Register(),
2819  src0 = i.InputSimd128Register(0),
2820  src1 = i.InputSimd128Register(1);
2821  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2822  // dst = [31, 15, ... 25, 9, 24, 8]
2823  __ ilvl_b(dst, src1, src0);
2824  break;
2825  }
2826  case kMips64S8x16PackEven: {
2827  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2828  Simd128Register dst = i.OutputSimd128Register(),
2829  src0 = i.InputSimd128Register(0),
2830  src1 = i.InputSimd128Register(1);
2831  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2832  // dst = [30, 28, ... 6, 4, 2, 0]
2833  __ pckev_b(dst, src1, src0);
2834  break;
2835  }
2836  case kMips64S8x16PackOdd: {
2837  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2838  Simd128Register dst = i.OutputSimd128Register(),
2839  src0 = i.InputSimd128Register(0),
2840  src1 = i.InputSimd128Register(1);
2841  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2842  // dst = [31, 29, ... 7, 5, 3, 1]
2843  __ pckod_b(dst, src1, src0);
2844  break;
2845  }
2846  case kMips64S8x16InterleaveEven: {
2847  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2848  Simd128Register dst = i.OutputSimd128Register(),
2849  src0 = i.InputSimd128Register(0),
2850  src1 = i.InputSimd128Register(1);
2851  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2852  // dst = [30, 14, ... 18, 2, 16, 0]
2853  __ ilvev_b(dst, src1, src0);
2854  break;
2855  }
2856  case kMips64S8x16InterleaveOdd: {
2857  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2858  Simd128Register dst = i.OutputSimd128Register(),
2859  src0 = i.InputSimd128Register(0),
2860  src1 = i.InputSimd128Register(1);
2861  // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
2862  // dst = [31, 15, ... 19, 3, 17, 1]
2863  __ ilvod_b(dst, src1, src0);
2864  break;
2865  }
2866  case kMips64S8x16Concat: {
2867  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2868  Simd128Register dst = i.OutputSimd128Register();
2869  DCHECK(dst == i.InputSimd128Register(0));
2870  __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
2871  break;
2872  }
2873  case kMips64S8x16Shuffle: {
2874  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2875  Simd128Register dst = i.OutputSimd128Register(),
2876  src0 = i.InputSimd128Register(0),
2877  src1 = i.InputSimd128Register(1);
2878 
2879  if (dst == src0) {
2880  __ move_v(kSimd128ScratchReg, src0);
2881  src0 = kSimd128ScratchReg;
2882  } else if (dst == src1) {
2883  __ move_v(kSimd128ScratchReg, src1);
2884  src1 = kSimd128ScratchReg;
2885  }
2886 
2887  int64_t control_low =
2888  static_cast<int64_t>(i.InputInt32(3)) << 32 | i.InputInt32(2);
2889  int64_t control_hi =
2890  static_cast<int64_t>(i.InputInt32(5)) << 32 | i.InputInt32(4);
2891  __ li(kScratchReg, control_low);
2892  __ insert_d(dst, 0, kScratchReg);
2893  __ li(kScratchReg, control_hi);
2894  __ insert_d(dst, 1, kScratchReg);
2895  __ vshf_b(dst, src1, src0);
2896  break;
2897  }
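  // The byte-shuffle control mask is sixteen 8-bit lane selectors passed as
  // four 32-bit immediates (inputs 2..5); the code above glues them into
  // two 64-bit halves and inserts them into dst, which vshf.b then reads as
  // the selector vector. Sketch of the gluing (illustrative only; the
  // selector bytes are all <= 31, so sign extension is not a concern):
  //
  //   uint64_t ControlHalf(uint32_t hi_word, uint32_t lo_word) {
  //     return (static_cast<uint64_t>(hi_word) << 32) | lo_word;
  //   }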
2898  case kMips64S8x8Reverse: {
2899  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2900  // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
2901  // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
2902  // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
2903  // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
2904  __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
2905  __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
2906  break;
2907  }
2908  case kMips64S8x4Reverse: {
2909  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2910  // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
2911  // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
2912  __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
2913  break;
2914  }
2915  case kMips64S8x2Reverse: {
2916  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2917  // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
2918  // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
2919  __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
2920  break;
2921  }
2922  case kMips64I32x4SConvertI16x8Low: {
2923  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2924  Simd128Register dst = i.OutputSimd128Register();
2925  Simd128Register src = i.InputSimd128Register(0);
2926  __ ilvr_h(kSimd128ScratchReg, src, src);
2927  __ slli_w(dst, kSimd128ScratchReg, 16);
2928  __ srai_w(dst, dst, 16);
2929  break;
2930  }
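  // ilvr_h duplicates each low halfword into a 32-bit lane; the shift pair
  // then performs the classic sign extension of the low 16 bits. Per-lane
  // sketch (illustrative only):
  //
  //   int32_t SignExtend16(int32_t lane) {
  //     return (lane << 16) >> 16;  // slli_w, then arithmetic srai_w
  //   }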
2931  case kMips64I32x4SConvertI16x8High: {
2932  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2933  Simd128Register dst = i.OutputSimd128Register();
2934  Simd128Register src = i.InputSimd128Register(0);
2935  __ ilvl_h(kSimd128ScratchReg, src, src);
2936  __ slli_w(dst, kSimd128ScratchReg, 16);
2937  __ srai_w(dst, dst, 16);
2938  break;
2939  }
2940  case kMips64I32x4UConvertI16x8Low: {
2941  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2942  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2943  __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
2944  i.InputSimd128Register(0));
2945  break;
2946  }
2947  case kMips64I32x4UConvertI16x8High: {
2948  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2949  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2950  __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
2951  i.InputSimd128Register(0));
2952  break;
2953  }
2954  case kMips64I16x8SConvertI8x16Low: {
2955  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2956  Simd128Register dst = i.OutputSimd128Register();
2957  Simd128Register src = i.InputSimd128Register(0);
2958  __ ilvr_b(kSimd128ScratchReg, src, src);
2959  __ slli_h(dst, kSimd128ScratchReg, 8);
2960  __ srai_h(dst, dst, 8);
2961  break;
2962  }
2963  case kMips64I16x8SConvertI8x16High: {
2964  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2965  Simd128Register dst = i.OutputSimd128Register();
2966  Simd128Register src = i.InputSimd128Register(0);
2967  __ ilvl_b(kSimd128ScratchReg, src, src);
2968  __ slli_h(dst, kSimd128ScratchReg, 8);
2969  __ srai_h(dst, dst, 8);
2970  break;
2971  }
2972  case kMips64I16x8SConvertI32x4: {
2973  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2974  Simd128Register dst = i.OutputSimd128Register();
2975  Simd128Register src0 = i.InputSimd128Register(0);
2976  Simd128Register src1 = i.InputSimd128Register(1);
2977  __ sat_s_w(kSimd128ScratchReg, src0, 15);
2978  __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
2979  __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2980  break;
2981  }
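  // Narrowing with saturation: sat_s_w with m == 15 clamps every int32 lane
  // to the 16-bit signed range, after which pckev_h keeps only the low
  // halfword of each lane. Per-lane sketch (illustrative only):
  //
  //   int16_t SaturateS16(int32_t v) {
  //     if (v > 32767) return 32767;
  //     if (v < -32768) return -32768;
  //     return static_cast<int16_t>(v);  // low halfword, kept by pckev_h
  //   }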
2982  case kMips64I16x8UConvertI32x4: {
2983  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2984  Simd128Register dst = i.OutputSimd128Register();
2985  Simd128Register src0 = i.InputSimd128Register(0);
2986  Simd128Register src1 = i.InputSimd128Register(1);
2987  __ sat_u_w(kSimd128ScratchReg, src0, 15);
2988  __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
2989  __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2990  break;
2991  }
2992  case kMips64I16x8UConvertI8x16Low: {
2993  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2994  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2995  __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
2996  i.InputSimd128Register(0));
2997  break;
2998  }
2999  case kMips64I16x8UConvertI8x16High: {
3000  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3001  __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3002  __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
3003  i.InputSimd128Register(0));
3004  break;
3005  }
3006  case kMips64I8x16SConvertI16x8: {
3007  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3008  Simd128Register dst = i.OutputSimd128Register();
3009  Simd128Register src0 = i.InputSimd128Register(0);
3010  Simd128Register src1 = i.InputSimd128Register(1);
3011  __ sat_s_h(kSimd128ScratchReg, src0, 7);
3012  __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
3013  __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3014  break;
3015  }
3016  case kMips64I8x16UConvertI16x8: {
3017  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3018  Simd128Register dst = i.OutputSimd128Register();
3019  Simd128Register src0 = i.InputSimd128Register(0);
3020  Simd128Register src1 = i.InputSimd128Register(1);
3021  __ sat_u_h(kSimd128ScratchReg, src0, 7);
3022  __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
3023  __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3024  break;
3025  }
3026  case kMips64F32x4AddHoriz: {
3027  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3028  Simd128Register src0 = i.InputSimd128Register(0);
3029  Simd128Register src1 = i.InputSimd128Register(1);
3030  Simd128Register dst = i.OutputSimd128Register();
3031  __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
3032  __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
3033  __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
3034  __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
3035  __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
3036  break;
3037  }
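  // MSA has no float horizontal add, so each source is added to a
  // pair-swapped copy of itself (shf.w 0xB1 swaps lanes 0<->1 and 2<->3);
  // every lane then holds the sum of its pair, and pckev_w collects the
  // even lanes of both operands. Per-source sketch (illustrative only):
  //
  //   // lanes {a0, a1, a2, a3} -> swap -> {a1, a0, a3, a2}
  //   // fadd_w                 -> {a0+a1, a0+a1, a2+a3, a2+a3}
  //   // pckev_w keeps lanes 0 and 2 of each operand.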
3038  case kMips64I32x4AddHoriz: {
3039  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3040  Simd128Register src0 = i.InputSimd128Register(0);
3041  Simd128Register src1 = i.InputSimd128Register(1);
3042  Simd128Register dst = i.OutputSimd128Register();
3043  __ hadd_s_d(kSimd128ScratchReg, src0, src0);
3044  __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
3045  __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
3046  break;
3047  }
3048  case kMips64I16x8AddHoriz: {
3049  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3050  Simd128Register src0 = i.InputSimd128Register(0);
3051  Simd128Register src1 = i.InputSimd128Register(1);
3052  Simd128Register dst = i.OutputSimd128Register();
3053  __ hadd_s_w(kSimd128ScratchReg, src0, src0);
3054  __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
3055  __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
3056  break;
3057  }
3058  }
3059  return kSuccess;
3060 } // NOLINT(readability/fn_size)
3061 
3062 #define UNSUPPORTED_COND(opcode, condition) \
3063  StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
3064  << "\""; \
3065  UNIMPLEMENTED();
3066 
3067 void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
3068  Instruction* instr, FlagsCondition condition,
3069  Label* tlabel, Label* flabel, bool fallthru) {
3070 #undef __
3071 #define __ tasm->
3072  MipsOperandConverter i(gen, instr);
3073 
3074  Condition cc = kNoCondition;
3075  // MIPS does not have condition code flags, so compare and branch are
3076  // implemented differently than on other architectures. The compare
3077  // operations emit MIPS pseudo-instructions, which are handled here by
3078  // branch instructions that do the actual comparison. It is essential that
3079  // the input registers of the compare pseudo-op are not modified before
3080  // this branch op, as they are tested here.
3081 
3082  if (instr->arch_opcode() == kMips64Tst) {
3083  cc = FlagsConditionToConditionTst(condition);
3084  __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
3085  } else if (instr->arch_opcode() == kMips64Dadd ||
3086  instr->arch_opcode() == kMips64Dsub) {
3087  cc = FlagsConditionToConditionOvf(condition);
3088  __ dsra32(kScratchReg, i.OutputRegister(), 0);
3089  __ sra(kScratchReg2, i.OutputRegister(), 31);
3090  __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
3091  } else if (instr->arch_opcode() == kMips64DaddOvf ||
3092  instr->arch_opcode() == kMips64DsubOvf) {
3093  switch (condition) {
3094  // Overflow occurs if overflow register is negative
3095  case kOverflow:
3096  __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
3097  break;
3098  case kNotOverflow:
3099  __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
3100  break;
3101  default:
3102  UNSUPPORTED_COND(instr->arch_opcode(), condition);
3103  break;
3104  }
3105  } else if (instr->arch_opcode() == kMips64MulOvf) {
3106  // Overflow occurs if overflow register is not zero
3107  switch (condition) {
3108  case kOverflow:
3109  __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
3110  break;
3111  case kNotOverflow:
3112  __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
3113  break;
3114  default:
3115  UNSUPPORTED_COND(kMips64MulOvf, condition);
3116  break;
3117  }
3118  } else if (instr->arch_opcode() == kMips64Cmp) {
3119  cc = FlagsConditionToConditionCmp(condition);
3120  __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
3121  } else if (instr->arch_opcode() == kMips64CmpS ||
3122  instr->arch_opcode() == kMips64CmpD) {
3123  bool predicate;
3124  FlagsConditionToConditionCmpFPU(predicate, condition);
3125  if (predicate) {
3126  __ BranchTrueF(tlabel);
3127  } else {
3128  __ BranchFalseF(tlabel);
3129  }
3130  } else {
3131  PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
3132  instr->arch_opcode());
3133  UNIMPLEMENTED();
3134  }
3135  if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
3136 #undef __
3137 #define __ tasm()->
3138 }
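// [Editor's sketch, not part of the original file] The kMips64Dadd/Dsub
// branch above detects 32-bit overflow without condition flags: dsra32
// extracts bits 63..32 of the 64-bit result, sra replicates bit 31, and the
// two differ exactly when the result does not sign-extend from 32 bits.
// A portable C++ model of that check:
static inline bool Int32ResultOverflowed(int64_t result) {
  // Equivalent to: result != static_cast<int32_t>(result).
  return (result >> 32) !=
         (static_cast<int64_t>(static_cast<int32_t>(result)) >> 31);
}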
3139 
3140 // Assembles branches after an instruction.
3141 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3142  Label* tlabel = branch->true_label;
3143  Label* flabel = branch->false_label;
3144 
3145  AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
3146  branch->fallthru);
3147 }
3148 
3149 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
3150  Instruction* instr) {
3151  // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
3152  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
3153  return;
3154  }
3155 
3156  MipsOperandConverter i(this, instr);
3157  condition = NegateFlagsCondition(condition);
3158 
3159  switch (instr->arch_opcode()) {
3160  case kMips64Cmp: {
3161  __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
3162  i.InputOperand(1),
3163  FlagsConditionToConditionCmp(condition));
3164  }
3165  return;
3166  case kMips64Tst: {
3167  switch (condition) {
3168  case kEqual:
3169  __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
3170  break;
3171  case kNotEqual:
3172  __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3173  kScratchReg);
3174  break;
3175  default:
3176  UNREACHABLE();
3177  }
3178  }
3179  return;
3180  case kMips64Dadd:
3181  case kMips64Dsub: {
3182  // The overflow check produces 1 or 0 in the result.
3183  __ dsrl32(kScratchReg, i.OutputRegister(), 31);
3184  __ srl(kScratchReg2, i.OutputRegister(), 31);
3185  __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
3186  switch (condition) {
3187  case kOverflow:
3188  __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3189  kScratchReg2);
3190  break;
3191  case kNotOverflow:
3192  __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
3193  break;
3194  default:
3195  UNSUPPORTED_COND(instr->arch_opcode(), condition);
3196  }
3197  }
3198  return;
3199  case kMips64DaddOvf:
3200  case kMips64DsubOvf: {
3201  // Overflow occurs if overflow register is negative
3202  __ Slt(kScratchReg2, kScratchReg, zero_reg);
3203  switch (condition) {
3204  case kOverflow:
3205  __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3206  kScratchReg2);
3207  break;
3208  case kNotOverflow:
3209  __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
3210  break;
3211  default:
3212  UNSUPPORTED_COND(instr->arch_opcode(), condition);
3213  }
3214  }
3215  return;
3216  case kMips64MulOvf: {
3217  // Overflow occurs if overflow register is not zero
3218  switch (condition) {
3219  case kOverflow:
3220  __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3221  kScratchReg);
3222  break;
3223  case kNotOverflow:
3224  __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
3225  break;
3226  default:
3227  UNSUPPORTED_COND(instr->arch_opcode(), condition);
3228  }
3229  }
3230  return;
3231  case kMips64CmpS:
3232  case kMips64CmpD: {
3233  bool predicate;
3234  FlagsConditionToConditionCmpFPU(predicate, condition);
3235  if (predicate) {
3236  __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
3237  } else {
3238  __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
3239  }
3240  }
3241  return;
3242  default:
3243  UNREACHABLE();
3244  break;
3245  }
3246 }
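// [Editor's sketch, not part of the original file] What the poisoning above
// buys: on the architecturally taken path kSpeculationPoisonRegister stays
// all ones, while a mispredicted (transient) execution of the same code runs
// a LoadZero* and leaves it zero. Values that are data-dependent on the
// branch can then be masked so a speculative out-of-bounds access collapses
// to a harmless zero:
static inline uint64_t MaskBySpeculationPoison(uint64_t value,
                                               uint64_t poison) {
  // poison == ~0 on the correct path, 0 under misspeculation.
  return value & poison;
}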
3247 
3248 #undef UNSUPPORTED_COND
3249 
3250 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3251  BranchInfo* branch) {
3252  AssembleArchBranch(instr, branch);
3253 }
3254 
3255 void CodeGenerator::AssembleArchJump(RpoNumber target) {
3256  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
3257 }
3258 
3259 void CodeGenerator::AssembleArchTrap(Instruction* instr,
3260  FlagsCondition condition) {
3261  class OutOfLineTrap final : public OutOfLineCode {
3262  public:
3263  OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3264  : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
3265  void Generate() final {
3266  MipsOperandConverter i(gen_, instr_);
3267  TrapId trap_id =
3268  static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
3269  GenerateCallToTrap(trap_id);
3270  }
3271 
3272  private:
3273  void GenerateCallToTrap(TrapId trap_id) {
3274  if (trap_id == TrapId::kInvalid) {
3275  // We cannot test calls to the runtime in cctest/test-run-wasm.
3276  // Therefore we emit a call to C here instead of a call to the runtime.
3277  // We use the context register as the scratch register, because we do
3278  // not have a context here.
3279  __ PrepareCallCFunction(0, 0, cp);
3280  __ CallCFunction(
3281  ExternalReference::wasm_call_trap_callback_for_testing(), 0);
3282  __ LeaveFrame(StackFrame::WASM_COMPILED);
3283  auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
3284  int pop_count =
3285  static_cast<int>(call_descriptor->StackParameterCount());
3286  pop_count += (pop_count & 1); // align
3287  __ Drop(pop_count);
3288  __ Ret();
3289  } else {
3290  gen_->AssembleSourcePosition(instr_);
3291  // A direct call to a wasm runtime stub defined in this module.
3292  // Just encode the stub index. This will be patched when the code
3293  // is added to the native module and copied into wasm code space.
3294  __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3295  ReferenceMap* reference_map =
3296  new (gen_->zone()) ReferenceMap(gen_->zone());
3297  gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
3298  Safepoint::kNoLazyDeopt);
3299  if (FLAG_debug_code) {
3300  __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
3301  }
3302  }
3303  }
3304  Instruction* instr_;
3305  CodeGenerator* gen_;
3306  };
3307  auto ool = new (zone()) OutOfLineTrap(this, instr);
3308  Label* tlabel = ool->entry();
3309  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
3310 }
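// [Editor's note, illustrative only] In the testing path above, the popped
// slot count is rounded up to an even number because the MIPS64 n64 ABI
// keeps sp 16-byte aligned while each slot is 8 bytes:
static inline int RoundPopCountToEvenSlots(int pop_count) {
  return pop_count + (pop_count & 1);  // e.g. 3 -> 4, 4 stays 4
}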
3311 
3312 // Assembles boolean materializations after an instruction.
3313 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3314  FlagsCondition condition) {
3315  MipsOperandConverter i(this, instr);
3316  Label done;
3317 
3318  // Materialize a full 32-bit 1 or 0 value. The result register is always the
3319  // last output of the instruction.
3320  Label false_value;
3321  DCHECK_NE(0u, instr->OutputCount());
3322  Register result = i.OutputRegister(instr->OutputCount() - 1);
3323  Condition cc = kNoCondition;
3324  // MIPS does not have condition code flags, so compare and branch are
3325  // implemented differently from other architectures. The compare operations
3326  // emit MIPS pseudo-instructions, which are checked and handled here.
3327 
3328  if (instr->arch_opcode() == kMips64Tst) {
3329  cc = FlagsConditionToConditionTst(condition);
3330  if (cc == eq) {
3331  __ Sltu(result, kScratchReg, 1);
3332  } else {
3333  __ Sltu(result, zero_reg, kScratchReg);
3334  }
3335  return;
3336  } else if (instr->arch_opcode() == kMips64Dadd ||
3337  instr->arch_opcode() == kMips64Dsub) {
3338  cc = FlagsConditionToConditionOvf(condition);
3339  // The overflow check produces 1 or 0 in the result.
3340  __ dsrl32(kScratchReg, i.OutputRegister(), 31);
3341  __ srl(kScratchReg2, i.OutputRegister(), 31);
3342  __ xor_(result, kScratchReg, kScratchReg2);
3343  if (cc == eq) // Toggle result for not overflow.
3344  __ xori(result, result, 1);
3345  return;
3346  } else if (instr->arch_opcode() == kMips64DaddOvf ||
3347  instr->arch_opcode() == kMips64DsubOvf) {
3348  // Overflow occurs if overflow register is negative
3349  __ slt(result, kScratchReg, zero_reg);
3350  } else if (instr->arch_opcode() == kMips64MulOvf) {
3351  // Overflow occurs if overflow register is not zero
3352  __ Sgtu(result, kScratchReg, zero_reg);
3353  } else if (instr->arch_opcode() == kMips64Cmp) {
3354  cc = FlagsConditionToConditionCmp(condition);
3355  switch (cc) {
3356  case eq:
3357  case ne: {
3358  Register left = i.InputRegister(0);
3359  Operand right = i.InputOperand(1);
3360  if (instr->InputAt(1)->IsImmediate()) {
3361  if (is_int16(-right.immediate())) {
3362  if (right.immediate() == 0) {
3363  if (cc == eq) {
3364  __ Sltu(result, left, 1);
3365  } else {
3366  __ Sltu(result, zero_reg, left);
3367  }
3368  } else {
3369  __ Daddu(result, left, Operand(-right.immediate()));
3370  if (cc == eq) {
3371  __ Sltu(result, result, 1);
3372  } else {
3373  __ Sltu(result, zero_reg, result);
3374  }
3375  }
3376  } else {
3377  if (is_uint16(right.immediate())) {
3378  __ Xor(result, left, right);
3379  } else {
3380  __ li(kScratchReg, right);
3381  __ Xor(result, left, kScratchReg);
3382  }
3383  if (cc == eq) {
3384  __ Sltu(result, result, 1);
3385  } else {
3386  __ Sltu(result, zero_reg, result);
3387  }
3388  }
3389  } else {
3390  __ Xor(result, left, right);
3391  if (cc == eq) {
3392  __ Sltu(result, result, 1);
3393  } else {
3394  __ Sltu(result, zero_reg, result);
3395  }
3396  }
3397  } break;
3398  case lt:
3399  case ge: {
3400  Register left = i.InputRegister(0);
3401  Operand right = i.InputOperand(1);
3402  __ Slt(result, left, right);
3403  if (cc == ge) {
3404  __ xori(result, result, 1);
3405  }
3406  } break;
3407  case gt:
3408  case le: {
3409  Register left = i.InputRegister(1);
3410  Operand right = i.InputOperand(0);
3411  __ Slt(result, left, right);
3412  if (cc == le) {
3413  __ xori(result, result, 1);
3414  }
3415  } break;
3416  case lo:
3417  case hs: {
3418  Register left = i.InputRegister(0);
3419  Operand right = i.InputOperand(1);
3420  __ Sltu(result, left, right);
3421  if (cc == hs) {
3422  __ xori(result, result, 1);
3423  }
3424  } break;
3425  case hi:
3426  case ls: {
3427  Register left = i.InputRegister(1);
3428  Operand right = i.InputOperand(0);
3429  __ Sltu(result, left, right);
3430  if (cc == ls) {
3431  __ xori(result, result, 1);
3432  }
3433  } break;
3434  default:
3435  UNREACHABLE();
3436  }
3437  return;
3438  } else if (instr->arch_opcode() == kMips64CmpD ||
3439  instr->arch_opcode() == kMips64CmpS) {
3440  FPURegister left = i.InputOrZeroDoubleRegister(0);
3441  FPURegister right = i.InputOrZeroDoubleRegister(1);
3442  if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
3443  !__ IsDoubleZeroRegSet()) {
3444  __ Move(kDoubleRegZero, 0.0);
3445  }
3446  bool predicate;
3447  FlagsConditionToConditionCmpFPU(predicate, condition);
3448  if (kArchVariant != kMips64r6) {
3449  __ li(result, Operand(1));
3450  if (predicate) {
3451  __ Movf(result, zero_reg);
3452  } else {
3453  __ Movt(result, zero_reg);
3454  }
3455  } else {
3456  if (instr->arch_opcode() == kMips64CmpD) {
3457  __ dmfc1(result, kDoubleCompareReg);
3458  } else {
3459  DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
3460  __ mfc1(result, kDoubleCompareReg);
3461  }
3462  if (predicate) {
3463  __ And(result, result, 1); // cmp returns all 1's/0's, use only LSB.
3464  } else {
3465  __ Addu(result, result, 1); // Toggle result for not equal.
3466  }
3467  }
3468  return;
3469  } else {
3470  PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
3471  instr->arch_opcode());
3472  TRACE_UNIMPL();
3473  UNIMPLEMENTED();
3474  }
3475 }
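// [Editor's sketch, not part of the original file] AssembleArchBoolean leans
// on set-on-less-than to materialize booleans without flags: "Sltu rd, rs, 1"
// yields rs == 0 and "Sltu rd, zero_reg, rs" yields rs != 0, each as a clean
// 0/1 value. The same idiom in C++:
static inline uint64_t IsZeroBit(uint64_t x) {
  return x < 1 ? 1 : 0;  // Sltu result, x, 1
}
static inline uint64_t IsNonZeroBit(uint64_t x) {
  return 0 < x ? 1 : 0;  // Sltu result, zero_reg, x
}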
3476 
3477 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
3478  MipsOperandConverter i(this, instr);
3479  Register input = i.InputRegister(0);
3480  std::vector<std::pair<int32_t, Label*>> cases;
3481  for (size_t index = 2; index < instr->InputCount(); index += 2) {
3482  cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
3483  }
3484  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
3485  cases.data() + cases.size());
3486 }
3487 
3488 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
3489  MipsOperandConverter i(this, instr);
3490  Register input = i.InputRegister(0);
3491  for (size_t index = 2; index < instr->InputCount(); index += 2) {
3492  __ li(kScratchReg, Operand(i.InputInt32(index + 0)));
3493  __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
3494  }
3495  AssembleArchJump(i.InputRpo(1));
3496 }
3497 
3498 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
3499  MipsOperandConverter i(this, instr);
3500  Register input = i.InputRegister(0);
3501  size_t const case_count = instr->InputCount() - 2;
3502 
3503  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
3504  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
3505  return GetLabel(i.InputRpo(index + 2));
3506  });
3507 }
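// [Editor's note, illustrative only] The single unsigned "hs" compare above
// is the whole bounds check: reinterpreted as unsigned, any negative index
// exceeds case_count, so one branch rejects both underflow and overflow:
static inline bool IndexInSwitchTable(int64_t index, uint64_t case_count) {
  return static_cast<uint64_t>(index) < case_count;
}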
3508 
3509 void CodeGenerator::FinishFrame(Frame* frame) {
3510  auto call_descriptor = linkage()->GetIncomingDescriptor();
3511 
3512  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3513  if (saves_fpu != 0) {
3514  int count = base::bits::CountPopulation(saves_fpu);
3515  DCHECK_EQ(kNumCalleeSavedFPU, count);
3516  frame->AllocateSavedCalleeRegisterSlots(count *
3517  (kDoubleSize / kPointerSize));
3518  }
3519 
3520  const RegList saves = call_descriptor->CalleeSavedRegisters();
3521  if (saves != 0) {
3522  int count = base::bits::CountPopulation(saves);
3523  DCHECK_EQ(kNumCalleeSaved, count + 1);
3524  frame->AllocateSavedCalleeRegisterSlots(count);
3525  }
3526 }
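// [Editor's note] On MIPS64 kDoubleSize and kPointerSize are both 8, so the
// kDoubleSize / kPointerSize factor above is 1 slot per FPU register; the
// expression is written generically and would be 2 on 32-bit MIPS.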
3527 
3528 void CodeGenerator::AssembleConstructFrame() {
3529  auto call_descriptor = linkage()->GetIncomingDescriptor();
3530 
3531  if (frame_access_state()->has_frame()) {
3532  if (call_descriptor->IsCFunctionCall()) {
3533  __ Push(ra, fp);
3534  __ mov(fp, sp);
3535  } else if (call_descriptor->IsJSFunctionCall()) {
3536  __ Prologue();
3537  if (call_descriptor->PushArgumentCount()) {
3538  __ Push(kJavaScriptCallArgCountRegister);
3539  }
3540  } else {
3541  __ StubPrologue(info()->GetOutputStackFrameType());
3542  if (call_descriptor->IsWasmFunctionCall()) {
3543  __ Push(kWasmInstanceRegister);
3544  } else if (call_descriptor->IsWasmImportWrapper()) {
3545  // WASM import wrappers are passed a tuple in the place of the instance.
3546  // Unpack the tuple into the instance and the target callable.
3547  // This must be done here in the codegen because it cannot be expressed
3548  // properly in the graph.
3549  __ ld(kJSFunctionRegister,
3550  FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
3551  __ ld(kWasmInstanceRegister,
3552  FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
3553  __ Push(kWasmInstanceRegister);
3554  }
3555  }
3556  }
3557 
3558  int shrink_slots = frame()->GetTotalFrameSlotCount() -
3559  call_descriptor->CalculateFixedFrameSize();
3560 
3561  if (info()->is_osr()) {
3562  // TurboFan OSR-compiled functions cannot be entered directly.
3563  __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
3564 
3565  // Unoptimized code jumps directly to this entrypoint while the unoptimized
3566  // frame is still on the stack. Optimized code uses OSR values directly from
3567  // the unoptimized frame. Thus, all that needs to be done is to allocate the
3568  // remaining stack slots.
3569  if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
3570  osr_pc_offset_ = __ pc_offset();
3571  shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
3572  ResetSpeculationPoison();
3573  }
3574 
3575  const RegList saves = call_descriptor->CalleeSavedRegisters();
3576  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3577  const int returns = frame()->GetReturnSlotCount();
3578 
3579  // Skip callee-saved and return slots, which are pushed below.
3580  shrink_slots -= base::bits::CountPopulation(saves);
3581  shrink_slots -= base::bits::CountPopulation(saves_fpu);
3582  shrink_slots -= returns;
3583  if (shrink_slots > 0) {
3584  __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
3585  }
3586 
3587  if (saves_fpu != 0) {
3588  // Save callee-saved FPU registers.
3589  __ MultiPushFPU(saves_fpu);
3590  DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
3591  }
3592 
3593  if (saves != 0) {
3594  // Save callee-saved registers.
3595  __ MultiPush(saves);
3596  DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
3597  }
3598 
3599  if (returns != 0) {
3600  // Create space for returns.
3601  __ Dsubu(sp, sp, Operand(returns * kPointerSize));
3602  }
3603 }
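// [Editor's sketch, not part of the original file] The slot accounting in
// AssembleConstructFrame as plain arithmetic (names illustrative; every
// quantity is in 8-byte slots):
static inline int RemainingShrinkSlots(int total_frame_slots,
                                       int fixed_frame_slots, int gp_saves,
                                       int fpu_saves, int return_slots) {
  // Only this remainder is allocated with the explicit Dsubu; the pushes of
  // callee-saved registers and the return area cover the rest of the frame.
  return total_frame_slots - fixed_frame_slots - gp_saves - fpu_saves -
         return_slots;
}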
3604 
3605 void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
3606  auto call_descriptor = linkage()->GetIncomingDescriptor();
3607 
3608  const int returns = frame()->GetReturnSlotCount();
3609  if (returns != 0) {
3610  __ Daddu(sp, sp, Operand(returns * kPointerSize));
3611  }
3612 
3613  // Restore GP registers.
3614  const RegList saves = call_descriptor->CalleeSavedRegisters();
3615  if (saves != 0) {
3616  __ MultiPop(saves);
3617  }
3618 
3619  // Restore FPU registers.
3620  const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3621  if (saves_fpu != 0) {
3622  __ MultiPopFPU(saves_fpu);
3623  }
3624 
3625  MipsOperandConverter g(this, nullptr);
3626  if (call_descriptor->IsCFunctionCall()) {
3627  AssembleDeconstructFrame();
3628  } else if (frame_access_state()->has_frame()) {
3629  // Canonicalize JSFunction return sites for now unless they have a variable
3630  // number of stack slot pops.
3631  if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
3632  if (return_label_.is_bound()) {
3633  __ Branch(&return_label_);
3634  return;
3635  } else {
3636  __ bind(&return_label_);
3637  AssembleDeconstructFrame();
3638  }
3639  } else {
3640  AssembleDeconstructFrame();
3641  }
3642  }
3643  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
3644  if (pop->IsImmediate()) {
3645  pop_count += g.ToConstant(pop).ToInt32();
3646  } else {
3647  Register pop_reg = g.ToRegister(pop);
3648  __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
3649  __ Daddu(sp, sp, pop_reg);
3650  }
3651  if (pop_count != 0) {
3652  __ DropAndRet(pop_count);
3653  } else {
3654  __ Ret();
3655  }
3656 }
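// [Editor's note, illustrative only] When the pop count arrives in a
// register it is measured in slots, so AssembleReturn shifts it left by
// kPointerSizeLog2 (3 on MIPS64) to convert slots to bytes before bumping
// sp, i.e. sp += pop_reg << 3.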
3657 
3658 void CodeGenerator::FinishCode() {}
3659 
3660 void CodeGenerator::AssembleMove(InstructionOperand* source,
3661  InstructionOperand* destination) {
3662  MipsOperandConverter g(this, nullptr);
3663  // Dispatch on the source and destination operand kinds. Not all
3664  // combinations are possible.
3665  if (source->IsRegister()) {
3666  DCHECK(destination->IsRegister() || destination->IsStackSlot());
3667  Register src = g.ToRegister(source);
3668  if (destination->IsRegister()) {
3669  __ mov(g.ToRegister(destination), src);
3670  } else {
3671  __ Sd(src, g.ToMemOperand(destination));
3672  }
3673  } else if (source->IsStackSlot()) {
3674  DCHECK(destination->IsRegister() || destination->IsStackSlot());
3675  MemOperand src = g.ToMemOperand(source);
3676  if (destination->IsRegister()) {
3677  __ Ld(g.ToRegister(destination), src);
3678  } else {
3679  Register temp = kScratchReg;
3680  __ Ld(temp, src);
3681  __ Sd(temp, g.ToMemOperand(destination));
3682  }
3683  } else if (source->IsConstant()) {
3684  Constant src = g.ToConstant(source);
3685  if (destination->IsRegister() || destination->IsStackSlot()) {
3686  Register dst =
3687  destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
3688  switch (src.type()) {
3689  case Constant::kInt32:
3690  __ li(dst, Operand(src.ToInt32()));
3691  break;
3692  case Constant::kFloat32:
3693  __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
3694  break;
3695  case Constant::kInt64:
3696  if (RelocInfo::IsWasmReference(src.rmode())) {
3697  __ li(dst, Operand(src.ToInt64(), src.rmode()));
3698  } else {
3699  __ li(dst, Operand(src.ToInt64()));
3700  }
3701  break;
3702  case Constant::kFloat64:
3703  __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
3704  break;
3705  case Constant::kExternalReference:
3706  __ li(dst, src.ToExternalReference());
3707  break;
3708  case Constant::kDelayedStringConstant:
3709  __ li(dst, src.ToDelayedStringConstant());
3710  break;
3711  case Constant::kHeapObject: {
3712  Handle<HeapObject> src_object = src.ToHeapObject();
3713  RootIndex index;
3714  if (IsMaterializableFromRoot(src_object, &index)) {
3715  __ LoadRoot(dst, index);
3716  } else {
3717  __ li(dst, src_object);
3718  }
3719  break;
3720  }
3721  case Constant::kRpoNumber:
3722  UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
3723  break;
3724  }
3725  if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
3726  } else if (src.type() == Constant::kFloat32) {
3727  if (destination->IsFPStackSlot()) {
3728  MemOperand dst = g.ToMemOperand(destination);
3729  if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
3730  __ Sd(zero_reg, dst);
3731  } else {
3732  __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
3733  __ Sd(kScratchReg, dst);
3734  }
3735  } else {
3736  DCHECK(destination->IsFPRegister());
3737  FloatRegister dst = g.ToSingleRegister(destination);
3738  __ Move(dst, src.ToFloat32());
3739  }
3740  } else {
3741  DCHECK_EQ(Constant::kFloat64, src.type());
3742  DoubleRegister dst = destination->IsFPRegister()
3743  ? g.ToDoubleRegister(destination)
3744  : kScratchDoubleReg;
3745  __ Move(dst, src.ToFloat64().value());
3746  if (destination->IsFPStackSlot()) {
3747  __ Sdc1(dst, g.ToMemOperand(destination));
3748  }
3749  }
3750  } else if (source->IsFPRegister()) {
3751  MachineRepresentation rep = LocationOperand::cast(source)->representation();
3752  if (rep == MachineRepresentation::kSimd128) {
3753  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3754  MSARegister src = g.ToSimd128Register(source);
3755  if (destination->IsSimd128Register()) {
3756  MSARegister dst = g.ToSimd128Register(destination);
3757  __ move_v(dst, src);
3758  } else {
3759  DCHECK(destination->IsSimd128StackSlot());
3760  __ st_b(src, g.ToMemOperand(destination));
3761  }
3762  } else {
3763  FPURegister src = g.ToDoubleRegister(source);
3764  if (destination->IsFPRegister()) {
3765  FPURegister dst = g.ToDoubleRegister(destination);
3766  __ Move(dst, src);
3767  } else {
3768  DCHECK(destination->IsFPStackSlot());
3769  __ Sdc1(src, g.ToMemOperand(destination));
3770  }
3771  }
3772  } else if (source->IsFPStackSlot()) {
3773  DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
3774  MemOperand src = g.ToMemOperand(source);
3775  MachineRepresentation rep = LocationOperand::cast(source)->representation();
3776  if (rep == MachineRepresentation::kSimd128) {
3777  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3778  if (destination->IsSimd128Register()) {
3779  __ ld_b(g.ToSimd128Register(destination), src);
3780  } else {
3781  DCHECK(destination->IsSimd128StackSlot());
3782  MSARegister temp = kSimd128ScratchReg;
3783  __ ld_b(temp, src);
3784  __ st_b(temp, g.ToMemOperand(destination));
3785  }
3786  } else {
3787  if (destination->IsFPRegister()) {
3788  __ Ldc1(g.ToDoubleRegister(destination), src);
3789  } else {
3790  DCHECK(destination->IsFPStackSlot());
3791  FPURegister temp = kScratchDoubleReg;
3792  __ Ldc1(temp, src);
3793  __ Sdc1(temp, g.ToMemOperand(destination));
3794  }
3795  }
3796  } else {
3797  UNREACHABLE();
3798  }
3799 }
3800 
3801 void CodeGenerator::AssembleSwap(InstructionOperand* source,
3802  InstructionOperand* destination) {
3803  MipsOperandConverter g(this, nullptr);
3804  // Dispatch on the source and destination operand kinds. Not all
3805  // combinations are possible.
3806  if (source->IsRegister()) {
3807  // Register-register.
3808  Register temp = kScratchReg;
3809  Register src = g.ToRegister(source);
3810  if (destination->IsRegister()) {
3811  Register dst = g.ToRegister(destination);
3812  __ Move(temp, src);
3813  __ Move(src, dst);
3814  __ Move(dst, temp);
3815  } else {
3816  DCHECK(destination->IsStackSlot());
3817  MemOperand dst = g.ToMemOperand(destination);
3818  __ mov(temp, src);
3819  __ Ld(src, dst);
3820  __ Sd(temp, dst);
3821  }
3822  } else if (source->IsStackSlot()) {
3823  DCHECK(destination->IsStackSlot());
3824  Register temp_0 = kScratchReg;
3825  Register temp_1 = kScratchReg2;
3826  MemOperand src = g.ToMemOperand(source);
3827  MemOperand dst = g.ToMemOperand(destination);
3828  __ Ld(temp_0, src);
3829  __ Ld(temp_1, dst);
3830  __ Sd(temp_0, dst);
3831  __ Sd(temp_1, src);
3832  } else if (source->IsFPRegister()) {
3833  MachineRepresentation rep = LocationOperand::cast(source)->representation();
3834  if (rep == MachineRepresentation::kSimd128) {
3835  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3836  MSARegister temp = kSimd128ScratchReg;
3837  MSARegister src = g.ToSimd128Register(source);
3838  if (destination->IsSimd128Register()) {
3839  MSARegister dst = g.ToSimd128Register(destination);
3840  __ move_v(temp, src);
3841  __ move_v(src, dst);
3842  __ move_v(dst, temp);
3843  } else {
3844  DCHECK(destination->IsSimd128StackSlot());
3845  MemOperand dst = g.ToMemOperand(destination);
3846  __ move_v(temp, src);
3847  __ ld_b(src, dst);
3848  __ st_b(temp, dst);
3849  }
3850  } else {
3851  FPURegister temp = kScratchDoubleReg;
3852  FPURegister src = g.ToDoubleRegister(source);
3853  if (destination->IsFPRegister()) {
3854  FPURegister dst = g.ToDoubleRegister(destination);
3855  __ Move(temp, src);
3856  __ Move(src, dst);
3857  __ Move(dst, temp);
3858  } else {
3859  DCHECK(destination->IsFPStackSlot());
3860  MemOperand dst = g.ToMemOperand(destination);
3861  __ Move(temp, src);
3862  __ Ldc1(src, dst);
3863  __ Sdc1(temp, dst);
3864  }
3865  }
3866  } else if (source->IsFPStackSlot()) {
3867  DCHECK(destination->IsFPStackSlot());
3868  Register temp_0 = kScratchReg;
3869  MemOperand src0 = g.ToMemOperand(source);
3870  MemOperand src1(src0.rm(), src0.offset() + kIntSize);
3871  MemOperand dst0 = g.ToMemOperand(destination);
3872  MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
3873  MachineRepresentation rep = LocationOperand::cast(source)->representation();
3874  if (rep == MachineRepresentation::kSimd128) {
3875  MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
3876  MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
3877  MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
3878  MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
3879  CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3880  MSARegister temp_1 = kSimd128ScratchReg;
3881  __ ld_b(temp_1, dst0); // Save destination in temp_1.
3882  __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
3883  __ Sw(temp_0, dst0);
3884  __ Lw(temp_0, src1);
3885  __ Sw(temp_0, dst1);
3886  __ Lw(temp_0, src2);
3887  __ Sw(temp_0, dst2);
3888  __ Lw(temp_0, src3);
3889  __ Sw(temp_0, dst3);
3890  __ st_b(temp_1, src0);
3891  } else {
3892  FPURegister temp_1 = kScratchDoubleReg;
3893  __ Ldc1(temp_1, dst0); // Save destination in temp_1.
3894  __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
3895  __ Sw(temp_0, dst0);
3896  __ Lw(temp_0, src1);
3897  __ Sw(temp_0, dst1);
3898  __ Sdc1(temp_1, src0);
3899  }
3900  } else {
3901  // No other combinations are possible.
3902  UNREACHABLE();
3903  }
3904 }
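// [Editor's sketch, not part of the original file] The slot<->slot cases
// above are the classic swap through two temporaries; with both scratch
// registers available, no extra stack traffic is needed:
static inline void SwapSlots(uint64_t* src, uint64_t* dst) {
  uint64_t temp_0 = *src;  // Ld temp_0, src
  uint64_t temp_1 = *dst;  // Ld temp_1, dst
  *dst = temp_0;           // Sd temp_0, dst
  *src = temp_1;           // Sd temp_1, src
}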
3905 
3906 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
3907  // On 64-bit MIPS we emit the jump tables inline.
3908  UNREACHABLE();
3909 }
3910 
3911 #undef ASSEMBLE_ATOMIC_LOAD_INTEGER
3912 #undef ASSEMBLE_ATOMIC_STORE_INTEGER
3913 #undef ASSEMBLE_ATOMIC_BINOP
3914 #undef ASSEMBLE_ATOMIC_BINOP_EXT
3915 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
3916 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
3917 #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
3918 #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
3919 #undef ASSEMBLE_IEEE754_BINOP
3920 #undef ASSEMBLE_IEEE754_UNOP
3921 
3922 #undef TRACE_MSG
3923 #undef TRACE_UNIMPL
3924 #undef __
3925 
3926 } // namespace compiler
3927 } // namespace internal
3928 } // namespace v8