5 #include "src/assembler-inl.h" 6 #include "src/callable.h" 7 #include "src/compiler/backend/code-generator-impl.h" 8 #include "src/compiler/backend/code-generator.h" 9 #include "src/compiler/backend/gap-resolver.h" 10 #include "src/compiler/node-matchers.h" 11 #include "src/compiler/osr.h" 12 #include "src/heap/heap-inl.h" 13 #include "src/macro-assembler.h" 14 #include "src/mips64/constants-mips64.h" 15 #include "src/optimized-compilation-info.h" 16 #include "src/wasm/wasm-code-manager.h" 25 #define TRACE_MSG(msg) \ 26 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ 29 #define TRACE_UNIMPL() \ 30 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ 34 class MipsOperandConverter final :
public InstructionOperandConverter {
36 MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
37 : InstructionOperandConverter(gen, instr) {}
39 FloatRegister OutputSingleRegister(
size_t index = 0) {
40 return ToSingleRegister(instr_->OutputAt(index));
43 FloatRegister InputSingleRegister(
size_t index) {
44 return ToSingleRegister(instr_->InputAt(index));
47 FloatRegister ToSingleRegister(InstructionOperand* op) {
50 return ToDoubleRegister(op);
53 Register InputOrZeroRegister(
size_t index) {
54 if (instr_->InputAt(index)->IsImmediate()) {
55 DCHECK_EQ(0, InputInt32(index));
58 return InputRegister(index);
61 DoubleRegister InputOrZeroDoubleRegister(
size_t index) {
62 if (instr_->InputAt(index)->IsImmediate())
return kDoubleRegZero;
64 return InputDoubleRegister(index);
67 DoubleRegister InputOrZeroSingleRegister(
size_t index) {
68 if (instr_->InputAt(index)->IsImmediate())
return kDoubleRegZero;
70 return InputSingleRegister(index);
73 Operand InputImmediate(
size_t index) {
74 Constant constant = ToConstant(instr_->InputAt(index));
75 switch (constant.type()) {
76 case Constant::kInt32:
77 return Operand(constant.ToInt32());
78 case Constant::kInt64:
79 return Operand(constant.ToInt64());
80 case Constant::kFloat32:
81 return Operand::EmbeddedNumber(constant.ToFloat32());
82 case Constant::kFloat64:
83 return Operand::EmbeddedNumber(constant.ToFloat64().value());
84 case Constant::kExternalReference:
85 case Constant::kHeapObject:
89 case Constant::kDelayedStringConstant:
90 return Operand::EmbeddedStringConstant(
91 constant.ToDelayedStringConstant());
92 case Constant::kRpoNumber:
99 Operand InputOperand(
size_t index) {
100 InstructionOperand* op = instr_->InputAt(index);
101 if (op->IsRegister()) {
102 return Operand(ToRegister(op));
104 return InputImmediate(index);
107 MemOperand MemoryOperand(
size_t* first_index) {
108 const size_t index = *first_index;
109 switch (AddressingModeField::decode(instr_->opcode())) {
114 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
122 MemOperand MemoryOperand(
size_t index = 0) {
return MemoryOperand(&index); }
124 MemOperand ToMemOperand(InstructionOperand* op)
const {
126 DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
127 return SlotToMemOperand(AllocatedOperand::cast(op)->index());
130 MemOperand SlotToMemOperand(
int slot)
const {
131 FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
132 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
// Returns true when the instruction's input operand at |index| is allocated
// to a general-purpose register (as opposed to an immediate or a stack slot).
static inline bool HasRegisterInput(Instruction* instr,
                                    size_t index) {
  return instr->InputAt(index)->IsRegister();
142 class OutOfLineRecordWrite final :
public OutOfLineCode {
144 OutOfLineRecordWrite(CodeGenerator* gen, Register
object, Register index,
145 Register value, Register scratch0, Register scratch1,
146 RecordWriteMode mode, StubCallMode stub_mode)
147 : OutOfLineCode(gen),
154 stub_mode_(stub_mode),
155 must_save_lr_(!gen->frame_access_state()->has_frame()),
156 zone_(gen->zone()) {}
158 void Generate() final {
159 if (mode_ > RecordWriteMode::kValueIsPointer) {
160 __ JumpIfSmi(value_, exit());
162 __ CheckPageFlag(value_, scratch0_,
163 MemoryChunk::kPointersToHereAreInterestingMask, eq,
165 __ Daddu(scratch1_, object_, index_);
166 RememberedSetAction
const remembered_set_action =
167 mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
168 : OMIT_REMEMBERED_SET;
169 SaveFPRegsMode
const save_fp_mode =
170 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
175 if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
179 __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
180 save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
182 __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
191 Register
const object_;
192 Register
const index_;
193 Register
const value_;
194 Register
const scratch0_;
195 Register
const scratch1_;
196 RecordWriteMode
const mode_;
197 StubCallMode
const stub_mode_;
202 #define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ 203 class ool_name final : public OutOfLineCode { \ 205 ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ 206 : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ 208 void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ 216 CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
217 CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
218 CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
219 CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
221 #undef CREATE_OOL_CLASS 223 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
229 case kSignedLessThan:
231 case kSignedGreaterThanOrEqual:
233 case kSignedLessThanOrEqual:
235 case kSignedGreaterThan:
237 case kUnsignedLessThan:
239 case kUnsignedGreaterThanOrEqual:
241 case kUnsignedLessThanOrEqual:
243 case kUnsignedGreaterThan:
245 case kUnorderedEqual:
246 case kUnorderedNotEqual:
254 Condition FlagsConditionToConditionTst(FlagsCondition condition) {
266 Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
278 FPUCondition FlagsConditionToConditionCmpFPU(
bool& predicate,
279 FlagsCondition condition) {
287 case kUnsignedLessThan:
290 case kUnsignedGreaterThanOrEqual:
293 case kUnsignedLessThanOrEqual:
296 case kUnsignedGreaterThan:
299 case kUnorderedEqual:
300 case kUnorderedNotEqual:
// If the instruction's MiscField marks this memory access as poisoned
// (kMemoryAccessPoisoned), masks the just-loaded output register with
// kSpeculationPoisonRegister so the value is neutralized on a
// mis-speculated path (Spectre mitigation).
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
                                   InstructionCode opcode, Instruction* instr,
                                   MipsOperandConverter&
                                   i) {
  const MemoryAccessMode access_mode =
      static_cast<MemoryAccessMode>(MiscField::decode(opcode));
  if (access_mode == kMemoryAccessPoisoned) {
    // Mask the freshly loaded value in place.
    Register value = i.OutputRegister();
    codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
323 #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ 325 __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ 329 #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ 332 __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ 336 #define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \ 339 __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ 342 __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ 343 __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \ 344 Operand(i.InputRegister(2))); \ 345 __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ 346 __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ 350 #define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \ 351 size, bin_instr, representation) \ 354 __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ 355 if (representation == 32) { \ 356 __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ 358 DCHECK_EQ(representation, 64); \ 359 __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \ 361 __ Dsubu(i.TempRegister(0), i.TempRegister(0), \ 362 Operand(i.TempRegister(3))); \ 363 __ sll(i.TempRegister(3), i.TempRegister(3), 3); \ 366 __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ 367 __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ 368 size, sign_extend); \ 369 __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ 370 Operand(i.InputRegister(2))); \ 371 __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ 373 __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ 374 __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ 378 #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \ 382 __ bind(&exchange); \ 383 __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ 384 __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 
0)); \ 385 __ mov(i.TempRegister(1), i.InputRegister(2)); \ 386 __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ 387 __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \ 391 #define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \ 392 load_linked, store_conditional, sign_extend, size, representation) \ 395 __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ 396 if (representation == 32) { \ 397 __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ 399 DCHECK_EQ(representation, 64); \ 400 __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ 402 __ Dsubu(i.TempRegister(0), i.TempRegister(0), \ 403 Operand(i.TempRegister(1))); \ 404 __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ 406 __ bind(&exchange); \ 407 __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ 408 __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ 409 size, sign_extend); \ 410 __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ 412 __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ 413 __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ 417 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \ 420 Label compareExchange; \ 422 __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ 424 __ bind(&compareExchange); \ 425 __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ 426 __ BranchShort(&exit, ne, i.InputRegister(2), \ 427 Operand(i.OutputRegister(0))); \ 428 __ mov(i.TempRegister(2), i.InputRegister(3)); \ 429 __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ 430 __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ 431 Operand(zero_reg)); \ 436 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \ 437 load_linked, store_conditional, sign_extend, size, representation) \ 439 Label compareExchange; \ 441 __ daddu(i.TempRegister(0), i.InputRegister(0), 
i.InputRegister(1)); \ 442 if (representation == 32) { \ 443 __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ 445 DCHECK_EQ(representation, 64); \ 446 __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \ 448 __ Dsubu(i.TempRegister(0), i.TempRegister(0), \ 449 Operand(i.TempRegister(1))); \ 450 __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ 452 __ bind(&compareExchange); \ 453 __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ 454 __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ 455 size, sign_extend); \ 456 __ BranchShort(&exit, ne, i.InputRegister(2), \ 457 Operand(i.OutputRegister(0))); \ 458 __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ 460 __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ 461 __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ 462 Operand(zero_reg)); \ 467 #define ASSEMBLE_IEEE754_BINOP(name) \ 469 FrameScope scope(tasm(), StackFrame::MANUAL); \ 470 __ PrepareCallCFunction(0, 2, kScratchReg); \ 471 __ MovToFloatParameters(i.InputDoubleRegister(0), \ 472 i.InputDoubleRegister(1)); \ 473 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ 475 __ MovFromFloatResult(i.OutputDoubleRegister()); \ 478 #define ASSEMBLE_IEEE754_UNOP(name) \ 480 FrameScope scope(tasm(), StackFrame::MANUAL); \ 481 __ PrepareCallCFunction(0, 1, kScratchReg); \ 482 __ MovToFloatParameter(i.InputDoubleRegister(0)); \ 483 __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ 485 __ MovFromFloatResult(i.OutputDoubleRegister()); \ 488 void CodeGenerator::AssembleDeconstructFrame() {
// Restores the caller's return address (ra) and frame pointer (fp) from the
// current frame — if one exists — in preparation for a tail call, then
// switches subsequent frame accesses to be SP-relative.
void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    // Reload ra/fp from the frame before it is dismantled.
    __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  frame_access_state()->SetFrameAccessToSP();
// Before a tail call from a JS function: if the caller pushed an
// arguments-adaptor frame, pops it so the callee observes the real argument
// count. Otherwise falls through (branch on the frame-type marker).
// NOTE(review): this extraction is missing several lines here — the
// remaining scratch-register parameters of the signature, the `Label done`
// declaration and its `bind` — compare against the full file before relying
// on this listing.
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  // Check the frame-type marker in the caller frame's context slot.
  __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch3,
            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  // Load the (Smi-tagged) caller argument count from the adaptor frame.
  Register caller_args_count_reg = scratch1;
  __ Ld(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
528 void AdjustStackPointerForTailCall(TurboAssembler* tasm,
529 FrameAccessState* state,
530 int new_slot_above_sp,
531 bool allow_shrinkage =
true) {
532 int current_sp_offset = state->GetSPToFPSlotCount() +
533 StandardFrameConstants::kFixedSlotCountAboveFp;
534 int stack_slot_delta = new_slot_above_sp - current_sp_offset;
535 if (stack_slot_delta > 0) {
536 tasm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
537 state->IncreaseSPDelta(stack_slot_delta);
538 }
else if (allow_shrinkage && stack_slot_delta < 0) {
539 tasm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
540 state->IncreaseSPDelta(stack_slot_delta);
546 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
547 int first_unused_stack_slot) {
548 AdjustStackPointerForTailCall(tasm(), frame_access_state(),
549 first_unused_stack_slot,
false);
552 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
553 int first_unused_stack_slot) {
554 AdjustStackPointerForTailCall(tasm(), frame_access_state(),
555 first_unused_stack_slot);
// Debug check: recomputes the current code object's start address and
// asserts that kJavaScriptCallCodeStartRegister holds the same value.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
            kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
// Tail-jumps to the CompileLazyDeoptimizedCode builtin when the currently
// executing code object has been marked for deoptimization, so deoptimized
// code is never re-entered.
void CodeGenerator::BailoutIfDeoptimized() {
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  // Load the CodeDataContainer of the currently executing Code object
  // (kJavaScriptCallCodeStartRegister points at the code start).
  __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  // NOTE(review): the instruction consuming the operand below (a load of the
  // kind-specific flags word) was elided from this extraction — verify
  // against the full file.
                FieldMemOperand(kScratchReg,
                                CodeDataContainer::kKindSpecificFlagsOffset));
  // Isolate the marked-for-deoptimization bit.
  __ And(kScratchReg, kScratchReg,
         Operand(1 << Code::kMarkedForDeoptimizationBit));
  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
  Handle<Code> code = isolate()->builtins()->builtin_handle(
      Builtins::kCompileLazyDeoptimizedCode);
  // Jump only when the bit is set (ne against zero_reg).
  __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
// Materializes kSpeculationPoisonRegister from a comparison of the expected
// code start (recomputed into kScratchReg) against
// kJavaScriptCallCodeStartRegister: the difference is sign-propagated (sra by
// kBitsPerSystemPointer - 1) and inverted (nor with itself), yielding a mask
// usable for poisoning. Presumably all-ones on match and all-zeros on
// mismatch — TODO confirm against the full listing, since one operand below
// is missing from this extraction.
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  __ ComputeCodeStartAddress(kScratchReg);
  __ Move(kSpeculationPoisonRegister, kScratchReg);
  __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
          kJavaScriptCallCodeStartRegister);
  // NOTE(review): the third operand of this subu was elided from this
  // extraction.
  __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
  __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  // Propagate the sign bit across the whole word: result is 0 or -1.
  __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kBitsPerSystemPointer - 1);
  // nor(x, x) == ~x: invert the mask.
  __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kSpeculationPoisonRegister);
// Masks the JS function register, the context register, and sp with
// kSpeculationPoisonRegister so these values are neutralized on a
// mis-speculated call path.
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ And(sp, sp, kSpeculationPoisonRegister);
614 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
615 Instruction* instr) {
616 MipsOperandConverter
i(
this, instr);
617 InstructionCode opcode = instr->opcode();
618 ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
619 switch (arch_opcode) {
620 case kArchCallCodeObject: {
621 if (instr->InputAt(0)->IsImmediate()) {
622 __ Call(
i.InputCode(0), RelocInfo::CODE_TARGET);
624 Register reg =
i.InputRegister(0);
626 HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
627 reg == kJavaScriptCallCodeStartRegister);
628 __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
631 RecordCallPosition(instr);
632 frame_access_state()->ClearSPDelta();
635 case kArchCallWasmFunction: {
636 if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
637 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
638 i.TempRegister(0),
i.TempRegister(1),
641 if (instr->InputAt(0)->IsImmediate()) {
642 Constant constant =
i.ToConstant(instr->InputAt(0));
643 Address wasm_code =
static_cast<Address
>(constant.ToInt64());
644 __ Call(wasm_code, constant.rmode());
646 __ daddiu(kScratchReg,
i.InputRegister(0), 0);
647 __ Call(kScratchReg);
649 RecordCallPosition(instr);
650 frame_access_state()->ClearSPDelta();
653 case kArchTailCallCodeObjectFromJSFunction:
654 case kArchTailCallCodeObject: {
655 if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
656 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
657 i.TempRegister(0),
i.TempRegister(1),
660 if (instr->InputAt(0)->IsImmediate()) {
661 __ Jump(
i.InputCode(0), RelocInfo::CODE_TARGET);
663 Register reg =
i.InputRegister(0);
665 HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
666 reg == kJavaScriptCallCodeStartRegister);
667 __ daddiu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
670 frame_access_state()->ClearSPDelta();
671 frame_access_state()->SetFrameAccessToDefault();
674 case kArchTailCallWasm: {
675 if (instr->InputAt(0)->IsImmediate()) {
676 Constant constant =
i.ToConstant(instr->InputAt(0));
677 Address wasm_code =
static_cast<Address
>(constant.ToInt64());
678 __ Jump(wasm_code, constant.rmode());
680 __ daddiu(kScratchReg,
i.InputRegister(0), 0);
681 __ Jump(kScratchReg);
683 frame_access_state()->ClearSPDelta();
684 frame_access_state()->SetFrameAccessToDefault();
687 case kArchTailCallAddress: {
688 CHECK(!instr->InputAt(0)->IsImmediate());
689 Register reg =
i.InputRegister(0);
691 HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
692 reg == kJavaScriptCallCodeStartRegister);
694 frame_access_state()->ClearSPDelta();
695 frame_access_state()->SetFrameAccessToDefault();
698 case kArchCallJSFunction: {
699 Register func =
i.InputRegister(0);
700 if (FLAG_debug_code) {
702 __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
703 __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
704 Operand(kScratchReg));
706 static_assert(kJavaScriptCallCodeStartRegister == a2,
"ABI mismatch");
707 __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
708 __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
710 RecordCallPosition(instr);
711 frame_access_state()->ClearSPDelta();
714 case kArchPrepareCallCFunction: {
715 int const num_parameters = MiscField::decode(instr->opcode());
716 __ PrepareCallCFunction(num_parameters, kScratchReg);
718 frame_access_state()->SetFrameAccessToFP();
721 case kArchSaveCallerRegisters: {
723 static_cast<SaveFPRegsMode
>(MiscField::decode(instr->opcode()));
724 DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
726 int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
727 DCHECK_EQ(0, bytes % kPointerSize);
728 DCHECK_EQ(0, frame_access_state()->sp_delta());
729 frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
730 DCHECK(!caller_registers_saved_);
731 caller_registers_saved_ =
true;
734 case kArchRestoreCallerRegisters: {
736 static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
737 DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
739 int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
740 frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
741 DCHECK_EQ(0, frame_access_state()->sp_delta());
742 DCHECK(caller_registers_saved_);
743 caller_registers_saved_ =
false;
746 case kArchPrepareTailCall:
747 AssemblePrepareTailCall();
749 case kArchCallCFunction: {
750 int const num_parameters = MiscField::decode(instr->opcode());
751 if (instr->InputAt(0)->IsImmediate()) {
752 ExternalReference ref =
i.InputExternalReference(0);
753 __ CallCFunction(ref, num_parameters);
755 Register func =
i.InputRegister(0);
756 __ CallCFunction(func, num_parameters);
758 frame_access_state()->SetFrameAccessToDefault();
764 frame_access_state()->ClearSPDelta();
765 if (caller_registers_saved_) {
772 __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
773 frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
778 AssembleArchJump(
i.InputRpo(0));
780 case kArchBinarySearchSwitch:
781 AssembleArchBinarySearchSwitch(instr);
783 case kArchLookupSwitch:
784 AssembleArchLookupSwitch(instr);
786 case kArchTableSwitch:
787 AssembleArchTableSwitch(instr);
789 case kArchDebugAbort:
790 DCHECK(
i.InputRegister(0) == a0);
791 if (!frame_access_state()->has_frame()) {
794 FrameScope scope(tasm(), StackFrame::NONE);
795 __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
796 RelocInfo::CODE_TARGET);
798 __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
799 RelocInfo::CODE_TARGET);
801 __ stop(
"kArchDebugAbort");
803 case kArchDebugBreak:
804 __ stop(
"kArchDebugBreak");
807 __ RecordComment(reinterpret_cast<const char*>(
i.InputInt64(0)));
810 case kArchThrowTerminator:
813 case kArchDeoptimize: {
815 BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
816 CodeGenResult result =
817 AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
818 if (result != kSuccess)
return result;
822 AssembleReturn(instr->InputAt(0));
824 case kArchStackPointer:
825 __ mov(
i.OutputRegister(), sp);
827 case kArchFramePointer:
828 __ mov(
i.OutputRegister(), fp);
830 case kArchParentFramePointer:
831 if (frame_access_state()->has_frame()) {
832 __ Ld(
i.OutputRegister(), MemOperand(fp, 0));
834 __ mov(
i.OutputRegister(), fp);
837 case kArchTruncateDoubleToI:
838 __ TruncateDoubleToI(isolate(), zone(),
i.OutputRegister(),
839 i.InputDoubleRegister(0), DetermineStubCallMode());
841 case kArchStoreWithWriteBarrier: {
842 RecordWriteMode mode =
843 static_cast<RecordWriteMode
>(MiscField::decode(instr->opcode()));
844 Register
object =
i.InputRegister(0);
845 Register index =
i.InputRegister(1);
846 Register value =
i.InputRegister(2);
847 Register scratch0 =
i.TempRegister(0);
848 Register scratch1 =
i.TempRegister(1);
849 auto ool =
new (zone())
850 OutOfLineRecordWrite(
this,
object, index, value, scratch0, scratch1,
851 mode, DetermineStubCallMode());
852 __ Daddu(kScratchReg,
object, index);
853 __ Sd(value, MemOperand(kScratchReg));
854 __ CheckPageFlag(
object, scratch0,
855 MemoryChunk::kPointersFromHereAreInterestingMask, ne,
857 __ bind(ool->exit());
860 case kArchStackSlot: {
862 frame_access_state()->GetFrameOffset(
i.InputInt32(0));
863 Register base_reg = offset.from_stack_pointer() ? sp : fp;
864 __ Daddu(
i.OutputRegister(), base_reg, Operand(offset.offset()));
865 int alignment =
i.InputInt32(1);
866 DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
868 if (FLAG_debug_code && alignment > 0) {
870 __ And(kScratchReg,
i.OutputRegister(), Operand(kPointerSize - 1));
871 __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
874 if (alignment == 2 * kPointerSize) {
876 __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
877 __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
878 __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
879 __ Daddu(
i.OutputRegister(),
i.OutputRegister(), kPointerSize);
881 }
else if (alignment > 2 * kPointerSize) {
883 __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
884 __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
885 __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
886 __ li(kScratchReg2, alignment);
887 __ Dsubu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
888 __ Daddu(
i.OutputRegister(),
i.OutputRegister(), kScratchReg2);
894 case kArchWordPoisonOnSpeculation:
895 __ And(
i.OutputRegister(),
i.InputRegister(0),
896 kSpeculationPoisonRegister);
898 case kIeee754Float64Acos:
899 ASSEMBLE_IEEE754_UNOP(acos);
901 case kIeee754Float64Acosh:
902 ASSEMBLE_IEEE754_UNOP(acosh);
904 case kIeee754Float64Asin:
905 ASSEMBLE_IEEE754_UNOP(asin);
907 case kIeee754Float64Asinh:
908 ASSEMBLE_IEEE754_UNOP(asinh);
910 case kIeee754Float64Atan:
911 ASSEMBLE_IEEE754_UNOP(atan);
913 case kIeee754Float64Atanh:
914 ASSEMBLE_IEEE754_UNOP(atanh);
916 case kIeee754Float64Atan2:
917 ASSEMBLE_IEEE754_BINOP(atan2);
919 case kIeee754Float64Cos:
920 ASSEMBLE_IEEE754_UNOP(cos);
922 case kIeee754Float64Cosh:
923 ASSEMBLE_IEEE754_UNOP(cosh);
925 case kIeee754Float64Cbrt:
926 ASSEMBLE_IEEE754_UNOP(cbrt);
928 case kIeee754Float64Exp:
929 ASSEMBLE_IEEE754_UNOP(exp);
931 case kIeee754Float64Expm1:
932 ASSEMBLE_IEEE754_UNOP(expm1);
934 case kIeee754Float64Log:
935 ASSEMBLE_IEEE754_UNOP(log);
937 case kIeee754Float64Log1p:
938 ASSEMBLE_IEEE754_UNOP(log1p);
940 case kIeee754Float64Log2:
941 ASSEMBLE_IEEE754_UNOP(log2);
943 case kIeee754Float64Log10:
944 ASSEMBLE_IEEE754_UNOP(log10);
946 case kIeee754Float64Pow: {
947 __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
950 case kIeee754Float64Sin:
951 ASSEMBLE_IEEE754_UNOP(sin);
953 case kIeee754Float64Sinh:
954 ASSEMBLE_IEEE754_UNOP(sinh);
956 case kIeee754Float64Tan:
957 ASSEMBLE_IEEE754_UNOP(tan);
959 case kIeee754Float64Tanh:
960 ASSEMBLE_IEEE754_UNOP(tanh);
963 __ Addu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
966 __ Daddu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
969 __ DaddOverflow(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1),
973 __ Subu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
976 __ Dsubu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
979 __ DsubOverflow(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1),
983 __ Mul(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
986 __ MulOverflow(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1),
990 __ Mulh(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
992 case kMips64MulHighU:
993 __ Mulhu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
995 case kMips64DMulHigh:
996 __ Dmulh(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
999 __ Div(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1000 if (kArchVariant == kMips64r6) {
1001 __ selnez(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1003 __ Movz(
i.OutputRegister(),
i.InputRegister(1),
i.InputRegister(1));
1007 __ Divu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1008 if (kArchVariant == kMips64r6) {
1009 __ selnez(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1011 __ Movz(
i.OutputRegister(),
i.InputRegister(1),
i.InputRegister(1));
1015 __ Mod(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1018 __ Modu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1021 __ Dmul(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1024 __ Ddiv(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1025 if (kArchVariant == kMips64r6) {
1026 __ selnez(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1028 __ Movz(
i.OutputRegister(),
i.InputRegister(1),
i.InputRegister(1));
1032 __ Ddivu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1033 if (kArchVariant == kMips64r6) {
1034 __ selnez(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1036 __ Movz(
i.OutputRegister(),
i.InputRegister(1),
i.InputRegister(1));
1040 __ Dmod(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1043 __ Dmodu(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1046 DCHECK(instr->InputAt(2)->IsImmediate());
1047 __ Dlsa(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1),
1051 DCHECK(instr->InputAt(2)->IsImmediate());
1052 __ Lsa(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1),
1056 __ And(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1059 if (instr->InputAt(1)->IsRegister()) {
1060 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1061 __ sll(
i.InputRegister(1),
i.InputRegister(1), 0x0);
1062 __ And(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1064 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1065 __ And(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1069 __ Or(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1072 if (instr->InputAt(1)->IsRegister()) {
1073 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1074 __ sll(
i.InputRegister(1),
i.InputRegister(1), 0x0);
1075 __ Or(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1077 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1078 __ Or(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1082 if (instr->InputAt(1)->IsRegister()) {
1083 __ Nor(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1085 DCHECK_EQ(0,
i.InputOperand(1).immediate());
1086 __ Nor(
i.OutputRegister(),
i.InputRegister(0), zero_reg);
1090 if (instr->InputAt(1)->IsRegister()) {
1091 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1092 __ sll(
i.InputRegister(1),
i.InputRegister(1), 0x0);
1093 __ Nor(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1095 DCHECK_EQ(0,
i.InputOperand(1).immediate());
1096 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1097 __ Nor(
i.OutputRegister(),
i.InputRegister(0), zero_reg);
1101 __ Xor(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1104 if (instr->InputAt(1)->IsRegister()) {
1105 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1106 __ sll(
i.InputRegister(1),
i.InputRegister(1), 0x0);
1107 __ Xor(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1109 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1110 __ Xor(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1114 __ Clz(
i.OutputRegister(),
i.InputRegister(0));
1117 __ dclz(
i.OutputRegister(),
i.InputRegister(0));
1120 Register src =
i.InputRegister(0);
1121 Register dst =
i.OutputRegister();
1125 Register src =
i.InputRegister(0);
1126 Register dst =
i.OutputRegister();
1129 case kMips64Popcnt: {
1130 Register src =
i.InputRegister(0);
1131 Register dst =
i.OutputRegister();
1132 __ Popcnt(dst, src);
1134 case kMips64Dpopcnt: {
1135 Register src =
i.InputRegister(0);
1136 Register dst =
i.OutputRegister();
1137 __ Dpopcnt(dst, src);
1140 if (instr->InputAt(1)->IsRegister()) {
1141 __ sllv(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1143 int64_t imm =
i.InputOperand(1).immediate();
1144 __ sll(
i.OutputRegister(),
i.InputRegister(0),
1145 static_cast<uint16_t
>(imm));
1149 if (instr->InputAt(1)->IsRegister()) {
1150 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1151 __ srlv(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1153 int64_t imm =
i.InputOperand(1).immediate();
1154 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1155 __ srl(
i.OutputRegister(),
i.InputRegister(0),
1156 static_cast<uint16_t
>(imm));
1160 if (instr->InputAt(1)->IsRegister()) {
1161 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1162 __ srav(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1164 int64_t imm =
i.InputOperand(1).immediate();
1165 __ sll(
i.InputRegister(0),
i.InputRegister(0), 0x0);
1166 __ sra(
i.OutputRegister(),
i.InputRegister(0),
1167 static_cast<uint16_t
>(imm));
1171 __ Ext(
i.OutputRegister(),
i.InputRegister(0),
i.InputInt8(1),
1175 if (instr->InputAt(1)->IsImmediate() &&
i.InputInt8(1) == 0) {
1176 __ Ins(
i.OutputRegister(), zero_reg,
i.InputInt8(1),
i.InputInt8(2));
1178 __ Ins(
i.OutputRegister(),
i.InputRegister(0),
i.InputInt8(1),
1183 __ Dext(
i.OutputRegister(),
i.InputRegister(0),
i.InputInt8(1),
1188 if (instr->InputAt(1)->IsImmediate() &&
i.InputInt8(1) == 0) {
1189 __ Dins(
i.OutputRegister(), zero_reg,
i.InputInt8(1),
i.InputInt8(2));
1191 __ Dins(
i.OutputRegister(),
i.InputRegister(0),
i.InputInt8(1),
1196 if (instr->InputAt(1)->IsRegister()) {
1197 __ dsllv(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1199 int64_t imm =
i.InputOperand(1).immediate();
1201 __ dsll(
i.OutputRegister(),
i.InputRegister(0),
1202 static_cast<uint16_t
>(imm));
1204 __ dsll32(
i.OutputRegister(),
i.InputRegister(0),
1205 static_cast<uint16_t
>(imm - 32));
1210 if (instr->InputAt(1)->IsRegister()) {
1211 __ dsrlv(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1213 int64_t imm =
i.InputOperand(1).immediate();
1215 __ dsrl(
i.OutputRegister(),
i.InputRegister(0),
1216 static_cast<uint16_t
>(imm));
1218 __ dsrl32(
i.OutputRegister(),
i.InputRegister(0),
1219 static_cast<uint16_t
>(imm - 32));
1224 if (instr->InputAt(1)->IsRegister()) {
1225 __ dsrav(
i.OutputRegister(),
i.InputRegister(0),
i.InputRegister(1));
1227 int64_t imm =
i.InputOperand(1).immediate();
1229 __ dsra(
i.OutputRegister(),
i.InputRegister(0), imm);
1231 __ dsra32(
i.OutputRegister(),
i.InputRegister(0), imm - 32);
1236 __ Ror(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1239 __ Dror(
i.OutputRegister(),
i.InputRegister(0),
i.InputOperand(1));
1242 __ And(kScratchReg,
i.InputRegister(0),
i.InputOperand(1));
1251 if (HasRegisterInput(instr, 0)) {
1252 __ mov(
i.OutputRegister(),
i.InputRegister(0));
1254 __ li(
i.OutputRegister(),
i.InputOperand(0));
1259 FPURegister left =
i.InputOrZeroSingleRegister(0);
1260 FPURegister right =
i.InputOrZeroSingleRegister(1);
1263 FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
1265 if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1266 !__ IsDoubleZeroRegSet()) {
1267 __ Move(kDoubleRegZero, 0.0);
1270 __ CompareF32(cc, left, right);
1274 __ add_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1275 i.InputDoubleRegister(1));
1278 __ sub_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1279 i.InputDoubleRegister(1));
1283 __ mul_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1284 i.InputDoubleRegister(1));
1287 __ div_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1288 i.InputDoubleRegister(1));
1293 FrameScope scope(tasm(), StackFrame::MANUAL);
1294 __ PrepareCallCFunction(0, 2, kScratchReg);
1295 __ MovToFloatParameters(
i.InputDoubleRegister(0),
1296 i.InputDoubleRegister(1));
1298 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1300 __ MovFromFloatResult(
i.OutputSingleRegister());
1304 __ abs_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1307 __ Neg_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1309 case kMips64SqrtS: {
1310 __ sqrt_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1314 __ max_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1315 i.InputDoubleRegister(1));
1318 __ min_s(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1319 i.InputDoubleRegister(1));
1322 FPURegister left =
i.InputOrZeroDoubleRegister(0);
1323 FPURegister right =
i.InputOrZeroDoubleRegister(1);
1326 FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
1327 if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
1328 !__ IsDoubleZeroRegSet()) {
1329 __ Move(kDoubleRegZero, 0.0);
1331 __ CompareF64(cc, left, right);
1335 __ add_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1336 i.InputDoubleRegister(1));
1339 __ sub_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1340 i.InputDoubleRegister(1));
1344 __ mul_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1345 i.InputDoubleRegister(1));
1348 __ div_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1349 i.InputDoubleRegister(1));
1354 FrameScope scope(tasm(), StackFrame::MANUAL);
1355 __ PrepareCallCFunction(0, 2, kScratchReg);
1356 __ MovToFloatParameters(
i.InputDoubleRegister(0),
1357 i.InputDoubleRegister(1));
1358 __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
1360 __ MovFromFloatResult(
i.OutputDoubleRegister());
1364 __ abs_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1367 __ Neg_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1369 case kMips64SqrtD: {
1370 __ sqrt_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1374 __ max_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1375 i.InputDoubleRegister(1));
1378 __ min_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0),
1379 i.InputDoubleRegister(1));
1381 case kMips64Float64RoundDown: {
1382 __ Floor_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1385 case kMips64Float32RoundDown: {
1386 __ Floor_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1389 case kMips64Float64RoundTruncate: {
1390 __ Trunc_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1393 case kMips64Float32RoundTruncate: {
1394 __ Trunc_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1397 case kMips64Float64RoundUp: {
1398 __ Ceil_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1401 case kMips64Float32RoundUp: {
1402 __ Ceil_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1405 case kMips64Float64RoundTiesEven: {
1406 __ Round_d_d(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1409 case kMips64Float32RoundTiesEven: {
1410 __ Round_s_s(
i.OutputSingleRegister(),
i.InputSingleRegister(0));
1413 case kMips64Float32Max: {
1414 FPURegister dst =
i.OutputSingleRegister();
1415 FPURegister src1 =
i.InputSingleRegister(0);
1416 FPURegister src2 =
i.InputSingleRegister(1);
1417 auto ool =
new (zone()) OutOfLineFloat32Max(
this, dst, src1, src2);
1418 __ Float32Max(dst, src1, src2, ool->entry());
1419 __ bind(ool->exit());
1422 case kMips64Float64Max: {
1423 FPURegister dst =
i.OutputDoubleRegister();
1424 FPURegister src1 =
i.InputDoubleRegister(0);
1425 FPURegister src2 =
i.InputDoubleRegister(1);
1426 auto ool =
new (zone()) OutOfLineFloat64Max(
this, dst, src1, src2);
1427 __ Float64Max(dst, src1, src2, ool->entry());
1428 __ bind(ool->exit());
1431 case kMips64Float32Min: {
1432 FPURegister dst =
i.OutputSingleRegister();
1433 FPURegister src1 =
i.InputSingleRegister(0);
1434 FPURegister src2 =
i.InputSingleRegister(1);
1435 auto ool =
new (zone()) OutOfLineFloat32Min(
this, dst, src1, src2);
1436 __ Float32Min(dst, src1, src2, ool->entry());
1437 __ bind(ool->exit());
1440 case kMips64Float64Min: {
1441 FPURegister dst =
i.OutputDoubleRegister();
1442 FPURegister src1 =
i.InputDoubleRegister(0);
1443 FPURegister src2 =
i.InputDoubleRegister(1);
1444 auto ool =
new (zone()) OutOfLineFloat64Min(
this, dst, src1, src2);
1445 __ Float64Min(dst, src1, src2, ool->entry());
1446 __ bind(ool->exit());
1449 case kMips64Float64SilenceNaN:
1450 __ FPUCanonicalizeNaN(
i.OutputDoubleRegister(),
i.InputDoubleRegister(0));
1453 __ cvt_s_d(
i.OutputSingleRegister(),
i.InputDoubleRegister(0));
1456 __ cvt_d_s(
i.OutputDoubleRegister(),
i.InputSingleRegister(0));
1458 case kMips64CvtDW: {
1459 FPURegister scratch = kScratchDoubleReg;
1460 __ mtc1(
i.InputRegister(0), scratch);
1461 __ cvt_d_w(
i.OutputDoubleRegister(), scratch);
1464 case kMips64CvtSW: {
1465 FPURegister scratch = kScratchDoubleReg;
1466 __ mtc1(
i.InputRegister(0), scratch);
1467 __ cvt_s_w(
i.OutputDoubleRegister(), scratch);
1470 case kMips64CvtSUw: {
1471 __ Cvt_s_uw(
i.OutputDoubleRegister(),
i.InputRegister(0));
1474 case kMips64CvtSL: {
1475 FPURegister scratch = kScratchDoubleReg;
1476 __ dmtc1(
i.InputRegister(0), scratch);
1477 __ cvt_s_l(
i.OutputDoubleRegister(), scratch);
1480 case kMips64CvtDL: {
1481 FPURegister scratch = kScratchDoubleReg;
1482 __ dmtc1(
i.InputRegister(0), scratch);
1483 __ cvt_d_l(
i.OutputDoubleRegister(), scratch);
1486 case kMips64CvtDUw: {
1487 __ Cvt_d_uw(
i.OutputDoubleRegister(),
i.InputRegister(0));
1490 case kMips64CvtDUl: {
1491 __ Cvt_d_ul(
i.OutputDoubleRegister(),
i.InputRegister(0));
1494 case kMips64CvtSUl: {
1495 __ Cvt_s_ul(
i.OutputDoubleRegister(),
i.InputRegister(0));
1498 case kMips64FloorWD: {
1499 FPURegister scratch = kScratchDoubleReg;
1500 __ floor_w_d(scratch,
i.InputDoubleRegister(0));
1501 __ mfc1(
i.OutputRegister(), scratch);
1504 case kMips64CeilWD: {
1505 FPURegister scratch = kScratchDoubleReg;
1506 __ ceil_w_d(scratch,
i.InputDoubleRegister(0));
1507 __ mfc1(
i.OutputRegister(), scratch);
1510 case kMips64RoundWD: {
1511 FPURegister scratch = kScratchDoubleReg;
1512 __ round_w_d(scratch,
i.InputDoubleRegister(0));
1513 __ mfc1(
i.OutputRegister(), scratch);
1516 case kMips64TruncWD: {
1517 FPURegister scratch = kScratchDoubleReg;
1519 __ trunc_w_d(scratch,
i.InputDoubleRegister(0));
1520 __ mfc1(
i.OutputRegister(), scratch);
1523 case kMips64FloorWS: {
1524 FPURegister scratch = kScratchDoubleReg;
1525 __ floor_w_s(scratch,
i.InputDoubleRegister(0));
1526 __ mfc1(
i.OutputRegister(), scratch);
1529 case kMips64CeilWS: {
1530 FPURegister scratch = kScratchDoubleReg;
1531 __ ceil_w_s(scratch,
i.InputDoubleRegister(0));
1532 __ mfc1(
i.OutputRegister(), scratch);
1535 case kMips64RoundWS: {
1536 FPURegister scratch = kScratchDoubleReg;
1537 __ round_w_s(scratch,
i.InputDoubleRegister(0));
1538 __ mfc1(
i.OutputRegister(), scratch);
1541 case kMips64TruncWS: {
1542 FPURegister scratch = kScratchDoubleReg;
1543 __ trunc_w_s(scratch,
i.InputDoubleRegister(0));
1544 __ mfc1(
i.OutputRegister(), scratch);
1547 __ addiu(kScratchReg,
i.OutputRegister(), 1);
1548 __ slt(kScratchReg2, kScratchReg,
i.OutputRegister());
1549 __ Movn(
i.OutputRegister(), kScratchReg, kScratchReg2);
1552 case kMips64TruncLS: {
1553 FPURegister scratch = kScratchDoubleReg;
1554 Register tmp_fcsr = kScratchReg;
1555 Register result = kScratchReg2;
1557 bool load_status = instr->OutputCount() > 1;
1560 __ cfc1(tmp_fcsr, FCSR);
1562 __ ctc1(zero_reg, FCSR);
1565 __ trunc_l_s(scratch,
i.InputDoubleRegister(0));
1566 __ dmfc1(
i.OutputRegister(), scratch);
1568 __ cfc1(result, FCSR);
1570 __ andi(result, result,
1571 (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
1572 __ Slt(result, zero_reg, result);
1573 __ xori(result, result, 1);
1574 __ mov(
i.OutputRegister(1), result);
1576 __ ctc1(tmp_fcsr, FCSR);
1580 case kMips64TruncLD: {
1581 FPURegister scratch = kScratchDoubleReg;
1582 Register tmp_fcsr = kScratchReg;
1583 Register result = kScratchReg2;
1585 bool load_status = instr->OutputCount() > 1;
1588 __ cfc1(tmp_fcsr, FCSR);
1590 __ ctc1(zero_reg, FCSR);
1593 __ trunc_l_d(scratch,
i.InputDoubleRegister(0));
1594 __ dmfc1(
i.OutputRegister(0), scratch);
1596 __ cfc1(result, FCSR);
1598 __ andi(result, result,
1599 (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
1600 __ Slt(result, zero_reg, result);
1601 __ xori(result, result, 1);
1602 __ mov(
i.OutputRegister(1), result);
1604 __ ctc1(tmp_fcsr, FCSR);
1608 case kMips64TruncUwD: {
1609 FPURegister scratch = kScratchDoubleReg;
1610 __ Trunc_uw_d(
i.OutputRegister(),
i.InputDoubleRegister(0), scratch);
1613 case kMips64TruncUwS: {
1614 FPURegister scratch = kScratchDoubleReg;
1615 __ Trunc_uw_s(
i.OutputRegister(),
i.InputDoubleRegister(0), scratch);
1618 __ addiu(kScratchReg,
i.OutputRegister(), 1);
1619 __ Movz(
i.OutputRegister(), zero_reg, kScratchReg);
1622 case kMips64TruncUlS: {
1623 FPURegister scratch = kScratchDoubleReg;
1624 Register result = instr->OutputCount() > 1 ?
i.OutputRegister(1) : no_reg;
1625 __ Trunc_ul_s(
i.OutputRegister(),
i.InputDoubleRegister(0), scratch,
1629 case kMips64TruncUlD: {
1630 FPURegister scratch = kScratchDoubleReg;
1631 Register result = instr->OutputCount() > 1 ?
i.OutputRegister(1) : no_reg;
1632 __ Trunc_ul_d(
i.OutputRegister(0),
i.InputDoubleRegister(0), scratch,
1636 case kMips64BitcastDL:
1637 __ dmfc1(
i.OutputRegister(),
i.InputDoubleRegister(0));
1639 case kMips64BitcastLD:
1640 __ dmtc1(
i.InputRegister(0),
i.OutputDoubleRegister());
1642 case kMips64Float64ExtractLowWord32:
1643 __ FmoveLow(
i.OutputRegister(),
i.InputDoubleRegister(0));
1645 case kMips64Float64ExtractHighWord32:
1646 __ FmoveHigh(
i.OutputRegister(),
i.InputDoubleRegister(0));
1648 case kMips64Float64InsertLowWord32:
1649 __ FmoveLow(
i.OutputDoubleRegister(),
i.InputRegister(1));
1651 case kMips64Float64InsertHighWord32:
1652 __ FmoveHigh(
i.OutputDoubleRegister(),
i.InputRegister(1));
1657 __ seb(
i.OutputRegister(),
i.InputRegister(0));
1660 __ seh(
i.OutputRegister(),
i.InputRegister(0));
1663 __ Lbu(
i.OutputRegister(),
i.MemoryOperand());
1664 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1667 __ Lb(
i.OutputRegister(),
i.MemoryOperand());
1668 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1671 __ Sb(
i.InputOrZeroRegister(2),
i.MemoryOperand());
1674 __ Lhu(
i.OutputRegister(),
i.MemoryOperand());
1675 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1678 __ Ulhu(
i.OutputRegister(),
i.MemoryOperand());
1679 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1682 __ Lh(
i.OutputRegister(),
i.MemoryOperand());
1683 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1686 __ Ulh(
i.OutputRegister(),
i.MemoryOperand());
1687 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1690 __ Sh(
i.InputOrZeroRegister(2),
i.MemoryOperand());
1693 __ Ush(
i.InputOrZeroRegister(2),
i.MemoryOperand(), kScratchReg);
1696 __ Lw(
i.OutputRegister(),
i.MemoryOperand());
1697 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1700 __ Ulw(
i.OutputRegister(),
i.MemoryOperand());
1701 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1704 __ Lwu(
i.OutputRegister(),
i.MemoryOperand());
1705 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1708 __ Ulwu(
i.OutputRegister(),
i.MemoryOperand());
1709 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1712 __ Ld(
i.OutputRegister(),
i.MemoryOperand());
1713 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1716 __ Uld(
i.OutputRegister(),
i.MemoryOperand());
1717 EmitWordLoadPoisoningIfNeeded(
this, opcode, instr,
i);
1720 __ Sw(
i.InputOrZeroRegister(2),
i.MemoryOperand());
1723 __ Usw(
i.InputOrZeroRegister(2),
i.MemoryOperand());
1726 __ Sd(
i.InputOrZeroRegister(2),
i.MemoryOperand());
1729 __ Usd(
i.InputOrZeroRegister(2),
i.MemoryOperand());
1732 __ Lwc1(
i.OutputSingleRegister(),
i.MemoryOperand());
1735 case kMips64Ulwc1: {
1736 __ Ulwc1(
i.OutputSingleRegister(),
i.MemoryOperand(), kScratchReg);
1741 MemOperand operand =
i.MemoryOperand(&index);
1742 FPURegister ft =
i.InputOrZeroSingleRegister(index);
1743 if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1744 __ Move(kDoubleRegZero, 0.0);
1746 __ Swc1(ft, operand);
1749 case kMips64Uswc1: {
1751 MemOperand operand =
i.MemoryOperand(&index);
1752 FPURegister ft =
i.InputOrZeroSingleRegister(index);
1753 if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1754 __ Move(kDoubleRegZero, 0.0);
1756 __ Uswc1(ft, operand, kScratchReg);
1760 __ Ldc1(
i.OutputDoubleRegister(),
i.MemoryOperand());
1763 __ Uldc1(
i.OutputDoubleRegister(),
i.MemoryOperand(), kScratchReg);
1766 FPURegister ft =
i.InputOrZeroDoubleRegister(2);
1767 if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1768 __ Move(kDoubleRegZero, 0.0);
1770 __ Sdc1(ft,
i.MemoryOperand());
1773 case kMips64Usdc1: {
1774 FPURegister ft =
i.InputOrZeroDoubleRegister(2);
1775 if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
1776 __ Move(kDoubleRegZero, 0.0);
1778 __ Usdc1(ft,
i.MemoryOperand(), kScratchReg);
1782 if (instr->InputAt(0)->IsFPRegister()) {
1783 __ Sdc1(
i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
1784 __ Subu(sp, sp, Operand(kDoubleSize));
1785 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1787 __ Push(
i.InputRegister(0));
1788 frame_access_state()->IncreaseSPDelta(1);
1793 int reverse_slot =
i.InputInt32(0) + 1;
1795 FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
1796 if (instr->OutputAt(0)->IsFPRegister()) {
1797 LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
1798 if (op->representation() == MachineRepresentation::kFloat64) {
1799 __ Ldc1(
i.OutputDoubleRegister(), MemOperand(fp, offset));
1801 DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
1803 i.OutputSingleRegister(0),
1804 MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
1807 __ Ld(
i.OutputRegister(0), MemOperand(fp, offset));
1811 case kMips64StackClaim: {
1812 __ Dsubu(sp, sp, Operand(
i.InputInt32(0)));
1813 frame_access_state()->IncreaseSPDelta(
i.InputInt32(0) / kPointerSize);
1816 case kMips64StoreToStackSlot: {
1817 if (instr->InputAt(0)->IsFPRegister()) {
1818 if (instr->InputAt(0)->IsSimd128Register()) {
1819 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1820 __ st_b(
i.InputSimd128Register(0), MemOperand(sp,
i.InputInt32(1)));
1822 __ Sdc1(
i.InputDoubleRegister(0), MemOperand(sp,
i.InputInt32(1)));
1825 __ Sd(
i.InputRegister(0), MemOperand(sp,
i.InputInt32(1)));
1829 case kMips64ByteSwap64: {
1830 __ ByteSwapSigned(
i.OutputRegister(0),
i.InputRegister(0), 8);
1833 case kMips64ByteSwap32: {
1834 __ ByteSwapSigned(
i.OutputRegister(0),
i.InputRegister(0), 4);
1837 case kWord32AtomicLoadInt8:
1838 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
1840 case kWord32AtomicLoadUint8:
1841 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
1843 case kWord32AtomicLoadInt16:
1844 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
1846 case kWord32AtomicLoadUint16:
1847 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
1849 case kWord32AtomicLoadWord32:
1850 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
1852 case kMips64Word64AtomicLoadUint8:
1853 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
1855 case kMips64Word64AtomicLoadUint16:
1856 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
1858 case kMips64Word64AtomicLoadUint32:
1859 ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
1861 case kMips64Word64AtomicLoadUint64:
1862 ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
1864 case kWord32AtomicStoreWord8:
1865 ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
1867 case kWord32AtomicStoreWord16:
1868 ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
1870 case kWord32AtomicStoreWord32:
1871 ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
1873 case kMips64Word64AtomicStoreWord8:
1874 ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
1876 case kMips64Word64AtomicStoreWord16:
1877 ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
1879 case kMips64Word64AtomicStoreWord32:
1880 ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
1882 case kMips64Word64AtomicStoreWord64:
1883 ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
1885 case kWord32AtomicExchangeInt8:
1886 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc,
true, 8, 32);
1888 case kWord32AtomicExchangeUint8:
1889 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc,
false, 8, 32);
1891 case kWord32AtomicExchangeInt16:
1892 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc,
true, 16, 32);
1894 case kWord32AtomicExchangeUint16:
1895 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc,
false, 16, 32);
1897 case kWord32AtomicExchangeWord32:
1898 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
1900 case kMips64Word64AtomicExchangeUint8:
1901 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd,
false, 8, 64);
1903 case kMips64Word64AtomicExchangeUint16:
1904 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd,
false, 16, 64);
1906 case kMips64Word64AtomicExchangeUint32:
1907 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd,
false, 32, 64);
1909 case kMips64Word64AtomicExchangeUint64:
1910 ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
1912 case kWord32AtomicCompareExchangeInt8:
1913 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc,
true, 8, 32);
1915 case kWord32AtomicCompareExchangeUint8:
1916 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc,
false, 8, 32);
1918 case kWord32AtomicCompareExchangeInt16:
1919 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc,
true, 16, 32);
1921 case kWord32AtomicCompareExchangeUint16:
1922 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc,
false, 16, 32);
1924 case kWord32AtomicCompareExchangeWord32:
1925 __ sll(
i.InputRegister(2),
i.InputRegister(2), 0);
1926 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
1928 case kMips64Word64AtomicCompareExchangeUint8:
1929 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd,
false, 8, 64);
1931 case kMips64Word64AtomicCompareExchangeUint16:
1932 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd,
false, 16, 64);
1934 case kMips64Word64AtomicCompareExchangeUint32:
1935 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd,
false, 32, 64);
1937 case kMips64Word64AtomicCompareExchangeUint64:
1938 ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
1940 #define ATOMIC_BINOP_CASE(op, inst) \ 1941 case kWord32Atomic##op##Int8: \ 1942 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \ 1944 case kWord32Atomic##op##Uint8: \ 1945 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \ 1947 case kWord32Atomic##op##Int16: \ 1948 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \ 1950 case kWord32Atomic##op##Uint16: \ 1951 ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \ 1953 case kWord32Atomic##op##Word32: \ 1954 ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \ 1956 ATOMIC_BINOP_CASE(Add, Addu)
1957 ATOMIC_BINOP_CASE(Sub, Subu)
1958 ATOMIC_BINOP_CASE(And, And)
1959 ATOMIC_BINOP_CASE(Or, Or)
1960 ATOMIC_BINOP_CASE(Xor, Xor)
1961 #undef ATOMIC_BINOP_CASE 1962 #define ATOMIC_BINOP_CASE(op, inst) \ 1963 case kMips64Word64Atomic##op##Uint8: \ 1964 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \ 1966 case kMips64Word64Atomic##op##Uint16: \ 1967 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \ 1969 case kMips64Word64Atomic##op##Uint32: \ 1970 ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \ 1972 case kMips64Word64Atomic##op##Uint64: \ 1973 ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \ 1975 ATOMIC_BINOP_CASE(Add, Daddu)
1976 ATOMIC_BINOP_CASE(Sub, Dsubu)
1977 ATOMIC_BINOP_CASE(And, And)
1978 ATOMIC_BINOP_CASE(Or, Or)
1979 ATOMIC_BINOP_CASE(Xor, Xor)
1980 #undef ATOMIC_BINOP_CASE 1981 case kMips64AssertEqual:
1982 __ Assert(eq, static_cast<AbortReason>(
i.InputOperand(2).immediate()),
1983 i.InputRegister(0), Operand(
i.InputRegister(1)));
1985 case kMips64S128Zero: {
1986 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1987 __ xor_v(
i.OutputSimd128Register(),
i.OutputSimd128Register(),
1988 i.OutputSimd128Register());
1991 case kMips64I32x4Splat: {
1992 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1993 __ fill_w(
i.OutputSimd128Register(),
i.InputRegister(0));
1996 case kMips64I32x4ExtractLane: {
1997 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
1998 __ copy_s_w(
i.OutputRegister(),
i.InputSimd128Register(0),
2002 case kMips64I32x4ReplaceLane: {
2003 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2004 Simd128Register src =
i.InputSimd128Register(0);
2005 Simd128Register dst =
i.OutputSimd128Register();
2007 __ move_v(dst, src);
2009 __ insert_w(dst,
i.InputInt8(1),
i.InputRegister(2));
2012 case kMips64I32x4Add: {
2013 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2014 __ addv_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2015 i.InputSimd128Register(1));
2018 case kMips64I32x4Sub: {
2019 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2020 __ subv_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2021 i.InputSimd128Register(1));
2024 case kMips64F32x4Splat: {
2025 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2026 __ FmoveLow(kScratchReg,
i.InputSingleRegister(0));
2027 __ fill_w(
i.OutputSimd128Register(), kScratchReg);
2030 case kMips64F32x4ExtractLane: {
2031 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2032 __ copy_u_w(kScratchReg,
i.InputSimd128Register(0),
i.InputInt8(1));
2033 __ FmoveLow(
i.OutputSingleRegister(), kScratchReg);
2036 case kMips64F32x4ReplaceLane: {
2037 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2038 Simd128Register src =
i.InputSimd128Register(0);
2039 Simd128Register dst =
i.OutputSimd128Register();
2041 __ move_v(dst, src);
2043 __ FmoveLow(kScratchReg,
i.InputSingleRegister(2));
2044 __ insert_w(dst,
i.InputInt8(1), kScratchReg);
2047 case kMips64F32x4SConvertI32x4: {
2048 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2049 __ ffint_s_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2052 case kMips64F32x4UConvertI32x4: {
2053 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2054 __ ffint_u_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2057 case kMips64I32x4Mul: {
2058 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2059 __ mulv_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2060 i.InputSimd128Register(1));
2063 case kMips64I32x4MaxS: {
2064 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2065 __ max_s_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2066 i.InputSimd128Register(1));
2069 case kMips64I32x4MinS: {
2070 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2071 __ min_s_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2072 i.InputSimd128Register(1));
2075 case kMips64I32x4Eq: {
2076 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2077 __ ceq_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2078 i.InputSimd128Register(1));
2081 case kMips64I32x4Ne: {
2082 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2083 Simd128Register dst =
i.OutputSimd128Register();
2084 __ ceq_w(dst,
i.InputSimd128Register(0),
i.InputSimd128Register(1));
2085 __ nor_v(dst, dst, dst);
2088 case kMips64I32x4Shl: {
2089 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2090 __ slli_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2094 case kMips64I32x4ShrS: {
2095 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2096 __ srai_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2100 case kMips64I32x4ShrU: {
2101 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2102 __ srli_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2106 case kMips64I32x4MaxU: {
2107 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2108 __ max_u_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2109 i.InputSimd128Register(1));
2112 case kMips64I32x4MinU: {
2113 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2114 __ min_u_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2115 i.InputSimd128Register(1));
2118 case kMips64S128Select: {
2119 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2120 DCHECK(
i.OutputSimd128Register() ==
i.InputSimd128Register(0));
2121 __ bsel_v(
i.OutputSimd128Register(),
i.InputSimd128Register(2),
2122 i.InputSimd128Register(1));
2125 case kMips64F32x4Abs: {
2126 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2127 __ bclri_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0), 31);
2130 case kMips64F32x4Neg: {
2131 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2132 __ bnegi_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0), 31);
2135 case kMips64F32x4RecipApprox: {
2136 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2137 __ frcp_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2140 case kMips64F32x4RecipSqrtApprox: {
2141 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2142 __ frsqrt_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2145 case kMips64F32x4Add: {
2146 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2147 __ fadd_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2148 i.InputSimd128Register(1));
2151 case kMips64F32x4Sub: {
2152 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2153 __ fsub_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2154 i.InputSimd128Register(1));
2157 case kMips64F32x4Mul: {
2158 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2159 __ fmul_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2160 i.InputSimd128Register(1));
2163 case kMips64F32x4Max: {
2164 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2165 __ fmax_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2166 i.InputSimd128Register(1));
2169 case kMips64F32x4Min: {
2170 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2171 __ fmin_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2172 i.InputSimd128Register(1));
2175 case kMips64F32x4Eq: {
2176 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2177 __ fceq_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2178 i.InputSimd128Register(1));
2181 case kMips64F32x4Ne: {
2182 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2183 __ fcne_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2184 i.InputSimd128Register(1));
2187 case kMips64F32x4Lt: {
2188 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2189 __ fclt_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2190 i.InputSimd128Register(1));
2193 case kMips64F32x4Le: {
2194 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2195 __ fcle_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2196 i.InputSimd128Register(1));
2199 case kMips64I32x4SConvertF32x4: {
2200 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2201 __ ftrunc_s_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2204 case kMips64I32x4UConvertF32x4: {
2205 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2206 __ ftrunc_u_w(
i.OutputSimd128Register(),
i.InputSimd128Register(0));
2209 case kMips64I32x4Neg: {
2210 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2211 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2212 __ subv_w(
i.OutputSimd128Register(), kSimd128RegZero,
2213 i.InputSimd128Register(0));
2216 case kMips64I32x4GtS: {
2217 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2218 __ clt_s_w(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2219 i.InputSimd128Register(0));
2222 case kMips64I32x4GeS: {
2223 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2224 __ cle_s_w(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2225 i.InputSimd128Register(0));
2228 case kMips64I32x4GtU: {
2229 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2230 __ clt_u_w(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2231 i.InputSimd128Register(0));
2234 case kMips64I32x4GeU: {
2235 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2236 __ cle_u_w(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2237 i.InputSimd128Register(0));
2240 case kMips64I16x8Splat: {
2241 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2242 __ fill_h(
i.OutputSimd128Register(),
i.InputRegister(0));
2245 case kMips64I16x8ExtractLane: {
2246 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2247 __ copy_s_h(
i.OutputRegister(),
i.InputSimd128Register(0),
2251 case kMips64I16x8ReplaceLane: {
2252 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2253 Simd128Register src =
i.InputSimd128Register(0);
2254 Simd128Register dst =
i.OutputSimd128Register();
2256 __ move_v(dst, src);
2258 __ insert_h(dst,
i.InputInt8(1),
i.InputRegister(2));
2261 case kMips64I16x8Neg: {
2262 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2263 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2264 __ subv_h(
i.OutputSimd128Register(), kSimd128RegZero,
2265 i.InputSimd128Register(0));
2268 case kMips64I16x8Shl: {
2269 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2270 __ slli_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2274 case kMips64I16x8ShrS: {
2275 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2276 __ srai_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2280 case kMips64I16x8ShrU: {
2281 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2282 __ srli_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2286 case kMips64I16x8Add: {
2287 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2288 __ addv_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2289 i.InputSimd128Register(1));
2292 case kMips64I16x8AddSaturateS: {
2293 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2294 __ adds_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2295 i.InputSimd128Register(1));
2298 case kMips64I16x8Sub: {
2299 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2300 __ subv_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2301 i.InputSimd128Register(1));
2304 case kMips64I16x8SubSaturateS: {
2305 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2306 __ subs_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2307 i.InputSimd128Register(1));
2310 case kMips64I16x8Mul: {
2311 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2312 __ mulv_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2313 i.InputSimd128Register(1));
2316 case kMips64I16x8MaxS: {
2317 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2318 __ max_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2319 i.InputSimd128Register(1));
2322 case kMips64I16x8MinS: {
2323 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2324 __ min_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2325 i.InputSimd128Register(1));
2328 case kMips64I16x8Eq: {
2329 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2330 __ ceq_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2331 i.InputSimd128Register(1));
2334 case kMips64I16x8Ne: {
2335 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2336 Simd128Register dst =
i.OutputSimd128Register();
2337 __ ceq_h(dst,
i.InputSimd128Register(0),
i.InputSimd128Register(1));
2338 __ nor_v(dst, dst, dst);
2341 case kMips64I16x8GtS: {
2342 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2343 __ clt_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2344 i.InputSimd128Register(0));
2347 case kMips64I16x8GeS: {
2348 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2349 __ cle_s_h(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2350 i.InputSimd128Register(0));
2353 case kMips64I16x8AddSaturateU: {
2354 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2355 __ adds_u_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2356 i.InputSimd128Register(1));
2359 case kMips64I16x8SubSaturateU: {
2360 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2361 __ subs_u_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2362 i.InputSimd128Register(1));
2365 case kMips64I16x8MaxU: {
2366 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2367 __ max_u_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2368 i.InputSimd128Register(1));
2371 case kMips64I16x8MinU: {
2372 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2373 __ min_u_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2374 i.InputSimd128Register(1));
2377 case kMips64I16x8GtU: {
2378 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2379 __ clt_u_h(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2380 i.InputSimd128Register(0));
2383 case kMips64I16x8GeU: {
2384 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2385 __ cle_u_h(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2386 i.InputSimd128Register(0));
2389 case kMips64I8x16Splat: {
2390 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2391 __ fill_b(
i.OutputSimd128Register(),
i.InputRegister(0));
2394 case kMips64I8x16ExtractLane: {
2395 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2396 __ copy_s_b(
i.OutputRegister(),
i.InputSimd128Register(0),
2400 case kMips64I8x16ReplaceLane: {
2401 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2402 Simd128Register src =
i.InputSimd128Register(0);
2403 Simd128Register dst =
i.OutputSimd128Register();
2405 __ move_v(dst, src);
2407 __ insert_b(dst,
i.InputInt8(1),
i.InputRegister(2));
2410 case kMips64I8x16Neg: {
2411 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2412 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2413 __ subv_b(
i.OutputSimd128Register(), kSimd128RegZero,
2414 i.InputSimd128Register(0));
2417 case kMips64I8x16Shl: {
2418 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2419 __ slli_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2423 case kMips64I8x16ShrS: {
2424 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2425 __ srai_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2429 case kMips64I8x16Add: {
2430 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2431 __ addv_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2432 i.InputSimd128Register(1));
2435 case kMips64I8x16AddSaturateS: {
2436 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2437 __ adds_s_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2438 i.InputSimd128Register(1));
2441 case kMips64I8x16Sub: {
2442 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2443 __ subv_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2444 i.InputSimd128Register(1));
2447 case kMips64I8x16SubSaturateS: {
2448 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2449 __ subs_s_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2450 i.InputSimd128Register(1));
2453 case kMips64I8x16Mul: {
2454 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2455 __ mulv_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2456 i.InputSimd128Register(1));
2459 case kMips64I8x16MaxS: {
2460 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2461 __ max_s_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2462 i.InputSimd128Register(1));
2465 case kMips64I8x16MinS: {
2466 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2467 __ min_s_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2468 i.InputSimd128Register(1));
2471 case kMips64I8x16Eq: {
2472 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2473 __ ceq_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2474 i.InputSimd128Register(1));
2477 case kMips64I8x16Ne: {
2478 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2479 Simd128Register dst =
i.OutputSimd128Register();
2480 __ ceq_b(dst,
i.InputSimd128Register(0),
i.InputSimd128Register(1));
2481 __ nor_v(dst, dst, dst);
2484 case kMips64I8x16GtS: {
2485 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2486 __ clt_s_b(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2487 i.InputSimd128Register(0));
2490 case kMips64I8x16GeS: {
2491 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2492 __ cle_s_b(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2493 i.InputSimd128Register(0));
2496 case kMips64I8x16ShrU: {
2497 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2498 __ srli_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2502 case kMips64I8x16AddSaturateU: {
2503 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2504 __ adds_u_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2505 i.InputSimd128Register(1));
2508 case kMips64I8x16SubSaturateU: {
2509 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2510 __ subs_u_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2511 i.InputSimd128Register(1));
2514 case kMips64I8x16MaxU: {
2515 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2516 __ max_u_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2517 i.InputSimd128Register(1));
2520 case kMips64I8x16MinU: {
2521 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2522 __ min_u_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2523 i.InputSimd128Register(1));
2526 case kMips64I8x16GtU: {
2527 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2528 __ clt_u_b(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2529 i.InputSimd128Register(0));
2532 case kMips64I8x16GeU: {
2533 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2534 __ cle_u_b(
i.OutputSimd128Register(),
i.InputSimd128Register(1),
2535 i.InputSimd128Register(0));
2538 case kMips64S128And: {
2539 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2540 __ and_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2541 i.InputSimd128Register(1));
2544 case kMips64S128Or: {
2545 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2546 __ or_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2547 i.InputSimd128Register(1));
2550 case kMips64S128Xor: {
2551 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2552 __ xor_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2553 i.InputSimd128Register(1));
2556 case kMips64S128Not: {
2557 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2558 __ nor_v(
i.OutputSimd128Register(),
i.InputSimd128Register(0),
2559 i.InputSimd128Register(0));
2562 case kMips64S1x4AnyTrue:
2563 case kMips64S1x8AnyTrue:
2564 case kMips64S1x16AnyTrue: {
2565 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2566 Register dst =
i.OutputRegister();
2568 __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
2569 i.InputSimd128Register(0), USE_DELAY_SLOT);
2572 __ bind(&all_false);
2575 case kMips64S1x4AllTrue: {
2576 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2577 Register dst =
i.OutputRegister();
2579 __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
2580 i.InputSimd128Register(0), USE_DELAY_SLOT);
2586 case kMips64S1x8AllTrue: {
2587 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2588 Register dst =
i.OutputRegister();
2590 __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
2591 i.InputSimd128Register(0), USE_DELAY_SLOT);
2597 case kMips64S1x16AllTrue: {
2598 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2599 Register dst =
i.OutputRegister();
2601 __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
2602 i.InputSimd128Register(0), USE_DELAY_SLOT);
2608 case kMips64MsaLd: {
2609 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2610 __ ld_b(
i.OutputSimd128Register(),
i.MemoryOperand());
2613 case kMips64MsaSt: {
2614 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2615 __ st_b(
i.InputSimd128Register(2),
i.MemoryOperand());
2618 case kMips64S32x4InterleaveRight: {
2619 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2620 Simd128Register dst =
i.OutputSimd128Register(),
2621 src0 =
i.InputSimd128Register(0),
2622 src1 =
i.InputSimd128Register(1);
2625 __ ilvr_w(dst, src1, src0);
2628 case kMips64S32x4InterleaveLeft: {
2629 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2630 Simd128Register dst =
i.OutputSimd128Register(),
2631 src0 =
i.InputSimd128Register(0),
2632 src1 =
i.InputSimd128Register(1);
2635 __ ilvl_w(dst, src1, src0);
2638 case kMips64S32x4PackEven: {
2639 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2640 Simd128Register dst =
i.OutputSimd128Register(),
2641 src0 =
i.InputSimd128Register(0),
2642 src1 =
i.InputSimd128Register(1);
2645 __ pckev_w(dst, src1, src0);
2648 case kMips64S32x4PackOdd: {
2649 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2650 Simd128Register dst =
i.OutputSimd128Register(),
2651 src0 =
i.InputSimd128Register(0),
2652 src1 =
i.InputSimd128Register(1);
2655 __ pckod_w(dst, src1, src0);
2658 case kMips64S32x4InterleaveEven: {
2659 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2660 Simd128Register dst =
i.OutputSimd128Register(),
2661 src0 =
i.InputSimd128Register(0),
2662 src1 =
i.InputSimd128Register(1);
2665 __ ilvev_w(dst, src1, src0);
2668 case kMips64S32x4InterleaveOdd: {
2669 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2670 Simd128Register dst =
i.OutputSimd128Register(),
2671 src0 =
i.InputSimd128Register(0),
2672 src1 =
i.InputSimd128Register(1);
2675 __ ilvod_w(dst, src1, src0);
2678 case kMips64S32x4Shuffle: {
2679 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2680 Simd128Register dst =
i.OutputSimd128Register(),
2681 src0 =
i.InputSimd128Register(0),
2682 src1 =
i.InputSimd128Register(1);
2684 int32_t shuffle =
i.InputInt32(2);
2688 unsigned lane = shuffle & 0xFF;
2689 if (FLAG_debug_code) {
2694 int32_t shuffle_helper = shuffle;
2695 for (
int i = 0;
i < 4; ++
i) {
2696 lane = shuffle_helper & 0xFF;
2698 shuffle_helper >>= 8;
2703 for (
int i = 0;
i < 4;
i++) {
2704 lane = shuffle & 0xFF;
2709 i8 |= lane << (2 *
i);
2712 __ shf_w(dst, src0, i8);
2716 __ move_v(kSimd128ScratchReg, src0);
2717 src0 = kSimd128ScratchReg;
2718 }
else if (dst == src1) {
2719 __ move_v(kSimd128ScratchReg, src1);
2720 src1 = kSimd128ScratchReg;
2723 __ li(kScratchReg,
i.InputInt32(2));
2724 __ insert_w(dst, 0, kScratchReg);
2725 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2726 __ ilvr_b(dst, kSimd128RegZero, dst);
2727 __ ilvr_h(dst, kSimd128RegZero, dst);
2728 __ vshf_w(dst, src1, src0);
2732 case kMips64S16x8InterleaveRight: {
2733 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2734 Simd128Register dst =
i.OutputSimd128Register(),
2735 src0 =
i.InputSimd128Register(0),
2736 src1 =
i.InputSimd128Register(1);
2739 __ ilvr_h(dst, src1, src0);
2742 case kMips64S16x8InterleaveLeft: {
2743 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2744 Simd128Register dst =
i.OutputSimd128Register(),
2745 src0 =
i.InputSimd128Register(0),
2746 src1 =
i.InputSimd128Register(1);
2749 __ ilvl_h(dst, src1, src0);
2752 case kMips64S16x8PackEven: {
2753 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2754 Simd128Register dst =
i.OutputSimd128Register(),
2755 src0 =
i.InputSimd128Register(0),
2756 src1 =
i.InputSimd128Register(1);
2759 __ pckev_h(dst, src1, src0);
2762 case kMips64S16x8PackOdd: {
2763 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2764 Simd128Register dst =
i.OutputSimd128Register(),
2765 src0 =
i.InputSimd128Register(0),
2766 src1 =
i.InputSimd128Register(1);
2769 __ pckod_h(dst, src1, src0);
2772 case kMips64S16x8InterleaveEven: {
2773 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2774 Simd128Register dst =
i.OutputSimd128Register(),
2775 src0 =
i.InputSimd128Register(0),
2776 src1 =
i.InputSimd128Register(1);
2779 __ ilvev_h(dst, src1, src0);
2782 case kMips64S16x8InterleaveOdd: {
2783 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2784 Simd128Register dst =
i.OutputSimd128Register(),
2785 src0 =
i.InputSimd128Register(0),
2786 src1 =
i.InputSimd128Register(1);
2789 __ ilvod_h(dst, src1, src0);
2792 case kMips64S16x4Reverse: {
2793 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2796 __ shf_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0), 0x1B);
2799 case kMips64S16x2Reverse: {
2800 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2803 __ shf_h(
i.OutputSimd128Register(),
i.InputSimd128Register(0), 0xB1);
2806 case kMips64S8x16InterleaveRight: {
2807 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2808 Simd128Register dst =
i.OutputSimd128Register(),
2809 src0 =
i.InputSimd128Register(0),
2810 src1 =
i.InputSimd128Register(1);
2813 __ ilvr_b(dst, src1, src0);
2816 case kMips64S8x16InterleaveLeft: {
2817 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2818 Simd128Register dst =
i.OutputSimd128Register(),
2819 src0 =
i.InputSimd128Register(0),
2820 src1 =
i.InputSimd128Register(1);
2823 __ ilvl_b(dst, src1, src0);
2826 case kMips64S8x16PackEven: {
2827 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2828 Simd128Register dst =
i.OutputSimd128Register(),
2829 src0 =
i.InputSimd128Register(0),
2830 src1 =
i.InputSimd128Register(1);
2833 __ pckev_b(dst, src1, src0);
2836 case kMips64S8x16PackOdd: {
2837 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2838 Simd128Register dst =
i.OutputSimd128Register(),
2839 src0 =
i.InputSimd128Register(0),
2840 src1 =
i.InputSimd128Register(1);
2843 __ pckod_b(dst, src1, src0);
2846 case kMips64S8x16InterleaveEven: {
2847 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2848 Simd128Register dst =
i.OutputSimd128Register(),
2849 src0 =
i.InputSimd128Register(0),
2850 src1 =
i.InputSimd128Register(1);
2853 __ ilvev_b(dst, src1, src0);
2856 case kMips64S8x16InterleaveOdd: {
2857 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2858 Simd128Register dst =
i.OutputSimd128Register(),
2859 src0 =
i.InputSimd128Register(0),
2860 src1 =
i.InputSimd128Register(1);
2863 __ ilvod_b(dst, src1, src0);
2866 case kMips64S8x16Concat: {
2867 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2868 Simd128Register dst =
i.OutputSimd128Register();
2869 DCHECK(dst ==
i.InputSimd128Register(0));
2870 __ sldi_b(dst,
i.InputSimd128Register(1),
i.InputInt4(2));
2873 case kMips64S8x16Shuffle: {
2874 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2875 Simd128Register dst =
i.OutputSimd128Register(),
2876 src0 =
i.InputSimd128Register(0),
2877 src1 =
i.InputSimd128Register(1);
2880 __ move_v(kSimd128ScratchReg, src0);
2881 src0 = kSimd128ScratchReg;
2882 }
else if (dst == src1) {
2883 __ move_v(kSimd128ScratchReg, src1);
2884 src1 = kSimd128ScratchReg;
2888 static_cast<int64_t>(
i.InputInt32(3)) << 32 |
i.InputInt32(2);
2890 static_cast<int64_t>(
i.InputInt32(5)) << 32 |
i.InputInt32(4);
2891 __ li(kScratchReg, control_low);
2892 __ insert_d(dst, 0, kScratchReg);
2893 __ li(kScratchReg, control_hi);
2894 __ insert_d(dst, 1, kScratchReg);
2895 __ vshf_b(dst, src1, src0);
2898 case kMips64S8x8Reverse: {
2899 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2904 __ shf_w(kSimd128ScratchReg,
i.InputSimd128Register(0), 0xB1);
2905 __ shf_b(
i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
2908 case kMips64S8x4Reverse: {
2909 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2912 __ shf_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0), 0x1B);
2915 case kMips64S8x2Reverse: {
2916 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2919 __ shf_b(
i.OutputSimd128Register(),
i.InputSimd128Register(0), 0xB1);
2922 case kMips64I32x4SConvertI16x8Low: {
2923 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2924 Simd128Register dst =
i.OutputSimd128Register();
2925 Simd128Register src =
i.InputSimd128Register(0);
2926 __ ilvr_h(kSimd128ScratchReg, src, src);
2927 __ slli_w(dst, kSimd128ScratchReg, 16);
2928 __ srai_w(dst, dst, 16);
2931 case kMips64I32x4SConvertI16x8High: {
2932 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2933 Simd128Register dst =
i.OutputSimd128Register();
2934 Simd128Register src =
i.InputSimd128Register(0);
2935 __ ilvl_h(kSimd128ScratchReg, src, src);
2936 __ slli_w(dst, kSimd128ScratchReg, 16);
2937 __ srai_w(dst, dst, 16);
2940 case kMips64I32x4UConvertI16x8Low: {
2941 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2942 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2943 __ ilvr_h(
i.OutputSimd128Register(), kSimd128RegZero,
2944 i.InputSimd128Register(0));
2947 case kMips64I32x4UConvertI16x8High: {
2948 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2949 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2950 __ ilvl_h(
i.OutputSimd128Register(), kSimd128RegZero,
2951 i.InputSimd128Register(0));
2954 case kMips64I16x8SConvertI8x16Low: {
2955 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2956 Simd128Register dst =
i.OutputSimd128Register();
2957 Simd128Register src =
i.InputSimd128Register(0);
2958 __ ilvr_b(kSimd128ScratchReg, src, src);
2959 __ slli_h(dst, kSimd128ScratchReg, 8);
2960 __ srai_h(dst, dst, 8);
2963 case kMips64I16x8SConvertI8x16High: {
2964 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2965 Simd128Register dst =
i.OutputSimd128Register();
2966 Simd128Register src =
i.InputSimd128Register(0);
2967 __ ilvl_b(kSimd128ScratchReg, src, src);
2968 __ slli_h(dst, kSimd128ScratchReg, 8);
2969 __ srai_h(dst, dst, 8);
2972 case kMips64I16x8SConvertI32x4: {
2973 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2974 Simd128Register dst =
i.OutputSimd128Register();
2975 Simd128Register src0 =
i.InputSimd128Register(0);
2976 Simd128Register src1 =
i.InputSimd128Register(1);
2977 __ sat_s_w(kSimd128ScratchReg, src0, 15);
2978 __ sat_s_w(kSimd128RegZero, src1, 15);
2979 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2982 case kMips64I16x8UConvertI32x4: {
2983 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2984 Simd128Register dst =
i.OutputSimd128Register();
2985 Simd128Register src0 =
i.InputSimd128Register(0);
2986 Simd128Register src1 =
i.InputSimd128Register(1);
2987 __ sat_u_w(kSimd128ScratchReg, src0, 15);
2988 __ sat_u_w(kSimd128RegZero, src1, 15);
2989 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
2992 case kMips64I16x8UConvertI8x16Low: {
2993 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
2994 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
2995 __ ilvr_b(
i.OutputSimd128Register(), kSimd128RegZero,
2996 i.InputSimd128Register(0));
2999 case kMips64I16x8UConvertI8x16High: {
3000 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3001 __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
3002 __ ilvl_b(
i.OutputSimd128Register(), kSimd128RegZero,
3003 i.InputSimd128Register(0));
3006 case kMips64I8x16SConvertI16x8: {
3007 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3008 Simd128Register dst =
i.OutputSimd128Register();
3009 Simd128Register src0 =
i.InputSimd128Register(0);
3010 Simd128Register src1 =
i.InputSimd128Register(1);
3011 __ sat_s_h(kSimd128ScratchReg, src0, 7);
3012 __ sat_s_h(kSimd128RegZero, src1, 7);
3013 __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3016 case kMips64I8x16UConvertI16x8: {
3017 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3018 Simd128Register dst =
i.OutputSimd128Register();
3019 Simd128Register src0 =
i.InputSimd128Register(0);
3020 Simd128Register src1 =
i.InputSimd128Register(1);
3021 __ sat_u_h(kSimd128ScratchReg, src0, 7);
3022 __ sat_u_h(kSimd128RegZero, src1, 7);
3023 __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
3026 case kMips64F32x4AddHoriz: {
3027 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3028 Simd128Register src0 =
i.InputSimd128Register(0);
3029 Simd128Register src1 =
i.InputSimd128Register(1);
3030 Simd128Register dst =
i.OutputSimd128Register();
3031 __ shf_w(kSimd128ScratchReg, src0, 0xB1);
3032 __ shf_w(kSimd128RegZero, src1, 0xB1);
3033 __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
3034 __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
3035 __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
3038 case kMips64I32x4AddHoriz: {
3039 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3040 Simd128Register src0 =
i.InputSimd128Register(0);
3041 Simd128Register src1 =
i.InputSimd128Register(1);
3042 Simd128Register dst =
i.OutputSimd128Register();
3043 __ hadd_s_d(kSimd128ScratchReg, src0, src0);
3044 __ hadd_s_d(kSimd128RegZero, src1, src1);
3045 __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
3048 case kMips64I16x8AddHoriz: {
3049 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3050 Simd128Register src0 =
i.InputSimd128Register(0);
3051 Simd128Register src1 =
i.InputSimd128Register(1);
3052 Simd128Register dst =
i.OutputSimd128Register();
3053 __ hadd_s_w(kSimd128ScratchReg, src0, src0);
3054 __ hadd_s_w(kSimd128RegZero, src1, src1);
3055 __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
// NOTE(review): this region appears to have lost lines and line breaks during
// extraction (original source line numbers such as "3062" are fused into the
// text and several statements/braces look truncated). Comments below describe
// only the logic that is visible here; verify against the original
// code-generator-mips64.cc before relying on them.
//
// UNSUPPORTED_COND(opcode, condition): streams a diagnostic for a
// FlagsCondition that this opcode does not support (the rest of the macro
// body is not visible here).
//
// AssembleBranchToLabels: emits a conditional branch to tlabel (and, unless
// `fallthru`, an unconditional branch to flabel) for the flags-setting
// instruction `instr`, dispatching on its arch opcode. MIPS has no condition
// flags register, so each compare-like opcode is re-materialized here as a
// compare-and-branch sequence.
3062 #define UNSUPPORTED_COND(opcode, condition) \ 3063 StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ 3067 void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
3068 Instruction* instr, FlagsCondition condition,
3069 Label* tlabel, Label* flabel,
bool fallthru) {
3072 MipsOperandConverter
i(gen, instr);
3074 Condition cc = kNoCondition;
// kMips64Tst: the AND result was left in kScratchReg by the instruction;
// branch on it against zero.
3082 if (instr->arch_opcode() == kMips64Tst) {
3083 cc = FlagsConditionToConditionTst(condition);
3084 __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
3085 }
// kMips64Dadd/Dsub: detect 32-bit overflow by comparing the upper 32 bits
// (dsra32) against the sign bit of the lower word (sra ... 31).
else if (instr->arch_opcode() == kMips64Dadd ||
3086 instr->arch_opcode() == kMips64Dsub) {
3087 cc = FlagsConditionToConditionOvf(condition);
3088 __ dsra32(kScratchReg,
i.OutputRegister(), 0);
3089 __ sra(kScratchReg2,
i.OutputRegister(), 31);
3090 __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
3091 }
// kMips64DaddOvf/DsubOvf: kScratchReg holds an overflow indicator whose sign
// selects overflow (lt) vs. no-overflow (ge).
// NOTE(review): the case labels of this switch appear lost in extraction.
else if (instr->arch_opcode() == kMips64DaddOvf ||
3092 instr->arch_opcode() == kMips64DsubOvf) {
3093 switch (condition) {
3096 __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
3099 __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
3102 UNSUPPORTED_COND(instr->arch_opcode(), condition);
3105 }
// kMips64MulOvf: overflow indicator in kScratchReg is non-zero on overflow.
else if (instr->arch_opcode() == kMips64MulOvf) {
3107 switch (condition) {
3109 __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
3112 __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
// NOTE(review): kMipsMulOvf looks like an identifier copied from the 32-bit
// port; presumably this should be kMips64MulOvf -- confirm against upstream.
3115 UNSUPPORTED_COND(kMipsMulOvf, condition);
3118 }
// kMips64Cmp: plain register/operand compare-and-branch.
else if (instr->arch_opcode() == kMips64Cmp) {
3119 cc = FlagsConditionToConditionCmp(condition);
3120 __ Branch(tlabel, cc,
i.InputRegister(0),
i.InputOperand(1));
3121 }
// kMips64CmpS/CmpD: FPU compare, then branch on the FPU condition flag. The
// predicate selection feeding BranchTrueF/BranchFalseF is not fully visible
// here (lines appear truncated).
else if (instr->arch_opcode() == kMips64CmpS ||
3122 instr->arch_opcode() == kMips64CmpD) {
3124 FlagsConditionToConditionCmpFPU(predicate, condition);
3126 __ BranchTrueF(tlabel);
3128 __ BranchFalseF(tlabel);
// Fallback: diagnostic for an opcode this dispatcher does not implement.
3131 PrintF(
"AssembleArchBranch Unimplemented arch_opcode: %d\n",
3132 instr->arch_opcode());
// If the false label does not immediately follow in assembly order, branch
// to it explicitly.
3135 if (!fallthru) __ Branch(flabel);
// AssembleArchBranch: assembles a two-way architecture branch by delegating
// to AssembleBranchToLabels with the branch's condition and labels.
// NOTE(review): the trailing argument(s) and closing brace appear to have
// been lost during extraction -- verify against the original source.
3141 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
3142 Label* tlabel = branch->true_label;
3143 Label* flabel = branch->false_label;
3145 AssembleBranchToLabels(
this, tasm(), instr, branch->condition, tlabel, flabel,
// AssembleBranchPoisoning: updates kSpeculationPoisonRegister based on the
// flags-setting instruction, so that state computed under a mispredicted
// branch is masked out (speculation/Spectre mitigation). The register is
// zeroed when the (negated) condition holds.
// NOTE(review): several case labels, arguments, and closing braces look
// truncated by extraction; comments describe only what is visible.
3149 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
3150 Instruction* instr) {
// Visible early-out for unordered float comparisons.
3152 if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
3156 MipsOperandConverter
i(
this, instr);
// Negate so the poison register is zeroed when the branch is NOT taken.
3157 condition = NegateFlagsCondition(condition);
3159 switch (instr->arch_opcode()) {
3161 __ LoadZeroOnCondition(kSpeculationPoisonRegister,
i.InputRegister(0),
3163 FlagsConditionToConditionCmp(condition));
3167 switch (condition) {
3169 __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
3172 __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
// Overflow check: xor of the upper-32-bits sign and the lower word's sign
// bit is non-zero exactly when the 32-bit result overflowed.
3183 __ dsrl32(kScratchReg,
i.OutputRegister(), 31);
3184 __ srl(kScratchReg2,
i.OutputRegister(), 31);
3185 __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
3186 switch (condition) {
3188 __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3192 __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
3195 UNSUPPORTED_COND(instr->arch_opcode(), condition);
3199 case kMips64DaddOvf:
3200 case kMips64DsubOvf: {
// kScratchReg holds the overflow indicator; Slt reduces its sign to 0/1.
3202 __ Slt(kScratchReg2, kScratchReg, zero_reg);
3203 switch (condition) {
3205 __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3209 __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
3212 UNSUPPORTED_COND(instr->arch_opcode(), condition);
3216 case kMips64MulOvf: {
3218 switch (condition) {
3220 __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
3224 __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
3227 UNSUPPORTED_COND(instr->arch_opcode(), condition);
// FPU comparisons: zero the poison register based on the FPU condition
// flag; the predicate computation is not fully visible here.
3234 FlagsConditionToConditionCmpFPU(predicate, condition);
3236 __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
3238 __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
// AssembleArchDeoptBranch: deoptimization branches use the same code path as
// regular architecture branches on MIPS64.
// (The #undef retires UNSUPPORTED_COND, which is only used above.)
// NOTE(review): the closing brace appears lost in extraction.
3248 #undef UNSUPPORTED_COND 3250 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
3251 BranchInfo* branch) {
3252 AssembleArchBranch(instr, branch);
// AssembleArchJump: emits an unconditional branch to the target block,
// eliding it when the target immediately follows in assembly order.
// NOTE(review): the closing brace appears lost in extraction.
3255 void CodeGenerator::AssembleArchJump(RpoNumber target) {
3256 if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
// AssembleArchTrap: emits an out-of-line WebAssembly trap. The trap body is
// generated by the local OutOfLineTrap class; the in-line code branches to
// its entry label when `condition` holds and otherwise falls through.
// NOTE(review): several statements inside look truncated by extraction;
// comments describe only what is visible.
3259 void CodeGenerator::AssembleArchTrap(Instruction* instr,
3260 FlagsCondition condition) {
3261 class OutOfLineTrap final :
public OutOfLineCode {
3263 OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
3264 : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
// Generate: decodes the trap id from the instruction's last input and
// emits the call-to-trap sequence.
3265 void Generate() final {
3266 MipsOperandConverter
i(gen_, instr_);
3268 static_cast<TrapId
>(
i.InputInt32(instr_->InputCount() - 1));
3269 GenerateCallToTrap(trap_id);
// GenerateCallToTrap: for the testing trap id (kInvalid path visible
// here), calls the C trap callback and unwinds the WASM frame; otherwise
// calls the trap stub and records a safepoint.
3273 void GenerateCallToTrap(TrapId trap_id) {
3274 if (trap_id == TrapId::kInvalid) {
3279 __ PrepareCallCFunction(0, 0, cp);
3281 ExternalReference::wasm_call_trap_callback_for_testing(), 0);
3282 __ LeaveFrame(StackFrame::WASM_COMPILED);
3283 auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
3285 static_cast<int>(call_descriptor->StackParameterCount());
// Round the pop count up to an even number of slots -- presumably to keep
// the stack pointer 16-byte aligned; confirm against the ABI.
3286 pop_count += (pop_count & 1);
3290 gen_->AssembleSourcePosition(instr_);
// The trap id doubles as the stub-call target, fixed up via the
// WASM_STUB_CALL relocation.
3294 __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
3295 ReferenceMap* reference_map =
3296 new (gen_->zone()) ReferenceMap(gen_->zone());
3297 gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
3298 Safepoint::kNoLazyDeopt);
3299 if (FLAG_debug_code) {
// Traps never return; stop if control ever falls through here.
3300 __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
3304 Instruction* instr_;
3305 CodeGenerator* gen_;
3307 auto ool =
new (zone()) OutOfLineTrap(
this, instr);
3308 Label* tlabel = ool->entry();
// Branch to the out-of-line trap on `condition`; no false label
// (flabel == nullptr) and fall through (fallthru == true) otherwise.
3309 AssembleBranchToLabels(
this, tasm(), instr, condition, tlabel,
nullptr,
true);
// Materializes the boolean value (0 or 1) of |condition| into the last
// output register, based on the arch opcode of |instr|.
// NOTE(review): extraction appears to have dropped lines in this block
// (switch case labels, 'break's and closing braces); verify against the
// upstream source before editing.
3313 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
3314 FlagsCondition condition) {
3315 MipsOperandConverter
i(
this, instr);
3321 DCHECK_NE(0u, instr->OutputCount());
3322 Register result =
i.OutputRegister(instr->OutputCount() - 1);
3323 Condition cc = kNoCondition;
// kMips64Tst: the test value is read from kScratchReg (presumably left
// there by the preceding instruction -- TODO confirm).
3328 if (instr->arch_opcode() == kMips64Tst) {
3329 cc = FlagsConditionToConditionTst(condition);
3331 __ Sltu(result, kScratchReg, 1);
3333 __ Sltu(result, zero_reg, kScratchReg);
// Dadd/Dsub overflow: compare bit 63 against bit 31 of the result; a
// mismatch indicates 32-bit overflow. xori negates for the no-overflow
// condition.
3336 }
else if (instr->arch_opcode() == kMips64Dadd ||
3337 instr->arch_opcode() == kMips64Dsub) {
3338 cc = FlagsConditionToConditionOvf(condition);
3340 __ dsrl32(kScratchReg,
i.OutputRegister(), 31);
3341 __ srl(kScratchReg2,
i.OutputRegister(), 31);
3342 __ xor_(result, kScratchReg, kScratchReg2);
3344 __ xori(result, result, 1);
3346 }
else if (instr->arch_opcode() == kMips64DaddOvf ||
3347 instr->arch_opcode() == kMips64DsubOvf) {
3349 __ slt(result, kScratchReg, zero_reg);
3350 }
else if (instr->arch_opcode() == kMips64MulOvf) {
3352 __ Sgtu(result, kScratchReg, zero_reg);
// kMips64Cmp: re-materialize the integer comparison as 0/1, preferring
// immediate-friendly instruction sequences where the right operand fits.
3353 }
else if (instr->arch_opcode() == kMips64Cmp) {
3354 cc = FlagsConditionToConditionCmp(condition);
3358 Register left =
i.InputRegister(0);
3359 Operand right =
i.InputOperand(1);
3360 if (instr->InputAt(1)->IsImmediate()) {
3361 if (is_int16(-right.immediate())) {
3362 if (right.immediate() == 0) {
3364 __ Sltu(result, left, 1);
3366 __ Sltu(result, zero_reg, left);
// Fold the comparison into a Daddu of the negated immediate, then
// test the difference against zero.
3369 __ Daddu(result, left, Operand(-right.immediate()));
3371 __ Sltu(result, result, 1);
3373 __ Sltu(result, zero_reg, result);
3377 if (is_uint16(right.immediate())) {
3378 __ Xor(result, left, right);
3380 __ li(kScratchReg, right);
3381 __ Xor(result, left, kScratchReg);
3384 __ Sltu(result, result, 1);
3386 __ Sltu(result, zero_reg, result);
3390 __ Xor(result, left, right);
3392 __ Sltu(result, result, 1);
3394 __ Sltu(result, zero_reg, result);
// Signed/unsigned less-than variants below: operands are swapped for
// the greater-than forms and xori negates for the *-or-equal forms.
3400 Register left =
i.InputRegister(0);
3401 Operand right =
i.InputOperand(1);
3402 __ Slt(result, left, right);
3404 __ xori(result, result, 1);
3409 Register left =
i.InputRegister(1);
3410 Operand right =
i.InputOperand(0);
3411 __ Slt(result, left, right);
3413 __ xori(result, result, 1);
3418 Register left =
i.InputRegister(0);
3419 Operand right =
i.InputOperand(1);
3420 __ Sltu(result, left, right);
3422 __ xori(result, result, 1);
3427 Register left =
i.InputRegister(1);
3428 Operand right =
i.InputOperand(0);
3429 __ Sltu(result, left, right);
3431 __ xori(result, result, 1);
// Floating-point compares: materialize the FPU condition flag into a GP
// register -- Movf/Movt on pre-r6, dmfc1/mfc1 of the compare result
// register on r6.
3438 }
else if (instr->arch_opcode() == kMips64CmpD ||
3439 instr->arch_opcode() == kMips64CmpS) {
3440 FPURegister left =
i.InputOrZeroDoubleRegister(0);
3441 FPURegister right =
i.InputOrZeroDoubleRegister(1);
// Lazily materialize the canonical FP zero register if an operand is an
// immediate zero and it has not been set yet.
3442 if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
3443 !__ IsDoubleZeroRegSet()) {
3444 __ Move(kDoubleRegZero, 0.0);
3447 FlagsConditionToConditionCmpFPU(predicate, condition);
3448 if (kArchVariant != kMips64r6) {
3449 __ li(result, Operand(1));
3451 __ Movf(result, zero_reg);
3453 __ Movt(result, zero_reg);
3456 if (instr->arch_opcode() == kMips64CmpD) {
3457 __ dmfc1(result, kDoubleCompareReg);
3459 DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
3460 __ mfc1(result, kDoubleCompareReg);
3463 __ And(result, result, 1);
3465 __ Addu(result, result, 1);
// Unhandled opcode: report it (unimplemented fall-through).
3470 PrintF(
"AssembleArchBranch Unimplemented arch_opcode is : %d\n",
3471 instr->arch_opcode());
3477 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
3478 MipsOperandConverter
i(
this, instr);
3479 Register input =
i.InputRegister(0);
3480 std::vector<std::pair<int32_t, Label*>> cases;
3481 for (
size_t index = 2; index < instr->InputCount(); index += 2) {
3482 cases.push_back({
i.InputInt32(index + 0), GetLabel(
i.InputRpo(index + 1))});
3484 AssembleArchBinarySearchSwitchRange(input,
i.InputRpo(1), cases.data(),
3485 cases.data() + cases.size());
3488 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
3489 MipsOperandConverter
i(
this, instr);
3490 Register input =
i.InputRegister(0);
3491 for (
size_t index = 2; index < instr->InputCount(); index += 2) {
3492 __ li(kScratchReg, Operand(
i.InputInt32(index + 0)));
3493 __ Branch(GetLabel(
i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
3495 AssembleArchJump(
i.InputRpo(1));
3498 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
3499 MipsOperandConverter
i(
this, instr);
3500 Register input =
i.InputRegister(0);
3501 size_t const case_count = instr->InputCount() - 2;
3503 __ Branch(GetLabel(
i.InputRpo(1)), hs, input, Operand(case_count));
3504 __ GenerateSwitchTable(input, case_count, [&
i,
this](
size_t index) {
3505 return GetLabel(
i.InputRpo(index + 2));
3509 void CodeGenerator::FinishFrame(Frame* frame) {
3510 auto call_descriptor = linkage()->GetIncomingDescriptor();
3512 const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3513 if (saves_fpu != 0) {
3514 int count = base::bits::CountPopulation(saves_fpu);
3515 DCHECK_EQ(kNumCalleeSavedFPU, count);
3516 frame->AllocateSavedCalleeRegisterSlots(count *
3517 (kDoubleSize / kPointerSize));
3520 const RegList saves = call_descriptor->CalleeSavedRegisters();
3522 int count = base::bits::CountPopulation(saves);
3523 DCHECK_EQ(kNumCalleeSaved, count + 1);
3524 frame->AllocateSavedCalleeRegisterSlots(count);
// Builds the stack frame for the current code object: prologue (per call
// descriptor kind), optional OSR entry point, bulk stack shrink, and
// callee-saved register pushes.
// NOTE(review): extraction appears to have dropped lines in this block
// (e.g. the C-function prologue body and several closing braces); verify
// against the upstream source before editing.
3528 void CodeGenerator::AssembleConstructFrame() {
3529 auto call_descriptor = linkage()->GetIncomingDescriptor();
3531 if (frame_access_state()->has_frame()) {
3532 if (call_descriptor->IsCFunctionCall()) {
3535 }
else if (call_descriptor->IsJSFunctionCall()) {
3537 if (call_descriptor->PushArgumentCount()) {
3538 __ Push(kJavaScriptCallArgCountRegister);
3541 __ StubPrologue(info()->GetOutputStackFrameType());
3542 if (call_descriptor->IsWasmFunctionCall()) {
3543 __ Push(kWasmInstanceRegister);
// Import wrappers receive a Tuple2 in kWasmInstanceRegister: unpack the
// callable (value 2) into kJSFunctionRegister and the real instance
// (value 1) back into kWasmInstanceRegister before pushing it.
3544 }
else if (call_descriptor->IsWasmImportWrapper()) {
3549 __ ld(kJSFunctionRegister,
3550 FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
3551 __ ld(kWasmInstanceRegister,
3552 FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
3553 __ Push(kWasmInstanceRegister);
3558 int shrink_slots = frame()->GetTotalFrameSlotCount() -
3559 call_descriptor->CalculateFixedFrameSize();
// OSR entry: the unoptimized frame already occupies part of this frame,
// so its slots are subtracted from the shrink amount.
3561 if (info()->is_osr()) {
3563 __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
3569 if (FLAG_code_comments) __ RecordComment(
"-- OSR entrypoint --");
3570 osr_pc_offset_ = __ pc_offset();
3571 shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
3572 ResetSpeculationPoison();
3575 const RegList saves = call_descriptor->CalleeSavedRegisters();
3576 const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3577 const int returns = frame()->GetReturnSlotCount();
// Saved registers and return slots are allocated separately below, so
// they are excluded from the bulk sp adjustment.
3580 shrink_slots -= base::bits::CountPopulation(saves);
3581 shrink_slots -= base::bits::CountPopulation(saves_fpu);
3582 shrink_slots -= returns;
3583 if (shrink_slots > 0) {
3584 __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
3587 if (saves_fpu != 0) {
3589 __ MultiPushFPU(saves_fpu);
3590 DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
3595 __ MultiPush(saves);
3596 DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
// Reserve space for return value slots last.
3601 __ Dsubu(sp, sp, Operand(returns * kPointerSize));
// Tears down the frame and returns, additionally popping |pop| stack
// parameter slots (given either as an immediate constant or a register).
// NOTE(review): extraction appears to have dropped lines here (e.g. the
// GP-register MultiPop and several closing braces); verify against the
// upstream source before editing.
3605 void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
3606 auto call_descriptor = linkage()->GetIncomingDescriptor();
// Free the return value slots reserved by the prologue.
3608 const int returns = frame()->GetReturnSlotCount();
3610 __ Daddu(sp, sp, Operand(returns * kPointerSize));
// Restore callee-saved registers in reverse order of the prologue pushes.
3614 const RegList saves = call_descriptor->CalleeSavedRegisters();
3620 const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
3621 if (saves_fpu != 0) {
3622 __ MultiPopFPU(saves_fpu);
3625 MipsOperandConverter g(
this,
nullptr);
3626 if (call_descriptor->IsCFunctionCall()) {
3627 AssembleDeconstructFrame();
3628 }
else if (frame_access_state()->has_frame()) {
// When the pop count is a zero immediate, route all returns through one
// shared return label so the frame teardown is emitted only once.
3631 if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
3632 if (return_label_.is_bound()) {
3633 __ Branch(&return_label_);
3636 __ bind(&return_label_);
3637 AssembleDeconstructFrame();
3640 AssembleDeconstructFrame();
// Pop the fixed stack parameters plus the dynamic |pop| amount; a
// register pop count is scaled to bytes and added to sp directly.
3643 int pop_count =
static_cast<int>(call_descriptor->StackParameterCount());
3644 if (pop->IsImmediate()) {
3645 pop_count += g.ToConstant(pop).ToInt32();
3647 Register pop_reg = g.ToRegister(pop);
3648 __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
3649 __ Daddu(sp, sp, pop_reg);
3651 if (pop_count != 0) {
3652 __ DropAndRet(pop_count);
3658 void CodeGenerator::FinishCode() {}
// Emits a move from |source| to |destination| for the gap resolver,
// dispatching on operand kind: register, stack slot, constant, FP register
// (including Simd128/MSA), or FP stack slot.
// NOTE(review): extraction appears to have dropped lines in this block
// (switch 'break's, some declarations such as the constant 'dst' and
// 'RootIndex index', and several closing braces); verify against the
// upstream source before editing.
3660 void CodeGenerator::AssembleMove(InstructionOperand* source,
3661 InstructionOperand* destination) {
3662 MipsOperandConverter g(
this,
nullptr);
// GP register source: move to register or spill (Sd) to a stack slot.
3665 if (source->IsRegister()) {
3666 DCHECK(destination->IsRegister() || destination->IsStackSlot());
3667 Register src = g.ToRegister(source);
3668 if (destination->IsRegister()) {
3669 __ mov(g.ToRegister(destination), src);
3671 __ Sd(src, g.ToMemOperand(destination));
// Stack slot source: reload into a register, or memory-to-memory via
// kScratchReg.
3673 }
else if (source->IsStackSlot()) {
3674 DCHECK(destination->IsRegister() || destination->IsStackSlot());
3675 MemOperand src = g.ToMemOperand(source);
3676 if (destination->IsRegister()) {
3677 __ Ld(g.ToRegister(destination), src);
3679 Register temp = kScratchReg;
3681 __ Sd(temp, g.ToMemOperand(destination));
// Constant source: materialize into the destination register (or
// kScratchReg when spilling to a stack slot), per constant type.
3683 }
else if (source->IsConstant()) {
3684 Constant src = g.ToConstant(source);
3685 if (destination->IsRegister() || destination->IsStackSlot()) {
3687 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
3688 switch (src.type()) {
3689 case Constant::kInt32:
3690 __ li(dst, Operand(src.ToInt32()));
3692 case Constant::kFloat32:
3693 __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
3695 case Constant::kInt64:
// WASM references carry a relocation mode alongside the value.
3696 if (RelocInfo::IsWasmReference(src.rmode())) {
3697 __ li(dst, Operand(src.ToInt64(), src.rmode()));
3699 __ li(dst, Operand(src.ToInt64()));
3702 case Constant::kFloat64:
3703 __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
3705 case Constant::kExternalReference:
3706 __ li(dst, src.ToExternalReference());
3708 case Constant::kDelayedStringConstant:
3709 __ li(dst, src.ToDelayedStringConstant());
3711 case Constant::kHeapObject: {
// Prefer a root-table load when the object is a root constant.
3712 Handle<HeapObject> src_object = src.ToHeapObject();
3714 if (IsMaterializableFromRoot(src_object, &index)) {
3715 __ LoadRoot(dst, index);
3717 __ li(dst, src_object);
3721 case Constant::kRpoNumber:
3725 if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
// Float32 constant into an FP destination: store the bit pattern for a
// stack slot (zero fast-path), or Move into a single register.
3726 }
else if (src.type() == Constant::kFloat32) {
3727 if (destination->IsFPStackSlot()) {
3728 MemOperand dst = g.ToMemOperand(destination);
3729 if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
3730 __ Sd(zero_reg, dst);
3732 __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
3733 __ Sd(kScratchReg, dst);
3736 DCHECK(destination->IsFPRegister());
3737 FloatRegister dst = g.ToSingleRegister(destination);
3738 __ Move(dst, src.ToFloat32());
// Float64 constant: materialize into the FP destination register (or
// kScratchDoubleReg when spilling) and store if needed.
3741 DCHECK_EQ(Constant::kFloat64, src.type());
3742 DoubleRegister dst = destination->IsFPRegister()
3743 ? g.ToDoubleRegister(destination)
3744 : kScratchDoubleReg;
3745 __ Move(dst, src.ToFloat64().value());
3746 if (destination->IsFPStackSlot()) {
3747 __ Sdc1(dst, g.ToMemOperand(destination));
// FP register source: MSA (Simd128) moves use move_v/st_b under a
// MIPS_SIMD feature scope; plain doubles use Move/Sdc1.
3750 }
else if (source->IsFPRegister()) {
3751 MachineRepresentation rep = LocationOperand::cast(source)->representation();
3752 if (rep == MachineRepresentation::kSimd128) {
3753 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3754 MSARegister src = g.ToSimd128Register(source);
3755 if (destination->IsSimd128Register()) {
3756 MSARegister dst = g.ToSimd128Register(destination);
3757 __ move_v(dst, src);
3759 DCHECK(destination->IsSimd128StackSlot());
3760 __ st_b(src, g.ToMemOperand(destination));
3763 FPURegister src = g.ToDoubleRegister(source);
3764 if (destination->IsFPRegister()) {
3765 FPURegister dst = g.ToDoubleRegister(destination);
3768 DCHECK(destination->IsFPStackSlot());
3769 __ Sdc1(src, g.ToMemOperand(destination));
// FP stack slot source: reload into an FP/MSA register, or move memory
// to memory through the scratch FP/MSA register.
3772 }
else if (source->IsFPStackSlot()) {
3773 DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
3774 MemOperand src = g.ToMemOperand(source);
3775 MachineRepresentation rep = LocationOperand::cast(source)->representation();
3776 if (rep == MachineRepresentation::kSimd128) {
3777 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3778 if (destination->IsSimd128Register()) {
3779 __ ld_b(g.ToSimd128Register(destination), src);
3781 DCHECK(destination->IsSimd128StackSlot());
3782 MSARegister temp = kSimd128ScratchReg;
3784 __ st_b(temp, g.ToMemOperand(destination));
3787 if (destination->IsFPRegister()) {
3788 __ Ldc1(g.ToDoubleRegister(destination), src);
3790 DCHECK(destination->IsFPStackSlot());
3791 FPURegister temp = kScratchDoubleReg;
3793 __ Sdc1(temp, g.ToMemOperand(destination));
// Emits a swap of |source| and |destination| for the gap resolver, using
// the scratch GP/FP/MSA registers as temporaries.
// NOTE(review): extraction appears to have dropped lines in this block
// (several of the actual swap instruction sequences and closing braces);
// verify against the upstream source before editing.
3801 void CodeGenerator::AssembleSwap(InstructionOperand* source,
3802 InstructionOperand* destination) {
3803 MipsOperandConverter g(
this,
nullptr);
// GP register <-> register or stack slot.
3806 if (source->IsRegister()) {
3808 Register temp = kScratchReg;
3809 Register src = g.ToRegister(source);
3810 if (destination->IsRegister()) {
3811 Register dst = g.ToRegister(destination);
3816 DCHECK(destination->IsStackSlot());
3817 MemOperand dst = g.ToMemOperand(destination);
// Stack slot <-> stack slot, via two GP scratch registers.
3822 }
else if (source->IsStackSlot()) {
3823 DCHECK(destination->IsStackSlot());
3824 Register temp_0 = kScratchReg;
3825 Register temp_1 = kScratchReg2;
3826 MemOperand src = g.ToMemOperand(source);
3827 MemOperand dst = g.ToMemOperand(destination);
// FP register source: MSA (Simd128) swaps use move_v through the MSA
// scratch register; plain doubles go through kScratchDoubleReg.
3832 }
else if (source->IsFPRegister()) {
3833 MachineRepresentation rep = LocationOperand::cast(source)->representation();
3834 if (rep == MachineRepresentation::kSimd128) {
3835 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3836 MSARegister temp = kSimd128ScratchReg;
3837 MSARegister src = g.ToSimd128Register(source);
3838 if (destination->IsSimd128Register()) {
3839 MSARegister dst = g.ToSimd128Register(destination);
3840 __ move_v(temp, src);
3841 __ move_v(src, dst);
3842 __ move_v(dst, temp);
3844 DCHECK(destination->IsSimd128StackSlot());
3845 MemOperand dst = g.ToMemOperand(destination);
3846 __ move_v(temp, src);
3851 FPURegister temp = kScratchDoubleReg;
3852 FPURegister src = g.ToDoubleRegister(source);
3853 if (destination->IsFPRegister()) {
3854 FPURegister dst = g.ToDoubleRegister(destination);
3859 DCHECK(destination->IsFPStackSlot());
3860 MemOperand dst = g.ToMemOperand(destination);
// FP stack slot <-> FP stack slot: hold one side in an FP/MSA scratch
// register and shuttle the other side word-by-word (kIntSize chunks)
// through the GP scratch register.
3866 }
else if (source->IsFPStackSlot()) {
3867 DCHECK(destination->IsFPStackSlot());
3868 Register temp_0 = kScratchReg;
3869 MemOperand src0 = g.ToMemOperand(source);
3870 MemOperand src1(src0.rm(), src0.offset() + kIntSize);
3871 MemOperand dst0 = g.ToMemOperand(destination);
3872 MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
3873 MachineRepresentation rep = LocationOperand::cast(source)->representation();
3874 if (rep == MachineRepresentation::kSimd128) {
// Simd128 slots are four 32-bit words.
3875 MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize);
3876 MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize);
3877 MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize);
3878 MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize);
3879 CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
3880 MSARegister temp_1 = kSimd128ScratchReg;
3881 __ ld_b(temp_1, dst0);
3882 __ Lw(temp_0, src0);
3883 __ Sw(temp_0, dst0);
3884 __ Lw(temp_0, src1);
3885 __ Sw(temp_0, dst1);
3886 __ Lw(temp_0, src2);
3887 __ Sw(temp_0, dst2);
3888 __ Lw(temp_0, src3);
3889 __ Sw(temp_0, dst3);
3890 __ st_b(temp_1, src0);
3892 FPURegister temp_1 = kScratchDoubleReg;
3893 __ Ldc1(temp_1, dst0);
3894 __ Lw(temp_0, src0);
3895 __ Sw(temp_0, dst0);
3896 __ Lw(temp_0, src1);
3897 __ Sw(temp_0, dst1);
3898 __ Sdc1(temp_1, src0);
// NOTE(review): the body of this function is not visible in this chunk --
// extraction appears to have dropped it. Presumably it is trivial because
// jump tables are emitted inline by AssembleArchTableSwitch on this
// architecture; verify against the upstream source.
3906 void CodeGenerator::AssembleJumpTable(Label** targets,
size_t target_count) {
3911 #undef ASSEMBLE_ATOMIC_LOAD_INTEGER 3912 #undef ASSEMBLE_ATOMIC_STORE_INTEGER 3913 #undef ASSEMBLE_ATOMIC_BINOP 3914 #undef ASSEMBLE_ATOMIC_BINOP_EXT 3915 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER 3916 #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT 3917 #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER 3918 #undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT 3919 #undef ASSEMBLE_IEEE754_BINOP 3920 #undef ASSEMBLE_IEEE754_UNOP