#include "src/compiler/backend/code-generator.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frame-constants.h"
#include "src/heap/heap-inl.h"
#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->

// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  DoubleRegister InputFloat32Register(size_t index) {
    return InputDoubleRegister(index).S();
  }

  DoubleRegister InputFloat64Register(size_t index) {
    return InputDoubleRegister(index);
  }

  DoubleRegister InputSimd128Register(size_t index) {
    return InputDoubleRegister(index).Q();
  }

  CPURegister InputFloat32OrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK_EQ(0, bit_cast<int32_t>(InputFloat32(index)));
      return wzr;
    }
    DCHECK(instr_->InputAt(index)->IsFPRegister());
    return InputDoubleRegister(index).S();
  }

  CPURegister InputFloat64OrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK_EQ(0, bit_cast<int64_t>(InputDouble(index)));
      return xzr;
    }
    DCHECK(instr_->InputAt(index)->IsDoubleRegister());
    return InputDoubleRegister(index);
  }

  size_t OutputCount() { return instr_->OutputCount(); }

  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }

  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }

  DoubleRegister OutputSimd128Register() { return OutputDoubleRegister().Q(); }
  Register InputRegister32(size_t index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputOrZeroRegister32(size_t index) {
    DCHECK(instr_->InputAt(index)->IsRegister() ||
           (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
    if (instr_->InputAt(index)->IsImmediate()) {
      return wzr;
    }
    return InputRegister32(index);
  }

  Register InputRegister64(size_t index) { return InputRegister(index); }

  Register InputOrZeroRegister64(size_t index) {
    DCHECK(instr_->InputAt(index)->IsRegister() ||
           (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
    if (instr_->InputAt(index)->IsImmediate()) {
      return xzr;
    }
    return InputRegister64(index);
  }

  Operand InputOperand(size_t index) {
    return ToOperand(instr_->InputAt(index));
  }

  Operand InputOperand64(size_t index) { return InputOperand(index); }

  Operand InputOperand32(size_t index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

  Register TempRegister32(size_t index) {
    return ToRegister(instr_->TempAt(index)).W();
  }
  Operand InputOperand2_32(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand32(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister32(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister32(index), UXTH);
      case kMode_Operand2_R_SXTB:
        return Operand(InputRegister32(index), SXTB);
      case kMode_Operand2_R_SXTH:
        return Operand(InputRegister32(index), SXTH);
      case kMode_Operand2_R_SXTW:
        return Operand(InputRegister32(index), SXTW);
      default:
        break;
    }
    UNREACHABLE();
  }

  Operand InputOperand2_64(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand64(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister64(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister64(index), UXTH);
      case kMode_Operand2_R_SXTB:
        return Operand(InputRegister64(index), SXTB);
      case kMode_Operand2_R_SXTH:
        return Operand(InputRegister64(index), SXTH);
      case kMode_Operand2_R_SXTW:
        return Operand(InputRegister64(index), SXTW);
      default:
        break;
    }
    UNREACHABLE();
  }
  MemOperand MemoryOperand(size_t index = 0) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
      case kMode_Operand2_R_SXTB:
      case kMode_Operand2_R_SXTH:
      case kMode_Operand2_R_SXTW:
        break;
      case kMode_Root:
        return MemOperand(kRootRegister, InputInt64(index));
      case kMode_Operand2_R_LSL_I:
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          LSL, InputInt32(index + 2));
      case kMode_MRI:
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
  }
  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }
  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        if (RelocInfo::IsWasmReference(constant.rmode())) {
          return Operand(constant.ToInt64(), constant.rmode());
        }
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(Operand::EmbeddedNumber(constant.ToFloat32()));
      case Constant::kFloat64:
        return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
      case Constant::kDelayedStringConstant:
        return Operand::EmbeddedStringConstant(
            constant.ToDelayedStringConstant());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
    }
    UNREACHABLE();
  }
  MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
  }

  MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    if (offset.from_frame_pointer()) {
      int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
      // Convert FP-relative accesses to SP-relative if the offset fits the
      // immediate forms that the load/store instructions can encode directly.
      if (Assembler::IsImmLSUnscaled(from_sp) ||
          Assembler::IsImmLSScaled(from_sp, 3)) {
        offset = FrameOffset::FromStackPointer(from_sp);
      }
    }
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
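
// Illustrative sketch of SlotToMemOperand (not part of the original file;
// the concrete numbers are hypothetical): for a slot at FP-relative offset
// -16 in a frame where SP sits 32 bytes below FP, from_sp = -16 + 32 = 16,
// which fits the unscaled immediate form, so the access is rewritten from
// [fp, #-16] to the cheaper [sp, #16].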

namespace {

class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode, StubCallMode stub_mode,
                       UnwindingInfoWriter* unwinding_info_writer)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        stub_mode_(stub_mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        unwinding_info_writer_(unwinding_info_writer),
        zone_(gen->zone()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlagClear(value_, scratch0_,
                          MemoryChunk::kPointersToHereAreInterestingMask,
                          exit());
    __ Add(scratch1_, object_, index_);
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Push(lr, padreg);
      unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
    }
    if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index; this will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
    } else {
      __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                             save_fp_mode);
    }
    if (must_save_lr_) {
      __ Pop(padreg, lr);
      unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
    }
  }

 private:
  Register const object_;
  Operand const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  StubCallMode const stub_mode_;
  bool must_save_lr_;
  UnwindingInfoWriter* const unwinding_info_writer_;
  Zone* zone_;
};
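
// Hedged usage sketch (not in the original source): the main-line store in
// kArchStoreWithWriteBarrier emits "Str value, [object, index]" followed by a
// CheckPageFlagSet that branches to ool->entry(); Generate() above then
// filters out Smi values and uninteresting pages before calling the
// RecordWrite stub, and control returns to the main path at ool->exit().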

Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kFloatLessThanOrUnordered:
      return lt;
    case kFloatGreaterThanOrEqual:
      return ge;
    case kFloatLessThanOrEqual:
      return ls;
    case kFloatGreaterThanOrUnordered:
      return hi;
    case kFloatLessThan:
      return lo;
    case kFloatGreaterThanOrEqualOrUnordered:
      return hs;
    case kFloatLessThanOrEqualOrUnordered:
      return le;
    case kFloatGreaterThan:
      return gt;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    case kPositiveOrZero:
      return pl;
    case kNegative:
      return mi;
  }
  UNREACHABLE();
}
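
// Note on the float mappings above: an AArch64 fcmp against a NaN sets C and
// V, so kFloatLessThanOrUnordered maps to lt (N != V), which is also taken
// for NaN inputs, while plain kFloatLessThan maps to lo (C clear), which is
// never taken for NaN inputs.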

void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
                                   InstructionCode opcode, Instruction* instr,
                                   Arm64OperandConverter& i) {
  const MemoryAccessMode access_mode =
      static_cast<MemoryAccessMode>(MiscField::decode(opcode));
  if (access_mode == kMemoryAccessPoisoned) {
    Register value = i.OutputRegister();
    Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
                                       : kSpeculationPoisonRegister.W();
    codegen->tasm()->And(value, value, Operand(poison));
  }
}

}  // namespace
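
// Minimal sketch of the poisoning idea (illustrative only): after a guarded
// load, "And value, value, kSpeculationPoisonRegister" leaves the value
// intact on the architecturally-taken path (poison == all ones) and zeroes
// it on a mis-speculated path (poison == 0), so speculative loads cannot
// leak data through dependent memory accesses.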

#define ASSEMBLE_SHIFT(asm_instr, width)                                    \
  do {                                                                      \
    if (instr->InputAt(1)->IsRegister()) {                                  \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),    \
                   i.InputRegister##width(1));                              \
    } else {                                                                \
      uint32_t imm =                                                        \
          static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),    \
                   imm % (width));                                          \
    }                                                                       \
  } while (0)

#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, reg)                   \
  do {                                                                 \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ asm_instr(i.Output##reg(), i.TempRegister(0));                  \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, reg)                  \
  do {                                                                 \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
    __ asm_instr(i.Input##reg(2), i.TempRegister(0));                  \
  } while (0)

#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr, reg)       \
  do {                                                                       \
    Label exchange;                                                          \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ Bind(&exchange);                                                      \
    __ load_instr(i.Output##reg(), i.TempRegister(0));                       \
    __ store_instr(i.TempRegister32(1), i.Input##reg(2), i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(1), &exchange);                                 \
  } while (0)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr,    \
                                                 ext, reg)                   \
  do {                                                                       \
    Label compareExchange;                                                   \
    Label exit;                                                              \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ Bind(&compareExchange);                                               \
    __ load_instr(i.Output##reg(), i.TempRegister(0));                       \
    __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext));                  \
    __ B(ne, &exit);                                                         \
    __ store_instr(i.TempRegister32(1), i.Input##reg(3), i.TempRegister(0)); \
    __ Cbnz(i.TempRegister32(1), &compareExchange);                          \
    __ Bind(&exit);                                                          \
  } while (0)

#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr, reg)       \
  do {                                                                       \
    Label binop;                                                             \
    __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));       \
    __ Bind(&binop);                                                         \
    __ load_instr(i.Output##reg(), i.TempRegister(0));                       \
    __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \
    __ store_instr(i.TempRegister32(2), i.Temp##reg(1), i.TempRegister(0));  \
    __ Cbnz(i.TempRegister32(2), &binop);                                    \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                        \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                         \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ Mov(sp, fp);
  __ Pop(fp, lr);

  unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}
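
// Example expansion (hand-written sketch, not compiler output): for
// kWord32AtomicExchangeWord32, ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr,
// Register32) emits roughly
//   Add   x_tmp0, x_base, x_index
//   bind &exchange:
//   ldaxr w_out, [x_tmp0]
//   stlxr w_tmp1, w_new, [x_tmp0]
//   cbnz  w_tmp1, &exchange
// i.e. a classic load-acquire/store-release retry loop.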

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Cmp(scratch1,
         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ B(ne, &done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include the receiver).
  Register caller_args_count_reg = scratch1;
  __ Ldr(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ Bind(&done);
}

namespace {

void AdjustStackPointerForTailCall(TurboAssembler* tasm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  DCHECK_EQ(stack_slot_delta % 2, 0);
  if (stack_slot_delta > 0) {
    tasm->Claim(stack_slot_delta);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    tasm->Drop(-stack_slot_delta);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace
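
// Worked example (hypothetical numbers): with GetSPToFPSlotCount() == 6 and
// kFixedSlotCountAboveFp == 2, current_sp_offset is 8; a tail call needing
// new_slot_above_sp == 4 gives stack_slot_delta == -4, so four slots are
// dropped. The DCHECK enforces the two-slot (16-byte) stack alignment that
// arm64 requires.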

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  DCHECK_EQ(first_unused_stack_slot % 2, 0);
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot);
  DCHECK(instr->IsTailCall());
  InstructionOperandConverter g(this, instr);
  int optional_padding_slot = g.InputInt32(instr->InputCount() - 2);
  if (optional_padding_slot % 2) {
    __ Poke(padreg, optional_padding_slot * kPointerSize);
  }
}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  UseScratchRegisterScope temps(tasm());
  Register scratch = temps.AcquireX();
  __ ComputeCodeStartAddress(scratch);
  __ cmp(scratch, kJavaScriptCallCodeStartRegister);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
// read the kMarkedForDeoptimizationBit from the flags in the referenced
// {CodeDataContainer} object, and jump to the builtin if it is set.
void CodeGenerator::BailoutIfDeoptimized() {
  UseScratchRegisterScope temps(tasm());
  Register scratch = temps.AcquireX();
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ Ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ Ldr(scratch,
         FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
  Label not_deoptimized;
  __ Tbz(scratch, Code::kMarkedForDeoptimizationBit, &not_deoptimized);
  // Ensure we're not serializing (otherwise we'd need to use an indirection
  // to access the builtin below).
  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
  Handle<Code> code = isolate()->builtins()->builtin_handle(
      Builtins::kCompileLazyDeoptimizedCode);
  __ Jump(code, RelocInfo::CODE_TARGET);
  __ Bind(&not_deoptimized);
}

void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  UseScratchRegisterScope temps(tasm());
  Register scratch = temps.AcquireX();

  // Set a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  __ ComputeCodeStartAddress(scratch);
  __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
  __ Csetm(kSpeculationPoisonRegister, eq);
  __ Csdb();
}

void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  UseScratchRegisterScope temps(tasm());
  Register scratch = temps.AcquireX();

  __ Mov(scratch, sp);
  __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ And(scratch, scratch, kSpeculationPoisonRegister);
  __ Mov(sp, scratch);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
        __ Call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallWasmFunction: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Call(wasm_code, constant.rmode());
      } else {
        Register target = i.InputRegister(0);
        __ Call(target);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ Add(reg, reg, Code::kHeaderSize - kHeapObjectTag);
        __ Jump(reg);
      }
      unwinding_info_writer_.MarkBlockWillExit();
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallWasm: {
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
        Address wasm_code = static_cast<Address>(constant.ToInt64());
        __ Jump(wasm_code, constant.rmode());
      } else {
        Register target = i.InputRegister(0);
        __ Jump(target);
      }
      unwinding_info_writer_.MarkBlockWillExit();
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      Register reg = i.InputRegister(0);
      DCHECK_IMPLIES(
          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
      __ Jump(reg);
      unwinding_info_writer_.MarkBlockWillExit();
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(tasm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, AbortReason::kWrongFunctionContext);
      }
      static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
      __ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
      __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(x2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction:
      // We don't need kArchPrepareCallCFunction on arm64 as the instruction
      // selector has already performed a Claim to reserve space on the stack.
      // Frame alignment is always 16 bytes, and the stack pointer is already
      // 16-byte aligned, therefore we do not need to align the stack pointer
      // by an unknown value, and it is safe to continue accessing the frame
      // via the stack pointer.
      UNREACHABLE();
      break;
    case kArchSaveCallerRegisters: {
      fp_mode_ =
          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
      DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
      // kReturnRegister0 should have been saved before entering the stub.
      int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
      DCHECK_EQ(0, bytes % kPointerSize);
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
      DCHECK(!caller_registers_saved_);
      caller_registers_saved_ = true;
      break;
    }
    case kArchRestoreCallerRegisters: {
      DCHECK(fp_mode_ ==
             static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
      DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
      // Don't overwrite the returned value.
      int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
      frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      DCHECK(caller_registers_saved_);
      caller_registers_saved_ = false;
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters, 0);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters, 0);
      }
      frame_access_state()->SetFrameAccessToDefault();
      // Ideally, we should decrement SP delta to match the change of stack
      // pointer in CallCFunction. However, for certain architectures (e.g.
      // ARM), there may be more strict alignment requirements, causing the
      // old SP to be saved on the stack. In those cases, we cannot calculate
      // the SP delta statically.
      frame_access_state()->ClearSPDelta();
      if (caller_registers_saved_) {
        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
        // Here, we assume the sequence to be:
        //   kArchSaveCallerRegisters;
        //   kArchCallCFunction;
        //   kArchRestoreCallerRegisters;
        int bytes =
            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
        frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
      }
      break;
    }
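      // Re-sync illustration (assumed typical sequence, hypothetical
      // numbers): if PushCallerSaved spilled ten X registers (80 bytes),
      // kArchSaveCallerRegisters recorded 80 / kPointerSize = 10 slots;
      // ClearSPDelta above wiped that, so the same quantity is recomputed
      // via RequiredStackSizeForCallerSaved so that the later
      // kArchRestoreCallerRegisters sees a consistent delta.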
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchDebugAbort:
      DCHECK(i.InputRegister(0).is(x1));
      if (!frame_access_state()->has_frame()) {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(tasm(), StackFrame::NONE);
        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
                RelocInfo::CODE_TARGET);
      } else {
        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
                RelocInfo::CODE_TARGET);
      }
      __ Debug("kArchDebugAbort", 0, BREAK);
      unwinding_info_writer_.MarkBlockWillExit();
      break;
    case kArchDebugBreak:
      __ Debug("kArchDebugBreak", 0, BREAK);
      break;
    case kArchComment:
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
      break;
    case kArchThrowTerminator:
      unwinding_info_writer_.MarkBlockWillExit();
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      CodeGenResult result =
          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
      if (result != kSuccess) return result;
      unwinding_info_writer_.MarkBlockWillExit();
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      Register object = i.InputRegister(0);
      Operand index(0);
      if (addressing_mode == kMode_MRI) {
        index = Operand(i.InputInt64(1));
      } else {
        DCHECK_EQ(addressing_mode, kMode_MRR);
        index = Operand(i.InputRegister(1));
      }
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(
          this, object, index, value, scratch0, scratch1, mode,
          DetermineStubCallMode(), &unwinding_info_writer_);
      __ Str(value, MemOperand(object, index));
      __ CheckPageFlagSet(object, scratch0,
                          MemoryChunk::kPointersFromHereAreInterestingMask,
                          ool->entry());
      __ Bind(ool->exit());
      break;
    }
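      // Emitted-sequence sketch (hedged): the fast path is a single
      //   Str value, [object, index]
      // plus one page-flag test; only stores whose destination page has
      // kPointersFromHereAreInterestingMask set divert to the out-of-line
      // RecordWrite call, so most stores never take the slow path.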
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base = offset.from_stack_pointer() ? sp : fp;
      __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kArm64Float32RoundDown:
      __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundDown:
      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundUp:
      __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundUp:
      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTiesAway:
      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundTruncate:
      __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundTruncate:
      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundTiesEven:
      __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundTiesEven:
      __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64And:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        // The ands instruction only sets N and Z, so only the following
        // conditions make sense.
        DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
               FlagsConditionField::decode(opcode) == kNotEqual ||
               FlagsConditionField::decode(opcode) == kPositiveOrZero ||
               FlagsConditionField::decode(opcode) == kNegative);
        __ Ands(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64And32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        // The ands instruction only sets N and Z, so only the following
        // conditions make sense.
        DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
               FlagsConditionField::decode(opcode) == kNotEqual ||
               FlagsConditionField::decode(opcode) == kPositiveOrZero ||
               FlagsConditionField::decode(opcode) == kNegative);
        __ Ands(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64Bic:
      __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Bic32:
      __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Smull:
      __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Umull:
      __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Madd:
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Madd32:
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Msub:
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Msub32:
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Mneg:
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mneg32:
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0),
              i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0),
              i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0),
              i.InputRegister32(1));
      break;
    case kArm64Imod: {
      UseScratchRegisterScope scope(tasm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1),
              i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(tasm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(tasm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1),
              i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(tasm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
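    // The div/Msub pairs above implement the C-style remainder
    //   out = lhs - (lhs / rhs) * rhs;
    // e.g. for lhs = -7, rhs = 2 in the signed case: temp = -3 and
    // out = -7 - (-3 * 2) = -1, matching truncated-division semantics
    // (illustrative values only).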
    case kArm64Not:
      __ Mvn(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Orn:
      __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Orn32:
      __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Eor:
      __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Eor32:
      __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Eon:
      __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Eon32:
      __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Sub:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64Lsl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Lsl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Lsr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Lsr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Asr:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Asr32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtb32:
      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxth32:
      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtb:
      __ Sxtb(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Sxth:
      __ Sxth(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Sbfx32:
      __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
              i.InputInt5(2));
      break;
    case kArm64Ubfx:
      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
              i.InputInt32(2));
      break;
    case kArm64Ubfx32:
      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
              i.InputInt32(2));
      break;
    case kArm64Ubfiz32:
      __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
               i.InputInt5(2));
      break;
    case kArm64Bfi:
      __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
             i.InputInt6(3));
      break;
    case kArm64TestAndBranch32:
    case kArm64TestAndBranch:
      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
      break;
    case kArm64CompareAndBranch32:
    case kArm64CompareAndBranch:
      // Pseudo instructions turned into cbz/cbnz in AssembleArchBranch.
      break;
    case kArm64Claim: {
      int count = i.InputInt32(0);
      DCHECK_EQ(count % 2, 0);
      __ AssertSpAligned();
      if (count > 0) {
        __ Claim(count);
        frame_access_state()->IncreaseSPDelta(count);
      }
      break;
    }
    case kArm64Poke: {
      Operand operand(i.InputInt32(1) * kPointerSize);
      if (instr->InputAt(0)->IsSimd128Register()) {
        __ Poke(i.InputSimd128Register(0), operand);
      } else if (instr->InputAt(0)->IsFPRegister()) {
        __ Poke(i.InputFloat64Register(0), operand);
      } else {
        __ Poke(i.InputOrZeroRegister64(0), operand);
      }
      break;
    }
    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
      if (instr->InputAt(0)->IsFPRegister()) {
        __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
                    slot * kPointerSize);
      } else {
        __ PokePair(i.InputRegister(1), i.InputRegister(0),
                    slot * kPointerSize);
      }
      break;
    }
    case kArm64Peek: {
      int reverse_slot = i.InputInt32(0);
      int offset =
          FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
      if (instr->OutputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ Ldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
        } else {
          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
          __ Ldr(i.OutputFloatRegister(), MemOperand(fp, offset));
        }
      } else {
        __ Ldr(i.OutputRegister(), MemOperand(fp, offset));
      }
      break;
    }
    case kArm64Clz:
      __ Clz(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Clz32:
      __ Clz(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Rbit:
      __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Rbit32:
      __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Rev:
      __ Rev(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Rev32:
      __ Rev(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Cmp:
      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Float32Cmp:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK_EQ(0.0f, i.InputFloat32(1));
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
      }
      break;
    case kArm64Float32Add:
      __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Sub:
      __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Mul:
      __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Div:
      __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Abs:
      __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float32Neg:
      __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float32Sqrt:
      __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64Cmp:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK_EQ(0.0, i.InputDouble(1));
        __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
      }
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly.
      FrameScope scope(tasm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
      break;
    }
    case kArm64Float32Max: {
      __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    }
    case kArm64Float64Max: {
      __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    }
    case kArm64Float32Min: {
      __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    }
    case kArm64Float64Min: {
      __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    }
    case kArm64Float64Abs:
      __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Neg:
      __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      __ Cmn(i.OutputRegister32(), 1);
      __ Csinc(i.OutputRegister32(), i.OutputRegister32(),
               i.OutputRegister32(), vc);
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
      __ Cmn(i.OutputRegister32(), 1);
      __ Adc(i.OutputRegister32(), i.OutputRegister32(), Operand(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToInt64:
      __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        // Check for inputs below INT64_MIN and NaN.
        __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
        // Check overflow.
        // -1 value is used to indicate a possible overflow which will occur
        // when subtracting (-1) from the previous result.
        __ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
        __ Cset(i.OutputRegister(1), vc);
      }
      break;
    case kArm64Float64ToInt64:
      __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        // See kArm64Float32ToInt64 for a detailed description.
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
        __ Ccmp(i.OutputRegister(0), -1, VFlag, ge);
        __ Cset(i.OutputRegister(1), vc);
      }
      break;
    case kArm64Float32ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        // Check for inputs below -1.0 and NaN.
        __ Fcmp(i.InputFloat32Register(0), -1.0);
        __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
        __ Cset(i.OutputRegister(1), ne);
      }
      break;
    case kArm64Float64ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        // See kArm64Float32ToUint64 for a detailed description.
        __ Fcmp(i.InputDoubleRegister(0), -1.0);
        __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
        __ Cset(i.OutputRegister(1), ne);
      }
      break;
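    // How the 64-bit overflow checks above work (explanatory note): Fcvtzs
    // and Fcvtzu saturate, so the Fcmp plus conditional "Ccmp out, -1"
    // leave the relevant flag set exactly when the input was NaN, out of
    // range, or the result saturated; Cset then writes the second,
    // success-flag output.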
    case kArm64Int32ToFloat32:
      __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Int64ToFloat32:
      __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
      break;
    case kArm64Int64ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
      break;
    case kArm64Uint32ToFloat32:
      __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint64ToFloat32:
      __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
      break;
    case kArm64Uint64ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
      break;
    case kArm64Float64ExtractLowWord32:
      __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
      break;
    case kArm64Float64ExtractHighWord32:
      __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
      break;
    case kArm64Float64InsertLowWord32:
      DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
      __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
      break;
    case kArm64Float64InsertHighWord32:
      DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
      __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
      break;
    case kArm64Float64MoveU64:
      __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
      break;
    case kArm64Float64SilenceNaN:
      __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64U64MoveFloat64:
      __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64Strb:
      __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64Strh:
      __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      break;
    case kArm64Ldrsw:
      __ Ldrsw(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64StrW:
      __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
      break;
    case kArm64Str:
      __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      break;
    case kArm64LdrS:
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      break;
    case kArm64StrS:
      __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
      break;
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
      break;
    case kArm64LdrQ:
      __ Ldr(i.OutputSimd128Register(), i.MemoryOperand());
      break;
    case kArm64StrQ:
      __ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
      break;
    case kArm64DsbIsb:
      __ Dsb(FullSystem, BarrierAll);
      __ Isb();
      break;
    case kArchWordPoisonOnSpeculation:
      __ And(i.OutputRegister(0), i.InputRegister(0),
             Operand(kSpeculationPoisonRegister));
      break;
    case kWord32AtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicLoadUint8:
    case kArm64Word64AtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
      break;
    case kWord32AtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicLoadUint16:
    case kArm64Word64AtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
      break;
    case kWord32AtomicLoadWord32:
    case kArm64Word64AtomicLoadUint32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32);
      break;
    case kArm64Word64AtomicLoadUint64:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register);
      break;
    case kWord32AtomicStoreWord8:
    case kArm64Word64AtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32);
      break;
    case kWord32AtomicStoreWord16:
    case kArm64Word64AtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32);
      break;
    case kWord32AtomicStoreWord32:
    case kArm64Word64AtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32);
      break;
    case kArm64Word64AtomicStoreWord64:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
      break;
    case kWord32AtomicExchangeInt8:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicExchangeUint8:
    case kArm64Word64AtomicExchangeUint8:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
      break;
    case kWord32AtomicExchangeInt16:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicExchangeUint16:
    case kArm64Word64AtomicExchangeUint16:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
      break;
    case kWord32AtomicExchangeWord32:
    case kArm64Word64AtomicExchangeUint32:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
      break;
    case kArm64Word64AtomicExchangeUint64:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
      break;
    case kWord32AtomicCompareExchangeInt8:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
                                               Register32);
      __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicCompareExchangeUint8:
    case kArm64Word64AtomicCompareExchangeUint8:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
                                               Register32);
      break;
    case kWord32AtomicCompareExchangeInt16:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
                                               Register32);
      __ Sxth(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicCompareExchangeUint16:
    case kArm64Word64AtomicCompareExchangeUint16:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
                                               Register32);
      break;
    case kWord32AtomicCompareExchangeWord32:
    case kArm64Word64AtomicCompareExchangeUint32:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
      break;
    case kArm64Word64AtomicCompareExchangeUint64:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
      break;
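    // Hedged expansion sketch for the compare-exchange cases above
    // (hand-written, register names abbreviated):
    //   Add   x_tmp0, x_base, x_index
    //   bind &compareExchange:
    //   ldaxr out, [x_tmp0]
    //   cmp   out, expected        ; expected widened per the ext argument
    //   b.ne  &exit
    //   stlxr w_status, new, [x_tmp0]
    //   cbnz  w_status, &compareExchange
    //   bind &exit:
    // The extend mode (UXTB/UXTH/UXTW/UXTX) matches the comparison width to
    // the loaded value.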
#define ATOMIC_BINOP_CASE(op, inst)                          \
  case kWord32Atomic##op##Int8:                              \
    ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
    __ Sxtb(i.OutputRegister(0), i.OutputRegister(0));       \
    break;                                                   \
  case kWord32Atomic##op##Uint8:                             \
  case kArm64Word64Atomic##op##Uint8:                        \
    ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
    break;                                                   \
  case kWord32Atomic##op##Int16:                             \
    ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
    __ Sxth(i.OutputRegister(0), i.OutputRegister(0));       \
    break;                                                   \
  case kWord32Atomic##op##Uint16:                            \
  case kArm64Word64Atomic##op##Uint16:                       \
    ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
    break;                                                   \
  case kWord32Atomic##op##Word32:                            \
  case kArm64Word64Atomic##op##Uint32:                       \
    ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32);   \
    break;                                                   \
  case kArm64Word64Atomic##op##Uint64:                       \
    ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register);     \
    break;
      ATOMIC_BINOP_CASE(Add, Add)
      ATOMIC_BINOP_CASE(Sub, Sub)
      ATOMIC_BINOP_CASE(And, And)
      ATOMIC_BINOP_CASE(Or, Orr)
      ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP

#define SIMD_UNOP_CASE(Op, Instr, FORMAT)            \
  case Op:                                           \
    __ Instr(i.OutputSimd128Register().V##FORMAT(),  \
             i.InputSimd128Register(0).V##FORMAT()); \
    break;
#define SIMD_WIDENING_UNOP_CASE(Op, Instr, WIDE, NARROW) \
  case Op:                                               \
    __ Instr(i.OutputSimd128Register().V##WIDE(),        \
             i.InputSimd128Register(0).V##NARROW());     \
    break;
#define SIMD_BINOP_CASE(Op, Instr, FORMAT)           \
  case Op:                                           \
    __ Instr(i.OutputSimd128Register().V##FORMAT(),  \
             i.InputSimd128Register(0).V##FORMAT(),  \
             i.InputSimd128Register(1).V##FORMAT()); \
    break;

    case kArm64F32x4Splat: {
      __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(),
             0);
      break;
    }
    case kArm64F32x4ExtractLane: {
      __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
             i.InputInt8(1));
      break;
    }
    case kArm64F32x4ReplaceLane: {
      VRegister dst = i.OutputSimd128Register().V4S(),
                src1 = i.InputSimd128Register(0).V4S();
      if (!dst.is(src1)) {
        __ Mov(dst, src1);
      }
      __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
      break;
    }
      SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
      SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
      SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
      SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
      SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
      SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
      SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
      SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
    case kArm64F32x4Ne: {
      VRegister dst = i.OutputSimd128Register().V4S();
      __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
               i.InputSimd128Register(1).V4S());
      __ Mvn(dst, dst);
      break;
    }
    case kArm64F32x4Lt: {
      __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
               i.InputSimd128Register(0).V4S());
      break;
    }
    case kArm64F32x4Le: {
      __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
               i.InputSimd128Register(0).V4S());
      break;
    }
    case kArm64I32x4Splat: {
      __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
      break;
    }
    case kArm64I32x4ExtractLane: {
      __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
             i.InputInt8(1));
      break;
    }
    case kArm64I32x4ReplaceLane: {
      VRegister dst = i.OutputSimd128Register().V4S(),
                src1 = i.InputSimd128Register(0).V4S();
      if (!dst.is(src1)) {
        __ Mov(dst, src1);
      }
      __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
      break;
    }
      SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
      SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8Low, Sxtl, 4S, 4H);
      SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
      SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
    case kArm64I32x4Shl: {
      __ Shl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
             i.InputInt5(1));
      break;
    }
    case kArm64I32x4ShrS: {
      __ Sshr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
              i.InputInt5(1));
      break;
    }
      SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
      SIMD_BINOP_CASE(kArm64I32x4AddHoriz, Addp, 4S);
      SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
      SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
      SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
      SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
      SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
    case kArm64I32x4Ne: {
      VRegister dst = i.OutputSimd128Register().V4S();
      __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
              i.InputSimd128Register(1).V4S());
      __ Mvn(dst, dst);
      break;
    }
      SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
      SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
      SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
      SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
      SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
    case kArm64I32x4ShrU: {
      __ Ushr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
              i.InputInt5(1));
      break;
    }
      SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
      SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
      SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
      SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
    case kArm64I16x8Splat: {
      __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
      break;
    }
    case kArm64I16x8ExtractLane: {
      __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
              i.InputInt8(1));
      break;
    }
    case kArm64I16x8ReplaceLane: {
      VRegister dst = i.OutputSimd128Register().V8H(),
                src1 = i.InputSimd128Register(0).V8H();
      if (!dst.is(src1)) {
        __ Mov(dst, src1);
      }
      __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
      break;
    }
      SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16Low, Sxtl, 8H, 8B);
      SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
      SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
    case kArm64I16x8Shl: {
      __ Shl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
             i.InputInt5(1));
      break;
    }
    case kArm64I16x8ShrS: {
      __ Sshr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
              i.InputInt5(1));
      break;
    }
    case kArm64I16x8SConvertI32x4: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(tasm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst.is(src1)) {
        __ Mov(temp, src1.V4S());
        src1 = temp;
      }
      __ Sqxtn(dst.V4H(), src0.V4S());
      __ Sqxtn2(dst.V8H(), src1.V4S());
      break;
    }
      SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
      SIMD_BINOP_CASE(kArm64I16x8AddSaturateS, Sqadd, 8H);
      SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H);
      SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
      SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H);
      SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
      SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
      SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
      SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
    case kArm64I16x8Ne: {
      VRegister dst = i.OutputSimd128Register().V8H();
      __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
              i.InputSimd128Register(1).V8H());
      __ Mvn(dst, dst);
      break;
    }
      SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
      SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
    case kArm64I16x8UConvertI8x16Low: {
      __ Uxtl(i.OutputSimd128Register().V8H(),
              i.InputSimd128Register(0).V8B());
      break;
    }
    case kArm64I16x8UConvertI8x16High: {
      __ Uxtl2(i.OutputSimd128Register().V8H(),
               i.InputSimd128Register(0).V16B());
      break;
    }
    case kArm64I16x8ShrU: {
      __ Ushr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
              i.InputInt5(1));
      break;
    }
    case kArm64I16x8UConvertI32x4: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(tasm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst.is(src1)) {
        __ Mov(temp, src1.V4S());
        src1 = temp;
      }
      __ Uqxtn(dst.V4H(), src0.V4S());
      __ Uqxtn2(dst.V8H(), src1.V4S());
      break;
    }
      SIMD_BINOP_CASE(kArm64I16x8AddSaturateU, Uqadd, 8H);
      SIMD_BINOP_CASE(kArm64I16x8SubSaturateU, Uqsub, 8H);
      SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
      SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
      SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
      SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
    case kArm64I8x16Splat: {
      __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
      break;
    }
    case kArm64I8x16ExtractLane: {
      __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
              i.InputInt8(1));
      break;
    }
    case kArm64I8x16ReplaceLane: {
      VRegister dst = i.OutputSimd128Register().V16B(),
                src1 = i.InputSimd128Register(0).V16B();
      if (!dst.is(src1)) {
        __ Mov(dst, src1);
      }
      __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
      break;
    }
      SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
    case kArm64I8x16Shl: {
      __ Shl(i.OutputSimd128Register().V16B(),
             i.InputSimd128Register(0).V16B(), i.InputInt5(1));
      break;
    }
    case kArm64I8x16ShrS: {
      __ Sshr(i.OutputSimd128Register().V16B(),
              i.InputSimd128Register(0).V16B(), i.InputInt5(1));
      break;
    }
    case kArm64I8x16SConvertI16x8: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(tasm());
      VRegister temp = scope.AcquireV(kFormat8H);
      if (dst.is(src1)) {
        __ Mov(temp, src1.V8H());
        src1 = temp;
      }
      __ Sqxtn(dst.V8B(), src0.V8H());
      __ Sqxtn2(dst.V16B(), src1.V8H());
      break;
    }
      SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
      SIMD_BINOP_CASE(kArm64I8x16AddSaturateS, Sqadd, 16B);
      SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
      SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B);
      SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
      SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
      SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
      SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
    case kArm64I8x16Ne: {
      VRegister dst = i.OutputSimd128Register().V16B();
      __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
              i.InputSimd128Register(1).V16B());
      __ Mvn(dst, dst);
      break;
    }
      SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
      SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
    case kArm64I8x16ShrU: {
      __ Ushr(i.OutputSimd128Register().V16B(),
              i.InputSimd128Register(0).V16B(), i.InputInt5(1));
      break;
    }
    case kArm64I8x16UConvertI16x8: {
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
      UseScratchRegisterScope scope(tasm());
      VRegister temp = scope.AcquireV(kFormat8H);
      if (dst.is(src1)) {
        __ Mov(temp, src1.V8H());
        src1 = temp;
      }
      __ Uqxtn(dst.V8B(), src0.V8H());
      __ Uqxtn2(dst.V16B(), src1.V8H());
      break;
    }
      SIMD_BINOP_CASE(kArm64I8x16AddSaturateU, Uqadd, 16B);
      SIMD_BINOP_CASE(kArm64I8x16SubSaturateU, Uqsub, 16B);
      SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
      SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
      SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
      SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
    case kArm64S128Zero: {
      __ Movi(i.OutputSimd128Register().V16B(), 0);
      break;
    }
      SIMD_BINOP_CASE(kArm64S128And, And, 16B);
      SIMD_BINOP_CASE(kArm64S128Or, Orr, 16B);
      SIMD_BINOP_CASE(kArm64S128Xor, Eor, 16B);
      SIMD_UNOP_CASE(kArm64S128Not, Mvn, 16B);
    case kArm64S128Dup: {
      VRegister dst = i.OutputSimd128Register(),
                src = i.InputSimd128Register(0);
      int lanes = i.InputInt32(1);
      int index = i.InputInt32(2);
      switch (lanes) {
        case 4:
          __ Dup(dst.V4S(), src.V4S(), index);
          break;
        case 8:
          __ Dup(dst.V8H(), src.V8H(), index);
          break;
        case 16:
          __ Dup(dst.V16B(), src.V16B(), index);
          break;
        default:
          UNREACHABLE();
          break;
      }
      break;
    }
    case kArm64S128Select: {
      VRegister dst = i.OutputSimd128Register().V16B();
      DCHECK(dst.is(i.InputSimd128Register(0).V16B()));
      __ Bsl(dst, i.InputSimd128Register(1).V16B(),
             i.InputSimd128Register(2).V16B());
      break;
    }
    case kArm64S32x4Shuffle: {
      Simd128Register dst = i.OutputSimd128Register().V4S(),
                      src0 = i.InputSimd128Register(0).V4S(),
                      src1 = i.InputSimd128Register(1).V4S();
      // Check for in-place shuffles: copy an aliased input to a temporary so
      // it is not clobbered while writing the destination lane by lane.
      UseScratchRegisterScope scope(tasm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst.is(src0)) {
        __ Mov(temp, src0);
        src0 = temp;
      } else if (dst.is(src1)) {
        __ Mov(temp, src1);
        src1 = temp;
      }
      // Perform shuffle as a vmov per lane.
      int32_t shuffle = i.InputInt32(2);
      for (int i = 0; i < 4; i++) {
        VRegister src = src0;
        int lane = shuffle & 0x7;
        if (lane >= 4) {
          src = src1;
          lane &= 0x3;
        }
        __ Mov(dst, i, src, lane);
        shuffle >>= 8;
      }
      break;
    }
    SIMD_BINOP_CASE(kArm64S32x4ZipLeft, Zip1, 4S);
    SIMD_BINOP_CASE(kArm64S32x4ZipRight, Zip2, 4S);
    SIMD_BINOP_CASE(kArm64S32x4UnzipLeft, Uzp1, 4S);
    SIMD_BINOP_CASE(kArm64S32x4UnzipRight, Uzp2, 4S);
    SIMD_BINOP_CASE(kArm64S32x4TransposeLeft, Trn1, 4S);
    SIMD_BINOP_CASE(kArm64S32x4TransposeRight, Trn2, 4S);
    SIMD_BINOP_CASE(kArm64S16x8ZipLeft, Zip1, 8H);
    SIMD_BINOP_CASE(kArm64S16x8ZipRight, Zip2, 8H);
    SIMD_BINOP_CASE(kArm64S16x8UnzipLeft, Uzp1, 8H);
    SIMD_BINOP_CASE(kArm64S16x8UnzipRight, Uzp2, 8H);
    SIMD_BINOP_CASE(kArm64S16x8TransposeLeft, Trn1, 8H);
    SIMD_BINOP_CASE(kArm64S16x8TransposeRight, Trn2, 8H);
    SIMD_BINOP_CASE(kArm64S8x16ZipLeft, Zip1, 16B);
    SIMD_BINOP_CASE(kArm64S8x16ZipRight, Zip2, 16B);
    SIMD_BINOP_CASE(kArm64S8x16UnzipLeft, Uzp1, 16B);
    SIMD_BINOP_CASE(kArm64S8x16UnzipRight, Uzp2, 16B);
    SIMD_BINOP_CASE(kArm64S8x16TransposeLeft, Trn1, 16B);
    SIMD_BINOP_CASE(kArm64S8x16TransposeRight, Trn2, 16B);
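    // Ext concatenates the two source vectors and extracts sixteen bytes
    // starting at the immediate byte offset, i.e. a byte-wise funnel shift.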
    case kArm64S8x16Concat: {
      __ Ext(i.OutputSimd128Register().V16B(),
             i.InputSimd128Register(0).V16B(),
             i.InputSimd128Register(1).V16B(), i.InputInt4(2));
      break;
    }
    case kArm64S8x16Shuffle: {
      Simd128Register dst = i.OutputSimd128Register().V16B(),
                      src0 = i.InputSimd128Register(0).V16B(),
                      src1 = i.InputSimd128Register(1).V16B();
      // Unary shuffle table is in src0, binary shuffle table is in src0,
      // src1, which must be consecutive. A one-register Tbl uses byte
      // indices 0-15, a two-register Tbl indices 0-31.
      int64_t mask = 0;
      if (src0.is(src1)) {
        mask = 0x0F0F0F0F;
      } else {
        mask = 0x1F1F1F1F;
        DCHECK(AreConsecutive(src0, src1));
      }
      int64_t imm1 =
          (i.InputInt32(2) & mask) | ((i.InputInt32(3) & mask) << 32);
      int64_t imm2 =
          (i.InputInt32(4) & mask) | ((i.InputInt32(5) & mask) << 32);
      UseScratchRegisterScope scope(tasm());
      VRegister temp = scope.AcquireV(kFormat16B);
      __ Movi(temp, imm2, imm1);
      if (src0.is(src1)) {
        __ Tbl(dst, src0, temp.V16B());
      } else {
        __ Tbl(dst, src0, src1, temp.V16B());
      }
      break;
    }
    SIMD_UNOP_CASE(kArm64S32x2Reverse, Rev64, 4S);
    SIMD_UNOP_CASE(kArm64S16x4Reverse, Rev64, 8H);
    SIMD_UNOP_CASE(kArm64S16x2Reverse, Rev32, 8H);
    SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
    SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
    SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
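// AnyTrue reduces with unsigned max (non-zero iff any lane is non-zero) and
// AllTrue with unsigned min (non-zero iff all lanes are non-zero); Umov then
// moves the scalar result into a general-purpose register.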
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT)     \
  case Op: {                                               \
    UseScratchRegisterScope scope(tasm());                 \
    VRegister temp = scope.AcquireV(format);               \
    __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
    __ Umov(i.OutputRegister32(), temp, 0);                \
    break;                                                 \
  }
    SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
    SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
    SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
    SIMD_REDUCE_OP_CASE(kArm64S1x8AllTrue, Uminv, kFormatH, 8H);
    SIMD_REDUCE_OP_CASE(kArm64S1x16AnyTrue, Umaxv, kFormatB, 16B);
    SIMD_REDUCE_OP_CASE(kArm64S1x16AllTrue, Uminv, kFormatB, 16B);
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)

#undef SIMD_UNOP_CASE
#undef SIMD_WIDENING_UNOP_CASE
#undef SIMD_BINOP_CASE
#undef SIMD_REDUCE_OP_CASE

// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  FlagsCondition condition = branch->condition;
  ArchOpcode opcode = instr->arch_opcode();
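  // Compare-and-branch and test-and-branch opcodes are fused forms emitted by
  // the instruction selector: they map directly onto Cbz/Cbnz and Tbz/Tbnz
  // and never go through the flags register, which is also why they cannot
  // carry the branch-poisoning flags mode (see the DCHECKs below).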
  if (opcode == kArm64CompareAndBranch32) {
    DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister32(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister32(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64CompareAndBranch) {
    DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister64(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister64(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch32) {
    DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch) {
    DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    Condition cc = FlagsConditionToCondition(condition);
    __ B(cc, tlabel);
  }
  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
}
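// If control flow arrives here speculatively down a mispredicted branch, the
// negated condition holds in the flags, and the conditional move below then
// clears kSpeculationPoisonRegister; Csdb acts as the speculation barrier.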
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                            Instruction* instr) {
  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
    return;
  }

  condition = NegateFlagsCondition(condition);
  __ CmovX(kSpeculationPoisonRegister, xzr,
           FlagsConditionToCondition(condition));
  __ Csdb();
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
    void Generate() final {
      Arm64OperandConverter i(gen_, instr_);
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      if (trap_id == TrapId::kInvalid) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
        int pop_count =
            static_cast<int>(call_descriptor->StackParameterCount());
        pop_count += (pop_count & 1);  // align
        __ Drop(pop_count);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        // A direct call to a wasm runtime stub defined in this module.
        // Just encode the stub index. This will be patched when the code
        // is added to the native module and copied into wasm code space.
        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                              Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          // The trap code should never return.
          __ Brk(0);
        }
      }
    }
    Instruction* instr_;
    CodeGenerator* gen_;
  };
  auto ool = new (zone()) OutOfLineTrap(this, instr);
  Label* tlabel = ool->entry();
  Condition cc = FlagsConditionToCondition(condition);
  __ B(cc, tlabel);
}
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ Cset(reg, cc);
}
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp(input, i.InputInt32(index + 0));
    __ B(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}
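// The jump table is emitted inline as a sequence of branch instructions: Adr
// takes the table address, the index is scaled by four (one instruction per
// entry) and added, and Br jumps into the table. Pool emission is blocked so
// constant and veneer pools cannot be interleaved with the table entries.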
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(tasm());
  Register input = i.InputRegister32(0);
  Register temp = scope.AcquireX();
  size_t const case_count = instr->InputCount() - 2;
  Label table;
  __ Cmp(input, case_count);
  __ B(hs, GetLabel(i.InputRpo(1)));
  __ Adr(temp, &table);
  __ Add(temp, temp, Operand(input, UXTW, 2));
  __ Br(temp);
  __ StartBlockPools();
  __ Bind(&table);
  for (size_t index = 0; index < case_count; ++index) {
    __ B(GetLabel(i.InputRpo(index + 2)));
  }
  __ EndBlockPools();
}
void CodeGenerator::FinishFrame(Frame* frame) {
  frame->AlignFrame(16);
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
                                   call_descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
    DCHECK_EQ(saved_count % 2, 0);
    frame->AllocateSavedCalleeRegisterSlots(saved_count *
                                            (kDoubleSize / kPointerSize));
  }

  // Save registers.
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                call_descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    DCHECK_EQ(saved_count % 2, 0);
    frame->AllocateSavedCalleeRegisterSlots(saved_count);
  }
}
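// AArch64 mandates a 16-byte aligned stack pointer, so frames are padded to
// an even number of 8-byte slots and callee-saved registers are pushed and
// popped in pairs.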
void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  __ AssertSpAligned();

  // The frame has been previously padded in CodeGenerator::FinishFrame().
  DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
  int shrink_slots = frame()->GetTotalFrameSlotCount() -
                     call_descriptor->CalculateFixedFrameSize();

  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                call_descriptor->CalleeSavedRegisters());
  CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
                                   call_descriptor->CalleeSavedFPRegisters());
  // The number of slots for returns has to be even to ensure the correct
  // stack alignment.
  const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);

  if (frame_access_state()->has_frame()) {
    // Link the frame.
    if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
    } else {
      __ Push(lr, fp);
      __ Mov(fp, sp);
    }
    unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());

    // Create OSR entry if applicable.
    if (info()->is_osr()) {
      // TurboFan OSR-compiled functions cannot be entered directly.
      __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

      // Unoptimized code jumps directly to this entrypoint while the
      // unoptimized frame is still on the stack. Optimized code uses OSR
      // values directly from the unoptimized frame, so all that needs to be
      // done here is to allocate the remaining stack slots.
      if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
      osr_pc_offset_ = __ pc_offset();
      shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
      ResetSpeculationPoison();
    }

    if (info()->IsWasm() && shrink_slots > 128) {
      // For WebAssembly functions with big frames we have to do the stack
      // overflow check before we construct the frame. Otherwise we may not
      // have enough space on the stack to call the runtime for the stack
      // overflow.
      Label done;
      // If the frame is bigger than the stack, we throw the stack overflow
      // exception unconditionally. Thereby we can avoid the integer overflow
      // check in the condition code.
      if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
        UseScratchRegisterScope scope(tasm());
        Register scratch = scope.AcquireX();
        __ Ldr(scratch, FieldMemOperand(
                            kWasmInstanceRegister,
                            WasmInstanceObject::kRealStackLimitAddressOffset));
        __ Ldr(scratch, MemOperand(scratch));
        __ Add(scratch, scratch, shrink_slots * kPointerSize);
        __ Cmp(sp, scratch);
        __ B(hs, &done);
      }

      {
        // Finish the frame that hasn't been fully built yet.
        UseScratchRegisterScope temps(tasm());
        __ Claim(2);  // Claim extra slots for marker + instance.
        Register scratch = temps.AcquireX();
        __ Mov(scratch,
               StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
        __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
        __ Str(kWasmInstanceRegister,
               MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
      }

      __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
                                 WasmInstanceObject::kCEntryStubOffset));
      __ Mov(cp, Smi::zero());
      __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, x2);
      // We come from WebAssembly, there are no references for the GC.
      ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
      RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      if (FLAG_debug_code) {
        __ Brk(0);
      }
      __ Bind(&done);
    }

    // The callee-saved and return slots are claimed separately below.
    shrink_slots -= saves.Count();
    shrink_slots -= saves_fp.Count();
    shrink_slots -= returns;
    // Build remainder of frame, including accounting for and filling-in
    // frame-specific header information, i.e. claiming the extra slot that
    // other platforms explicitly push for STUB (code object) frames and
    // frames recording their argument count.
    switch (call_descriptor->kind()) {
      case CallDescriptor::kCallJSFunction:
        if (call_descriptor->PushArgumentCount()) {
          __ Claim(shrink_slots + 1);  // Claim extra slot for argc.
          __ Str(kJavaScriptCallArgCountRegister,
                 MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
        } else {
          __ Claim(shrink_slots);
        }
        break;
      case CallDescriptor::kCallCodeObject: {
        UseScratchRegisterScope temps(tasm());
        __ Claim(shrink_slots + 1);  // Claim extra slot for frame type marker.
        Register scratch = temps.AcquireX();
        __ Mov(scratch,
               StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
        __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
      } break;
      case CallDescriptor::kCallWasmFunction: {
        UseScratchRegisterScope temps(tasm());
        __ Claim(shrink_slots + 2);  // Claim extra slots for marker + instance.
        Register scratch = temps.AcquireX();
        __ Mov(scratch,
               StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
        __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
        __ Str(kWasmInstanceRegister,
               MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
      } break;
      case CallDescriptor::kCallWasmImportWrapper: {
        UseScratchRegisterScope temps(tasm());
        __ ldr(kJSFunctionRegister,
               FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
        __ ldr(kWasmInstanceRegister,
               FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
        __ Claim(shrink_slots + 2);  // Claim extra slots for marker + instance.
        Register scratch = temps.AcquireX();
        __ Mov(scratch,
               StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
        __ Str(scratch, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
        __ Str(kWasmInstanceRegister,
               MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
      } break;
      case CallDescriptor::kCallAddress:
        __ Claim(shrink_slots);
        break;
      default:
        UNREACHABLE();
    }
  }

  // Save FP registers.
  DCHECK_IMPLIES(saves_fp.Count() != 0,
                 saves_fp.list() == CPURegList::GetCalleeSavedV().list());
  __ PushCPURegList(saves_fp);

  // Save registers.
  __ PushCPURegList(saves);

  if (returns != 0) {
    __ Claim(returns);
  }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();

  const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
  if (returns != 0) {
    __ Drop(returns);
  }

  // Restore registers.
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                call_descriptor->CalleeSavedRegisters());
  __ PopCPURegList(saves);

  // Restore fp registers.
  CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
                                   call_descriptor->CalleeSavedFPRegisters());
  __ PopCPURegList(saves_fp);

  unwinding_info_writer_.MarkBlockWillExit();

  Arm64OperandConverter g(this, nullptr);
  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ B(&return_label_);
        return;
      } else {
        __ Bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }

  if (pop->IsImmediate()) {
    pop_count += g.ToConstant(pop).ToInt32();
    __ DropArguments(pop_count);
  } else {
    Register pop_reg = g.ToRegister(pop);
    __ Add(pop_reg, pop_reg, pop_count);
    __ DropArguments(pop_reg);
  }

  __ AssertSpAligned();
  __ Ret();
}
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
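// Parallel moves produced by the gap resolver are materialized one at a time
// below; at most one scratch register is needed, chosen to match the operand
// width (X for word-sized values, S/D/Q for float, double and SIMD).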
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Helper function to write the given constant to the dst register.
  auto MoveConstantToRegister = [&](Register dst, Constant src) {
    if (src.type() == Constant::kHeapObject) {
      Handle<HeapObject> src_object = src.ToHeapObject();
      RootIndex index;
      if (IsMaterializableFromRoot(src_object, &index)) {
        __ LoadRoot(dst, index);
      } else {
        __ Mov(dst, src_object);
      }
    } else {
      __ Mov(dst, g.ToImmediate(source));
    }
  };
  switch (MoveType::InferMove(source, destination)) {
    case MoveType::kRegisterToRegister:
      if (source->IsRegister()) {
        __ Mov(g.ToRegister(destination), g.ToRegister(source));
      } else if (source->IsFloatRegister() || source->IsDoubleRegister()) {
        __ Mov(g.ToDoubleRegister(destination), g.ToDoubleRegister(source));
      } else {
        DCHECK(source->IsSimd128Register());
        __ Mov(g.ToDoubleRegister(destination).Q(),
               g.ToDoubleRegister(source).Q());
      }
      return;
    case MoveType::kRegisterToStack: {
      MemOperand dst = g.ToMemOperand(destination, tasm());
      if (source->IsRegister()) {
        __ Str(g.ToRegister(source), dst);
      } else {
        VRegister src = g.ToDoubleRegister(source);
        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
          __ Str(src, dst);
        } else {
          DCHECK(source->IsSimd128Register());
          __ Str(src.Q(), dst);
        }
      }
      return;
    }
    case MoveType::kStackToRegister: {
      MemOperand src = g.ToMemOperand(source, tasm());
      if (destination->IsRegister()) {
        __ Ldr(g.ToRegister(destination), src);
      } else {
        VRegister dst = g.ToDoubleRegister(destination);
        if (destination->IsFloatRegister() || destination->IsDoubleRegister()) {
          __ Ldr(dst, src);
        } else {
          DCHECK(destination->IsSimd128Register());
          __ Ldr(dst.Q(), src);
        }
      }
      return;
    }
    case MoveType::kStackToStack: {
      MemOperand src = g.ToMemOperand(source, tasm());
      MemOperand dst = g.ToMemOperand(destination, tasm());
      if (source->IsSimd128StackSlot()) {
        UseScratchRegisterScope scope(tasm());
        VRegister temp = scope.AcquireQ();
        __ Ldr(temp, src);
        __ Str(temp, dst);
      } else {
        UseScratchRegisterScope scope(tasm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, src);
        __ Str(temp, dst);
      }
      return;
    }
    case MoveType::kConstantToRegister: {
      Constant src = g.ToConstant(source);
      if (destination->IsRegister()) {
        MoveConstantToRegister(g.ToRegister(destination), src);
      } else {
        VRegister dst = g.ToDoubleRegister(destination);
        if (destination->IsFloatRegister()) {
          __ Fmov(dst.S(), src.ToFloat32());
        } else {
          DCHECK(destination->IsDoubleRegister());
          __ Fmov(dst, src.ToFloat64().value());
        }
      }
      return;
    }
    case MoveType::kConstantToStack: {
      Constant src = g.ToConstant(source);
      MemOperand dst = g.ToMemOperand(destination, tasm());
      if (destination->IsStackSlot()) {
        UseScratchRegisterScope scope(tasm());
        Register temp = scope.AcquireX();
        MoveConstantToRegister(temp, src);
        __ Str(temp, dst);
      } else if (destination->IsFloatStackSlot()) {
        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
          __ Str(wzr, dst);
        } else {
          UseScratchRegisterScope scope(tasm());
          VRegister temp = scope.AcquireS();
          __ Fmov(temp, src.ToFloat32());
          __ Str(temp, dst);
        }
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        if (src.ToFloat64().AsUint64() == 0) {
          __ Str(xzr, dst);
        } else {
          UseScratchRegisterScope scope(tasm());
          VRegister temp = scope.AcquireD();
          __ Fmov(temp, src.ToFloat64().value());
          __ Str(temp, dst);
        }
      }
      return;
    }
  }
  UNREACHABLE();
}
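// Register-to-stack swaps route through a scratch register (three moves);
// stack-to-stack swaps need two temporaries so both slots can be loaded
// before either store.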
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  switch (MoveType::InferSwap(source, destination)) {
    case MoveType::kRegisterToRegister:
      if (source->IsRegister()) {
        __ Swap(g.ToRegister(source), g.ToRegister(destination));
      } else {
        VRegister src = g.ToDoubleRegister(source);
        VRegister dst = g.ToDoubleRegister(destination);
        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
          __ Swap(src, dst);
        } else {
          DCHECK(source->IsSimd128Register());
          __ Swap(src.Q(), dst.Q());
        }
      }
      return;
    case MoveType::kRegisterToStack: {
      UseScratchRegisterScope scope(tasm());
      MemOperand dst = g.ToMemOperand(destination, tasm());
      if (source->IsRegister()) {
        Register temp = scope.AcquireX();
        Register src = g.ToRegister(source);
        __ Mov(temp, src);
        __ Ldr(src, dst);
        __ Str(temp, dst);
      } else {
        UseScratchRegisterScope scope(tasm());
        VRegister src = g.ToDoubleRegister(source);
        if (source->IsFloatRegister() || source->IsDoubleRegister()) {
          VRegister temp = scope.AcquireD();
          __ Mov(temp, src);
          __ Ldr(src, dst);
          __ Str(temp, dst);
        } else {
          DCHECK(source->IsSimd128Register());
          VRegister temp = scope.AcquireQ();
          __ Mov(temp, src.Q());
          __ Ldr(src.Q(), dst);
          __ Str(temp, dst);
        }
      }
      return;
    }
    case MoveType::kStackToStack: {
      UseScratchRegisterScope scope(tasm());
      MemOperand src = g.ToMemOperand(source, tasm());
      MemOperand dst = g.ToMemOperand(destination, tasm());
      VRegister temp_0 = scope.AcquireD();
      VRegister temp_1 = scope.AcquireD();
      if (source->IsSimd128StackSlot()) {
        __ Ldr(temp_0.Q(), src);
        __ Ldr(temp_1.Q(), dst);
        __ Str(temp_0.Q(), dst);
        __ Str(temp_1.Q(), src);
      } else {
        __ Ldr(temp_0, src);
        __ Ldr(temp_1, dst);
        __ Str(temp_0, dst);
        __ Str(temp_1, src);
      }
      return;
    }
    default:
      UNREACHABLE();
  }
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {