5 #include "src/assembler-inl.h" 6 #include "src/compiler/backend/instruction-selector-impl.h" 7 #include "src/compiler/node-matchers.h" 8 #include "src/compiler/node-properties.h" 34 if (CanBeImmediate(node, mode)) {
35 return UseImmediate(node);
37 return UseRegister(node);
43 if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
44 (IsFloatConstant(node) &&
45 (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
46 return UseImmediate(node);
48 return UseRegister(node);
54 if (sp_allowed && node->opcode() == IrOpcode::kLoadStackPointer)
56 LocationOperand::REGISTER,
57 MachineRepresentation::kWord64, sp.code());
58 return UseRegister(node);
64 if (GetIntegerConstantValue(node) == value) {
65 return UseImmediate(node);
67 return TempImmediate(value);
bool IsIntegerConstant(Node* node) {
  return (node->opcode() == IrOpcode::kInt32Constant) ||
         (node->opcode() == IrOpcode::kInt64Constant);
}

int64_t GetIntegerConstantValue(Node* node) {
  if (node->opcode() == IrOpcode::kInt32Constant) {
    return OpParameter<int32_t>(node->op());
  }
  DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
  return OpParameter<int64_t>(node->op());
}

bool IsFloatConstant(Node* node) {
  return (node->opcode() == IrOpcode::kFloat32Constant) ||
         (node->opcode() == IrOpcode::kFloat64Constant);
}

double GetFloatConstantValue(Node* node) {
  if (node->opcode() == IrOpcode::kFloat32Constant) {
    return OpParameter<float>(node->op());
  }
  DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
  return OpParameter<double>(node->op());
}

bool CanBeImmediate(Node* node, ImmediateMode mode) {
  return IsIntegerConstant(node) &&
         CanBeImmediate(GetIntegerConstantValue(node), mode);
}
bool CanBeImmediate(int64_t value, ImmediateMode mode) {
  unsigned ignored;
  switch (mode) {
    case kLogical32Imm:
      return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                     &ignored, &ignored, &ignored);
    case kLogical64Imm:
      return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                     &ignored, &ignored, &ignored);
    case kArithmeticImm:
      return Assembler::IsImmAddSub(value);
    case kLoadStoreImm8:
      return IsLoadStoreImmediate(value, 0);
    case kLoadStoreImm16:
      return IsLoadStoreImmediate(value, 1);
    case kLoadStoreImm32:
      return IsLoadStoreImmediate(value, 2);
    case kLoadStoreImm64:
      return IsLoadStoreImmediate(value, 3);
    case kNoImmediate:
      return false;
    case kShift32Imm:  // Fall through.
    case kShift64Imm:
      // Shift can be out of range.
      return true;
  }
  return false;
}
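// For reference: kArithmeticImm corresponds to the AArch64 ADD/SUB immediate
// format (a 12-bit unsigned value, optionally shifted left by 12), while the
// logical modes accept "bitmask immediates" (patterns of rotated, contiguous
// set bits). E.g. 4095 and 4096 are encodable arithmetic immediates but 4097
// is not, and 0x0F0F0F0F is a valid 32-bit logical immediate.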
bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
  DCHECK_GT(MachineRepresentation::kSimd128, rep);
  return IsIntegerConstant(node) &&
         (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
}
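// The shift is only foldable when it matches the access size, since AArch64
// addressing modes only support scaling by LSL #log2(size), e.g.
//   LDR x0, [x1, x2, LSL #3]  // 64-bit load, index scaled by 8.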
bool IsLoadStoreImmediate(int64_t value, unsigned size) {
  return Assembler::IsImmLSScaled(value, size) ||
         Assembler::IsImmLSUnscaled(value);
}

void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Arm64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}

void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
              ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}

void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Arm64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
                 g.UseRegister(node->InputAt(1)));
}
struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const { DCHECK(Matches()); return base_; }
  int64_t immediate() const { DCHECK(Matches()); return immediate_; }
  ArchOpcode opcode() const { DCHECK(Matches()); return opcode_; }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      Arm64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kArm64Ldrsw;
      if (g.IsIntegerConstant(offset)) {
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
        matches_ = g.CanBeImmediate(immediate_, kLoadStoreImm32);
      }
    }
  }
};
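// E.g. on little-endian, (int64_t)Load[base + k] >> 32 only needs the upper
// word of the loaded value, so it can be selected as a single
// "LDRSW x0, [base, #k+4]" instead of a 64-bit load plus an arithmetic shift.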
bool TryMatchExtendingLoad(InstructionSelector* selector, Node* node) {
  ExtendingLoadMatcher m(node, selector);
  return m.Matches();
}

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
  ExtendingLoadMatcher m(node, selector);
  Arm64OperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}
bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                      Node* input_node, InstructionCode* opcode,
                      bool try_ror) {
  Arm64OperandGenerator g(selector);

  if (!selector->CanCover(node, input_node)) return false;
  if (input_node->InputCount() != 2) return false;
  if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;

  switch (input_node->opcode()) {
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord64Shl:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
      return true;
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord64Shr:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
      return true;
    case IrOpcode::kWord32Sar:
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
      return true;
    case IrOpcode::kWord64Sar:
      if (TryMatchExtendingLoad(selector, input_node)) return false;
      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
      return true;
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord64Ror:
      if (try_ror) {
        *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
        return true;
      }
      return false;
    default:
      return false;
  }
}
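// The matched shift is folded into the second operand of the enclosing
// instruction, e.g. Add(x, Shl(y, 3)) can be selected as the single
// instruction "ADD x0, x1, x2, LSL #3" instead of a shift plus an add.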
bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
                       Node* node, Node* left_node, Node* right_node,
                       InstructionOperand* left_op,
                       InstructionOperand* right_op, InstructionCode* opcode) {
  if (!selector->CanCover(node, right_node)) return false;

  NodeMatcher nm(right_node);

  if (nm.IsWord32And()) {
    Int32BinopMatcher mright(right_node);
    if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
      int32_t mask = mright.right().Value();
      *left_op = g->UseRegister(left_node);
      *right_op = g->UseRegister(mright.left().node());
      *opcode |= AddressingModeField::encode(
          (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
      return true;
    }
  } else if (nm.IsWord32Sar()) {
    Int32BinopMatcher mright(right_node);
    if (selector->CanCover(mright.node(), mright.left().node()) &&
        mright.left().IsWord32Shl()) {
      Int32BinopMatcher mleft_of_right(mright.left().node());
      if ((mright.right().Is(16) && mleft_of_right.right().Is(16)) ||
          (mright.right().Is(24) && mleft_of_right.right().Is(24))) {
        int32_t shift = mright.right().Value();
        *left_op = g->UseRegister(left_node);
        *right_op = g->UseRegister(mleft_of_right.left().node());
        *opcode |= AddressingModeField::encode(
            (shift == 24) ? kMode_Operand2_R_SXTB : kMode_Operand2_R_SXTH);
        return true;
      }
    }
  }
  return false;
}
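// These patterns correspond to the extended-register operand forms, e.g.
// Add(x, And(y, 0xFF)) becomes "ADD w0, w1, w2, UXTB", and the shift pair
// (y << 16) >> 16 (a 16-bit sign extension) becomes an SXTH operand.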
bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
                            InstructionSelector* selector,
                            MachineRepresentation rep, Node* node, Node* index,
                            InstructionOperand* index_op,
                            InstructionOperand* shift_immediate_op) {
  if (!selector->CanCover(node, index)) return false;
  if (index->InputCount() != 2) return false;
  Node* left = index->InputAt(0);
  Node* right = index->InputAt(1);
  switch (index->opcode()) {
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord64Shl:
      if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
        return false;
      }
      *index_op = g->UseRegister(left);
      *shift_immediate_op = g->UseImmediate(right);
      return true;
    default:
      return false;
  }
}

// Bitfields describing binary operator properties:
// CanCommuteField is true if we can switch the two operands, potentially
// requiring commuting the flags continuation condition.
typedef BitField8<bool, 1, 1> CanCommuteField;
// MustCommuteCondField is true when we need to commute the flags continuation
// condition in order to switch the two operands.
typedef BitField8<bool, 2, 1> MustCommuteCondField;
// IsComparisonField is true when the operation is a comparison.
typedef BitField8<bool, 3, 1> IsComparisonField;
// IsAddSubField is true when an instruction is encoded as ADD or SUB.
typedef BitField8<bool, 4, 1> IsAddSubField;
// Encode binary operator properties as a bit field.
uint8_t GetBinopProperties(InstructionCode opcode) {
  uint8_t result = 0;
  switch (opcode) {
    case kArm64Cmp32:
    case kArm64Cmp:
      // We can commute CMP by switching the sides of the FlagsContinuation
      // condition.
      result = CanCommuteField::update(result, true);
      result = MustCommuteCondField::update(result, true);
      result = IsComparisonField::update(result, true);
      // The CMP and CMN instructions are encoded as SUB or ADD.
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Cmn32:
    case kArm64Cmn:
      result = CanCommuteField::update(result, true);
      result = IsComparisonField::update(result, true);
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Add32:
    case kArm64Add:
      result = CanCommuteField::update(result, true);
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Sub32:
    case kArm64Sub:
      result = IsAddSubField::update(result, true);
      break;
    case kArm64Tst32:
    case kArm64Tst:
      result = CanCommuteField::update(result, true);
      result = IsComparisonField::update(result, true);
      break;
    case kArm64And32:
    case kArm64And:
    case kArm64Or32:
    case kArm64Or:
    case kArm64Eor32:
    case kArm64Eor:
      result = CanCommuteField::update(result, true);
      break;
    default:
      UNREACHABLE();
  }
  DCHECK_IMPLIES(MustCommuteCondField::decode(result),
                 CanCommuteField::decode(result));
  return result;
}
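// Note: CMP is the only opcode here whose operand swap also requires
// commuting the continuation condition: "CMP a, b; b.lt" is equivalent to
// "CMP b, a; b.gt". CMN and TST compute commutative operations (a + b and
// a & b), so their operands can swap without touching the condition.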
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, ImmediateMode operand_mode,
                FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  Node* left_node = node->InputAt(0);
  Node* right_node = node->InputAt(1);

  uint8_t properties = GetBinopProperties(opcode);
  bool can_commute = CanCommuteField::decode(properties);
  bool must_commute_cond = MustCommuteCondField::decode(properties);
  bool is_add_sub = IsAddSubField::decode(properties);

  if (g.CanBeImmediate(right_node, operand_mode)) {
    inputs[input_count++] = g.UseRegister(left_node);
    inputs[input_count++] = g.UseImmediate(right_node);
  } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
    if (must_commute_cond) cont->Commute();
    inputs[input_count++] = g.UseRegister(right_node);
    inputs[input_count++] = g.UseImmediate(left_node);
  } else if (is_add_sub &&
             TryMatchAnyExtend(&g, selector, node, left_node, right_node,
                               &inputs[0], &inputs[1], &opcode)) {
    input_count += 2;
  } else if (is_add_sub && can_commute &&
             TryMatchAnyExtend(&g, selector, node, right_node, left_node,
                               &inputs[0], &inputs[1], &opcode)) {
    if (must_commute_cond) cont->Commute();
    input_count += 2;
  } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                              !is_add_sub)) {
    Matcher m_shift(right_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    // We only need at most the last 6 bits of the shift.
    inputs[input_count++] =
        g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
  } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                             !is_add_sub)) {
    if (must_commute_cond) cont->Commute();
    Matcher m_shift(left_node);
    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
    inputs[input_count++] = g.UseRegister(m_shift.left().node());
    // We only need at most the last 6 bits of the shift.
    inputs[input_count++] =
        g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
  } else {
    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
    inputs[input_count++] = g.UseRegister(right_node);
  }

  if (!IsComparisonField::decode(properties)) {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK((output_count != 0) || IsComparisonField::decode(properties));
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                ImmediateMode operand_mode) {
  FlagsContinuation cont;
  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}

template <typename Matcher>
void VisitAddSub(InstructionSelector* selector, Node* node, ArchOpcode opcode,
                 ArchOpcode negate_opcode) {
  Arm64OperandGenerator g(selector);
  Matcher m(node);
  if (m.right().HasValue() && (m.right().Value() < 0) &&
      g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
    selector->Emit(negate_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()),
                   g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
  }
}

// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
// return the value of k, otherwise return zero. This is used to reduce the
// multiplication to addition with left shift: x + (x << k).
template <typename Matcher>
int32_t LeftShiftForReducedMultiply(Matcher* m) {
  DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
  if (m->right().HasValue() && m->right().Value() >= 3) {
    uint64_t value_minus_one = m->right().Value() - 1;
    if (base::bits::IsPowerOfTwo(value_minus_one)) {
      return WhichPowerOf2(value_minus_one);
    }
  }
  return 0;
}
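// Worked example: for x * 9, value_minus_one is 8 (a power of two), so this
// returns 3 and the multiply can be selected as "ADD x0, x1, x1, LSL #3",
// i.e. x + (x << 3).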
void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int slot = frame_->AllocateSpillSlot(rep.size());
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelector::VisitDebugAbort(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              ImmediateMode immediate_mode, MachineRepresentation rep,
              Node* output = nullptr) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionOperand inputs[3];
  size_t input_count = 0;
  InstructionOperand outputs[1];

  // If {output} is provided, use it as the output register instead of the
  // node itself (used when a conversion is merged into the load).
  outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);

  if (selector->CanAddressRelativeToRootsRegister()) {
    ExternalReferenceMatcher m(base);
    if (m.HasValue() && g.IsIntegerConstant(index)) {
      ptrdiff_t const delta =
          g.GetIntegerConstantValue(index) +
          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
              selector->isolate(), m.Value());
      input_count = 1;
      // Check that the delta is a 32-bit integer due to the limitations of
      // immediate operands.
      if (is_int32(delta)) {
        inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
        opcode |= AddressingModeField::encode(kMode_Root);
        selector->Emit(opcode, arraysize(outputs), outputs, input_count,
                       inputs);
        return;
      }
    }
  }

  inputs[0] = g.UseRegister(base);
  input_count = 1;

  if (g.CanBeImmediate(index, immediate_mode)) {
    input_count = 2;
    inputs[1] = g.UseImmediate(index);
    opcode |= AddressingModeField::encode(kMode_MRI);
  } else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
                                    &inputs[2])) {
    input_count = 3;
    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
  } else {
    input_count = 2;
    inputs[1] = g.UseRegister(index);
    opcode |= AddressingModeField::encode(kMode_MRR);
  }

  selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}
void InstructionSelector::VisitLoad(Node* node) {
  InstructionCode opcode = kArchNop;
  ImmediateMode immediate_mode = kNoImmediate;
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MachineRepresentation rep = load_rep.representation();
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kArm64LdrQ;
      immediate_mode = kNoImmediate;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  EmitLoad(this, node, opcode, immediate_mode, rep);
}

void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // The write barrier code also uses the index in add/sub instructions, so
    // the immediate must fit both the load/store and the arithmetic formats.
    if (g.CanBeImmediate(index, kArithmeticImm) &&
        g.CanBeImmediate(index, kLoadStoreImm64)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    InstructionOperand inputs[4];
    size_t input_count = 0;
    InstructionCode opcode = kArchNop;
    ImmediateMode immediate_mode = kNoImmediate;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kArm64StrS;
        immediate_mode = kLoadStoreImm32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kArm64StrD;
        immediate_mode = kLoadStoreImm64;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kArm64Strb;
        immediate_mode = kLoadStoreImm8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kArm64Strh;
        immediate_mode = kLoadStoreImm16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kArm64StrW;
        immediate_mode = kLoadStoreImm32;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kArm64Str;
        immediate_mode = kLoadStoreImm64;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kArm64StrQ;
        immediate_mode = kNoImmediate;
        break;
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    inputs[0] = g.UseRegisterOrImmediateZero(value);
    inputs[1] = g.UseRegister(base);
    input_count = 2;

    if (g.CanBeImmediate(index, immediate_mode)) {
      input_count = 3;
      inputs[2] = g.UseImmediate(index);
      opcode |= AddressingModeField::encode(kMode_MRI);
    } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
                                      &inputs[3])) {
      input_count = 4;
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
    } else {
      input_count = 3;
      inputs[2] = g.UseRegister(index);
      opcode |= AddressingModeField::encode(kMode_MRR);
    }

    Emit(opcode, 0, nullptr, input_count, inputs);
  }
}
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
// Architecture supports unaligned access, therefore VisitLoad is used instead.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used
// instead.
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                         ArchOpcode opcode, bool left_can_cover,
                         bool right_can_cover, ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);

  // Map instruction to equivalent operation with inverted right input.
  ArchOpcode inv_opcode = opcode;
  switch (opcode) {
    case kArm64And32:
      inv_opcode = kArm64Bic32;
      break;
    case kArm64And:
      inv_opcode = kArm64Bic;
      break;
    case kArm64Or32:
      inv_opcode = kArm64Orn32;
      break;
    case kArm64Or:
      inv_opcode = kArm64Orn;
      break;
    case kArm64Eor32:
      inv_opcode = kArm64Eon32;
      break;
    case kArm64Eor:
      inv_opcode = kArm64Eon;
      break;
    default:
      UNREACHABLE();
  }

  // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
  if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
    Matcher mleft(m->left().node());
    if (mleft.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->right().node()),
                     g.UseRegister(mleft.left().node()));
      return;
    }
  }

  // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
  if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
      right_can_cover) {
    Matcher mright(m->right().node());
    if (mright.right().Is(-1)) {
      selector->Emit(inv_opcode, g.DefineAsRegister(node),
                     g.UseRegister(m->left().node()),
                     g.UseRegister(mright.left().node()));
      return;
    }
  }

  if (m->IsWord32Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not32, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else if (m->IsWord64Xor() && m->right().Is(-1)) {
    selector->Emit(kArm64Not, g.DefineAsRegister(node),
                   g.UseRegister(m->left().node()));
  } else {
    VisitBinop<Matcher>(selector, node, opcode, imm_mode);
  }
}
void InstructionSelector::VisitWord32And(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_width != 32) &&
        (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1F;

        // Ubfx cannot extract bits past the register size. Since shifting the
        // original value would have introduced some zeros we can still use
        // ubfx with a smaller mask and the remaining bits will be zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(mleft.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64And32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}
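// E.g. (x >> 5) & 0x7 has a contiguous 3-bit mask in the least-significant
// bits, so it is selected as the single bitfield extract
// "UBFX w0, w1, #5, #3" instead of a shift followed by a mask.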
void InstructionSelector::VisitWord64And(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint64_t mask_width = base::bits::CountPopulation(mask);
    uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_width != 64) &&
        (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = mleft.right().Value() & 0x3F;

        // Ubfx cannot extract bits past the register size. Since shifting the
        // original value would have introduced some zeros we can still use
        // ubfx with a smaller mask and the remaining bits will be zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(mleft.right().node(), lsb),
             g.TempImmediate(static_cast<int32_t>(mask_width)));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64And, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}
void InstructionSelector::VisitWord32Or(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Or32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}

void InstructionSelector::VisitWord64Or(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Or, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}

void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  VisitLogical<Int32BinopMatcher>(
      this, node, &m, kArm64Eor32, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical32Imm);
}

void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  VisitLogical<Int64BinopMatcher>(
      this, node, &m, kArm64Eor, CanCover(node, m.left().node()),
      CanCover(node, m.right().node()), kLogical64Imm);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Arm64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kArm64Lsl32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        } else {
          // Select Ubfiz for Shl(And(x, mask), imm) where the mask is
          // contiguous, and the shift immediate non-zero.
          Emit(kArm64Ubfiz32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()), g.TempImmediate(mask_width));
          return;
        }
      }
    }
  }
  VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
}
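// E.g. (x & 0x7) << 5 zeroes everything but bits [2:0] before shifting, which
// is exactly "UBFIZ w0, w1, #5, #3"; if the shifted mask would reach bit 31
// anyway, a plain LSL already discards the masked-off bits.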
void InstructionSelector::VisitWord64Shl(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kArm64Lsl, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
  Arm64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    // Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
    // OP is >>> or >> and (K & 0x1F) != 0.
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && m.right().HasValue() &&
        (mleft.right().Value() & 0x1F) != 0 &&
        (mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
      DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
      ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;

      int right_val = m.right().Value() & 0x1F;
      DCHECK_NE(right_val, 0);

      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(mleft.left().node()), g.TempImmediate(0),
                     g.TempImmediate(32 - right_val));
      return true;
    }
  }
  return false;
}
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kArm64Ubfx32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  } else if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  if (m.left().IsUint32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Uint32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1F;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
}
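// The Uint32MulHigh combination works because UMULL produces the full 64-bit
// product in one X register: Shr(Uint32MulHigh(a, b), k) is just the product
// shifted right by 32 + k, i.e. UMULL followed by a single LSR.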
void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3F;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Arm64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kArm64Ubfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseImmediateOrTemp(m.right().node(), lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kArm64Lsr, node, kShift64Imm);
}
void InstructionSelector::VisitWord32Sar(Node* node) {
  if (TryEmitBitfieldExtract32(this, node)) {
    return;
  }

  Int32BinopMatcher m(node);
  if (m.left().IsInt32MulHigh() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    // Combine this shift with the multiply and shift that would be generated
    // by Int32MulHigh.
    Arm64OperandGenerator g(this);
    Node* left = m.left().node();
    int shift = m.right().Value() & 0x1F;
    InstructionOperand const smull_operand = g.TempRegister();
    Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
         g.UseRegister(left->InputAt(1)));
    Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand,
         g.TempImmediate(32 + shift));
    return;
  }

  if (m.left().IsInt32Add() && m.right().HasValue() &&
      CanCover(node, node->InputAt(0))) {
    Node* add_node = m.left().node();
    Int32BinopMatcher madd_node(add_node);
    if (madd_node.left().IsInt32MulHigh() &&
        CanCover(add_node, madd_node.left().node())) {
      // Combine the shift that would be generated by Int32MulHigh with the
      // add on the left of this Sar operation. The result of the add
      // potentially has 33 bits, so we have to ensure it is truncated by
      // being the input to this 32-bit Sar operation.
      Arm64OperandGenerator g(this);
      Node* mul_node = madd_node.left().node();

      InstructionOperand const smull_operand = g.TempRegister();
      Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
           g.UseRegister(mul_node->InputAt(1)));

      InstructionOperand const add_operand = g.TempRegister();
      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
           add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
           g.TempImmediate(32));

      Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
           g.UseImmediate(node->InputAt(1)));
      return;
    }
  }

  VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
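// As above, but signed: Sar(Int32MulHigh(a, b), k) becomes SMULL plus an ASR
// by 32 + k. The Int32Add variant additionally folds the intermediate add
// into the ASR operand form before the final truncating shift.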
void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node)) return;
  VisitRRO(this, kArm64Asr, node, kShift64Imm);
}

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
}

void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kArm64Ror, node, kShift64Imm);
}
#define RR_OP_LIST(V)                                         \
  V(Word64Clz, kArm64Clz)                                     \
  V(Word32Clz, kArm64Clz32)                                   \
  V(Word32ReverseBits, kArm64Rbit32)                          \
  V(Word64ReverseBits, kArm64Rbit)                            \
  V(Word32ReverseBytes, kArm64Rev32)                          \
  V(Word64ReverseBytes, kArm64Rev)                            \
  V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64)           \
  V(RoundInt32ToFloat32, kArm64Int32ToFloat32)                \
  V(RoundUint32ToFloat32, kArm64Uint32ToFloat32)              \
  V(ChangeInt32ToFloat64, kArm64Int32ToFloat64)               \
  V(ChangeInt64ToFloat64, kArm64Int64ToFloat64)               \
  V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64)             \
  V(TruncateFloat32ToInt32, kArm64Float32ToInt32)             \
  V(ChangeFloat64ToInt32, kArm64Float64ToInt32)               \
  V(ChangeFloat64ToInt64, kArm64Float64ToInt64)               \
  V(TruncateFloat32ToUint32, kArm64Float32ToUint32)           \
  V(ChangeFloat64ToUint32, kArm64Float64ToUint32)             \
  V(ChangeFloat64ToUint64, kArm64Float64ToUint64)             \
  V(TruncateFloat64ToInt64, kArm64Float64ToInt64)             \
  V(TruncateFloat64ToUint32, kArm64Float64ToUint32)           \
  V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32)         \
  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)          \
  V(RoundFloat64ToInt32, kArm64Float64ToInt32)                \
  V(RoundInt64ToFloat32, kArm64Int64ToFloat32)                \
  V(RoundInt64ToFloat64, kArm64Int64ToFloat64)                \
  V(RoundUint64ToFloat32, kArm64Uint64ToFloat32)              \
  V(RoundUint64ToFloat64, kArm64Uint64ToFloat64)              \
  V(BitcastFloat32ToInt32, kArm64Float64ExtractLowWord32)     \
  V(BitcastFloat64ToInt64, kArm64U64MoveFloat64)              \
  V(BitcastInt32ToFloat32, kArm64Float64MoveU64)              \
  V(BitcastInt64ToFloat64, kArm64Float64MoveU64)              \
  V(Float32Abs, kArm64Float32Abs)                             \
  V(Float64Abs, kArm64Float64Abs)                             \
  V(Float32Sqrt, kArm64Float32Sqrt)                           \
  V(Float64Sqrt, kArm64Float64Sqrt)                           \
  V(Float32RoundDown, kArm64Float32RoundDown)                 \
  V(Float64RoundDown, kArm64Float64RoundDown)                 \
  V(Float32RoundUp, kArm64Float32RoundUp)                     \
  V(Float64RoundUp, kArm64Float64RoundUp)                     \
  V(Float32RoundTruncate, kArm64Float32RoundTruncate)         \
  V(Float64RoundTruncate, kArm64Float64RoundTruncate)         \
  V(Float64RoundTiesAway, kArm64Float64RoundTiesAway)         \
  V(Float32RoundTiesEven, kArm64Float32RoundTiesEven)         \
  V(Float64RoundTiesEven, kArm64Float64RoundTiesEven)         \
  V(Float32Neg, kArm64Float32Neg)                             \
  V(Float64Neg, kArm64Float64Neg)                             \
  V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32)   \
  V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
  V(Float64SilenceNaN, kArm64Float64SilenceNaN)

#define RRR_OP_LIST(V)            \
  V(Int32Div, kArm64Idiv32)       \
  V(Int64Div, kArm64Idiv)         \
  V(Uint32Div, kArm64Udiv32)      \
  V(Uint64Div, kArm64Udiv)        \
  V(Int32Mod, kArm64Imod32)       \
  V(Int64Mod, kArm64Imod)         \
  V(Uint32Mod, kArm64Umod32)      \
  V(Uint64Mod, kArm64Umod)        \
  V(Float32Add, kArm64Float32Add) \
  V(Float64Add, kArm64Float64Add) \
  V(Float32Sub, kArm64Float32Sub) \
  V(Float64Sub, kArm64Float64Sub) \
  V(Float32Mul, kArm64Float32Mul) \
  V(Float64Mul, kArm64Float64Mul) \
  V(Float32Div, kArm64Float32Div) \
  V(Float64Div, kArm64Float64Div) \
  V(Float32Max, kArm64Float32Max) \
  V(Float64Max, kArm64Float64Max) \
  V(Float32Min, kArm64Float32Min) \
  V(Float64Min, kArm64Float64Min)

#define RR_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, opcode, node);                      \
  }
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#define RRR_VISITOR(Name, opcode)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, opcode, node);                     \
  }
RRR_OP_LIST(RRR_VISITOR)
#undef RRR_VISITOR
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitSpeculationFence(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64DsbIsb, g.NoOutput());
}
void InstructionSelector::VisitInt32Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // Select Madd(x, y, z) for Add(Mul(x, y), z).
  if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mleft) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }
  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Madd, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub32, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
}
void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
  if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    // Check multiply can't be later reduced to addition with shift.
    if (LeftShiftForReducedMultiply(&mright) == 0) {
      Emit(kArm64Msub, g.DefineAsRegister(node),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()),
           g.UseRegister(m.left().node()));
      return;
    }
  }

  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand result = g.DefineAsRegister(node);
  InstructionOperand left = g.UseRegister(m.left().node());
  InstructionOperand right = g.UseRegister(m.right().node());
  selector->Emit(kArm64Smull, result, left, right);

  InstructionCode opcode =
      kArm64Cmp | AddressingModeField::encode(kMode_Operand2_R_SXTW);
  selector->EmitWithContinuation(opcode, result, result, cont);
}
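// SMULL yields the exact 64-bit product, so overflow of the 32-bit multiply
// is detected by comparing the product against its own sign-extended low
// word: "CMP x0, w0, SXTW" fails to match exactly when the upper bits are
// not a plain sign extension, i.e. when the result overflowed 32 bits.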
void InstructionSelector::VisitInt32Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift:
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg32, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul32, node);
}
void InstructionSelector::VisitInt64Mul(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // First, try to reduce the multiplication to addition with left shift:
  // x * (2^k + 1) -> x + (x << k)
  int32_t shift = LeftShiftForReducedMultiply(&m);
  if (shift > 0) {
    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseRegister(m.left().node()), g.TempImmediate(shift));
    return;
  }

  if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());

    // Select Mneg(x, y) for Mul(Sub(0, x), y).
    if (mleft.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node),
           g.UseRegister(mleft.right().node()),
           g.UseRegister(m.right().node()));
      return;
    }
  }

  if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());

    // Select Mneg(x, y) for Mul(x, Sub(0, y)).
    if (mright.left().Is(0)) {
      Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }

  VisitRRR(this, kArm64Mul, node);
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  Arm64OperandGenerator g(this);
  InstructionOperand const smull_operand = g.TempRegister();
  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
       g.UseRegister(node->InputAt(1)));
  Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Arm64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate a sign-extending load instead of a load plus Sxtw.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    MachineRepresentation rep = load_rep.representation();
    InstructionCode opcode = kArchNop;
    ImmediateMode immediate_mode = kNoImmediate;
    switch (rep) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
        immediate_mode = kLoadStoreImm8;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
        immediate_mode = kLoadStoreImm16;
        break;
      case MachineRepresentation::kWord32:
        opcode = kArm64Ldrsw;
        immediate_mode = kLoadStoreImm32;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, immediate_mode, rep, node);
    return;
  }
  VisitRR(this, kArm64Sxtw, node);
}
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32SubWithOverflow:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      // 32-bit operations will write their result in a W register, implicitly
      // clearing the top 32 bits of the corresponding X register, so the
      // zero-extension is a no-op.
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      // As for the operations above, a 32-bit load will implicitly clear the
      // top 32 bits of the destination register.
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord8:
        case MachineRepresentation::kWord16:
        case MachineRepresentation::kWord32:
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
          return;
        default:
          break;
      }
      break;
    }
    default:
      break;
  }
  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // The top 32 bits of the 64-bit register are undefined after truncation and
  // must not be observed, so no instruction is needed.
  Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0), g.UseFixed(node->InputAt(1), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Arm64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Arm64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
      ->MarkAsCall();
}
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  Arm64OperandGenerator g(this);

  // `arguments` includes alignment "holes". This means that slots bigger than
  // kPointerSize, e.g. Simd128, will span across multiple arguments.
  int claim_count = static_cast<int>(arguments->size());
  int slot = claim_count - 1;
  claim_count = RoundUp(claim_count, 2);

  // Bump the stack pointer.
  if (claim_count > 0) {
    Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
  }

  if (claim_count > 0) {
    // Store padding, which might be overwritten.
    Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
         g.TempImmediate(claim_count - 1));
  }

  // Poke the arguments into the stack.
  while (slot >= 0) {
    Node* input_node = (*arguments)[slot].node;
    // Skip any alignment holes in pushed nodes.
    if (input_node != nullptr) {
      Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input_node),
           g.TempImmediate(slot));
    }
    slot--;
  }
}
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  Arm64OperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    reverse_slot += output.location.GetSizeInPointers();
    // Skip any alignment holes in nodes.
    if (output.node == nullptr) continue;
    DCHECK(!call_descriptor->IsCFunctionCall());

    if (output.location.GetType() == MachineType::Float32()) {
      MarkAsFloat32(output.node);
    } else if (output.location.GetType() == MachineType::Float64()) {
      MarkAsFloat64(output.node);
    }

    Emit(kArm64Peek, g.DefineAsRegister(output.node),
         g.UseImmediate(reverse_slot));
  }
}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Make sure the stack pointer ends up on the left and any immediate ends up
  // on the right.
  if (right->opcode() == IrOpcode::kLoadStackPointer ||
      g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on the right side of the comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode,
                 g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
                 g.UseImmediate(right), cont);
  } else {
    VisitCompare(selector, opcode,
                 g.UseRegisterOrStackPointer(left, opcode == kArm64Cmp),
                 g.UseRegister(right), cont);
  }
}
// Checks whether ((a <op> b) cmp 0), b.<cond> can be converted to the flag
// setting version of <op> plus b.<cond'>. Only conditions that are a
// combination of the N and Z flags qualify.
bool CanUseFlagSettingBinop(FlagsCondition cond) {
  switch (cond) {
    case kEqual:
    case kNotEqual:
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual:
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
    case kUnsignedGreaterThan:      // x > 0 -> x != 0
      return true;
    default:
      return false;
  }
}

// Map <cond> to <cond'> for the transformation above.
FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
  DCHECK(CanUseFlagSettingBinop(cond));
  switch (cond) {
    case kEqual:
    case kNotEqual:
      return cond;
    case kSignedLessThan:
      return kNegative;
    case kSignedGreaterThanOrEqual:
      return kPositiveOrZero;
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0
      return kEqual;
    case kUnsignedGreaterThan:  // x > 0 -> x != 0
      return kNotEqual;
    default:
      UNREACHABLE();
  }
}
// Potentially merge a comparison against zero into the preceding add or and,
// switching to the flag-setting (or no-output) version of the binop.
void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
                                             Node** node, Node* binop,
                                             ArchOpcode* opcode,
                                             FlagsCondition cond,
                                             FlagsContinuation* cont,
                                             ImmediateMode* immediate_mode) {
  ArchOpcode binop_opcode;
  ArchOpcode no_output_opcode;
  ImmediateMode binop_immediate_mode;
  switch (binop->opcode()) {
    case IrOpcode::kInt32Add:
      binop_opcode = kArm64Add32;
      no_output_opcode = kArm64Cmn32;
      binop_immediate_mode = kArithmeticImm;
      break;
    case IrOpcode::kWord32And:
      binop_opcode = kArm64And32;
      no_output_opcode = kArm64Tst32;
      binop_immediate_mode = kLogical32Imm;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (selector->CanCover(*node, binop)) {
    // The comparison is the only user of the add or and, so a cmn or tst with
    // no output can be generated instead.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = no_output_opcode;
    *node = binop;
    *immediate_mode = binop_immediate_mode;
  } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
    // The add or and has other users, but the compare is its only use within
    // this basic block, so the flag-setting version of the binop can replace
    // both instructions here.
    cont->Overwrite(MapForFlagSettingBinop(cond));
    *opcode = binop_opcode;
    *node = binop;
    *immediate_mode = binop_immediate_mode;
  }
}
// Map <cond> for a comparison with zero to the condition used by a
// test-and-branch (tbz/tbnz) on the sign bit.
FlagsCondition MapForTbz(FlagsCondition cond) {
  switch (cond) {
    case kSignedLessThan:  // generate TBNZ
      return kNotEqual;
    case kSignedGreaterThanOrEqual:  // generate TBZ
      return kEqual;
    default:
      UNREACHABLE();
  }
}

// Map <cond> for a comparison with zero to the condition used by a
// compare-and-branch (cbz/cbnz).
FlagsCondition MapForCbz(FlagsCondition cond) {
  switch (cond) {
    case kEqual:     // generate cbz
    case kNotEqual:  // generate cbnz
      return cond;
    case kUnsignedLessThanOrEqual:  // x <= 0 -> x == 0, generate cbz
      return kEqual;
    case kUnsignedGreaterThan:  // x > 0 -> x != 0, generate cbnz
      return kNotEqual;
    default:
      UNREACHABLE();
  }
}
void EmitBranchOrDeoptimize(InstructionSelector* selector,
                            InstructionCode opcode, InstructionOperand value,
                            FlagsContinuation* cont) {
  DCHECK(cont->IsBranch() || cont->IsDeoptimize());
  selector->EmitWithContinuation(opcode, value, cont);
}
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
// against {value}, depending upon the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
                     Node* user, FlagsCondition cond, FlagsContinuation* cont) {
  // Branch poisoning requires flags to be set, so when it's enabled for a
  // particular branch, this optimization must not be applied.
  DCHECK(!cont->IsPoisoned());
  // Only handle branches and deoptimisations.
  if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;

  switch (cond) {
    case kSignedLessThan:
    case kSignedGreaterThanOrEqual: {
      // Here we handle sign tests, aka. comparisons with zero.
      if (value != 0) return false;
      // TBZ/TBNZ have a shorter range than conditional branches, so they are
      // not generated for deoptimisations, which would need more veneers.
      if (cont->IsDeoptimize()) return false;
      Arm64OperandGenerator g(selector);
      cont->Overwrite(MapForTbz(cond));
      Int32Matcher m(node);
      if (m.IsFloat64ExtractHighWord32() && selector->CanCover(user, node)) {
        // SignedLessThan(Float64ExtractHighWord32(x), 0) and
        // SignedGreaterThanOrEqual(Float64ExtractHighWord32(x), 0)
        // essentially check the sign bit of a 64-bit floating point value.
        InstructionOperand temp = g.TempRegister();
        selector->Emit(kArm64U64MoveFloat64, temp,
                       g.UseRegister(node->InputAt(0)));
        selector->EmitWithContinuation(kArm64TestAndBranch, temp,
                                       g.TempImmediate(63), cont);
        return true;
      }
      selector->EmitWithContinuation(kArm64TestAndBranch32, g.UseRegister(node),
                                     g.TempImmediate(31), cont);
      return true;
    }
    case kEqual:
    case kNotEqual: {
      if (node->opcode() == IrOpcode::kWord32And) {
        // Emit a tbz/tbnz if we are comparing with a single-bit mask:
        //   Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false)
        Int32BinopMatcher m_and(node);
        if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
            m_and.right().Is(value) && selector->CanCover(user, node)) {
          Arm64OperandGenerator g(selector);
          // In the code generator, Equal refers to a bit being cleared. We
          // want the opposite here so negate the condition.
          cont->Negate();
          selector->EmitWithContinuation(
              kArm64TestAndBranch32, g.UseRegister(m_and.left().node()),
              g.TempImmediate(base::bits::CountTrailingZeros(value)), cont);
          return true;
        }
      }
      V8_FALLTHROUGH;
    }
    case kUnsignedLessThanOrEqual:
    case kUnsignedGreaterThan: {
      if (value != 0) return false;
      Arm64OperandGenerator g(selector);
      cont->Overwrite(MapForCbz(cond));
      EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
                             g.UseRegister(node), cont);
      return true;
    }
    default:
      return false;
  }
}
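// Examples of the selected forms:
//   if (x < 0)        -> TBNZ w0, #31, <label>   (sign-bit test)
//   if (x == 0)       -> CBZ w0, <label>
//   if ((x & 8) == 8) -> TBNZ w0, #3, <label>    (single-bit mask)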
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  Int32BinopMatcher m(node);
  FlagsCondition cond = cont->condition();
  if (!cont->IsPoisoned()) {
    if (m.right().HasValue()) {
      if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
                          cond, cont)) {
        return;
      }
    } else if (m.left().HasValue()) {
      FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
      if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
                          commuted_cond, cont)) {
        return;
      }
    }
  }
  ArchOpcode opcode = kArm64Cmp32;
  ImmediateMode immediate_mode = kArithmeticImm;
  if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
    // Emit flag setting add/and instructions for comparisons against zero.
    if (CanUseFlagSettingBinop(cond)) {
      Node* binop = m.left().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              cond, cont, &immediate_mode);
    }
  } else if (m.left().Is(0) &&
             (m.right().IsInt32Add() || m.right().IsWord32And())) {
    // Same as above, but we need to commute the condition before we
    // continue.
    FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
    if (CanUseFlagSettingBinop(commuted_cond)) {
      Node* binop = m.right().node();
      MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
                                              commuted_cond, cont,
                                              &immediate_mode);
    }
  } else if (m.right().IsInt32Sub() && (cond == kEqual || cond == kNotEqual)) {
    // Select negated compare for comparisons with negated right input.
    // Only do this for kEqual and kNotEqual, which do not depend on the
    // C and V flags, as those flags will be different with CMN when the
    // right-hand side of the original subtraction is INT_MIN.
    Node* sub = m.right().node();
    Int32BinopMatcher msub(sub);
    if (msub.left().Is(0)) {
      bool can_cover = selector->CanCover(node, sub);
      node->ReplaceInput(1, msub.right().node());
      // Even if the comparison node covers the subtraction, after the input
      // replacement above, the node still won't cover the input to the
      // subtraction; the subtraction still uses it. To get shifted operations
      // to work, we must remove the rhs input to the subtraction (setting it
      // to the known-zero lhs), as TryMatchAnyShift requires the comparison
      // to cover the shift.
      if (can_cover) sub->ReplaceInput(1, msub.left().node());
      opcode = kArm64Cmn32;
    }
  }
  VisitBinop<Int32BinopMatcher>(selector, node, opcode, immediate_mode, cont);
}
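// E.g. "if ((a & mask) == 0)" needs no separate CMP: it is selected as
// "TST w0, #mask" with the continuation reading the Z flag, and
// "if ((a + b) < 0)" similarly becomes a flag-setting CMN32 reading the
// N flag.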
void VisitWordTest(InstructionSelector* selector, Node* node,
                   InstructionCode opcode, FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
               cont);
}

void VisitWord32Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst32, cont);
}

void VisitWord64Test(InstructionSelector* selector, Node* node,
                     FlagsContinuation* cont) {
  VisitWordTest(selector, node, kArm64Tst, cont);
}
template <typename Matcher>
struct TestAndBranchMatcher {
  TestAndBranchMatcher(Node* node, FlagsContinuation* cont)
      : matches_(false), cont_(cont), matcher_(node) {
    Initialize();
  }

  bool Matches() const { return matches_; }

  unsigned bit() const {
    DCHECK(Matches());
    return base::bits::CountTrailingZeros(matcher_.right().Value());
  }

  Node* input() const {
    DCHECK(Matches());
    return matcher_.left().node();
  }

 private:
  bool matches_;
  FlagsContinuation* cont_;
  Matcher matcher_;

  void Initialize() {
    if (cont_->IsBranch() && !cont_->IsPoisoned() &&
        matcher_.right().HasValue() &&
        base::bits::IsPowerOfTwo(matcher_.right().Value())) {
      // If the mask has only one bit set, we can use tbz/tbnz.
      DCHECK((cont_->condition() == kEqual) ||
             (cont_->condition() == kNotEqual));
      matches_ = true;
    } else {
      matches_ = false;
    }
  }
};
2087 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
2088 FlagsContinuation* cont) {
2089 Arm64OperandGenerator g(selector);
2090 Float32BinopMatcher m(node);
2091 if (m.right().Is(0.0f)) {
2092 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
2093 g.UseImmediate(m.right().node()), cont);
2094 }
else if (m.left().Is(0.0f)) {
2096 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
2097 g.UseImmediate(m.left().node()), cont);
2099 VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
2100 g.UseRegister(m.right().node()), cont);
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  if (m.right().Is(0.0)) {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()), cont);
  } else if (m.left().Is(0.0)) {
    cont->Commute();
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
                 g.UseImmediate(m.left().node()), cont);
  } else {
    VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
                 g.UseRegister(m.right().node()), cont);
  }
}
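
// Shared routine for word atomic exchange; the value is kept in a unique
// register and two scratch registers are reserved, since the code generator
// is expected to expand this into a load-exclusive/store-exclusive retry
// loop.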
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseUniqueRegister(value)};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseUniqueRegister(old_value),
                                 g.UseUniqueRegister(new_value)};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseUniqueRegister(value)};
  InstructionOperand temps[] = {g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
  selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  Arm64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  AddressingMode addressing_mode = kMode_MRR;
  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseUniqueRegister(value)};
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
                                g.TempRegister()};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                 arraysize(temps), temps);
}

}  // namespace
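
// Shared routine for word comparisons against zero.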
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  Arm64OperandGenerator g(this);
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // Try to match bit checks to create TBZ/TBNZ instructions.
  switch (value->opcode()) {
    case IrOpcode::kWord64Equal: {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        Node* const left = m.left().node();
        if (left->opcode() == IrOpcode::kWord64And) {
          // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
          // into a tbz/tbnz instruction.
          TestAndBranchMatcher<Uint64BinopMatcher> tbm(left, cont);
          if (tbm.Matches()) {
            Arm64OperandGenerator gen(this);
            cont->OverwriteAndNegateIfEqual(kEqual);
            this->EmitWithContinuation(kArm64TestAndBranch,
                                       gen.UseRegister(tbm.input()),
                                       gen.TempImmediate(tbm.bit()), cont);
            return;
          }
        }
      }
      break;
    }
    case IrOpcode::kWord32And: {
      TestAndBranchMatcher<Uint32BinopMatcher> tbm(value, cont);
      if (tbm.Matches()) {
        Arm64OperandGenerator gen(this);
        this->EmitWithContinuation(kArm64TestAndBranch32,
                                   gen.UseRegister(tbm.input()),
                                   gen.TempImmediate(tbm.bit()), cont);
        return;
      }
      break;
    }
    case IrOpcode::kWord64And: {
      TestAndBranchMatcher<Uint64BinopMatcher> tbm(value, cont);
      if (tbm.Matches()) {
        Arm64OperandGenerator gen(this);
        this->EmitWithContinuation(kArm64TestAndBranch,
                                   gen.UseRegister(tbm.input()),
                                   gen.TempImmediate(tbm.bit()), cont);
        return;
      }
      break;
    }
    default:
      break;
  }
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          Node* const left = m.left().node();
          if (CanCover(value, left) &&
              left->opcode() == IrOpcode::kWord64And) {
            return VisitWordCompare(this, left, kArm64Tst, cont, true,
                                    kLogical64Imm);
          }
          // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
          if ((cont->IsBranch() || cont->IsDeoptimize()) &&
              !cont->IsPoisoned()) {
            EmitBranchOrDeoptimize(this, kArm64CompareAndBranch,
                                   g.UseRegister(left), cont);
            return;
          }
        }
        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kArm64Cmp, cont, false,
                                kArithmeticImm);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kFloatLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the result of the operation) is either
          // nullptr, which means there is no use of the actual value, or was
          // already defined, which means it is scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                                     kArithmeticImm, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                                     kArithmeticImm, cont);
              case IrOpcode::kInt32MulWithOverflow:
                // ARM64 doesn't set the overflow flag for multiplication, so
                // we need to test on kNotEqual.
                cont->OverwriteAndNegateIfEqual(kNotEqual);
                return EmitInt32MulWithOverflow(this, node, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
                                                     kArithmeticImm, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
                                                     kArithmeticImm, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Add:
        return VisitWordCompare(this, value, kArm64Cmn32, cont, true,
                                kArithmeticImm);
      case IrOpcode::kInt32Sub:
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kArm64Tst32, cont, true,
                                kLogical32Imm);
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kArm64Tst, cont, true,
                                kLogical64Imm);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare; compare against 0 and
  // branch.
  if (!cont->IsPoisoned() && cont->IsBranch()) {
    Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
         g.UseRegister(value), g.Label(cont->true_block()),
         g.Label(cont->false_block()));
  } else {
    EmitWithContinuation(cont->Encode(kArm64Tst32), g.UseRegister(value),
                         g.UseRegister(value), cont);
  }
}
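
// Lowers Switch to either a jump table (ArchTableSwitch) or a binary search
// over the cases (ArchBinarySearchSwitch), chosen by the size/time cost
// estimate computed below.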
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Arm64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kArm64Sub32, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional branches.
  return EmitBinarySearchSwitch(sw, value_operand);
}
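
// Word32Equal against zero can usually be folded into the instruction that
// produces the tested value; the cases below mirror VisitWordCompareZero.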
void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt32Add:
        case IrOpcode::kWord32And:
          return VisitWord32Compare(this, node, &cont);
        case IrOpcode::kInt32Sub:
          return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                  kArithmeticImm);
        case IrOpcode::kWord32Equal: {
          // Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
          Int32BinopMatcher mequal(value);
          node->ReplaceInput(0, mequal.left().node());
          node->ReplaceInput(1, mequal.right().node());
          cont.Negate();
          // Rewire the inner Word32Equal's inputs to the zero constant so it
          // no longer uses x and y, keeping covering checks intact for the
          // rewritten comparison.
          mequal.node()->ReplaceInput(0, m.right().node());
          mequal.node()->ReplaceInput(1, m.right().node());
          return VisitWord32Compare(this, node, &cont);
        }
        default:
          break;
      }
      return VisitWord32Test(this, value, &cont);
    }
  }
  VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
void InstructionSelector::VisitWord64Equal(Node* const node) {
  Node* const user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(user);
  if (m.right().Is(0)) {
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                  kLogical64Imm);
        default:
          break;
      }
      return VisitWord64Test(this, value, &cont);
    }
  }
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
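
// For the <Op>WithOverflow visitors below, the overflow projection (if any)
// selects the flags continuation; without it the arithmetic is emitted with
// an empty continuation and the overflow bit is simply ignored.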
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm,
                                &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
                                         kArithmeticImm, &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm,
                                &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    // ARM64 doesn't set the overflow flag for multiplication, so we need to
    // test on kNotEqual.
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return EmitInt32MulWithOverflow(this, node, &cont);
  }
  FlagsContinuation cont;
  EmitInt32MulWithOverflow(this, node, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
}

void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
                                         &cont);
  }
  FlagsContinuation cont;
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
}
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kFloatLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kFloatLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
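
// Float64Insert{Low,High}Word32 pairs that assemble a full double from two
// word32 halves are fused into a single BFI plus one move into the FP
// register, instead of two separate insert operations.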
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
         g.UseRegister(right_of_left), g.TempImmediate(32),
         g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
    return;
  }
  Emit(kArm64Float64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    Node* right_of_left = left->InputAt(1);
    Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
         g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
    Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
    return;
  }
  Emit(kArm64Float64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode =
          load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
                                   : kWord32AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kWord32AtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  VisitAtomicLoad(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = kArm64Word64AtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kArm64Word64AtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kArm64Word64AtomicLoadUint32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kArm64Word64AtomicLoadUint64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  VisitAtomicLoad(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kWord32AtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kWord32AtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kWord32AtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  VisitAtomicStore(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kArm64Word64AtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kArm64Word64AtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kArm64Word64AtomicStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kArm64Word64AtomicStoreWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  VisitAtomicStore(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kArm64Word64AtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kArm64Word64AtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kArm64Word64AtomicExchangeUint32;
  } else if (type == MachineType::Uint64()) {
    opcode = kArm64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kArm64Word64AtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kArm64Word64AtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kArm64Word64AtomicCompareExchangeUint32;
  } else if (type == MachineType::Uint64()) {
    opcode = kArm64Word64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinop(this, node, opcode);
}
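
// Instantiates one visitor per binop, e.g. VISIT_ATOMIC_BINOP(Add) expands
// to VisitWord32AtomicAdd(node), which dispatches on the atomic's
// MachineType as above.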
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  ArchOpcode opcode = kArchNop;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op)                                                \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {               \
    VisitWord64AtomicBinaryOperation(                                         \
        node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16,  \
        kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64);      \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

#define SIMD_UNOP_LIST(V)                                 \
  V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4)         \
  V(F32x4Abs, kArm64F32x4Abs)                             \
  V(F32x4Neg, kArm64F32x4Neg)                             \
  V(F32x4RecipApprox, kArm64F32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox)     \
  V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4)         \
  V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \
  V(I32x4Neg, kArm64I32x4Neg)                             \
  V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4)         \
  V(I32x4UConvertI16x8Low, kArm64I32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kArm64I32x4UConvertI16x8High) \
  V(I16x8SConvertI8x16Low, kArm64I16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kArm64I16x8SConvertI8x16High) \
  V(I16x8Neg, kArm64I16x8Neg)                             \
  V(I16x8UConvertI8x16Low, kArm64I16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
  V(I8x16Neg, kArm64I8x16Neg)                             \
  V(S128Not, kArm64S128Not)                               \
  V(S1x4AnyTrue, kArm64S1x4AnyTrue)                       \
  V(S1x4AllTrue, kArm64S1x4AllTrue)                       \
  V(S1x8AnyTrue, kArm64S1x8AnyTrue)                       \
  V(S1x8AllTrue, kArm64S1x8AllTrue)                       \
  V(S1x16AnyTrue, kArm64S1x16AnyTrue)                     \
  V(S1x16AllTrue, kArm64S1x16AllTrue)

#define SIMD_SHIFT_OP_LIST(V) \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)

#define SIMD_BINOP_LIST(V)                        \
  V(F32x4Add, kArm64F32x4Add)                     \
  V(F32x4AddHoriz, kArm64F32x4AddHoriz)           \
  V(F32x4Sub, kArm64F32x4Sub)                     \
  V(F32x4Mul, kArm64F32x4Mul)                     \
  V(F32x4Min, kArm64F32x4Min)                     \
  V(F32x4Max, kArm64F32x4Max)                     \
  V(F32x4Eq, kArm64F32x4Eq)                       \
  V(F32x4Ne, kArm64F32x4Ne)                       \
  V(F32x4Lt, kArm64F32x4Lt)                       \
  V(F32x4Le, kArm64F32x4Le)                       \
  V(I32x4Add, kArm64I32x4Add)                     \
  V(I32x4AddHoriz, kArm64I32x4AddHoriz)           \
  V(I32x4Sub, kArm64I32x4Sub)                     \
  V(I32x4Mul, kArm64I32x4Mul)                     \
  V(I32x4MinS, kArm64I32x4MinS)                   \
  V(I32x4MaxS, kArm64I32x4MaxS)                   \
  V(I32x4Eq, kArm64I32x4Eq)                       \
  V(I32x4Ne, kArm64I32x4Ne)                       \
  V(I32x4GtS, kArm64I32x4GtS)                     \
  V(I32x4GeS, kArm64I32x4GeS)                     \
  V(I32x4MinU, kArm64I32x4MinU)                   \
  V(I32x4MaxU, kArm64I32x4MaxU)                   \
  V(I32x4GtU, kArm64I32x4GtU)                     \
  V(I32x4GeU, kArm64I32x4GeU)                     \
  V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
  V(I16x8Add, kArm64I16x8Add)                     \
  V(I16x8AddSaturateS, kArm64I16x8AddSaturateS)   \
  V(I16x8AddHoriz, kArm64I16x8AddHoriz)           \
  V(I16x8Sub, kArm64I16x8Sub)                     \
  V(I16x8SubSaturateS, kArm64I16x8SubSaturateS)   \
  V(I16x8Mul, kArm64I16x8Mul)                     \
  V(I16x8MinS, kArm64I16x8MinS)                   \
  V(I16x8MaxS, kArm64I16x8MaxS)                   \
  V(I16x8Eq, kArm64I16x8Eq)                       \
  V(I16x8Ne, kArm64I16x8Ne)                       \
  V(I16x8GtS, kArm64I16x8GtS)                     \
  V(I16x8GeS, kArm64I16x8GeS)                     \
  V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
  V(I16x8AddSaturateU, kArm64I16x8AddSaturateU)   \
  V(I16x8SubSaturateU, kArm64I16x8SubSaturateU)   \
  V(I16x8MinU, kArm64I16x8MinU)                   \
  V(I16x8MaxU, kArm64I16x8MaxU)                   \
  V(I16x8GtU, kArm64I16x8GtU)                     \
  V(I16x8GeU, kArm64I16x8GeU)                     \
  V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
  V(I8x16Add, kArm64I8x16Add)                     \
  V(I8x16AddSaturateS, kArm64I8x16AddSaturateS)   \
  V(I8x16Sub, kArm64I8x16Sub)                     \
  V(I8x16SubSaturateS, kArm64I8x16SubSaturateS)   \
  V(I8x16Mul, kArm64I8x16Mul)                     \
  V(I8x16MinS, kArm64I8x16MinS)                   \
  V(I8x16MaxS, kArm64I8x16MaxS)                   \
  V(I8x16Eq, kArm64I8x16Eq)                       \
  V(I8x16Ne, kArm64I8x16Ne)                       \
  V(I8x16GtS, kArm64I8x16GtS)                     \
  V(I8x16GeS, kArm64I8x16GeS)                     \
  V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
  V(I8x16AddSaturateU, kArm64I8x16AddSaturateU)   \
  V(I8x16SubSaturateU, kArm64I8x16SubSaturateU)   \
  V(I8x16MinU, kArm64I8x16MinU)                   \
  V(I8x16MaxU, kArm64I8x16MaxU)                   \
  V(I8x16GtU, kArm64I8x16GtU)                     \
  V(I8x16GeU, kArm64I8x16GeU)                     \
  V(S128And, kArm64S128And)                       \
  V(S128Or, kArm64S128Or)                         \
  V(S128Xor, kArm64S128Xor)

void InstructionSelector::VisitS128Zero(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
}
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kArm64##Type##Splat, node);                \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    VisitRRI(this, kArm64##Type##ExtractLane, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kArm64##Type##ReplaceLane, node);              \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
#undef SIMD_TYPE_LIST

#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
#undef SIMD_UNOP_LIST

#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kArm64##Name, node);               \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
#undef SIMD_SHIFT_OP_LIST

#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST

void InstructionSelector::VisitS128Select(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64S128Select, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseRegister(node->InputAt(2)));
}
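
// Shuffles that map directly to architecture-specific instructions; byte
// indices 16-31 select from the second input.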
namespace {

struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kArm64S32x4ZipLeft},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kArm64S32x4ZipRight},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kArm64S32x4UnzipLeft},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kArm64S32x4UnzipRight},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kArm64S32x4TransposeLeft},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kArm64S32x4TransposeRight},
    {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11},
     kArm64S32x2Reverse},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kArm64S16x8ZipLeft},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kArm64S16x8ZipRight},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kArm64S16x8UnzipLeft},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kArm64S16x8UnzipRight},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kArm64S16x8TransposeLeft},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kArm64S16x8TransposeRight},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kArm64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kArm64S16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kArm64S8x16ZipLeft},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kArm64S8x16ZipRight},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kArm64S8x16UnzipLeft},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kArm64S8x16UnzipRight},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kArm64S8x16TransposeLeft},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kArm64S8x16TransposeRight},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArm64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArm64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kArm64S8x2Reverse}};
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
                         size_t num_entries, bool is_swizzle,
                         ArchOpcode* opcode) {
  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
  for (size_t i = 0; i < num_entries; i++) {
    const ShuffleEntry& entry = table[i];
    int j = 0;
    for (; j < kSimd128Size; j++) {
      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
        break;
      }
    }
    if (j == kSimd128Size) {
      *opcode = entry.opcode;
      return true;
    }
  }
  return false;
}
void ArrangeShuffleTable(Arm64OperandGenerator* g, Node* input0, Node* input1,
                         InstructionOperand* src0, InstructionOperand* src1) {
  if (input0 == input1) {
    // Unary, any q-register can be the table.
    *src0 = *src1 = g->UseRegister(input0);
  } else {
    // Binary, table registers must be consecutive.
    *src0 = g->UseFixed(input0, fp_fixed1);
    *src1 = g->UseFixed(input1, fp_fixed2);
  }
}

}  // namespace
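
// Tries progressively cheaper lowerings for a general 8x16 shuffle: the
// native shuffle table above, then EXT (concat), 32x4 shuffles and lane
// DUPs, and finally a full TBL table lookup.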
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  uint8_t shuffle32x4[4];
  Arm64OperandGenerator g(this);
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t offset;
  if (TryMatchConcat(shuffle, &offset)) {
    Emit(kArm64S8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(offset));
    return;
  }
  int index = 0;
  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    if (TryMatchDup<4>(shuffle, &index)) {
      DCHECK_GT(4, index);
      Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
           g.UseImmediate(4), g.UseImmediate(index % 4));
    } else if (TryMatchIdentity(shuffle)) {
      EmitIdentity(node);
    } else {
      Emit(kArm64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
           g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
    }
    return;
  }
  if (TryMatchDup<8>(shuffle, &index)) {
    DCHECK_GT(8, index);
    Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(8), g.UseImmediate(index % 8));
    return;
  }
  if (TryMatchDup<16>(shuffle, &index)) {
    DCHECK_GT(16, index);
    Emit(kArm64S128Dup, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseImmediate(16), g.UseImmediate(index % 16));
    return;
  }
  // The code generator uses a TBL lookup, so arrange the sources into a
  // valid two-register table if necessary.
  InstructionOperand src0, src1;
  ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
  Emit(kArm64S8x16Shuffle, g.DefineAsRegister(node), src0, src1,
       g.UseImmediate(Pack4Lanes(shuffle)),
       g.UseImmediate(Pack4Lanes(shuffle + 4)),
       g.UseImmediate(Pack4Lanes(shuffle + 8)),
       g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  VisitRR(this, kArm64Sxtb32, node);
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  VisitRR(this, kArm64Sxth32, node);
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  VisitRR(this, kArm64Sxtb, node);
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  VisitRR(this, kArm64Sxth, node);
}

void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  VisitRR(this, kArm64Sxtw, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesAway |
         MachineOperatorBuilder::kFloat32RoundTiesEven |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kWord32ReverseBits |
         MachineOperatorBuilder::kWord64ReverseBits |
         MachineOperatorBuilder::kSpeculationFence;
}
// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}