#include "src/base/adapters.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/roots-inl.h"
#include "src/turbo-assembler.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator final : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kRelocatableInt32Constant:
        return true;
      case IrOpcode::kInt64Constant: {
        const int64_t value = OpParameter<int64_t>(node->op());
        return std::numeric_limits<int32_t>::min() < value &&
               value <= std::numeric_limits<int32_t>::max();
      }
      case IrOpcode::kNumberConstant: {
        const double value = OpParameter<double>(node->op());
        return bit_cast<int64_t>(value) == 0;
      }
      default:
        return false;
    }
  }
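  // The int64 case above accepts only values that fit in 32 bits, since x64
  // instructions sign-extend 32-bit immediates to 64 bits. The strict '<'
  // against std::numeric_limits<int32_t>::min() appears intentional: callers
  // such as UseNegatedImmediate negate accepted immediates, and -kMinInt32 is
  // not representable in 32 bits.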
  int32_t GetImmediateIntegerValue(Node* node) {
    DCHECK(CanBeImmediate(node));
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return static_cast<int32_t>(OpParameter<int64_t>(node->op()));
  }

  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
                          int effect_level) {
    if (input->opcode() != IrOpcode::kLoad ||
        !selector()->CanCover(node, input)) {
      return false;
    }
    if (effect_level != selector()->GetEffectLevel(input)) {
      return false;
    }
    MachineRepresentation rep =
        LoadRepresentationOf(input->op()).representation();
    switch (opcode) {
      case kX64And:
      case kX64Or:
      case kX64Xor:
      case kX64Add:
      case kX64Sub:
      case kX64Push:
      case kX64Cmp:
      case kX64Test:
        return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
      case kX64And32:
      case kX64Or32:
      case kX64Xor32:
      case kX64Add32:
      case kX64Sub32:
      case kX64Cmp32:
      case kX64Test32:
        return rep == MachineRepresentation::kWord32;
      case kX64Cmp16:
      case kX64Test16:
        return rep == MachineRepresentation::kWord16;
      case kX64Cmp8:
      case kX64Test8:
        return rep == MachineRepresentation::kWord8;
      default:
        break;
    }
    return false;
  }

  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             DisplacementMode displacement_mode,
                                             InstructionOperand inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != nullptr && (index != nullptr || displacement != nullptr)) {
      if (base->opcode() == IrOpcode::kInt32Constant &&
          OpParameter<int32_t>(base->op()) == 0) {
        base = nullptr;
      } else if (base->opcode() == IrOpcode::kInt64Constant &&
                 OpParameter<int64_t>(base->op()) == 0) {
        base = nullptr;
      }
    }
    if (base != nullptr) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != nullptr) {
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != nullptr) {
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == nullptr) {
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      if (displacement != nullptr) {
        if (index == nullptr) {
          inputs[(*input_count)++] = UseRegister(displacement);
          mode = kMode_MR;
        } else {
          inputs[(*input_count)++] = UseRegister(index);
          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
                                         ? UseNegatedImmediate(displacement)
                                         : UseImmediate(displacement);
          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
                                                      kMode_M4I, kMode_M8I};
          mode = kMnI_modes[scale_exponent];
        }
      } else {
        inputs[(*input_count)++] = UseRegister(index);
        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0].
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }
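  // In the AddressingMode names used above, "M" marks a memory operand, "R" a
  // base register, a digit the scale factor (1, 2, 4 or 8) applied to the
  // index register, and a trailing "I" an immediate displacement; kMode_MR4I,
  // for example, is base + index*4 + displacement.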
  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                  InstructionOperand inputs[],
                                                  size_t* input_count) {
    if (selector()->CanAddressRelativeToRootsRegister()) {
      LoadMatcher<ExternalReferenceMatcher> m(operand);
      if (m.index().HasValue() && m.object().HasValue()) {
        ptrdiff_t const delta =
            m.index().Value() +
            TurboAssemblerBase::RootRegisterOffsetForExternalReference(
                selector()->isolate(), m.object().Value());
        if (is_int32(delta)) {
          inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
          return kMode_Root;
        }
      }
    }
    BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
    DCHECK(m.matches());
    if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
      return GenerateMemoryOperandInputs(
          m.index(), m.scale(), m.base(), m.displacement(),
          m.displacement_mode(), inputs, input_count);
    } else if (m.base() == nullptr &&
               m.displacement_mode() == kPositiveDisplacement) {
      // The displacement cannot be an immediate, but we can use the
      // displacement as base instead and still benefit from addressing
      // modes for the scale.
      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
                                         nullptr, m.displacement_mode(), inputs,
                                         input_count);
    } else {
      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
      return kMode_MR1;
    }
  }

  InstructionOperand GetEffectiveIndexOperand(Node* index,
                                              AddressingMode* mode) {
    if (CanBeImmediate(index)) {
      *mode = kMode_MRI;
      return UseImmediate(index);
    } else {
      *mode = kMode_MR1;
      return UseUniqueRegister(index);
    }
  }

  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
};
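// CanBeBetterLeftOperand prefers operands that are no longer live after the
// current node: the binops below are emitted with DefineSameAsFirst, so the
// left operand's register is clobbered, which is free when that value is dead
// anyway.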
namespace {

ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kX64Movss;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kX64Movsd;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Movl;
      break;
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
      return kX64MovqDecompressTaggedSigned;
    case MachineRepresentation::kTaggedPointer:
      return kX64MovqDecompressTaggedPointer;
    case MachineRepresentation::kTagged:
      return kX64MovqDecompressAnyTagged;
#else
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord64:
      opcode = kX64Movq;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kX64Movdqu;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }
  return opcode;
}

ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
  switch (store_rep.representation()) {
    case MachineRepresentation::kFloat32:
      return kX64Movss;
    case MachineRepresentation::kFloat64:
      return kX64Movsd;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      return kX64Movb;
    case MachineRepresentation::kWord16:
      return kX64Movw;
    case MachineRepresentation::kWord32:
      return kX64Movl;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      return kX64Movq;
    case MachineRepresentation::kSimd128:
      return kX64Movdqu;
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }
  UNREACHABLE();
}

}  // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int slot = frame_->AllocateSpillSlot(rep.size());
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelector::VisitDebugAbort(Node* node) {
  X64OperandGenerator g(this);
  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}

void InstructionSelector::VisitSpeculationFence(Node* node) {
  X64OperandGenerator g(this);
  Emit(kLFence, g.NoOutput());
}
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  X64OperandGenerator g(this);

  ArchOpcode opcode = GetLoadOpcode(load_rep);
  size_t temp_count = 0;
  InstructionOperand temps[2];
#ifdef V8_COMPRESS_POINTERS
  if (opcode == kX64MovqDecompressAnyTagged) {
    temps[temp_count++] = g.TempRegister();
  }
#ifdef DEBUG
  if (opcode == kX64MovqDecompressTaggedSigned ||
      opcode == kX64MovqDecompressTaggedPointer ||
      opcode == kX64MovqDecompressAnyTagged) {
    temps[temp_count++] = g.TempRegister();
  }
#endif  // DEBUG
#endif  // V8_COMPRESS_POINTERS
  DCHECK_LE(temp_count, arraysize(temps));
  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
  InstructionOperand inputs[3];
  size_t input_count = 0;
  AddressingMode mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(mode);
  if (node->opcode() == IrOpcode::kProtectedLoad) {
    code |= MiscField::encode(kMemoryAccessProtected);
  } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    code |= MiscField::encode(kMemoryAccessPoisoned);
  }
  Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}

void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
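// Protected loads are tagged with kMemoryAccessProtected so the code
// generator registers them with the trap handler (out-of-bounds wasm accesses
// fault instead of being bounds-checked); poisoned loads have their result
// masked with the speculation poison register to mitigate speculative side
// channels.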
void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(store_rep.representation()));
    AddressingMode addressing_mode;
    InstructionOperand inputs[] = {
        g.UseUniqueRegister(base),
        g.GetEffectiveIndexOperand(index, &addressing_mode),
        g.UseUniqueRegister(value)};
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
  } else {
    ArchOpcode opcode = GetStoreOpcode(store_rep);
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
        (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
        CanCover(node, value)) {
      value = value->InputAt(0);
    }
    InstructionOperand value_operand =
        g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
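// The write-barrier path above pins base, index and value in unique registers
// because kArchStoreWithWriteBarrier may call the record-write stub out of
// line; narrow stores can drop a covering TruncateInt64ToInt32 since they
// only write the low bits anyway.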
void InstructionSelector::VisitProtectedStore(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());

  ArchOpcode opcode = GetStoreOpcode(store_rep);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         MiscField::encode(kMemoryAccessProtected);
  InstructionOperand value_operand =
      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
  inputs[input_count++] = value_operand;
  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}

// Architecture supports unaligned access, therefore VisitLoad is used instead.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

// Architecture supports unaligned access, therefore VisitStore is used instead.
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    int effect_level = selector->GetEffectLevel(node);
    if (cont->IsBranch()) {
      effect_level = selector->GetEffectLevel(
          cont->true_block()->PredecessorAt(0)->control_input());
    }
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right) &&
        (!g.CanBeBetterLeftOperand(left) ||
         !g.CanBeMemoryOperand(opcode, node, right, effect_level))) {
      std::swap(left, right);
    }
    if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
      inputs[input_count++] = g.UseRegister(left);
      AddressingMode addressing_mode =
          g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
      opcode |= AddressingModeField::encode(addressing_mode);
    } else {
      inputs[input_count++] = g.UseRegister(left);
      inputs[input_count++] = g.Use(right);
    }
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineSameAsFirst(node);

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, &cont);
}
void InstructionSelector::VisitWord32And(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(0xFF)) {
    Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else if (m.right().Is(0xFFFF)) {
    Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
  } else {
    VisitBinop(this, node, kX64And32);
  }
}

void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And);
}

void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32);
}

void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or);
}

void InstructionSelector::VisitWord32Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint32BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor32);
  }
}

void InstructionSelector::VisitWord64Xor(Node* node) {
  X64OperandGenerator g(this);
  Uint64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
  } else {
    VisitBinop(this, node, kX64Xor);
  }
}
namespace {

bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
                                          Node* node, Node* load) {
  if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) {
    LoadRepresentation load_rep = LoadRepresentationOf(load->op());
    MachineRepresentation rep = load_rep.representation();
    InstructionCode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
        break;
      case MachineRepresentation::kWord32:
      case MachineRepresentation::kWord64:
      case MachineRepresentation::kTaggedSigned:
      case MachineRepresentation::kTagged:
        opcode = kX64Movl;
        break;
      default:
        UNREACHABLE();
        return false;
    }
    X64OperandGenerator g(selector);
    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    size_t input_count = 0;
    InstructionOperand inputs[3];
    AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
        node->InputAt(0), inputs, &input_count);
    opcode |= AddressingModeField::encode(mode);
    selector->Emit(opcode, 1, outputs, input_count, inputs);
    return true;
  }
  return false;
}
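// Truncating a covered 64-bit load to 32 bits needs no separate instruction:
// on little-endian x64 a narrower load of the same address (movl and friends)
// already yields the low word, so the truncation is folded into the load.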
// Shared routine for multiple 32-bit shift operations.
void VisitWord32Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
      selector->CanCover(node, left)) {
    left = left->InputAt(0);
  }

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}

// Shared routine for multiple 64-bit shift operations.
void VisitWord64Shift(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        // The hardware masks the shift count to 6 bits anyway, so an explicit
        // "and count, 0x3F" is redundant and can be skipped.
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}

// Shared routine for multiple shift operations with continuation.
template <typename BinopMatcher, int Bits>
bool TryVisitWordShift(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();

  // A shift by zero does not affect the flags, so the shift can only be
  // combined with a flags continuation for a non-zero immediate count.
  if (!g.CanBeImmediate(right) ||
      (g.GetImmediateIntegerValue(right) & (Bits - 1)) == 0) {
    return false;
  }
  InstructionOperand output = g.DefineSameAsFirst(node);
  InstructionOperand inputs[2];
  inputs[0] = g.UseRegister(left);
  inputs[1] = g.UseImmediate(right);
  selector->EmitWithContinuation(opcode, 1, &output, 2, inputs, cont);
  return true;
}
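// Variable shift counts must live in cl on x64, which is why the
// non-immediate paths above fix the right operand to rcx; shl/shr/sar also
// mask the count (mod 32 or mod 64), which justifies dropping an explicit
// "& 0x3F".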
void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement, DisplacementMode displacement_mode) {
  X64OperandGenerator g(selector);

  InstructionOperand inputs[4];
  size_t input_count = 0;
  AddressingMode mode =
      g.GenerateMemoryOperandInputs(index, scale, base, displacement,
                                    displacement_mode, inputs, &input_count);

  DCHECK_NE(0u, input_count);
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}

}  // namespace
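// lea computes base + index*scale + displacement in a single instruction
// without touching the flags and with a freely chosen destination register,
// which is why adds, shifts and small multiplies below are matched into it.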
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
            kPositiveDisplacement);
    return;
  }
  VisitWord32Shift(this, node, kX64Shl32);
}

void InstructionSelector::VisitWord64Shl(Node* node) {
  X64OperandGenerator g(this);
  Int64ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
            kPositiveDisplacement);
    return;
  } else {
    Int64BinopMatcher m(node);
    if ((m.left().IsChangeInt32ToInt64() ||
         m.left().IsChangeUint32ToUint64()) &&
        m.right().IsInRange(32, 63)) {
      // There is no need to sign/zero-extend to 64-bit if we shift out the
      // upper 32 bits anyway.
      Emit(kX64Shl, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()->InputAt(0)),
           g.UseImmediate(m.right().node()));
      return;
    }
  }
  VisitWord64Shift(this, node, kX64Shl);
}

void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}
namespace {

// Returns the addressing mode that adds an immediate displacement to the
// given displacement-free mode, e.g. kMode_MR1 becomes kMode_MR1I.
inline AddressingMode AddDisplacementToAddressingMode(AddressingMode mode) {
  switch (mode) {
    case kMode_MR:
      return kMode_MRI;
    case kMode_MR1:
      return kMode_MR1I;
    case kMode_MR2:
      return kMode_MR2I;
    case kMode_MR4:
      return kMode_MR4I;
    case kMode_MR8:
      return kMode_MR8I;
    case kMode_M1:
      return kMode_M1I;
    case kMode_M2:
      return kMode_M2I;
    case kMode_M4:
      return kMode_M4I;
    case kMode_M8:
      return kMode_M8I;
    default:
      UNREACHABLE();
  }
}

bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
                                     InstructionCode opcode) {
  DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
         IrOpcode::kWord64Shr == node->opcode());
  X64OperandGenerator g(selector);
  Int64BinopMatcher m(node);
  if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
      m.right().Is(32)) {
    DCHECK_EQ(selector->GetEffectLevel(node),
              selector->GetEffectLevel(m.left().node()));
    // Just load and sign/zero-extend the interesting 4 bytes instead. This
    // happens, for example, when we're loading and untagging SMIs.
    BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
                                                AddressOption::kAllowAll);
    if (mleft.matches() && (mleft.displacement() == nullptr ||
                            g.CanBeImmediate(mleft.displacement()))) {
      size_t input_count = 0;
      InstructionOperand inputs[3];
      AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
          m.left().node(), inputs, &input_count);
      if (mleft.displacement() == nullptr) {
        // Make sure that the addressing mode indicates the presence of an
        // immediate displacement.
        mode = AddDisplacementToAddressingMode(mode);
        inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
      } else {
        // If the displacement ended up in a register, replacing it with an
        // immediate is not allowed.
        if (!inputs[input_count - 1].IsImmediate()) return false;
        int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
        inputs[input_count - 1] =
            ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
      }
      InstructionOperand outputs[] = {g.DefineAsRegister(node)};
      InstructionCode code = opcode | AddressingModeField::encode(mode);
      selector->Emit(code, 1, outputs, input_count, inputs);
      return true;
    }
  }
  return false;
}

}  // namespace
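// Shifting a loaded 64-bit value right by 32 is rewritten as a 4-byte load of
// its upper half: on little-endian x64 the high word lives at
// displacement + 4, which is exactly what the immediate adjustment above
// encodes.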
void InstructionSelector::VisitWord64Shr(Node* node) {
  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
  VisitWord64Shift(this, node, kX64Shr);
}

void InstructionSelector::VisitWord32Sar(Node* node) {
  X64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      // (x << 16) >> 16 is a 16-bit sign extension, i.e. movsxwl.
      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      // (x << 24) >> 24 is an 8-bit sign extension, i.e. movsxbl.
      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
      return;
    }
  }
  VisitWord32Shift(this, node, kX64Sar32);
}

void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
  VisitWord64Shift(this, node, kX64Sar);
}

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}

void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern.
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement(), m.displacement_mode());
    return;
  }

  // No leal pattern match, use addl.
  VisitBinop(this, node, kX64Add32);
}

void InstructionSelector::VisitInt64Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leaq pattern.
  BaseWithIndexAndDisplacement64Matcher m(node);
  if (m.matches() &&
      (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
            m.displacement(), m.displacement_mode());
    return;
  }

  // No leaq pattern match, use addq.
  VisitBinop(this, node, kX64Add);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add, &cont);
}
void InstructionSelector::VisitInt32Sub(Node* node) {
  X64OperandGenerator g(this);
  DCHECK_EQ(node->InputCount(), 2);
  Node* input1 = node->InputAt(0);
  Node* input2 = node->InputAt(1);
  if (input1->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
      g.CanBeImmediate(input2)) {
    int32_t imm = g.GetImmediateIntegerValue(input2);
    InstructionOperand int64_input = g.UseRegister(input1->InputAt(0));
    if (imm == 0) {
      // Emit "movl" for subtraction of 0; the truncation is subsumed by the
      // 32-bit move.
      Emit(kX64Movl, g.DefineAsRegister(node), int64_input);
    } else {
      // Omit truncation and turn subtractions of constant values into
      // immediate "leal" instructions by negating the value.
      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
    }
    return;
  }

  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else if (m.right().Is(0)) {
    // The right operand is zero, so the subtraction is a no-op move.
    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
  } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
    // Turn subtractions of constant values into immediate "leal" instructions
    // by negating the value.
    Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(-m.right().Value()));
  } else {
    VisitBinop(this, node, kX64Sub32);
  }
}
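// Turning "sub reg, imm" into "lea dst, [reg - imm]" is profitable because
// lea leaves the flags alone and can write any destination register, whereas
// sub would clobber its left operand and force a register copy when the left
// value is still live.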
void InstructionSelector::VisitInt64Sub(Node* node) {
  X64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
  } else {
    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
      // Turn subtractions of constant values into immediate "leaq"
      // instructions by negating the value.
      Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
      return;
    }
    VisitBinop(this, node, kX64Sub);
  }
}

void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub, &cont);
}
namespace {

void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else {
    if (g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}

void VisitMulHigh(InstructionSelector* selector, Node* node,
                  ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (selector->IsLive(left) && !selector->IsLive(right)) {
    std::swap(left, right);
  }
  InstructionOperand temps[] = {g.TempRegister(rax)};
  // The right operand must not share rax/rdx, which the instruction reads and
  // writes implicitly, hence the unique register.
  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
                 g.UseUniqueRegister(right), arraysize(temps), temps);
}

void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}

void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}

}  // namespace
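// The fixed-register constraints above mirror x64's widening multiply and
// divide: one-operand imul/mul produces its double-width result in rdx:rax,
// and idiv/div consumes rdx:rax, leaving the quotient in rax and the
// remainder in rdx. Hence MulHigh and Mod define rdx (with rax as temp) while
// Div defines rax (with rdx as temp).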
void InstructionSelector::VisitInt32Mul(Node* node) {
  Int32ScaleMatcher m(node, true);
  if (m.matches()) {
    Node* index = node->InputAt(0);
    Node* base = m.power_of_two_plus_one() ? index : nullptr;
    EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
            kPositiveDisplacement);
    return;
  }
  VisitMul(this, node, kX64Imul32);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Imul32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Imul32, &cont);
}

void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64ImulHigh32);
}

void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}

void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}

void InstructionSelector::VisitUint32Div(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}

void InstructionSelector::VisitUint64Div(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}

void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}

void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}

void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}

void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitMulHigh(this, node, kX64UmulHigh32);
}
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  Node* const value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    MachineRepresentation rep = load_rep.representation();
    InstructionCode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
        break;
      case MachineRepresentation::kWord32:
        opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
        break;
      default:
        UNREACHABLE();
        return;
    }
    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
    size_t input_count = 0;
    InstructionOperand inputs[3];
    AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
        node->InputAt(0), inputs, &input_count);
    opcode |= AddressingModeField::encode(mode);
    Emit(opcode, 1, outputs, input_count, inputs);
  } else {
    Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
  }
}
namespace {

bool ZeroExtendsWord32ToWord64(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Div:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh:
      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
      // the zero-extension is a no-op.
      return true;
    case IrOpcode::kProjection: {
      Node* const value = node->InputAt(0);
      switch (value->opcode()) {
        case IrOpcode::kInt32AddWithOverflow:
        case IrOpcode::kInt32SubWithOverflow:
        case IrOpcode::kInt32MulWithOverflow:
          return true;
        default:
          return false;
      }
    }
    case IrOpcode::kLoad:
    case IrOpcode::kPoisonedLoad: {
      // The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
      // zero-extend to 64-bit on x64, so the zero-extension is a no-op.
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord8:
        case MachineRepresentation::kWord16:
        case MachineRepresentation::kWord32:
          return true;
        default:
          return false;
      }
    }
    default:
      return false;
  }
}

}  // namespace

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (ZeroExtendsWord32ToWord64(value)) {
    // The input is known to produce a zero-extended value already; reuse it.
    return EmitIdentity(node);
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
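// Writing any 32-bit register on x64 implicitly clears bits 63:32 of the
// corresponding 64-bit register, so a ChangeUint32ToUint64 of a value just
// produced by a 32-bit instruction needs no code at all; EmitIdentity simply
// aliases the virtual registers. Only values of unknown origin pay for an
// explicit movl.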
namespace {

void VisitRO(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}

void VisitRR(InstructionSelector* selector, Node* node,
             InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

void VisitRRO(InstructionSelector* selector, Node* node,
              InstructionCode opcode) {
  X64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineSameAsFirst(node),
                 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
}

void VisitFloatBinop(InstructionSelector* selector, Node* node,
                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
  InstructionOperand operand1 = g.Use(node->InputAt(1));
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
  }
}

void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
                    ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
  X64OperandGenerator g(selector);
  if (selector->IsSupported(AVX)) {
    selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
  } else {
    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
  }
}

}  // namespace

#define RO_OP_LIST(V)                                                    \
  V(Word64Clz, kX64Lzcnt)                                                \
  V(Word32Clz, kX64Lzcnt32)                                              \
  V(Word64Ctz, kX64Tzcnt)                                                \
  V(Word32Ctz, kX64Tzcnt32)                                              \
  V(Word64Popcnt, kX64Popcnt)                                            \
  V(Word32Popcnt, kX64Popcnt32)                                          \
  V(Float64Sqrt, kSSEFloat64Sqrt)                                        \
  V(Float32Sqrt, kSSEFloat32Sqrt)                                        \
  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)                            \
  V(ChangeFloat64ToInt64, kSSEFloat64ToInt64)                            \
  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1))   \
  V(TruncateFloat64ToInt64, kSSEFloat64ToInt64)                          \
  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
  V(ChangeFloat64ToUint64, kSSEFloat64ToUint64)                          \
  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)                      \
  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)                        \
  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)                          \
  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)                        \
  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)                            \
  V(ChangeInt64ToFloat64, kSSEInt64ToFloat64)                            \
  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)                          \
  V(RoundFloat64ToInt32, kSSEFloat64ToInt32)                             \
  V(RoundInt32ToFloat32, kSSEInt32ToFloat32)                             \
  V(RoundInt64ToFloat32, kSSEInt64ToFloat32)                             \
  V(RoundUint64ToFloat32, kSSEUint64ToFloat32)                           \
  V(RoundInt64ToFloat64, kSSEInt64ToFloat64)                             \
  V(RoundUint64ToFloat64, kSSEUint64ToFloat64)                           \
  V(RoundUint32ToFloat32, kSSEUint32ToFloat32)                           \
  V(BitcastFloat32ToInt32, kX64BitcastFI)                                \
  V(BitcastFloat64ToInt64, kX64BitcastDL)                                \
  V(BitcastInt32ToFloat32, kX64BitcastIF)                                \
  V(BitcastInt64ToFloat64, kX64BitcastLD)                                \
  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32)                \
  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)              \
  V(SignExtendWord8ToInt32, kX64Movsxbl)                                 \
  V(SignExtendWord16ToInt32, kX64Movsxwl)                                \
  V(SignExtendWord8ToInt64, kX64Movsxbq)                                 \
  V(SignExtendWord16ToInt64, kX64Movsxwq)                                \
  V(SignExtendWord32ToInt64, kX64Movsxlq)

#define RR_OP_LIST(V)                                                         \
  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
  V(Float32RoundTiesEven,                                                     \
    kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
  V(Float64RoundTiesEven,                                                     \
    kSSEFloat64Round | MiscField::encode(kRoundToNearest))

#define RO_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRO(this, node, opcode);                      \
  }
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
#undef RO_OP_LIST

#define RR_VISITOR(Name, opcode)                      \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, node, opcode);                      \
  }
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST
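// Naming scheme for the helpers above: "RO" defines the result in a fresh
// register and lets the input be any operand (register, immediate or memory),
// "RR" forces the input into a register, and "RRO" ties the output to the
// first input (DefineSameAsFirst) with a free second operand, matching x64's
// two-address instruction forms.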
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, node, kArchTruncateDoubleToI);
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shr: {
        Int64BinopMatcher m(value);
        if (m.right().Is(32)) {
          if (CanCoverTransitively(node, value, value->InputAt(0)) &&
              TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
            return EmitIdentity(node);
          }
          Emit(kX64Shr, g.DefineSameAsFirst(node),
               g.UseRegister(m.left().node()), g.TempImmediate(32));
          return;
        }
        break;
      }
      case IrOpcode::kLoad: {
        if (TryMergeTruncateInt64ToInt32IntoLoad(this, node, value)) {
          return;
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
}

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRO(this, node, kSSEFloat32Max);
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRO(this, node, kSSEFloat32Min);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
       temps);
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRO(this, node, kSSEFloat64Max);
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRO(this, node, kSSEFloat64Min);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
       g.UseFixed(node->InputAt(1), xmm1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  X64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
      ->MarkAsCall();
}
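// The Ieee754 operations (sin, cos, pow, ...) are lowered to out-of-line
// calls into the C runtime, so their operands are pinned to the xmm0/xmm1
// argument registers and the instruction is marked as a call so the register
// allocator spills live values across it.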
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  X64OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        int slot = static_cast<int>(n);
        InstructionOperand value = g.CanBeImmediate(input.node)
                                       ? g.UseImmediate(input.node)
                                       : g.UseRegister(input.node);
        Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
      }
    }
  } else {
    // Push any stack arguments.
    int effect_level = GetEffectLevel(node);
    for (PushParameter input : base::Reversed(*arguments)) {
      // Skip any alignment holes in pushed nodes.
      if (input.node == nullptr) continue;
      if (g.CanBeImmediate(input.node)) {
        Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
      } else if (IsSupported(ATOM) ||
                 sequence()->IsFP(GetVirtualRegister(input.node))) {
        // TODO(titzer): X64Push cannot handle stack->stack double moves
        // because there is no way to encode fixed double slots.
        Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
      } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
                                      effect_level)) {
        // Combine the push with the load from memory.
        InstructionOperand outputs[1];
        InstructionOperand inputs[4];
        size_t input_count = 0;
        InstructionCode opcode = kX64Push;
        AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
            input.node, inputs, &input_count);
        opcode |= AddressingModeField::encode(mode);
        Emit(opcode, 0, outputs, input_count, inputs);
      } else {
        Emit(kX64Push, g.NoOutput(), g.UseAny(input.node));
      }
    }
  }
}

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  X64OperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    reverse_slot += output.location.GetSizeInPointers();
    // Skip any alignment holes in nodes.
    if (output.node == nullptr) continue;
    DCHECK(!call_descriptor->IsCFunctionCall());
    if (output.location.GetType() == MachineType::Float32()) {
      MarkAsFloat32(output.node);
    } else if (output.location.GetType() == MachineType::Float64()) {
      MarkAsFloat64(output.node);
    }
    InstructionOperand result = g.DefineAsRegister(output.node);
    InstructionOperand slot = g.UseImmediate(reverse_slot);
    Emit(kX64Peek, 1, &result, 1, &slot);
  }
}

bool InstructionSelector::IsTailCallAddressImmediate() { return true; }

int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
namespace {

void VisitCompareWithMemoryOperand(InstructionSelector* selector,
                                   InstructionCode opcode, Node* left,
                                   InstructionOperand right,
                                   FlagsContinuation* cont) {
  DCHECK_EQ(IrOpcode::kLoad, left->opcode());
  X64OperandGenerator g(selector);
  size_t input_count = 0;
  InstructionOperand inputs[4];
  AddressingMode addressing_mode =
      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
  opcode |= AddressingModeField::encode(addressing_mode);
  inputs[input_count++] = right;

  selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
}

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  Node* left, Node* right, FlagsContinuation* cont,
                  bool commutative) {
  X64OperandGenerator g(selector);
  if (commutative && g.CanBeBetterLeftOperand(right)) {
    std::swap(left, right);
  }
  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}

MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
  if (hint_node->opcode() == IrOpcode::kLoad) {
    MachineType hint = LoadRepresentationOf(hint_node->op());
    if (node->opcode() == IrOpcode::kInt32Constant ||
        node->opcode() == IrOpcode::kInt64Constant) {
      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
                             ? OpParameter<int32_t>(node->op())
                             : OpParameter<int64_t>(node->op());
      if (hint == MachineType::Int8()) {
        if (constant >= std::numeric_limits<int8_t>::min() &&
            constant <= std::numeric_limits<int8_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Uint8()) {
        if (constant >= std::numeric_limits<uint8_t>::min() &&
            constant <= std::numeric_limits<uint8_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Int16()) {
        if (constant >= std::numeric_limits<int16_t>::min() &&
            constant <= std::numeric_limits<int16_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Uint16()) {
        if (constant >= std::numeric_limits<uint16_t>::min() &&
            constant <= std::numeric_limits<uint16_t>::max()) {
          return hint;
        }
      } else if (hint == MachineType::Int32()) {
        return hint;
      } else if (hint == MachineType::Uint32()) {
        if (constant >= 0) return hint;
      }
    }
  }
  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
                                           : MachineType::None();
}
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                    Node* right, FlagsContinuation* cont) {
  // If the load representations don't match, both operands will be
  // zero/sign-extended to 32 bit.
  MachineType left_type = MachineTypeForNarrow(left, right);
  MachineType right_type = MachineTypeForNarrow(right, left);
  if (left_type == right_type) {
    switch (left_type.representation()) {
      case MachineRepresentation::kBit:
      case MachineRepresentation::kWord8: {
        if (opcode == kX64Test32) return kX64Test8;
        if (opcode == kX64Cmp32) {
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kX64Cmp8;
        }
        break;
      }
      case MachineRepresentation::kWord16:
        if (opcode == kX64Test32) return kX64Test16;
        if (opcode == kX64Cmp32) {
          if (left_type.semantic() == MachineSemantic::kUint32) {
            cont->OverwriteUnsignedIfSigned();
          } else {
            CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
          }
          return kX64Cmp16;
        }
        break;
      default:
        break;
    }
  }
  return opcode;
}
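// Narrowing pays off when a 32-bit compare has a loaded 8- or 16-bit operand
// and a constant that fits the loaded width: cmpb/cmpw against memory avoids
// the wider load. When the operands are unsigned (kUint32), any signed
// condition is overwritten with its unsigned counterpart via
// OverwriteUnsignedIfSigned so the narrowed compare stays correct.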
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // The 32-bit comparisons automatically truncate Word64 values to Word32
  // range, so covered truncations can be skipped.
  if (opcode == kX64Cmp32 || opcode == kX64Test32) {
    if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
        selector->CanCover(node, left)) {
      left = left->InputAt(0);
    }
    if (right->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
        selector->CanCover(node, right)) {
      right = right->InputAt(0);
    }
  }

  opcode = TryNarrowOpcodeSize(opcode, left, right, cont);

  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  // Move immediates to the right and memory operands to the left of the
  // comparison, commuting the continuation if necessary.
  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
      (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // Match immediates on right side of comparison.
  if (g.CanBeImmediate(right)) {
    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
      return VisitCompareWithMemoryOperand(selector, opcode, left,
                                           g.UseImmediate(right), cont);
    }
    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
                        cont);
  }

  // Match memory operands on left side of comparison.
  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
    return VisitCompareWithMemoryOperand(selector, opcode, left,
                                         g.UseRegister(right), cont);
  }

  return VisitCompare(selector, opcode, left, right, cont,
                      node->op()->HasProperty(Operator::kCommutative));
}

// Shared routine for 64-bit word comparison operations.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  if (selector->CanUseRootsRegister()) {
    const RootsTable& roots_table = selector->isolate()->roots_table();
    RootIndex root_index;
    HeapObjectBinopMatcher m(node);
    if (m.right().HasValue() &&
        roots_table.IsRootHandle(m.right().Value(), &root_index)) {
      if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
      InstructionCode opcode =
          kX64Cmp | AddressingModeField::encode(kMode_Root);
      return VisitCompare(
          selector, opcode,
          g.TempImmediate(
              TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
          g.UseRegister(m.left().node()), cont);
    } else if (m.left().HasValue() &&
               roots_table.IsRootHandle(m.left().Value(), &root_index)) {
      InstructionCode opcode =
          kX64Cmp | AddressingModeField::encode(kMode_Root);
      return VisitCompare(
          selector, opcode,
          g.TempImmediate(
              TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
          g.UseRegister(m.right().node()), cont);
    }
  }
  StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
      selector->isolate(), node);
  if (m.Matched()) {
    // Compare(Load(js_stack_limit), LoadStackPointer)
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    InstructionCode opcode = cont->Encode(kX64StackCheck);
    CHECK(cont->IsBranch());
    selector->EmitWithContinuation(opcode, cont);
    return;
  }
  WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m(
      node);
  if (wasm_m.Matched()) {
    // This is a wasm stack check. By structure, we know that we can use the
    // stack pointer directly, as wasm code does not modify the stack at
    // points where stack checks are performed.
    Node* left = node->InputAt(0);
    LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
                        InstructionSequence::DefaultRepresentation(),
                        RegisterCode::kRegCode_rsp);
    return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont);
  }
  VisitWordCompare(selector, node, kX64Cmp, cont);
}
// Shared routine for comparison with zero.
void VisitCompareZero(InstructionSelector* selector, Node* user, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  if (cont->IsBranch() &&
      (cont->condition() == kNotEqual || cont->condition() == kEqual)) {
    switch (node->opcode()) {
#define FLAGS_SET_BINOP_LIST(V)        \
  V(kInt32Add, VisitBinop, kX64Add32)  \
  V(kInt32Sub, VisitBinop, kX64Sub32)  \
  V(kWord32And, VisitBinop, kX64And32) \
  V(kWord32Or, VisitBinop, kX64Or32)   \
  V(kInt64Add, VisitBinop, kX64Add)    \
  V(kInt64Sub, VisitBinop, kX64Sub)    \
  V(kWord64And, VisitBinop, kX64And)   \
  V(kWord64Or, VisitBinop, kX64Or)
#define FLAGS_SET_BINOP(opcode, Visit, archOpcode)           \
  case IrOpcode::opcode:                                     \
    if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \
      return Visit(selector, node, archOpcode, cont);        \
    }                                                        \
    break;
      FLAGS_SET_BINOP_LIST(FLAGS_SET_BINOP)
#undef FLAGS_SET_BINOP_LIST
#undef FLAGS_SET_BINOP

#define TRY_VISIT_WORD32_SHIFT TryVisitWordShift<Int32BinopMatcher, 32>
#define TRY_VISIT_WORD64_SHIFT TryVisitWordShift<Int64BinopMatcher, 64>
#define FLAGS_SET_SHIFT_LIST(V)                    \
  V(kWord32Shl, TRY_VISIT_WORD32_SHIFT, kX64Shl32) \
  V(kWord32Shr, TRY_VISIT_WORD32_SHIFT, kX64Shr32) \
  V(kWord64Shl, TRY_VISIT_WORD64_SHIFT, kX64Shl)   \
  V(kWord64Shr, TRY_VISIT_WORD64_SHIFT, kX64Shr)
#define FLAGS_SET_SHIFT(opcode, TryVisit, archOpcode)         \
  case IrOpcode::opcode:                                      \
    if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) {  \
      if (TryVisit(selector, node, archOpcode, cont)) return; \
    }                                                         \
    break;
      FLAGS_SET_SHIFT_LIST(FLAGS_SET_SHIFT)
#undef TRY_VISIT_WORD32_SHIFT
#undef TRY_VISIT_WORD64_SHIFT
#undef FLAGS_SET_SHIFT_LIST
#undef FLAGS_SET_SHIFT
      default:
        break;
    }
  }
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }
  if (node->opcode() == IrOpcode::kLoad) {
    switch (LoadRepresentationOf(node->op()).representation()) {
      case MachineRepresentation::kWord8:
        if (opcode == kX64Cmp32) {
          opcode = kX64Cmp8;
        } else if (opcode == kX64Test32) {
          opcode = kX64Test8;
        }
        break;
      case MachineRepresentation::kWord16:
        if (opcode == kX64Cmp32) {
          opcode = kX64Cmp16;
        } else if (opcode == kX64Test32) {
          opcode = kX64Test16;
        }
        break;
      default:
        break;
    }
  }
  if (g.CanBeMemoryOperand(opcode, user, node, effect_level)) {
    VisitCompareWithMemoryOperand(selector, opcode, node, g.TempImmediate(0),
                                  cont);
  } else {
    VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
  }
}
// Shared routine for Float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}

// Shared routine for Float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Node* const left = node->InputAt(0);
  Node* const right = node->InputAt(1);
  InstructionCode const opcode =
      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
  VisitCompare(selector, opcode, right, left, cont, false);
}
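// Note the commuted operands (right, left): Float64LessThan(a, b) is emitted
// as ucomisd(b, a) and then tested with an unsigned "greater than" condition
// (see VisitWordCompareZero below), which correctly yields false for NaN
// inputs because ucomisd sets CF, ZF and PF for unordered operands.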
1911 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
1912 ArchOpcode opcode) {
1913 X64OperandGenerator g(selector);
1914 Node* base = node->InputAt(0);
1915 Node* index = node->InputAt(1);
1916 Node* value = node->InputAt(2);
1917 AddressingMode addressing_mode;
1918 InstructionOperand inputs[] = {
1919 g.UseUniqueRegister(value), g.UseUniqueRegister(base),
1920 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1921 InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
1922 InstructionOperand temps[] = {g.TempRegister()};
1923 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1924 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1925 arraysize(temps), temps);
1929 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
1930 ArchOpcode opcode) {
1931 X64OperandGenerator g(selector);
1932 Node* base = node->InputAt(0);
1933 Node* index = node->InputAt(1);
1934 Node* old_value = node->InputAt(2);
1935 Node* new_value = node->InputAt(3);
1936 AddressingMode addressing_mode;
1937 InstructionOperand inputs[] = {
1938 g.UseFixed(old_value, rax), g.UseUniqueRegister(new_value),
1939 g.UseUniqueRegister(base),
1940 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1941 InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
1942 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1943 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  AddressingMode addressing_mode;
  InstructionOperand inputs[] = {
      g.UseUniqueRegister(value), g.UseUniqueRegister(base),
      g.GetEffectiveIndexOperand(index, &addressing_mode)};
  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}

}  // namespace
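// Tries to combine a comparison against zero with the instruction that
// produces the flags (compares, subtractions, masks, overflow projections)
// before falling back to an explicit cmp/test against zero.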
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, kX64Cmp32, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, kX64Cmp32, cont);
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the equality check with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(this, value, cont);
              case IrOpcode::kWord64And:
                return VisitWordCompare(this, value, kX64Test, cont);
              default:
                break;
            }
          }
          return VisitCompareZero(this, user, value, kX64Cmp, cont);
        }
        return VisitWord64Compare(this, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan: {
        Float64BinopMatcher m(value);
        if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
          // This matches the pattern
          //
          //   Float64LessThan(#0.0, Float64Abs(x))
          //
          // which TurboFan generates for NumberToBoolean in the general
          // case, and which evaluates to false if x is 0, -0 or NaN. We can
          // compile this to a simple (v)ucomisd using not_equal flags
          // condition, which avoids the costly Float64Abs.
          cont->OverwriteAndNegateIfEqual(kNotEqual);
          InstructionCode const opcode =
              IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
          return VisitCompare(this, opcode, m.left().node(),
                              m.right().InputAt(0), cont, false);
        }
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
        return VisitFloat64Compare(this, value, cont);
      }
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add32, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub32, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Imul32, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Add, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kX64Sub, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        return VisitWordCompare(this, value, kX64Cmp32, cont);
      case IrOpcode::kWord32And:
        return VisitWordCompare(this, value, kX64Test32, cont);
      default:
        break;
    }
  }

  // Branch could not be combined with a compare; emit a compare against 0.
  VisitCompareZero(this, user, value, kX64Cmp32, cont);
}
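// Chooses between a jump table (ArchTableSwitch) and a balanced tree of
// conditional jumps (ArchBinarySearchSwitch) via a simple size/speed cost
// model. For example, 10 contiguous cases (case_count == value_range == 10)
// give
//   table:  (4 + 10) + 3 * 3      = 23
//   lookup: (3 + 2 * 10) + 3 * 10 = 53
// so the jump table wins.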
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  X64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 4 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = g.TempRegister();
      if (sw.min_value()) {
        // The leal automatically zero extends, so the result is a valid
        // 64-bit index.
        Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
             value_operand, g.TempImmediate(-sw.min_value()));
      } else {
        // Zero extend, because we use it as a 64-bit index into the table.
        Emit(kX64Movl, index_operand, value_operand);
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
void InstructionSelector::VisitWord32Equal(Node* const node) {
  Node* user = node;
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(user);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(m.node(), m.left().node(), &cont);
  }
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}
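// The comparison visitors below just select a flags condition and defer to
// the shared compare routines.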
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, kX64Cmp32, &cont);
}
void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    // Try to combine the equality check with a comparison.
    Node* const user = m.node();
    Node* const value = m.left().node();
    if (CanCover(user, value)) {
      switch (value->opcode()) {
        case IrOpcode::kInt64Sub:
          return VisitWord64Compare(this, value, &cont);
        case IrOpcode::kWord64And:
          return VisitWordCompare(this, value, kX64Test, &cont);
        default:
          break;
      }
    }
  }
  VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Add32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Add32, &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kX64Sub32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kX64Sub32, &cont);
}
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  Float64BinopMatcher m(node);
  if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
    // This matches the pattern
    //
    //   Float64LessThan(#0.0, Float64Abs(x))
    //
    // which TurboFan generates for NumberToBoolean in the general case,
    // and which evaluates to false if x is 0, -0 or NaN. We can compile
    // this to a simple (v)ucomisd using not_equal flags condition, which
    // avoids the costly Float64Abs.
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
    InstructionCode const opcode =
        IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
    return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
                        &cont, false);
  }
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
  VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
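// When the high word of the constant double operand is already all zeros,
// replacing the low word reduces to a zero-extending load of the new word.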
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Float64Matcher mleft(left);
  if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
    Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
    return;
  }
  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.Use(right));
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)));
}
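// On x64, aligned loads of up to 64 bits are already atomic, so atomic
// loads lower to ordinary loads.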
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
         load_rep.representation() == MachineRepresentation::kWord16 ||
         load_rep.representation() == MachineRepresentation::kWord32);
  USE(load_rep);
  VisitLoad(node);
}

void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  USE(load_rep);
  VisitLoad(node);
}
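// Sequentially consistent atomic stores need a full barrier on x64;
// lowering the store to an exchange (xchg with a memory operand is
// implicitly locked) provides one.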
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kWord32AtomicExchangeInt8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kWord32AtomicExchangeInt16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kWord32AtomicExchangeWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kX64Word64AtomicExchangeUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kX64Word64AtomicExchangeUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kX64Word64AtomicExchangeUint32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kX64Word64AtomicExchangeUint64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Uint8()) {
    opcode = kX64Word64AtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kX64Word64AtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kX64Word64AtomicExchangeUint32;
  } else if (type == MachineType::Uint64()) {
    opcode = kX64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = kWord32AtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kWord32AtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kWord32AtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kWord32AtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kWord32AtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Uint8()) {
    opcode = kX64Word64AtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kX64Word64AtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kX64Word64AtomicCompareExchangeUint32;
  } else if (type == MachineType::Uint64()) {
    opcode = kX64Word64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicCompareExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op,
    ArchOpcode uint32_op, ArchOpcode word64_op) {
  MachineType type = AtomicOpType(node->op());
  ArchOpcode opcode = kArchNop;
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = word64_op;
  } else {
    UNREACHABLE();
    return;
  }
  VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {          \
    VisitWord64AtomicBinaryOperation(                                    \
        node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
        kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64);     \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

#define SIMD_TYPES(V) \
  V(F32x4)            \
  V(I32x4)            \
  V(I16x8)            \
  V(I8x16)

#define SIMD_BINOP_LIST(V) \
  V(I16x8SConvertI32x4)    \
  V(I16x8AddSaturateS)     \
  V(I16x8SubSaturateS)     \
  V(I16x8AddSaturateU)     \
  V(I16x8SubSaturateU)     \
  V(I8x16SConvertI16x8)    \
  V(I8x16AddSaturateS)     \
  V(I8x16SubSaturateS)     \
  V(I8x16AddSaturateU)     \
  V(I8x16SubSaturateU)

#define SIMD_UNOP_LIST(V)   \
  V(F32x4SConvertI32x4)     \
  V(F32x4RecipApprox)       \
  V(F32x4RecipSqrtApprox)   \
  V(I32x4SConvertI16x8Low)  \
  V(I32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low)  \
  V(I32x4UConvertI16x8High) \
  V(I16x8SConvertI8x16Low)  \
  V(I16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low)  \
  V(I16x8UConvertI8x16High)

#define SIMD_SHIFT_OPCODES(V) \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)

#define SIMD_ANYTRUE_LIST(V) \
  V(S1x4AnyTrue)             \
  V(S1x8AnyTrue)             \
  V(S1x16AnyTrue)

#define SIMD_ALLTRUE_LIST(V) \
  V(S1x4AllTrue)             \
  V(S1x8AllTrue)             \
  V(S1x16AllTrue)

void InstructionSelector::VisitS128Zero(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
}
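// The macros below stamp out one visitor per SIMD type or opcode in the
// lists above; each visitor expands to a plain Emit of the matching kX64
// instruction code.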
#define VISIT_SIMD_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    X64OperandGenerator g(this);                             \
    Emit(kX64##Type##Splat, g.DefineAsRegister(node),        \
         g.Use(node->InputAt(0)));                           \
  }
SIMD_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT

#define VISIT_SIMD_EXTRACT_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    X64OperandGenerator g(this);                                   \
    int32_t lane = OpParameter<int32_t>(node->op());               \
    Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node),        \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));   \
  }
SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE

#define VISIT_SIMD_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    X64OperandGenerator g(this);                                   \
    int32_t lane = OpParameter<int32_t>(node->op());               \
    Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node),       \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),    \
         g.Use(node->InputAt(1)));                                 \
  }
SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE

#define VISIT_SIMD_SHIFT(Opcode)                                  \
  void InstructionSelector::Visit##Opcode(Node* node) {           \
    X64OperandGenerator g(this);                                  \
    int32_t value = OpParameter<int32_t>(node->op());             \
    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                 \
         g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
  }
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES

#define VISIT_SIMD_UNOP(Opcode)                         \
  void InstructionSelector::Visit##Opcode(Node* node) { \
    X64OperandGenerator g(this);                        \
    Emit(kX64##Opcode, g.DefineAsRegister(node),        \
         g.UseRegister(node->InputAt(0)));              \
  }
SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP
#undef SIMD_UNOP_LIST

#define VISIT_SIMD_BINOP(Opcode)                                            \
  void InstructionSelector::Visit##Opcode(Node* node) {                     \
    X64OperandGenerator g(this);                                            \
    Emit(kX64##Opcode, g.DefineSameAsFirst(node),                           \
         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
  }
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST

#define VISIT_SIMD_ANYTRUE(Opcode)                                        \
  void InstructionSelector::Visit##Opcode(Node* node) {                   \
    X64OperandGenerator g(this);                                          \
    InstructionOperand temps[] = {g.TempRegister()};                      \
    Emit(kX64##Opcode, g.DefineAsRegister(node),                          \
         g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
  }
SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
#undef VISIT_SIMD_ANYTRUE
#undef SIMD_ANYTRUE_LIST

#define VISIT_SIMD_ALLTRUE(Opcode)                                        \
  void InstructionSelector::Visit##Opcode(Node* node) {                   \
    X64OperandGenerator g(this);                                          \
    InstructionOperand temps[] = {g.TempRegister()};                      \
    Emit(kX64##Opcode, g.DefineAsRegister(node),                          \
         g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
  }
SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE
#undef SIMD_ALLTRUE_LIST

void InstructionSelector::VisitS128Select(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64S128Select, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseRegister(node->InputAt(2)));
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}

void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64I16x8UConvertI32x4, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
  X64OperandGenerator g(this);
  Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitI8x16Mul(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  Emit(kX64I8x16Mul, g.DefineSameAsFirst(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
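// Int32/Int64AbsWithOverflow are not implemented on x64, so the instruction
// selector must never reach these visitors.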
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kWord32ShiftIsSafe |
      MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
      MachineOperatorBuilder::kSpeculationFence;
  if (CpuFeatures::IsSupported(POPCNT)) {
    flags |= MachineOperatorBuilder::kWord32Popcnt |
             MachineOperatorBuilder::kWord64Popcnt;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    flags |= MachineOperatorBuilder::kFloat32RoundDown |
             MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat32RoundUp |
             MachineOperatorBuilder::kFloat64RoundUp |
             MachineOperatorBuilder::kFloat32RoundTruncate |
             MachineOperatorBuilder::kFloat64RoundTruncate |
             MachineOperatorBuilder::kFloat32RoundTiesEven |
             MachineOperatorBuilder::kFloat64RoundTiesEven;
  }
  return flags;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8