5 #include "src/base/adapters.h" 6 #include "src/compiler/backend/instruction-selector-impl.h" 7 #include "src/compiler/node-matchers.h" 8 #include "src/compiler/node-properties.h" 22 return UseFixed(node, edx);
27 return DefineAsRegister(node);
30 bool CanBeMemoryOperand(InstructionCode opcode,
Node* node,
Node* input,
32 if (input->opcode() != IrOpcode::kLoad ||
33 !selector()->CanCover(node, input)) {
36 if (effect_level != selector()->GetEffectLevel(input)) {
39 MachineRepresentation rep =
40 LoadRepresentationOf(input->op()).representation();
49 return rep == MachineRepresentation::kWord32 || IsAnyTagged(rep);
52 return rep == MachineRepresentation::kWord16;
55 return rep == MachineRepresentation::kWord8;
62 bool CanBeImmediate(
Node* node) {
63 switch (node->opcode()) {
64 case IrOpcode::kInt32Constant:
65 case IrOpcode::kNumberConstant:
66 case IrOpcode::kExternalConstant:
67 case IrOpcode::kRelocatableInt32Constant:
68 case IrOpcode::kRelocatableInt64Constant:
70 case IrOpcode::kHeapConstant: {
78 return !Heap::InNewSpace(*value);
88 AddressingMode GenerateMemoryOperandInputs(
Node* index,
int scale,
Node* base,
89 Node* displacement_node,
90 DisplacementMode displacement_mode,
92 size_t* input_count) {
93 AddressingMode mode = kMode_MRI;
94 int32_t displacement = (displacement_node ==
nullptr)
96 : OpParameter<int32_t>(displacement_node->op());
97 if (displacement_mode == kNegativeDisplacement) {
98 displacement = -displacement;
100 if (base !=
nullptr) {
101 if (base->opcode() == IrOpcode::kInt32Constant) {
102 displacement += OpParameter<int32_t>(base->op());
106 if (base !=
nullptr) {
107 inputs[(*input_count)++] = UseRegister(base);
108 if (index !=
nullptr) {
109 DCHECK(scale >= 0 && scale <= 3);
110 inputs[(*input_count)++] = UseRegister(index);
111 if (displacement != 0) {
112 inputs[(*input_count)++] = TempImmediate(displacement);
113 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
114 kMode_MR4I, kMode_MR8I};
115 mode = kMRnI_modes[scale];
117 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
118 kMode_MR4, kMode_MR8};
119 mode = kMRn_modes[scale];
122 if (displacement == 0) {
125 inputs[(*input_count)++] = TempImmediate(displacement);
130 DCHECK(scale >= 0 && scale <= 3);
131 if (index !=
nullptr) {
132 inputs[(*input_count)++] = UseRegister(index);
133 if (displacement != 0) {
134 inputs[(*input_count)++] = TempImmediate(displacement);
135 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
136 kMode_M4I, kMode_M8I};
137 mode = kMnI_modes[scale];
139 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
141 mode = kMn_modes[scale];
144 inputs[(*input_count)++] = TempImmediate(displacement);
151 AddressingMode GetEffectiveAddressMemoryOperand(
Node* node,
153 size_t* input_count) {
156 if ((m.displacement() ==
nullptr || CanBeImmediate(m.displacement()))) {
157 return GenerateMemoryOperandInputs(
158 m.index(), m.scale(), m.base(), m.displacement(),
159 m.displacement_mode(), inputs, input_count);
161 inputs[(*input_count)++] = UseRegister(node->InputAt(0));
162 inputs[(*input_count)++] = UseRegister(node->InputAt(1));
168 AddressingMode* mode) {
169 if (CanBeImmediate(index)) {
171 return UseImmediate(index);
174 return UseUniqueRegister(index);
178 bool CanBeBetterLeftOperand(
Node* node)
const {
179 return !selector()->IsLive(node);
188 selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
189 arraysize(temps), temps);
192 void VisitRR(InstructionSelector* selector, Node* node,
193 InstructionCode opcode) {
194 IA32OperandGenerator g(selector);
195 selector->Emit(opcode, g.DefineAsRegister(node),
196 g.UseRegister(node->InputAt(0)));
199 void VisitRROFloat(InstructionSelector* selector, Node* node,
200 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
201 IA32OperandGenerator g(selector);
202 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
203 InstructionOperand operand1 = g.Use(node->InputAt(1));
204 if (selector->IsSupported(AVX)) {
205 selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
207 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
211 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
212 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
213 IA32OperandGenerator g(selector);
214 if (selector->IsSupported(AVX)) {
215 selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
217 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
221 void VisitRRSimd(InstructionSelector* selector, Node* node,
222 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
223 IA32OperandGenerator g(selector);
224 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
225 if (selector->IsSupported(AVX)) {
226 selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0);
228 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0);
232 void VisitRRISimd(InstructionSelector* selector, Node* node,
234 IA32OperandGenerator g(selector);
235 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
236 InstructionOperand operand1 =
237 g.UseImmediate(OpParameter<int32_t>(node->op()));
240 InstructionOperand dest = opcode == kIA32I8x16ExtractLane
241 ? g.DefineAsFixed(node, eax)
242 : g.DefineAsRegister(node);
243 selector->Emit(opcode, dest, operand0, operand1);
246 void VisitRRISimd(InstructionSelector* selector, Node* node,
247 ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
248 IA32OperandGenerator g(selector);
249 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
250 InstructionOperand operand1 =
251 g.UseImmediate(OpParameter<int32_t>(node->op()));
252 if (selector->IsSupported(AVX)) {
253 selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
255 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
261 void InstructionSelector::VisitStackSlot(Node* node) {
262 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
263 int slot = frame_->AllocateSpillSlot(rep.size());
264 OperandGenerator g(
this);
266 Emit(kArchStackSlot, g.DefineAsRegister(node),
267 sequence()->AddImmediate(Constant(slot)), 0,
nullptr);
270 void InstructionSelector::VisitDebugAbort(Node* node) {
271 IA32OperandGenerator g(
this);
272 Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
275 void InstructionSelector::VisitSpeculationFence(Node* node) {
276 IA32OperandGenerator g(
this);
277 Emit(kLFence, g.NoOutput());
280 void InstructionSelector::VisitLoad(Node* node) {
281 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
283 ArchOpcode opcode = kArchNop;
284 switch (load_rep.representation()) {
285 case MachineRepresentation::kFloat32:
288 case MachineRepresentation::kFloat64:
291 case MachineRepresentation::kBit:
292 case MachineRepresentation::kWord8:
293 opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
295 case MachineRepresentation::kWord16:
296 opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
298 case MachineRepresentation::kTaggedSigned:
299 case MachineRepresentation::kTaggedPointer:
300 case MachineRepresentation::kTagged:
301 case MachineRepresentation::kWord32:
304 case MachineRepresentation::kSimd128:
305 opcode = kIA32Movdqu;
307 case MachineRepresentation::kWord64:
308 case MachineRepresentation::kNone:
313 IA32OperandGenerator g(
this);
314 InstructionOperand outputs[1];
315 outputs[0] = g.DefineAsRegister(node);
316 InstructionOperand inputs[3];
317 size_t input_count = 0;
318 AddressingMode mode =
319 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
320 InstructionCode code = opcode | AddressingModeField::encode(mode);
321 if (node->opcode() == IrOpcode::kPoisonedLoad) {
322 CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
323 code |= MiscField::encode(kMemoryAccessPoisoned);
325 Emit(code, 1, outputs, input_count, inputs);
328 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
330 void InstructionSelector::VisitProtectedLoad(Node* node) {
335 void InstructionSelector::VisitStore(Node* node) {
336 IA32OperandGenerator g(
this);
337 Node* base = node->InputAt(0);
338 Node* index = node->InputAt(1);
339 Node* value = node->InputAt(2);
341 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
342 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
343 MachineRepresentation rep = store_rep.representation();
345 if (write_barrier_kind != kNoWriteBarrier) {
346 DCHECK(CanBeTaggedPointer(rep));
347 AddressingMode addressing_mode;
348 InstructionOperand inputs[] = {
349 g.UseUniqueRegister(base),
350 g.GetEffectiveIndexOperand(index, &addressing_mode),
351 g.UseUniqueRegister(value)};
352 RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
353 switch (write_barrier_kind) {
354 case kNoWriteBarrier:
357 case kMapWriteBarrier:
358 record_write_mode = RecordWriteMode::kValueIsMap;
360 case kPointerWriteBarrier:
361 record_write_mode = RecordWriteMode::kValueIsPointer;
363 case kFullWriteBarrier:
364 record_write_mode = RecordWriteMode::kValueIsAny;
367 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
368 size_t const temp_count = arraysize(temps);
369 InstructionCode code = kArchStoreWithWriteBarrier;
370 code |= AddressingModeField::encode(addressing_mode);
371 code |= MiscField::encode(static_cast<int>(record_write_mode));
372 Emit(code, 0,
nullptr, arraysize(inputs), inputs, temp_count, temps);
374 ArchOpcode opcode = kArchNop;
376 case MachineRepresentation::kFloat32:
379 case MachineRepresentation::kFloat64:
382 case MachineRepresentation::kBit:
383 case MachineRepresentation::kWord8:
386 case MachineRepresentation::kWord16:
389 case MachineRepresentation::kTaggedSigned:
390 case MachineRepresentation::kTaggedPointer:
391 case MachineRepresentation::kTagged:
392 case MachineRepresentation::kWord32:
395 case MachineRepresentation::kSimd128:
396 opcode = kIA32Movdqu;
398 case MachineRepresentation::kWord64:
399 case MachineRepresentation::kNone:
404 InstructionOperand val;
405 if (g.CanBeImmediate(value)) {
406 val = g.UseImmediate(value);
407 }
else if (rep == MachineRepresentation::kWord8 ||
408 rep == MachineRepresentation::kBit) {
409 val = g.UseByteRegister(value);
411 val = g.UseRegister(value);
414 InstructionOperand inputs[4];
415 size_t input_count = 0;
416 AddressingMode addressing_mode =
417 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
418 InstructionCode code =
419 opcode | AddressingModeField::encode(addressing_mode);
420 inputs[input_count++] = val;
421 Emit(code, 0, static_cast<InstructionOperand*>(
nullptr), input_count,
426 void InstructionSelector::VisitProtectedStore(Node* node) {
432 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
435 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
440 void VisitBinop(InstructionSelector* selector, Node* node,
441 InstructionCode opcode, FlagsContinuation* cont) {
442 IA32OperandGenerator g(selector);
443 Int32BinopMatcher m(node);
444 Node* left = m.left().node();
445 Node* right = m.right().node();
446 InstructionOperand inputs[6];
447 size_t input_count = 0;
448 InstructionOperand outputs[1];
449 size_t output_count = 0;
460 InstructionOperand
const input = g.UseRegister(left);
461 inputs[input_count++] = input;
462 inputs[input_count++] = input;
463 }
else if (g.CanBeImmediate(right)) {
464 inputs[input_count++] = g.UseRegister(left);
465 inputs[input_count++] = g.UseImmediate(right);
467 int effect_level = selector->GetEffectLevel(node);
468 if (cont->IsBranch()) {
469 effect_level = selector->GetEffectLevel(
470 cont->true_block()->PredecessorAt(0)->control_input());
472 if (node->op()->HasProperty(Operator::kCommutative) &&
473 g.CanBeBetterLeftOperand(right) &&
474 (!g.CanBeBetterLeftOperand(left) ||
475 !g.CanBeMemoryOperand(opcode, node, right, effect_level))) {
476 std::swap(left, right);
478 if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
479 inputs[input_count++] = g.UseRegister(left);
480 AddressingMode addressing_mode =
481 g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
482 opcode |= AddressingModeField::encode(addressing_mode);
484 inputs[input_count++] = g.UseRegister(left);
485 inputs[input_count++] = g.Use(right);
489 outputs[output_count++] = g.DefineSameAsFirst(node);
491 DCHECK_NE(0u, input_count);
492 DCHECK_EQ(1u, output_count);
493 DCHECK_GE(arraysize(inputs), input_count);
494 DCHECK_GE(arraysize(outputs), output_count);
496 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
501 void VisitBinop(InstructionSelector* selector, Node* node,
502 InstructionCode opcode) {
503 FlagsContinuation cont;
504 VisitBinop(selector, node, opcode, &cont);
509 void InstructionSelector::VisitWord32And(Node* node) {
510 VisitBinop(
this, node, kIA32And);
513 void InstructionSelector::VisitWord32Or(Node* node) {
514 VisitBinop(
this, node, kIA32Or);
517 void InstructionSelector::VisitWord32Xor(Node* node) {
518 IA32OperandGenerator g(
this);
519 Int32BinopMatcher m(node);
520 if (m.right().Is(-1)) {
521 Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
523 VisitBinop(
this, node, kIA32Xor);
528 static inline void VisitShift(InstructionSelector* selector, Node* node,
530 IA32OperandGenerator g(selector);
531 Node* left = node->InputAt(0);
532 Node* right = node->InputAt(1);
534 if (g.CanBeImmediate(right)) {
535 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
536 g.UseImmediate(right));
538 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
539 g.UseFixed(right, ecx));
545 void VisitMulHigh(InstructionSelector* selector, Node* node,
547 IA32OperandGenerator g(selector);
548 InstructionOperand temps[] = {g.TempRegister(eax)};
550 opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
551 g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
554 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
555 IA32OperandGenerator g(selector);
556 InstructionOperand temps[] = {g.TempRegister(edx)};
557 selector->Emit(opcode, g.DefineAsFixed(node, eax),
558 g.UseFixed(node->InputAt(0), eax),
559 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
562 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
563 IA32OperandGenerator g(selector);
564 InstructionOperand temps[] = {g.TempRegister(eax)};
565 selector->Emit(opcode, g.DefineAsFixed(node, edx),
566 g.UseFixed(node->InputAt(0), eax),
567 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
570 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
571 int scale, Node* base, Node* displacement,
572 DisplacementMode displacement_mode) {
573 IA32OperandGenerator g(selector);
574 InstructionOperand inputs[4];
575 size_t input_count = 0;
576 AddressingMode mode =
577 g.GenerateMemoryOperandInputs(index, scale, base, displacement,
578 displacement_mode, inputs, &input_count);
580 DCHECK_NE(0u, input_count);
581 DCHECK_GE(arraysize(inputs), input_count);
583 InstructionOperand outputs[1];
584 outputs[0] = g.DefineAsRegister(result);
586 InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
588 selector->Emit(opcode, 1, outputs, input_count, inputs);
593 void InstructionSelector::VisitWord32Shl(Node* node) {
594 Int32ScaleMatcher m(node,
true);
596 Node* index = node->InputAt(0);
597 Node* base = m.power_of_two_plus_one() ? index :
nullptr;
598 EmitLea(
this, node, index, m.scale(), base,
nullptr, kPositiveDisplacement);
601 VisitShift(
this, node, kIA32Shl);
604 void InstructionSelector::VisitWord32Shr(Node* node) {
605 VisitShift(
this, node, kIA32Shr);
608 void InstructionSelector::VisitWord32Sar(Node* node) {
609 VisitShift(
this, node, kIA32Sar);
612 void InstructionSelector::VisitInt32PairAdd(Node* node) {
613 IA32OperandGenerator g(
this);
615 Node* projection1 = NodeProperties::FindProjection(node, 1);
619 InstructionOperand inputs[] = {
620 g.UseRegister(node->InputAt(0)),
621 g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(1)),
622 g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
624 InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
625 g.DefineAsRegister(projection1)};
627 InstructionOperand temps[] = {g.TempRegister()};
629 Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
633 Emit(kIA32Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
634 g.Use(node->InputAt(2)));
638 void InstructionSelector::VisitInt32PairSub(Node* node) {
639 IA32OperandGenerator g(
this);
641 Node* projection1 = NodeProperties::FindProjection(node, 1);
645 InstructionOperand inputs[] = {
646 g.UseRegister(node->InputAt(0)),
647 g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(1)),
648 g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
650 InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
651 g.DefineAsRegister(projection1)};
653 InstructionOperand temps[] = {g.TempRegister()};
655 Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
659 Emit(kIA32Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
660 g.Use(node->InputAt(2)));
664 void InstructionSelector::VisitInt32PairMul(Node* node) {
665 IA32OperandGenerator g(
this);
667 Node* projection1 = NodeProperties::FindProjection(node, 1);
671 InstructionOperand inputs[] = {
672 g.UseUnique(node->InputAt(0)),
673 g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(1)),
674 g.UseUniqueRegister(node->InputAt(2)),
675 g.UseFixed(node->InputAt(3), ecx)};
677 InstructionOperand outputs[] = {
678 g.DefineAsFixed(node, eax),
679 g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
681 InstructionOperand temps[] = {g.TempRegister(edx)};
683 Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
687 Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
688 g.Use(node->InputAt(2)));
692 void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
694 IA32OperandGenerator g(selector);
696 Node* shift = node->InputAt(2);
697 InstructionOperand shift_operand;
698 if (g.CanBeImmediate(shift)) {
699 shift_operand = g.UseImmediate(shift);
701 shift_operand = g.UseFixed(shift, ecx);
703 InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
704 g.UseFixed(node->InputAt(1), edx),
707 InstructionOperand outputs[2];
708 InstructionOperand temps[1];
709 int32_t output_count = 0;
710 int32_t temp_count = 0;
711 outputs[output_count++] = g.DefineAsFixed(node, eax);
712 Node* projection1 = NodeProperties::FindProjection(node, 1);
714 outputs[output_count++] = g.DefineAsFixed(projection1, edx);
716 temps[temp_count++] = g.TempRegister(edx);
719 selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
722 void InstructionSelector::VisitWord32PairShl(Node* node) {
723 VisitWord32PairShift(
this, kIA32ShlPair, node);
726 void InstructionSelector::VisitWord32PairShr(Node* node) {
727 VisitWord32PairShift(
this, kIA32ShrPair, node);
730 void InstructionSelector::VisitWord32PairSar(Node* node) {
731 VisitWord32PairShift(
this, kIA32SarPair, node);
734 void InstructionSelector::VisitWord32Ror(Node* node) {
735 VisitShift(
this, node, kIA32Ror);
738 #define RO_OP_LIST(V) \ 739 V(Word32Clz, kIA32Lzcnt) \ 740 V(Word32Ctz, kIA32Tzcnt) \ 741 V(Word32Popcnt, kIA32Popcnt) \ 742 V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \ 743 V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \ 744 V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \ 745 V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \ 746 V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \ 747 V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \ 748 V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \ 749 V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \ 750 V(TruncateFloat64ToUint32, kSSEFloat64ToUint32) \ 751 V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \ 752 V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \ 753 V(BitcastFloat32ToInt32, kIA32BitcastFI) \ 754 V(BitcastInt32ToFloat32, kIA32BitcastIF) \ 755 V(Float32Sqrt, kSSEFloat32Sqrt) \ 756 V(Float64Sqrt, kSSEFloat64Sqrt) \ 757 V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \ 758 V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \ 759 V(SignExtendWord8ToInt32, kIA32Movsxbl) \ 760 V(SignExtendWord16ToInt32, kIA32Movsxwl) 762 #define RR_OP_LIST(V) \ 763 V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \ 764 V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \ 765 V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \ 766 V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \ 767 V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \ 768 V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \ 769 V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \ 770 V(Float32RoundTiesEven, \ 771 kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \ 772 V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest)) 774 #define RRO_FLOAT_OP_LIST(V) \ 775 V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \ 776 V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \ 777 V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \ 778 
V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \ 779 V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ 780 V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \ 781 V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ 782 V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) 784 #define FLOAT_UNOP_LIST(V) \ 785 V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ 786 V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ 787 V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ 788 V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) 790 #define RO_VISITOR(Name, opcode) \ 791 void InstructionSelector::Visit##Name(Node* node) { \ 792 VisitRO(this, node, opcode); \ 794 RO_OP_LIST(RO_VISITOR)
798 #define RR_VISITOR(Name, opcode) \ 799 void InstructionSelector::Visit##Name(Node* node) { \ 800 VisitRR(this, node, opcode); \ 802 RR_OP_LIST(RR_VISITOR)
806 #define RRO_FLOAT_VISITOR(Name, avx, sse) \ 807 void InstructionSelector::Visit##Name(Node* node) { \ 808 VisitRROFloat(this, node, avx, sse); \ 810 RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
811 #undef RRO_FLOAT_VISITOR 812 #undef RRO_FLOAT_OP_LIST 814 #define FLOAT_UNOP_VISITOR(Name, avx, sse) \ 815 void InstructionSelector::Visit##Name(Node* node) { \ 816 VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \ 818 FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
819 #undef FLOAT_UNOP_VISITOR 820 #undef FLOAT_UNOP_LIST 822 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
824 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
826 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
827 IA32OperandGenerator g(
this);
828 Emit(kIA32Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
831 void InstructionSelector::VisitInt32Add(Node* node) {
832 IA32OperandGenerator g(
this);
835 BaseWithIndexAndDisplacement32Matcher m(node);
837 (m.displacement() ==
nullptr || g.CanBeImmediate(m.displacement()))) {
838 InstructionOperand inputs[4];
839 size_t input_count = 0;
840 AddressingMode mode = g.GenerateMemoryOperandInputs(
841 m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
842 inputs, &input_count);
844 DCHECK_NE(0u, input_count);
845 DCHECK_GE(arraysize(inputs), input_count);
847 InstructionOperand outputs[1];
848 outputs[0] = g.DefineAsRegister(node);
850 InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
851 Emit(opcode, 1, outputs, input_count, inputs);
856 VisitBinop(
this, node, kIA32Add);
859 void InstructionSelector::VisitInt32Sub(Node* node) {
860 IA32OperandGenerator g(
this);
861 Int32BinopMatcher m(node);
862 if (m.left().Is(0)) {
863 Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
865 VisitBinop(
this, node, kIA32Sub);
869 void InstructionSelector::VisitInt32Mul(Node* node) {
870 Int32ScaleMatcher m(node,
true);
872 Node* index = node->InputAt(0);
873 Node* base = m.power_of_two_plus_one() ? index :
nullptr;
874 EmitLea(
this, node, index, m.scale(), base,
nullptr, kPositiveDisplacement);
877 IA32OperandGenerator g(
this);
878 Node* left = node->InputAt(0);
879 Node* right = node->InputAt(1);
880 if (g.CanBeImmediate(right)) {
881 Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
882 g.UseImmediate(right));
884 if (g.CanBeBetterLeftOperand(right)) {
885 std::swap(left, right);
887 Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
892 void InstructionSelector::VisitInt32MulHigh(Node* node) {
893 VisitMulHigh(
this, node, kIA32ImulHigh);
896 void InstructionSelector::VisitUint32MulHigh(Node* node) {
897 VisitMulHigh(
this, node, kIA32UmulHigh);
900 void InstructionSelector::VisitInt32Div(Node* node) {
901 VisitDiv(
this, node, kIA32Idiv);
904 void InstructionSelector::VisitUint32Div(Node* node) {
905 VisitDiv(
this, node, kIA32Udiv);
908 void InstructionSelector::VisitInt32Mod(Node* node) {
909 VisitMod(
this, node, kIA32Idiv);
912 void InstructionSelector::VisitUint32Mod(Node* node) {
913 VisitMod(
this, node, kIA32Udiv);
916 void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
917 IA32OperandGenerator g(
this);
918 InstructionOperand temps[] = {g.TempRegister()};
919 Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
920 arraysize(temps), temps);
923 void InstructionSelector::VisitFloat64Mod(Node* node) {
924 IA32OperandGenerator g(
this);
925 InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister()};
926 Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
927 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
928 arraysize(temps), temps);
931 void InstructionSelector::VisitFloat32Max(Node* node) {
932 IA32OperandGenerator g(
this);
933 InstructionOperand temps[] = {g.TempRegister()};
934 Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
935 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
936 arraysize(temps), temps);
939 void InstructionSelector::VisitFloat64Max(Node* node) {
940 IA32OperandGenerator g(
this);
941 InstructionOperand temps[] = {g.TempRegister()};
942 Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
943 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
944 arraysize(temps), temps);
947 void InstructionSelector::VisitFloat32Min(Node* node) {
948 IA32OperandGenerator g(
this);
949 InstructionOperand temps[] = {g.TempRegister()};
950 Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
951 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
952 arraysize(temps), temps);
955 void InstructionSelector::VisitFloat64Min(Node* node) {
956 IA32OperandGenerator g(
this);
957 InstructionOperand temps[] = {g.TempRegister()};
958 Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
959 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
960 arraysize(temps), temps);
963 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
967 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
968 InstructionCode opcode) {
969 IA32OperandGenerator g(
this);
970 Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
971 g.UseRegister(node->InputAt(1)))
975 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
976 InstructionCode opcode) {
977 IA32OperandGenerator g(
this);
978 Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)))
982 void InstructionSelector::EmitPrepareArguments(
983 ZoneVector<PushParameter>* arguments,
const CallDescriptor* call_descriptor,
985 IA32OperandGenerator g(
this);
988 if (call_descriptor->IsCFunctionCall()) {
989 InstructionOperand temps[] = {g.TempRegister()};
990 size_t const temp_count = arraysize(temps);
991 Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
992 call_descriptor->ParameterCount())),
993 0,
nullptr, 0,
nullptr, temp_count, temps);
996 for (
size_t n = 0; n < arguments->size(); ++n) {
997 PushParameter input = (*arguments)[n];
999 int const slot =
static_cast<int>(n);
1000 InstructionOperand value = g.CanBeImmediate(node)
1001 ? g.UseImmediate(input.node)
1002 : g.UseRegister(input.node);
1003 Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
1008 int effect_level = GetEffectLevel(node);
1009 for (PushParameter input : base::Reversed(*arguments)) {
1011 if (input.node ==
nullptr)
continue;
1012 if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) {
1013 InstructionOperand outputs[1];
1014 InstructionOperand inputs[4];
1015 size_t input_count = 0;
1016 InstructionCode opcode = kIA32Push;
1017 AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
1018 input.node, inputs, &input_count);
1019 opcode |= AddressingModeField::encode(mode);
1020 Emit(opcode, 0, outputs, input_count, inputs);
1022 InstructionOperand value =
1023 g.CanBeImmediate(input.node)
1024 ? g.UseImmediate(input.node)
1025 : IsSupported(ATOM) ||
1026 sequence()->IsFP(GetVirtualRegister(input.node))
1027 ? g.UseRegister(input.node)
1028 : g.Use(input.node);
1029 if (input.location.GetType() == MachineType::Float32()) {
1030 Emit(kIA32PushFloat32, g.NoOutput(), value);
1031 }
else if (input.location.GetType() == MachineType::Float64()) {
1032 Emit(kIA32PushFloat64, g.NoOutput(), value);
1033 }
else if (input.location.GetType() == MachineType::Simd128()) {
1034 Emit(kIA32PushSimd128, g.NoOutput(), value);
1036 Emit(kIA32Push, g.NoOutput(), value);
1043 void InstructionSelector::EmitPrepareResults(
1044 ZoneVector<PushParameter>* results,
const CallDescriptor* call_descriptor,
1046 IA32OperandGenerator g(
this);
1048 int reverse_slot = 0;
1049 for (PushParameter output : *results) {
1050 if (!output.location.IsCallerFrameSlot())
continue;
1052 if (output.node !=
nullptr) {
1053 DCHECK(!call_descriptor->IsCFunctionCall());
1054 if (output.location.GetType() == MachineType::Float32()) {
1055 MarkAsFloat32(output.node);
1056 }
else if (output.location.GetType() == MachineType::Float64()) {
1057 MarkAsFloat64(output.node);
1059 Emit(kIA32Peek, g.DefineAsRegister(output.node),
1060 g.UseImmediate(reverse_slot));
1062 reverse_slot += output.location.GetSizeInPointers();
1066 bool InstructionSelector::IsTailCallAddressImmediate() {
return true; }
1068 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() {
return 0; }
1072 void VisitCompareWithMemoryOperand(InstructionSelector* selector,
1073 InstructionCode opcode, Node* left,
1074 InstructionOperand right,
1075 FlagsContinuation* cont) {
1076 DCHECK_EQ(IrOpcode::kLoad, left->opcode());
1077 IA32OperandGenerator g(selector);
1078 size_t input_count = 0;
1079 InstructionOperand inputs[4];
1080 AddressingMode addressing_mode =
1081 g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
1082 opcode |= AddressingModeField::encode(addressing_mode);
1083 inputs[input_count++] = right;
1085 selector->EmitWithContinuation(opcode, 0,
nullptr, input_count, inputs, cont);
1089 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1090 InstructionOperand left, InstructionOperand right,
1091 FlagsContinuation* cont) {
1092 selector->EmitWithContinuation(opcode, left, right, cont);
1096 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1097 Node* left, Node* right, FlagsContinuation* cont,
1099 IA32OperandGenerator g(selector);
1100 if (commutative && g.CanBeBetterLeftOperand(right)) {
1101 std::swap(left, right);
1103 VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
1106 MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
1107 if (hint_node->opcode() == IrOpcode::kLoad) {
1108 MachineType hint = LoadRepresentationOf(hint_node->op());
1109 if (node->opcode() == IrOpcode::kInt32Constant ||
1110 node->opcode() == IrOpcode::kInt64Constant) {
1111 int64_t constant = node->opcode() == IrOpcode::kInt32Constant
1112 ? OpParameter<int32_t>(node->op())
1113 : OpParameter<int64_t>(node->op());
1114 if (hint == MachineType::Int8()) {
1115 if (constant >= std::numeric_limits<int8_t>::min() &&
1116 constant <= std::numeric_limits<int8_t>::max()) {
1119 }
else if (hint == MachineType::Uint8()) {
1120 if (constant >= std::numeric_limits<uint8_t>::min() &&
1121 constant <= std::numeric_limits<uint8_t>::max()) {
1124 }
else if (hint == MachineType::Int16()) {
1125 if (constant >= std::numeric_limits<int16_t>::min() &&
1126 constant <= std::numeric_limits<int16_t>::max()) {
1129 }
else if (hint == MachineType::Uint16()) {
1130 if (constant >= std::numeric_limits<uint16_t>::min() &&
1131 constant <= std::numeric_limits<uint16_t>::max()) {
1134 }
else if (hint == MachineType::Int32()) {
1136 }
else if (hint == MachineType::Uint32()) {
1137 if (constant >= 0)
return hint;
1141 return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
1142 : MachineType::None();
1147 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
1148 Node* right, FlagsContinuation* cont) {
1152 MachineType left_type = MachineTypeForNarrow(left, right);
1153 MachineType right_type = MachineTypeForNarrow(right, left);
1154 if (left_type == right_type) {
1155 switch (left_type.representation()) {
1156 case MachineRepresentation::kBit:
1157 case MachineRepresentation::kWord8: {
1158 if (opcode == kIA32Test)
return kIA32Test8;
1159 if (opcode == kIA32Cmp) {
1160 if (left_type.semantic() == MachineSemantic::kUint32) {
1161 cont->OverwriteUnsignedIfSigned();
1163 CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
1169 case MachineRepresentation::kWord16:
1170 if (opcode == kIA32Test)
return kIA32Test16;
1171 if (opcode == kIA32Cmp) {
1172 if (left_type.semantic() == MachineSemantic::kUint32) {
1173 cont->OverwriteUnsignedIfSigned();
1175 CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
1188 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1189 FlagsContinuation* cont) {
1190 Node*
const left = node->InputAt(0);
1191 Node*
const right = node->InputAt(1);
1192 VisitCompare(selector, kSSEFloat32Cmp, right, left, cont,
false);
1196 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1197 FlagsContinuation* cont) {
1198 Node*
const left = node->InputAt(0);
1199 Node*
const right = node->InputAt(1);
1200 VisitCompare(selector, kSSEFloat64Cmp, right, left, cont,
false);
1204 void VisitWordCompare(InstructionSelector* selector, Node* node,
1205 InstructionCode opcode, FlagsContinuation* cont) {
1206 IA32OperandGenerator g(selector);
1207 Node* left = node->InputAt(0);
1208 Node* right = node->InputAt(1);
1210 InstructionCode narrowed_opcode =
1211 TryNarrowOpcodeSize(opcode, left, right, cont);
1213 int effect_level = selector->GetEffectLevel(node);
1214 if (cont->IsBranch()) {
1215 effect_level = selector->GetEffectLevel(
1216 cont->true_block()->PredecessorAt(0)->control_input());
1221 if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
1222 (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
1223 !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
1224 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1225 std::swap(left, right);
1229 if (g.CanBeImmediate(right)) {
1230 if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
1231 return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
1232 g.UseImmediate(right), cont);
1234 return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
1239 if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
1240 bool needs_byte_register =
1241 narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
1242 return VisitCompareWithMemoryOperand(
1243 selector, narrowed_opcode, left,
1244 needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
1248 return VisitCompare(selector, opcode, left, right, cont,
1249 node->op()->HasProperty(Operator::kCommutative));
1252 void VisitWordCompare(InstructionSelector* selector, Node* node,
1253 FlagsContinuation* cont) {
1254 StackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> m(
1255 selector->isolate(), node);
1258 if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1259 InstructionCode opcode = cont->Encode(kIA32StackCheck);
1260 CHECK(cont->IsBranch());
1261 selector->EmitWithContinuation(opcode, cont);
1264 WasmStackCheckMatcher<Int32BinopMatcher, IrOpcode::kUint32LessThan> wasm_m(
1266 if (wasm_m.Matched()) {
1270 Node* left = node->InputAt(0);
1271 LocationOperand esp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
1272 InstructionSequence::DefaultRepresentation(),
1273 RegisterCode::kRegCode_esp);
1274 return VisitCompareWithMemoryOperand(selector, kIA32Cmp, left, esp, cont);
1276 VisitWordCompare(selector, node, kIA32Cmp, cont);
1279 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
1280 ArchOpcode opcode, MachineRepresentation rep) {
1281 IA32OperandGenerator g(selector);
1282 Node* base = node->InputAt(0);
1283 Node* index = node->InputAt(1);
1284 Node* value = node->InputAt(2);
1286 AddressingMode addressing_mode;
1287 InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
1288 ? g.UseFixed(value, edx)
1289 : g.UseUniqueRegister(value);
1290 InstructionOperand inputs[] = {
1291 value_operand, g.UseUniqueRegister(base),
1292 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1293 InstructionOperand outputs[] = {
1294 (rep == MachineRepresentation::kWord8)
1296 ? g.DefineAsFixed(node, edx)
1297 : g.DefineSameAsFirst(node)};
1298 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1299 selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
1302 void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
1303 ArchOpcode opcode, MachineRepresentation rep) {
1304 AddressingMode addressing_mode;
1305 IA32OperandGenerator g(selector);
1306 Node* base = node->InputAt(0);
1307 Node* index = node->InputAt(1);
1308 Node* value = node->InputAt(2);
1309 InstructionOperand inputs[] = {
1310 g.UseUniqueRegister(value), g.UseUniqueRegister(base),
1311 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1312 InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
1313 InstructionOperand temp[] = {(rep == MachineRepresentation::kWord8)
1314 ? g.UseByteRegister(node)
1315 : g.TempRegister()};
1316 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1317 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1318 arraysize(temp), temp);
1321 void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
1322 ArchOpcode opcode) {
1323 IA32OperandGenerator g(selector);
1324 Node* base = node->InputAt(0);
1325 Node* index = node->InputAt(1);
1326 Node* value = node->InputAt(2);
1329 Node* value_high = node->InputAt(3);
1333 AddressingMode addressing_mode;
1334 InstructionOperand inputs[] = {
1335 g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
1336 g.UseUniqueRegister(base),
1337 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1338 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1339 Node* projection0 = NodeProperties::FindProjection(node, 0);
1340 Node* projection1 = NodeProperties::FindProjection(node, 1);
1342 InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
1343 g.DefineAsFixed(projection1, edx)};
1344 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1346 }
else if (projection0) {
1347 InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
1348 InstructionOperand temps[] = {g.TempRegister(edx)};
1349 const int num_temps = arraysize(temps);
1350 selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1353 InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
1354 const int num_temps = arraysize(temps);
1355 selector->Emit(code, 0,
nullptr, arraysize(inputs), inputs, num_temps,
1363 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
1364 FlagsContinuation* cont) {
1366 while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
1367 Int32BinopMatcher m(value);
1368 if (!m.right().Is(0))
break;
1371 value = m.left().node();
1375 if (CanCover(user, value)) {
1376 switch (value->opcode()) {
1377 case IrOpcode::kWord32Equal:
1378 cont->OverwriteAndNegateIfEqual(kEqual);
1379 return VisitWordCompare(
this, value, cont);
1380 case IrOpcode::kInt32LessThan:
1381 cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1382 return VisitWordCompare(
this, value, cont);
1383 case IrOpcode::kInt32LessThanOrEqual:
1384 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1385 return VisitWordCompare(
this, value, cont);
1386 case IrOpcode::kUint32LessThan:
1387 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1388 return VisitWordCompare(
this, value, cont);
1389 case IrOpcode::kUint32LessThanOrEqual:
1390 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1391 return VisitWordCompare(
this, value, cont);
1392 case IrOpcode::kFloat32Equal:
1393 cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
1394 return VisitFloat32Compare(
this, value, cont);
1395 case IrOpcode::kFloat32LessThan:
1396 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
1397 return VisitFloat32Compare(
this, value, cont);
1398 case IrOpcode::kFloat32LessThanOrEqual:
1399 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
1400 return VisitFloat32Compare(
this, value, cont);
1401 case IrOpcode::kFloat64Equal:
1402 cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
1403 return VisitFloat64Compare(
this, value, cont);
1404 case IrOpcode::kFloat64LessThan:
1405 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
1406 return VisitFloat64Compare(
this, value, cont);
1407 case IrOpcode::kFloat64LessThanOrEqual:
1408 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
1409 return VisitFloat64Compare(
this, value, cont);
1410 case IrOpcode::kProjection:
1413 if (ProjectionIndexOf(value->op()) == 1u) {
1419 Node*
const node = value->InputAt(0);
1420 Node*
const result = NodeProperties::FindProjection(node, 0);
1421 if (result ==
nullptr || IsDefined(result)) {
1422 switch (node->opcode()) {
1423 case IrOpcode::kInt32AddWithOverflow:
1424 cont->OverwriteAndNegateIfEqual(kOverflow);
1425 return VisitBinop(
this, node, kIA32Add, cont);
1426 case IrOpcode::kInt32SubWithOverflow:
1427 cont->OverwriteAndNegateIfEqual(kOverflow);
1428 return VisitBinop(
this, node, kIA32Sub, cont);
1429 case IrOpcode::kInt32MulWithOverflow:
1430 cont->OverwriteAndNegateIfEqual(kOverflow);
1431 return VisitBinop(
this, node, kIA32Imul, cont);
1438 case IrOpcode::kInt32Sub:
1439 return VisitWordCompare(
this, value, cont);
1440 case IrOpcode::kWord32And:
1441 return VisitWordCompare(
this, value, kIA32Test, cont);
1448 IA32OperandGenerator g(
this);
1449 VisitCompare(
this, kIA32Cmp, g.Use(value), g.TempImmediate(0), cont);
1452 void InstructionSelector::VisitSwitch(Node* node,
const SwitchInfo& sw) {
1453 IA32OperandGenerator g(
this);
1454 InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1457 if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
1458 static const size_t kMaxTableSwitchValueRange = 2 << 16;
1459 size_t table_space_cost = 4 + sw.value_range();
1460 size_t table_time_cost = 3;
1461 size_t lookup_space_cost = 3 + 2 * sw.case_count();
1462 size_t lookup_time_cost = sw.case_count();
1463 if (sw.case_count() > 4 &&
1464 table_space_cost + 3 * table_time_cost <=
1465 lookup_space_cost + 3 * lookup_time_cost &&
1466 sw.min_value() > std::numeric_limits<int32_t>::min() &&
1467 sw.value_range() <= kMaxTableSwitchValueRange) {
1468 InstructionOperand index_operand = value_operand;
1469 if (sw.min_value()) {
1470 index_operand = g.TempRegister();
1471 Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
1472 value_operand, g.TempImmediate(-sw.min_value()));
1475 return EmitTableSwitch(sw, index_operand);
1480 return EmitBinarySearchSwitch(sw, value_operand);
1483 void InstructionSelector::VisitWord32Equal(Node*
const node) {
1484 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1485 Int32BinopMatcher m(node);
1486 if (m.right().Is(0)) {
1487 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
1489 VisitWordCompare(
this, node, &cont);
1492 void InstructionSelector::VisitInt32LessThan(Node* node) {
1493 FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
1494 VisitWordCompare(
this, node, &cont);
1497 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
1498 FlagsContinuation cont =
1499 FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
1500 VisitWordCompare(
this, node, &cont);
1503 void InstructionSelector::VisitUint32LessThan(Node* node) {
1504 FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
1505 VisitWordCompare(
this, node, &cont);
1508 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
1509 FlagsContinuation cont =
1510 FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
1511 VisitWordCompare(
this, node, &cont);
1514 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1515 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1516 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1517 return VisitBinop(
this, node, kIA32Add, &cont);
1519 FlagsContinuation cont;
1520 VisitBinop(
this, node, kIA32Add, &cont);
1523 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1524 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1525 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1526 return VisitBinop(
this, node, kIA32Sub, &cont);
1528 FlagsContinuation cont;
1529 VisitBinop(
this, node, kIA32Sub, &cont);
1532 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1533 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1534 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1535 return VisitBinop(
this, node, kIA32Imul, &cont);
1537 FlagsContinuation cont;
1538 VisitBinop(
this, node, kIA32Imul, &cont);
1541 void InstructionSelector::VisitFloat32Equal(Node* node) {
1542 FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
1543 VisitFloat32Compare(
this, node, &cont);
1546 void InstructionSelector::VisitFloat32LessThan(Node* node) {
1547 FlagsContinuation cont =
1548 FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
1549 VisitFloat32Compare(
this, node, &cont);
1552 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
1553 FlagsContinuation cont =
1554 FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
1555 VisitFloat32Compare(
this, node, &cont);
1558 void InstructionSelector::VisitFloat64Equal(Node* node) {
1559 FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
1560 VisitFloat64Compare(
this, node, &cont);
1563 void InstructionSelector::VisitFloat64LessThan(Node* node) {
1564 FlagsContinuation cont =
1565 FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
1566 VisitFloat64Compare(
this, node, &cont);
1569 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
1570 FlagsContinuation cont =
1571 FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
1572 VisitFloat64Compare(
this, node, &cont);
1575 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1576 IA32OperandGenerator g(
this);
1577 Node* left = node->InputAt(0);
1578 Node* right = node->InputAt(1);
1579 Float64Matcher mleft(left);
1580 if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
1581 Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
1584 Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1585 g.UseRegister(left), g.Use(right));
1588 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1589 IA32OperandGenerator g(
this);
1590 Node* left = node->InputAt(0);
1591 Node* right = node->InputAt(1);
1592 Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1593 g.UseRegister(left), g.Use(right));
1596 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1597 IA32OperandGenerator g(
this);
1598 Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
1599 g.UseRegister(node->InputAt(0)));
1602 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
1603 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1604 DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
1605 load_rep.representation() == MachineRepresentation::kWord16 ||
1606 load_rep.representation() == MachineRepresentation::kWord32);
1611 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
1612 IA32OperandGenerator g(
this);
1613 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
1614 ArchOpcode opcode = kArchNop;
1616 case MachineRepresentation::kWord8:
1617 opcode = kWord32AtomicExchangeInt8;
1619 case MachineRepresentation::kWord16:
1620 opcode = kWord32AtomicExchangeInt16;
1622 case MachineRepresentation::kWord32:
1623 opcode = kWord32AtomicExchangeWord32;
1629 VisitAtomicExchange(
this, node, opcode, rep);
1632 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
1633 IA32OperandGenerator g(
this);
1634 MachineType type = AtomicOpType(node->op());
1635 ArchOpcode opcode = kArchNop;
1636 if (type == MachineType::Int8()) {
1637 opcode = kWord32AtomicExchangeInt8;
1638 }
else if (type == MachineType::Uint8()) {
1639 opcode = kWord32AtomicExchangeUint8;
1640 }
else if (type == MachineType::Int16()) {
1641 opcode = kWord32AtomicExchangeInt16;
1642 }
else if (type == MachineType::Uint16()) {
1643 opcode = kWord32AtomicExchangeUint16;
1644 }
else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1645 opcode = kWord32AtomicExchangeWord32;
1650 VisitAtomicExchange(
this, node, opcode, type.representation());
1653 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
1654 IA32OperandGenerator g(
this);
1655 Node* base = node->InputAt(0);
1656 Node* index = node->InputAt(1);
1657 Node* old_value = node->InputAt(2);
1658 Node* new_value = node->InputAt(3);
1660 MachineType type = AtomicOpType(node->op());
1661 ArchOpcode opcode = kArchNop;
1662 if (type == MachineType::Int8()) {
1663 opcode = kWord32AtomicCompareExchangeInt8;
1664 }
else if (type == MachineType::Uint8()) {
1665 opcode = kWord32AtomicCompareExchangeUint8;
1666 }
else if (type == MachineType::Int16()) {
1667 opcode = kWord32AtomicCompareExchangeInt16;
1668 }
else if (type == MachineType::Uint16()) {
1669 opcode = kWord32AtomicCompareExchangeUint16;
1670 }
else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1671 opcode = kWord32AtomicCompareExchangeWord32;
1676 AddressingMode addressing_mode;
1677 InstructionOperand new_val_operand =
1678 (type.representation() == MachineRepresentation::kWord8)
1679 ? g.UseByteRegister(new_value)
1680 : g.UseUniqueRegister(new_value);
1681 InstructionOperand inputs[] = {
1682 g.UseFixed(old_value, eax), new_val_operand, g.UseUniqueRegister(base),
1683 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1684 InstructionOperand outputs[] = {g.DefineAsFixed(node, eax)};
1685 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1686 Emit(code, 1, outputs, arraysize(inputs), inputs);
1689 void InstructionSelector::VisitWord32AtomicBinaryOperation(
1690 Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
1691 ArchOpcode uint16_op, ArchOpcode word32_op) {
1692 MachineType type = AtomicOpType(node->op());
1693 ArchOpcode opcode = kArchNop;
1694 if (type == MachineType::Int8()) {
1696 }
else if (type == MachineType::Uint8()) {
1698 }
else if (type == MachineType::Int16()) {
1700 }
else if (type == MachineType::Uint16()) {
1702 }
else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1708 VisitAtomicBinOp(
this, node, opcode, type.representation());
1711 #define VISIT_ATOMIC_BINOP(op) \ 1712 void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ 1713 VisitWord32AtomicBinaryOperation( \ 1714 node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \ 1715 kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \ 1716 kWord32Atomic##op##Word32); \ 1718 VISIT_ATOMIC_BINOP(Add)
1719 VISIT_ATOMIC_BINOP(Sub)
1720 VISIT_ATOMIC_BINOP(And)
1721 VISIT_ATOMIC_BINOP(Or)
1722 VISIT_ATOMIC_BINOP(Xor)
1723 #undef VISIT_ATOMIC_BINOP 1725 void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
1726 IA32OperandGenerator g(
this);
1727 AddressingMode mode;
1728 Node* base = node->InputAt(0);
1729 Node* index = node->InputAt(1);
1730 InstructionOperand inputs[] = {g.UseUniqueRegister(base),
1731 g.GetEffectiveIndexOperand(index, &mode)};
1732 Node* projection0 = NodeProperties::FindProjection(node, 0);
1733 Node* projection1 = NodeProperties::FindProjection(node, 1);
1734 InstructionCode code =
1735 kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
1738 InstructionOperand temps[] = {g.TempDoubleRegister()};
1739 InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
1740 g.DefineAsRegister(projection1)};
1741 Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1742 arraysize(temps), temps);
1743 }
else if (projection0) {
1744 InstructionOperand temps[] = {g.TempDoubleRegister(), g.TempRegister()};
1745 InstructionOperand outputs[] = {g.DefineAsRegister(projection0)};
1746 Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1747 arraysize(temps), temps);
1749 InstructionOperand temps[] = {g.TempDoubleRegister(), g.TempRegister(),
1751 Emit(code, 0,
nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
1755 void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
1756 IA32OperandGenerator g(
this);
1757 Node* base = node->InputAt(0);
1758 Node* index = node->InputAt(1);
1759 Node* value = node->InputAt(2);
1760 Node* value_high = node->InputAt(3);
1762 AddressingMode addressing_mode;
1763 InstructionOperand inputs[] = {
1764 g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
1765 g.UseUniqueRegister(base),
1766 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1770 InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
1771 const int num_temps = arraysize(temps);
1772 InstructionCode code =
1773 kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
1774 Emit(code, 0,
nullptr, arraysize(inputs), inputs, num_temps, temps);
1777 void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
1778 VisitPairAtomicBinOp(
this, node, kIA32Word32AtomicPairAdd);
1781 void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
1782 VisitPairAtomicBinOp(
this, node, kIA32Word32AtomicPairSub);
1785 void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
1786 VisitPairAtomicBinOp(
this, node, kIA32Word32AtomicPairAnd);
1789 void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
1790 VisitPairAtomicBinOp(
this, node, kIA32Word32AtomicPairOr);
1793 void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
1794 VisitPairAtomicBinOp(
this, node, kIA32Word32AtomicPairXor);
1797 void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
1798 VisitPairAtomicBinOp(
this, node, kIA32Word32AtomicPairExchange);
1801 void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
1802 IA32OperandGenerator g(
this);
1803 Node* index = node->InputAt(1);
1804 AddressingMode addressing_mode;
1806 InstructionOperand inputs[] = {
1808 g.UseFixed(node->InputAt(2), eax), g.UseFixed(node->InputAt(3), edx),
1810 g.UseUniqueRegisterOrSlotOrConstant(node->InputAt(4)),
1811 g.UseFixed(node->InputAt(5), ecx),
1813 g.UseUniqueRegister(node->InputAt(0)),
1814 g.GetEffectiveIndexOperand(index, &addressing_mode)};
1815 Node* projection0 = NodeProperties::FindProjection(node, 0);
1816 Node* projection1 = NodeProperties::FindProjection(node, 1);
1817 InstructionCode code = kIA32Word32AtomicPairCompareExchange |
1818 AddressingModeField::encode(addressing_mode);
1821 InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
1822 g.DefineAsFixed(projection1, edx)};
1823 Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, 0, {});
1824 }
else if (projection0) {
1825 InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
1826 InstructionOperand temps[] = {g.TempRegister(edx)};
1827 const int num_temps = arraysize(temps);
1828 Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1831 InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
1832 const int num_temps = arraysize(temps);
1833 Emit(code, 0,
nullptr, arraysize(inputs), inputs, num_temps, temps);
// NOTE(review): extraction-damaged region — the SIMD_*_LIST/#define opcode
// lists below are fused onto one line with original line numbers embedded
// and most V(...) entries missing; restore this span from upstream before
// compiling. The tail is the start of VisitF32x4Splat.
#define SIMD_INT_TYPES(V) \ 1842 #define SIMD_BINOP_LIST(V) \ 1867 V(I16x8SConvertI32x4) \ 1869 V(I16x8AddSaturateS) \ 1872 V(I16x8SubSaturateS) \ 1880 V(I16x8AddSaturateU) \ 1881 V(I16x8SubSaturateU) \ 1886 V(I8x16SConvertI16x8) \ 1888 V(I8x16AddSaturateS) \ 1890 V(I8x16SubSaturateS) \ 1897 V(I8x16AddSaturateU) \ 1898 V(I8x16SubSaturateU) \ 1907 #define SIMD_UNOP_LIST(V) \ 1908 V(F32x4SConvertI32x4) \ 1909 V(F32x4RecipApprox) \ 1910 V(F32x4RecipSqrtApprox) \ 1911 V(I32x4SConvertI16x8Low) \ 1912 V(I32x4SConvertI16x8High) \ 1914 V(I32x4UConvertI16x8Low) \ 1915 V(I32x4UConvertI16x8High) \ 1916 V(I16x8SConvertI8x16Low) \ 1917 V(I16x8SConvertI8x16High) \ 1919 V(I16x8UConvertI8x16Low) \ 1920 V(I16x8UConvertI8x16High) \ 1923 #define SIMD_UNOP_PREFIX_LIST(V) \ 1928 #define SIMD_ANYTRUE_LIST(V) \ 1933 #define SIMD_ALLTRUE_LIST(V) \ 1938 #define SIMD_SHIFT_OPCODES(V) \ 1947 #define SIMD_I8X16_RIGHT_SHIFT_OPCODES(V) \ 1951 void InstructionSelector::VisitF32x4Splat(Node* node) {
1952 VisitRRSimd(
this, node, kAVXF32x4Splat, kSSEF32x4Splat);
1955 void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
1956 VisitRRISimd(
this, node, kAVXF32x4ExtractLane, kSSEF32x4ExtractLane);
1959 void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
1960 VisitRRSimd(
this, node, kAVXF32x4UConvertI32x4, kSSEF32x4UConvertI32x4);
1963 void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
1964 VisitRRSimd(
this, node, kAVXI32x4SConvertF32x4, kSSEI32x4SConvertF32x4);
1967 void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
1968 IA32OperandGenerator g(
this);
1969 InstructionOperand temps[] = {g.TempSimd128Register()};
1970 InstructionCode opcode =
1971 IsSupported(AVX) ? kAVXI32x4UConvertF32x4 : kSSEI32x4UConvertF32x4;
1972 Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
1973 arraysize(temps), temps);
1976 void InstructionSelector::VisitI8x16Mul(Node* node) {
1977 IA32OperandGenerator g(
this);
1978 InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
1979 InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
1980 InstructionOperand temps[] = {g.TempSimd128Register()};
1981 if (IsSupported(AVX)) {
1982 Emit(kAVXI8x16Mul, g.DefineAsRegister(node), operand0, operand1,
1983 arraysize(temps), temps);
1985 Emit(kSSEI8x16Mul, g.DefineSameAsFirst(node), operand0, operand1,
1986 arraysize(temps), temps);
1990 void InstructionSelector::VisitS128Zero(Node* node) {
1991 IA32OperandGenerator g(
this);
1992 Emit(kIA32S128Zero, g.DefineAsRegister(node));
1995 void InstructionSelector::VisitS128Select(Node* node) {
1996 IA32OperandGenerator g(
this);
1997 InstructionOperand operand2 = g.UseRegister(node->InputAt(2));
1998 if (IsSupported(AVX)) {
1999 Emit(kAVXS128Select, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
2000 g.Use(node->InputAt(1)), operand2);
2002 Emit(kSSES128Select, g.DefineSameAsFirst(node),
2003 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
// NOTE(review): extraction-damaged region — each line below fuses several
// macro definitions/expansions together with embedded original line numbers,
// and the macros' closing braces are missing; restore from upstream before
// compiling. Logically this stamps out Splat/ExtractLane/ReplaceLane/shift/
// unop/anytrue/alltrue/binop SIMD visitors from the opcode lists above.
#define VISIT_SIMD_SPLAT(Type) \ 2009 void InstructionSelector::Visit##Type##Splat(Node* node) { \ 2010 VisitRO(this, node, kIA32##Type##Splat); \ 2012 SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT 2015 #define VISIT_SIMD_EXTRACT_LANE(Type) \ 2016 void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \ 2017 VisitRRISimd(this, node, kIA32##Type##ExtractLane); \ 2019 SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE 2022 #define VISIT_SIMD_REPLACE_LANE(Type) \ 2023 void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ 2024 IA32OperandGenerator g(this); \ 2025 InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \ 2026 InstructionOperand operand1 = \ 2027 g.UseImmediate(OpParameter<int32_t>(node->op())); \ 2028 InstructionOperand operand2 = g.Use(node->InputAt(1)); \ 2029 if (IsSupported(AVX)) { \ 2030 Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \ 2031 operand1, operand2); \ 2033 Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \ 2034 operand1, operand2); \ 2037 SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE 2040 #undef SIMD_INT_TYPES 2042 #define VISIT_SIMD_SHIFT(Opcode) \ 2043 void InstructionSelector::Visit##Opcode(Node* node) { \ 2044 VisitRRISimd(this, node, kAVX##Opcode, kSSE##Opcode); \ 2046 SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT 2048 #undef SIMD_SHIFT_OPCODES 2050 #define VISIT_SIMD_I8X16_RIGHT_SHIFT(Op) \ 2051 void InstructionSelector::Visit##Op(Node* node) { \ 2052 VisitRRISimd(this, node, kIA32##Op); \ 2055 SIMD_I8X16_RIGHT_SHIFT_OPCODES(VISIT_SIMD_I8X16_RIGHT_SHIFT)
#undef SIMD_I8X16_RIGHT_SHIFT_OPCODES 2057 #undef VISIT_SIMD_I8X16_RIGHT_SHIFT 2059 #define VISIT_SIMD_UNOP(Opcode) \ 2060 void InstructionSelector::Visit##Opcode(Node* node) { \ 2061 IA32OperandGenerator g(this); \ 2062 Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \ 2064 SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
#undef VISIT_SIMD_UNOP 2066 #undef SIMD_UNOP_LIST 2068 #define VISIT_SIMD_UNOP_PREFIX(Opcode) \ 2069 void InstructionSelector::Visit##Opcode(Node* node) { \ 2070 IA32OperandGenerator g(this); \ 2071 InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \ 2072 Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \ 2074 SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
#undef VISIT_SIMD_UNOP_PREFIX 2076 #undef SIMD_UNOP_PREFIX_LIST 2078 #define VISIT_SIMD_ANYTRUE(Opcode) \ 2079 void InstructionSelector::Visit##Opcode(Node* node) { \ 2080 IA32OperandGenerator g(this); \ 2081 InstructionOperand temps[] = {g.TempRegister()}; \ 2082 Emit(kIA32##Opcode, g.DefineAsRegister(node), \ 2083 g.UseRegister(node->InputAt(0)), arraysize(temps), temps); \ 2085 SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
#undef VISIT_SIMD_ANYTRUE 2087 #undef SIMD_ANYTRUE_LIST 2089 #define VISIT_SIMD_ALLTRUE(Opcode) \ 2090 void InstructionSelector::Visit##Opcode(Node* node) { \ 2091 IA32OperandGenerator g(this); \ 2092 InstructionOperand temps[] = {g.TempRegister()}; \ 2093 Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)), \ 2094 arraysize(temps), temps); \ 2096 SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
#undef VISIT_SIMD_ALLTRUE 2098 #undef SIMD_ALLTRUE_LIST 2100 #define VISIT_SIMD_BINOP(Opcode) \ 2101 void InstructionSelector::Visit##Opcode(Node* node) { \ 2102 VisitRROFloat(this, node, kAVX##Opcode, kSSE##Opcode); \ 2104 SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
2105 #undef VISIT_SIMD_BINOP 2106 #undef SIMD_BINOP_LIST 2108 void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
2109 ArchOpcode sse_opcode) {
2110 IA32OperandGenerator g(selector);
2111 InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
2112 InstructionOperand operand1 = g.Use(node->InputAt(1));
2113 if (selector->IsSupported(AVX)) {
2114 selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
2116 selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
2120 void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
2121 VisitPack(
this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
2124 void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
2125 VisitPack(
this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
2128 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
2132 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
// Packs the low 2 bits of each of the four 32x4 shuffle lane indices into a
// PSHUFD/SHUFPS-style immediate byte (lane i occupies bits 2i..2i+1).
uint8_t PackShuffle4(uint8_t* shuffle) {
  uint8_t imm = 0;
  for (int lane = 0; lane < 4; ++lane) {
    imm |= (shuffle[lane] & 3) << (2 * lane);
  }
  return imm;
}
// Builds a PBLENDW-style mask from a 16x8 shuffle: bit i is set when lane i
// selects from the second source (index >= 8). (Restores the accumulator
// initialization and return dropped by the damaged extraction.)
uint8_t PackBlend8(const uint8_t* shuffle16x8) {
  uint8_t result = 0;
  for (int i = 0; i < 8; ++i) {
    result |= (shuffle16x8[i] >= 8 ? 1 : 0) << i;
  }
  return result;
}
// Builds an 8-bit blend mask from a 32x4 shuffle by expanding each lane that
// selects from the second source (index >= 4) into two set bits. (Restores
// the accumulator initialization and return dropped by the extraction.)
uint8_t PackBlend4(const uint8_t* shuffle32x4) {
  uint8_t result = 0;
  for (int i = 0; i < 4; ++i) {
    result |= (shuffle32x4[i] >= 4 ? 0x3 : 0) << (i * 2);
  }
  return result;
}
// Returns true when the 16x8 shuffle keeps every lane in its own 4-lane half
// (bit 2 of the lane index matches bit 2 of the position), i.e. it can be
// done as two half shuffles followed by a blend; fills *blend_mask with a
// bit per lane that reads from the second source. (Restores the mask reset
// and success return dropped by the damaged extraction.)
bool TryMatch16x8HalfShuffle(uint8_t* shuffle16x8, uint8_t* blend_mask) {
  *blend_mask = 0;
  for (int i = 0; i < 8; i++) {
    if ((shuffle16x8[i] & 0x4) != (i & 0x4)) return false;
    *blend_mask |= (shuffle16x8[i] > 7 ? 1 : 0) << i;
  }
  return true;
}
2175 struct ShuffleEntry {
2176 uint8_t shuffle[kSimd128Size];
2178 ArchOpcode avx_opcode;
2179 bool src0_needs_reg;
2180 bool src1_needs_reg;
// NOTE(review): extraction-damaged table — many opcode/flag initializer
// fields, several whole entries, and the closing "};" are missing relative
// to a well-formed ShuffleEntry array; restore this span from upstream
// before compiling. The visible rows are unpack/transpose/reverse shuffle
// patterns mapped to their arch opcodes.
2188 static const ShuffleEntry arch_shuffles[] = {
2189 {{0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23},
2190 kIA32S64x2UnpackLow,
2191 kIA32S64x2UnpackLow,
2194 {{8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31},
2195 kIA32S64x2UnpackHigh,
2196 kIA32S64x2UnpackHigh,
2199 {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
2200 kIA32S32x4UnpackLow,
2201 kIA32S32x4UnpackLow,
2204 {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
2205 kIA32S32x4UnpackHigh,
2206 kIA32S32x4UnpackHigh,
2209 {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
2210 kIA32S16x8UnpackLow,
2211 kIA32S16x8UnpackLow,
2214 {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
2215 kIA32S16x8UnpackHigh,
2216 kIA32S16x8UnpackHigh,
2219 {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
2220 kIA32S8x16UnpackLow,
2221 kIA32S8x16UnpackLow,
2224 {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
2225 kIA32S8x16UnpackHigh,
2226 kIA32S8x16UnpackHigh,
2230 {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
2235 {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
2240 {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
2245 {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
2251 {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
2252 kSSES8x16TransposeLow,
2253 kAVXS8x16TransposeLow,
2256 {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
2257 kSSES8x16TransposeHigh,
2258 kAVXS8x16TransposeHigh,
2261 {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
2266 {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
2271 {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
2277 bool TryMatchArchShuffle(
const uint8_t* shuffle,
const ShuffleEntry* table,
2278 size_t num_entries,
bool is_swizzle,
2279 const ShuffleEntry** arch_shuffle) {
2280 uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
2281 for (
size_t i = 0;
i < num_entries; ++
i) {
2282 const ShuffleEntry& entry = table[
i];
2284 for (; j < kSimd128Size; ++j) {
2285 if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
2289 if (j == kSimd128Size) {
2290 *arch_shuffle = &entry;
// Lowers a generic S8x16 shuffle node to the cheapest IA32 instruction
// sequence: tries, in order, a byte-concat (palignr-style), a table-matched
// single-instruction shuffle, a 32x4-lane shuffle/blend, a 16x8-lane
// blend/dup/half-shuffle, a byte dup, and finally the fully general
// kIA32S8x16Shuffle with four packed-lane immediates and a temp register.
// NOTE(review): this extracted view is missing several interior lines
// (declarations of `offset`, `index`, `imms`, `imm_count`, `temp_count`, and
// a number of `else`/closing braces) — the annotations below describe the
// surviving code only; confirm details against the full file.
2299 void InstructionSelector::VisitS8x16Shuffle(Node* node) {
2300 uint8_t shuffle[kSimd128Size];
// Canonicalization also determines whether this is a swizzle (one input).
2302 CanonicalizeShuffle(node, shuffle, &is_swizzle);
2305 static const int kMaxImms = 6;
2308 static const int kMaxTemps = 2;
2309 InstructionOperand temps[kMaxTemps];
2311 IA32OperandGenerator g(
this);
2312 bool use_avx = CpuFeatures::IsSupported(AVX);
// AVX (3-operand) and swizzles don't need dst tied to src0; SSE matchers
// below may override these defaults per opcode.
2314 bool no_same_as_first = use_avx || is_swizzle;
2316 bool src0_needs_reg =
true;
2317 bool src1_needs_reg =
false;
2318 ArchOpcode opcode = kIA32S8x16Shuffle;
2321 uint8_t shuffle32x4[4];
2322 uint8_t shuffle16x8[8];
2324 const ShuffleEntry* arch_shuffle;
// Case 1: contiguous byte window across the two inputs -> palignr.
2325 if (TryMatchConcat(shuffle, &offset)) {
// palignr reads its operands in the opposite order, so swap inputs.
2327 SwapShuffleInputs(node);
2329 no_same_as_first = use_avx;
2330 opcode = kIA32S8x16Alignr;
2332 imms[imm_count++] = offset;
2333 }
// Case 2: pattern found in the arch_shuffles table (unpacks, transposes, ...).
else if (TryMatchArchShuffle(shuffle, arch_shuffles,
2334 arraysize(arch_shuffles), is_swizzle,
2336 opcode = use_avx ? arch_shuffle->avx_opcode : arch_shuffle->opcode;
2337 src0_needs_reg = !use_avx || arch_shuffle->src0_needs_reg;
2340 src1_needs_reg = use_avx && arch_shuffle->src1_needs_reg;
2341 no_same_as_first = use_avx;
2342 }
// Case 3: shuffle is expressible on 32-bit lanes.
else if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
2343 uint8_t shuffle_mask = PackShuffle4(shuffle32x4);
2345 if (TryMatchIdentity(shuffle)) {
// Single-input 32x4 permutation -> pshufd with an 8-bit lane mask.
2351 opcode = kIA32S32x4Swizzle;
2352 no_same_as_first =
true;
2353 src0_needs_reg =
false;
2354 imms[imm_count++] = shuffle_mask;
2359 if (TryMatchBlend(shuffle)) {
// Two-input lane-preserving select -> 16x8 blend with a packed mask.
2360 opcode = kIA32S16x8Blend;
2361 uint8_t blend_mask = PackBlend4(shuffle32x4);
2362 imms[imm_count++] = blend_mask;
// General two-input 32x4 shuffle: needs both a shuffle and a blend mask.
2364 opcode = kIA32S32x4Shuffle;
2365 no_same_as_first =
true;
2366 src0_needs_reg =
false;
2367 imms[imm_count++] = shuffle_mask;
2368 int8_t blend_mask = PackBlend4(shuffle32x4);
2369 imms[imm_count++] = blend_mask;
2372 }
// Case 4: shuffle is expressible on 16-bit lanes.
else if (TryMatch16x8Shuffle(shuffle, shuffle16x8)) {
2374 if (TryMatchBlend(shuffle)) {
2375 opcode = kIA32S16x8Blend;
2376 blend_mask = PackBlend8(shuffle16x8);
2377 imms[imm_count++] = blend_mask;
2378 }
// Broadcast of one 16-bit lane.
else if (TryMatchDup<8>(shuffle, &index)) {
2379 opcode = kIA32S16x8Dup;
2380 src0_needs_reg =
false;
2381 imms[imm_count++] = index;
2382 }
// Independent shuffles of the low and high 8-byte halves.
else if (TryMatch16x8HalfShuffle(shuffle16x8, &blend_mask)) {
2383 opcode = is_swizzle ? kIA32S16x8HalfShuffle1 : kIA32S16x8HalfShuffle2;
2385 no_same_as_first =
true;
2386 src0_needs_reg =
false;
2387 uint8_t mask_lo = PackShuffle4(shuffle16x8);
2388 uint8_t mask_hi = PackShuffle4(shuffle16x8 + 4);
2389 imms[imm_count++] = mask_lo;
2390 imms[imm_count++] = mask_hi;
// Two-input variant additionally needs the blend mask immediate.
2391 if (!is_swizzle) imms[imm_count++] = blend_mask;
2393 }
// Case 5: broadcast of one byte lane.
else if (TryMatchDup<16>(shuffle, &index)) {
2394 opcode = kIA32S8x16Dup;
2395 no_same_as_first = use_avx;
2396 src0_needs_reg =
true;
2397 imms[imm_count++] = index;
// Fallback: fully general byte shuffle — four packed 4-lane immediates plus
// a scratch register.
2399 if (opcode == kIA32S8x16Shuffle) {
2401 no_same_as_first = !is_swizzle;
2402 src0_needs_reg = !no_same_as_first;
2403 imms[imm_count++] = Pack4Lanes(shuffle);
2404 imms[imm_count++] = Pack4Lanes(shuffle + 4);
2405 imms[imm_count++] = Pack4Lanes(shuffle + 8);
2406 imms[imm_count++] = Pack4Lanes(shuffle + 12);
2407 temps[temp_count++] = g.TempRegister();
// Assemble operands: dst constraint, src0, src1, then the immediates.
2412 Node* input0 = node->InputAt(0);
2413 InstructionOperand dst =
2414 no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
2415 InstructionOperand src0 =
2416 src0_needs_reg ? g.UseRegister(input0) : g.Use(input0);
2418 int input_count = 0;
2419 InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
2420 inputs[input_count++] = src0;
2422 Node* input1 = node->InputAt(1);
2423 inputs[input_count++] =
2424 src1_needs_reg ? g.UseRegister(input1) : g.Use(input1);
2426 for (
int i = 0;
i < imm_count; ++
i) {
2427 inputs[input_count++] = g.UseImmediate(imms[
i]);
2429 Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
2433 MachineOperatorBuilder::Flags
2434 InstructionSelector::SupportedMachineOperatorFlags() {
2435 MachineOperatorBuilder::Flags flags =
2436 MachineOperatorBuilder::kWord32ShiftIsSafe |
2437 MachineOperatorBuilder::kWord32Ctz |
2438 MachineOperatorBuilder::kSpeculationFence;
2439 if (CpuFeatures::IsSupported(POPCNT)) {
2440 flags |= MachineOperatorBuilder::kWord32Popcnt;
2442 if (CpuFeatures::IsSupported(SSE4_1)) {
2443 flags |= MachineOperatorBuilder::kFloat32RoundDown |
2444 MachineOperatorBuilder::kFloat64RoundDown |
2445 MachineOperatorBuilder::kFloat32RoundUp |
2446 MachineOperatorBuilder::kFloat64RoundUp |
2447 MachineOperatorBuilder::kFloat32RoundTruncate |
2448 MachineOperatorBuilder::kFloat64RoundTruncate |
2449 MachineOperatorBuilder::kFloat32RoundTiesEven |
2450 MachineOperatorBuilder::kFloat64RoundTiesEven;
2456 MachineOperatorBuilder::AlignmentRequirements
2457 InstructionSelector::AlignmentRequirements() {
2458 return MachineOperatorBuilder::AlignmentRequirements::
2459 FullUnalignedAccessSupport();