V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instruction-selector.h
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_

#include <map>

#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Forward declarations.
class BasicBlock;
struct CallBuffer;  // TODO(bmeurer): Remove this.
class Linkage;
class OperandGenerator;
class SwitchInfo;
class StateObjectDeduplicator;

// The flags continuation is a way to combine a branch or a materialization
// of a boolean value with an instruction that sets the flags register.
// The whole instruction is treated as a unit by the register allocator, and
// thus no spills or moves can be introduced between the flags-setting
// instruction and the branch or set it should be combined with.
class FlagsContinuation final {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}

  // Creates a new flags continuation from the given condition and true/false
  // blocks.
  static FlagsContinuation ForBranch(FlagsCondition condition,
                                     BasicBlock* true_block,
                                     BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
  }

  static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
                                              BasicBlock* true_block,
                                              BasicBlock* false_block) {
    return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
                             false_block);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimize(FlagsCondition condition,
                                         DeoptimizeKind kind,
                                         DeoptimizeReason reason,
                                         VectorSlotPair const& feedback,
                                         Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize, condition, kind, reason,
                             feedback, frame_state);
  }

  // Creates a new flags continuation for an eager deoptimization exit.
  static FlagsContinuation ForDeoptimizeAndPoison(
      FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
      VectorSlotPair const& feedback, Node* frame_state) {
    return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
                             reason, feedback, frame_state);
  }

  // Creates a new flags continuation for a boolean value.
  static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
    return FlagsContinuation(condition, result);
  }

  // Creates a new flags continuation for a wasm trap.
  static FlagsContinuation ForTrap(FlagsCondition condition, TrapId trap_id,
                                   Node* result) {
    return FlagsContinuation(condition, trap_id, result);
  }

  bool IsNone() const { return mode_ == kFlags_none; }
  bool IsBranch() const {
    return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
  }
  bool IsDeoptimize() const {
    return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsPoisoned() const {
    return mode_ == kFlags_branch_and_poison ||
           mode_ == kFlags_deoptimize_and_poison;
  }
  bool IsSet() const { return mode_ == kFlags_set; }
  bool IsTrap() const { return mode_ == kFlags_trap; }
  FlagsCondition condition() const {
    DCHECK(!IsNone());
    return condition_;
  }
  DeoptimizeKind kind() const {
    DCHECK(IsDeoptimize());
    return kind_;
  }
  DeoptimizeReason reason() const {
    DCHECK(IsDeoptimize());
    return reason_;
  }
  VectorSlotPair const& feedback() const {
    DCHECK(IsDeoptimize());
    return feedback_;
  }
  Node* frame_state() const {
    DCHECK(IsDeoptimize());
    return frame_state_or_result_;
  }
  Node* result() const {
    DCHECK(IsSet());
    return frame_state_or_result_;
  }
  TrapId trap_id() const {
    DCHECK(IsTrap());
    return trap_id_;
  }
  BasicBlock* true_block() const {
    DCHECK(IsBranch());
    return true_block_;
  }
  BasicBlock* false_block() const {
    DCHECK(IsBranch());
    return false_block_;
  }

  void Negate() {
    DCHECK(!IsNone());
    condition_ = NegateFlagsCondition(condition_);
  }

  void Commute() {
    DCHECK(!IsNone());
    condition_ = CommuteFlagsCondition(condition_);
  }

  void Overwrite(FlagsCondition condition) { condition_ = condition; }

  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
    DCHECK(condition_ == kEqual || condition_ == kNotEqual);
    bool negate = condition_ == kEqual;
    condition_ = condition;
    if (negate) Negate();
  }

  void OverwriteUnsignedIfSigned() {
    switch (condition_) {
      case kSignedLessThan:
        condition_ = kUnsignedLessThan;
        break;
      case kSignedLessThanOrEqual:
        condition_ = kUnsignedLessThanOrEqual;
        break;
      case kSignedGreaterThan:
        condition_ = kUnsignedGreaterThan;
        break;
      case kSignedGreaterThanOrEqual:
        condition_ = kUnsignedGreaterThanOrEqual;
        break;
      default:
        break;
    }
  }

  // Encodes this flags continuation into the given opcode.
  InstructionCode Encode(InstructionCode opcode) {
    opcode |= FlagsModeField::encode(mode_);
    if (mode_ != kFlags_none) {
      opcode |= FlagsConditionField::encode(condition_);
    }
    return opcode;
  }

 private:
  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    BasicBlock* true_block, BasicBlock* false_block)
      : mode_(mode),
        condition_(condition),
        true_block_(true_block),
        false_block_(false_block) {
    DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
    DCHECK_NOT_NULL(true_block);
    DCHECK_NOT_NULL(false_block);
  }

  FlagsContinuation(FlagsMode mode, FlagsCondition condition,
                    DeoptimizeKind kind, DeoptimizeReason reason,
                    VectorSlotPair const& feedback, Node* frame_state)
      : mode_(mode),
        condition_(condition),
        kind_(kind),
        reason_(reason),
        feedback_(feedback),
        frame_state_or_result_(frame_state) {
    DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
    DCHECK_NOT_NULL(frame_state);
  }

  FlagsContinuation(FlagsCondition condition, Node* result)
      : mode_(kFlags_set),
        condition_(condition),
        frame_state_or_result_(result) {
    DCHECK_NOT_NULL(result);
  }

  FlagsContinuation(FlagsCondition condition, TrapId trap_id, Node* result)
      : mode_(kFlags_trap),
        condition_(condition),
        frame_state_or_result_(result),
        trap_id_(trap_id) {
    DCHECK_NOT_NULL(result);
  }

  FlagsMode const mode_;
  FlagsCondition condition_;
  DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize*
  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize*
  VectorSlotPair feedback_;      // Only valid if mode_ == kFlags_deoptimize*
  Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize*
                                 // or mode_ == kFlags_set.
  BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch*.
  BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch*.
  TrapId trap_id_;               // Only valid if mode_ == kFlags_trap.
};
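
// Usage sketch (illustrative; `some_compare_opcode`, `true_block`, and
// `false_block` are placeholders): a selector fuses a compare with the
// branch that consumes it roughly like this:
//
//   FlagsContinuation cont =
//       FlagsContinuation::ForBranch(kEqual, true_block, false_block);
//   cont.Negate();  // branch on kNotEqual instead
//   InstructionCode opcode = cont.Encode(some_compare_opcode);
//   // opcode now carries kFlags_branch in FlagsModeField and kNotEqual in
//   // FlagsConditionField, so the compare and branch are emitted as a unit.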

// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
struct PushParameter {
  PushParameter(Node* n = nullptr,
                LinkageLocation l = LinkageLocation::ForAnyRegister())
      : node(n), location(l) {}

  Node* node;
  LinkageLocation location;
};
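
// Example (illustrative; `argument_node` is a placeholder): by default a
// PushParameter leaves the location unconstrained.
//
//   PushParameter param(argument_node);  // location == ForAnyRegister()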

enum class FrameStateInputKind { kAny, kStackSlot };

// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
 public:
  // Forward declarations.
  class Features;

  enum SourcePositionMode { kCallSourcePositions, kAllSourcePositions };
  enum EnableScheduling { kDisableScheduling, kEnableScheduling };
  enum EnableRootsRelativeAddressing {
    kDisableRootsRelativeAddressing,
    kEnableRootsRelativeAddressing
  };
  enum EnableSwitchJumpTable {
    kDisableSwitchJumpTable,
    kEnableSwitchJumpTable
  };
  enum EnableTraceTurboJson { kDisableTraceTurboJson, kEnableTraceTurboJson };

  InstructionSelector(
      Zone* zone, size_t node_count, Linkage* linkage,
      InstructionSequence* sequence, Schedule* schedule,
      SourcePositionTable* source_positions, Frame* frame,
      EnableSwitchJumpTable enable_switch_jump_table,
      SourcePositionMode source_position_mode = kCallSourcePositions,
      Features features = SupportedFeatures(),
      EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
                                               ? kEnableScheduling
                                               : kDisableScheduling,
      EnableRootsRelativeAddressing enable_roots_relative_addressing =
          kDisableRootsRelativeAddressing,
      PoisoningMitigationLevel poisoning_level =
          PoisoningMitigationLevel::kDontPoison,
      EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);

  // Visit code for the entire graph with the included schedule.
  bool SelectInstructions();

  void StartBlock(RpoNumber rpo);
  void EndBlock(RpoNumber rpo);
  void AddInstruction(Instruction* instr);
  void AddTerminator(Instruction* instr);

  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, InstructionOperand output,
                    InstructionOperand a, InstructionOperand b,
                    InstructionOperand c, InstructionOperand d,
                    InstructionOperand e, InstructionOperand f,
                    size_t temp_count = 0, InstructionOperand* temps = nullptr);
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand* outputs, size_t input_count,
                    InstructionOperand* inputs, size_t temp_count = 0,
                    InstructionOperand* temps = nullptr);
  Instruction* Emit(Instruction* instr);

  // [0-3] operand instructions with no output; the labels for the true and
  // false blocks come from the continuation.
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode,
                                    InstructionOperand a, InstructionOperand b,
                                    InstructionOperand c,
                                    FlagsContinuation* cont);
  Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
                                    InstructionOperand* outputs,
                                    size_t input_count,
                                    InstructionOperand* inputs,
                                    FlagsContinuation* cont);

  // ===========================================================================
  // ===== Architecture-independent deoptimization exit emission methods. ======
  // ===========================================================================
  Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                              InstructionOperand* outputs, size_t input_count,
                              InstructionOperand* inputs, DeoptimizeKind kind,
                              DeoptimizeReason reason,
                              VectorSlotPair const& feedback,
                              Node* frame_state);

  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  class Features final {
   public:
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

   private:
    unsigned bits_;
  };
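
  // Usage sketch (illustrative; AVX stands in for any CpuFeature of the
  // target): selectors test features before emitting optional encodings.
  //
  //   if (IsSupported(AVX)) {
  //     // emit the AVX form of the instruction
  //   } else {
  //     // emit the baseline form
  //   }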

  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);
  }

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());
  }

  // TODO(sigurds) This should take a CpuFeatures argument.
  static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();

  static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();

  bool NeedsPoisoning(IsSafetyCheck safety_check) const;

  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node holds its
  // only use edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;
  // CanCover is not transitive. A counterexample is nodes A, B, C such that
  // CanCover(A, B) and CanCover(B, C) hold and B is pure: the effect levels of
  // A and B might differ. CanCoverTransitively performs the additional checks.
  bool CanCoverTransitively(Node* user, Node* node, Node* node_input) const;

  // Used in pattern matching during code generation.
  // This function checks that {node} and {user} are in the same basic block,
  // and that {user} is the only user of {node} in this basic block. This
  // check guarantees that there are no users of {node} scheduled between
  // {node} and {user}, and thus we can select a single instruction for both
  // nodes, if such an instruction exists. This check can be used for example
  // when selecting instructions for:
  //   n = Int32Add(a, b)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // Here we can generate a flag-setting add instruction, even if the add has
  // uses in other basic blocks, since the flag-setting add instruction will
  // still generate the result of the addition and not just set the flags.
  // However, if we had uses of the add in the same basic block, we could have:
  //   n = Int32Add(a, b)
  //   o = OtherOp(n, ...)
  //   c = Word32Compare(n, 0, cond)
  //   Branch(c, true_label, false_label)
  // where we cannot select the add and the compare together. If we were to
  // select a flag-setting add instruction for Word32Compare and Int32Add while
  // visiting Word32Compare, we would then have to select an instruction for
  // OtherOp *afterwards*, which means we would attempt to use the result of
  // the add before we have defined it.
  bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Checks if {node} is currently live.
  bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }

  // Gets the effect level of {node}.
  int GetEffectLevel(Node* node) const;

  int GetVirtualRegister(const Node* node);
  const std::map<NodeId, int> GetVirtualRegistersForTesting() const;

  // Check if we can generate loads and stores of ExternalConstants relative
  // to the roots register.
  bool CanAddressRelativeToRootsRegister() const;
  // Check if we can use the roots register to access GC roots.
  bool CanUseRootsRegister() const;

  Isolate* isolate() const { return sequence()->isolate(); }

  const ZoneVector<std::pair<int, int>>& instr_origins() const {
    return instr_origins_;
  }

  // Expose these SIMD helper functions for testing.
  static void CanonicalizeShuffleForTesting(bool inputs_equal, uint8_t* shuffle,
                                            bool* needs_swap,
                                            bool* is_swizzle) {
    CanonicalizeShuffle(inputs_equal, shuffle, needs_swap, is_swizzle);
  }

  static bool TryMatchIdentityForTesting(const uint8_t* shuffle) {
    return TryMatchIdentity(shuffle);
  }
  template <int LANES>
  static bool TryMatchDupForTesting(const uint8_t* shuffle, int* index) {
    return TryMatchDup<LANES>(shuffle, index);
  }
  static bool TryMatch32x4ShuffleForTesting(const uint8_t* shuffle,
                                            uint8_t* shuffle32x4) {
    return TryMatch32x4Shuffle(shuffle, shuffle32x4);
  }
  static bool TryMatch16x8ShuffleForTesting(const uint8_t* shuffle,
                                            uint8_t* shuffle16x8) {
    return TryMatch16x8Shuffle(shuffle, shuffle16x8);
  }
  static bool TryMatchConcatForTesting(const uint8_t* shuffle,
                                       uint8_t* offset) {
    return TryMatchConcat(shuffle, offset);
  }
  static bool TryMatchBlendForTesting(const uint8_t* shuffle) {
    return TryMatchBlend(shuffle);
  }

 private:
  friend class OperandGenerator;

  bool UseInstructionScheduling() const {
    return (enable_scheduling_ == kEnableScheduling) &&
           InstructionScheduler::SchedulerSupported();
  }

  void AppendDeoptimizeArguments(InstructionOperandVector* args,
                                 DeoptimizeKind kind, DeoptimizeReason reason,
                                 VectorSlotPair const& feedback,
                                 Node* frame_state);

  void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
  void EmitLookupSwitch(const SwitchInfo& sw,
                        InstructionOperand& value_operand);
  void EmitBinarySearchSwitch(const SwitchInfo& sw,
                              InstructionOperand& value_operand);

  void TryRename(InstructionOperand* op);
  int GetRename(int virtual_register);
  void SetRename(const Node* node, const Node* rename);
  void UpdateRenames(Instruction* instruction);
  void UpdateRenamesInPhi(PhiInstruction* phi);

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Sets the effect level of {node}.
  void SetEffectLevel(Node* node, int effect_level);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineRepresentation rep, Node* node);
  void MarkAsWord32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord32, node);
  }
  void MarkAsWord64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kWord64, node);
  }
  void MarkAsFloat32(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat32, node);
  }
  void MarkAsFloat64(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kFloat64, node);
  }
  void MarkAsSimd128(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kSimd128, node);
  }
  void MarkAsReference(Node* node) {
    MarkAsRepresentation(MachineRepresentation::kTagged, node);
  }

  // Inform the register allocation of the representation of the unallocated
  // operand {op}.
  void MarkAsRepresentation(MachineRepresentation rep,
                            const InstructionOperand& op);

  enum CallBufferFlag {
    kCallCodeImmediate = 1u << 0,
    kCallAddressImmediate = 1u << 1,
    kCallTail = 1u << 2,
    kCallFixedTargetRegister = 1u << 3,
    kAllowCallThroughSlot = 1u << 4
  };
  typedef base::Flags<CallBufferFlag> CallBufferFlags;

  // Initialize the call buffer with the InstructionOperands, nodes, etc.,
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            CallBufferFlags flags, bool is_tail_call,
                            int stack_slot_delta = 0);
  bool IsTailCallAddressImmediate();
  int GetTempsCountForTailCallFromJSFunction();

  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
                                         Node* state, OperandGenerator* g,
                                         StateObjectDeduplicator* deduplicator,
                                         InstructionOperandVector* inputs,
                                         FrameStateInputKind kind, Zone* zone);
  size_t AddOperandToStateValueDescriptor(StateValueList* values,
                                          InstructionOperandVector* inputs,
                                          OperandGenerator* g,
                                          StateObjectDeduplicator* deduplicator,
                                          Node* input, MachineType type,
                                          FrameStateInputKind kind, Zone* zone);

  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

  // Visit the node and generate code for IEEE 754 functions.
  void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
  void VisitFloat64Ieee754Unop(Node*, InstructionCode code);

#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  void VisitFinishRegion(Node* node);
  void VisitParameter(Node* node);
  void VisitIfException(Node* node);
  void VisitOsrValue(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* handler = nullptr);
  void VisitCallWithCallerSavedRegisters(Node* call,
                                         BasicBlock* handler = nullptr);
  void VisitDeoptimizeIf(Node* node);
  void VisitDeoptimizeUnless(Node* node);
  void VisitTrapIf(Node* node, TrapId trap_id);
  void VisitTrapUnless(Node* node, TrapId trap_id);
  void VisitTailCall(Node* call);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitSwitch(Node* node, const SwitchInfo& sw);
  void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
                       VectorSlotPair const& feedback, Node* value);
  void VisitReturn(Node* ret);
  void VisitThrow(Node* node);
  void VisitRetain(Node* node);
  void VisitUnreachable(Node* node);
  void VisitDeadValue(Node* node);

  void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);

  void EmitWordPoisonOnSpeculation(Node* node);

  void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
                            const CallDescriptor* call_descriptor, Node* node);
  void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
                          const CallDescriptor* call_descriptor, Node* node);

  void EmitIdentity(Node* node);
  bool CanProduceSignalingNaN(Node* node);

  // ===========================================================================
  // ============= Vector instruction (SIMD) helper fns. =======================
  // ===========================================================================

  // Converts a shuffle into canonical form, meaning that the first lane index
  // is in the range [0 .. 15]. Set |inputs_equal| true if this is an explicit
  // swizzle. Returns canonicalized |shuffle|, |needs_swap|, and |is_swizzle|.
  // If |needs_swap| is true, inputs must be swapped. If |is_swizzle| is true,
  // the second input can be ignored.
  static void CanonicalizeShuffle(bool inputs_equal, uint8_t* shuffle,
                                  bool* needs_swap, bool* is_swizzle);

  // Canonicalize shuffles to make pattern matching simpler. Returns the
  // shuffle indices, and a boolean indicating if the shuffle is a swizzle
  // (one input).
  void CanonicalizeShuffle(Node* node, uint8_t* shuffle, bool* is_swizzle);

  // Swaps the first two input operands of the node, to help match shuffles
  // to specific architectural instructions.
  void SwapShuffleInputs(Node* node);

  // Tries to match an 8x16 byte shuffle to the identity shuffle, which is
  // [0 1 ... 15]. This should be called after canonicalizing the shuffle, so
  // that the second identity shuffle, [16 17 .. 31], is converted to the
  // first one.
  static bool TryMatchIdentity(const uint8_t* shuffle);

  // Tries to match a byte shuffle to a scalar splat operation. Returns the
  // index of the lane if successful.
  template <int LANES>
  static bool TryMatchDup(const uint8_t* shuffle, int* index) {
    const int kBytesPerLane = kSimd128Size / LANES;
    // Get the first lane's worth of bytes and check that indices start at a
    // lane boundary and are consecutive.
    uint8_t lane0[kBytesPerLane];
    lane0[0] = shuffle[0];
    if (lane0[0] % kBytesPerLane != 0) return false;
    for (int i = 1; i < kBytesPerLane; ++i) {
      lane0[i] = shuffle[i];
      if (lane0[i] != lane0[0] + i) return false;
    }
    // Now check that the other lanes are identical to lane0.
    for (int i = 1; i < LANES; ++i) {
      for (int j = 0; j < kBytesPerLane; ++j) {
        if (lane0[j] != shuffle[i * kBytesPerLane + j]) return false;
      }
    }
    *index = lane0[0] / kBytesPerLane;
    return true;
  }
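
  // Worked example (illustrative): with LANES == 4, kBytesPerLane is 4, so the
  // byte shuffle {8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11}
  // replicates 32-bit lane 2 of the input; TryMatchDup<4> returns true and
  // sets *index to 2 (shuffle[0] / kBytesPerLane == 8 / 4).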

  // Tries to match an 8x16 byte shuffle to an equivalent 32x4 shuffle. If
  // successful, it writes the 32x4 shuffle word indices. E.g.
  // [0 1 2 3 8 9 10 11 4 5 6 7 12 13 14 15] == [0 2 1 3]
  static bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4);

  // Tries to match an 8x16 byte shuffle to an equivalent 16x8 shuffle. If
  // successful, it writes the 16x8 shuffle word indices. E.g.
  // [0 1 8 9 2 3 10 11 4 5 12 13 6 7 14 15] == [0 4 1 5 2 6 3 7]
  static bool TryMatch16x8Shuffle(const uint8_t* shuffle, uint8_t* shuffle16x8);

  // Tries to match a byte shuffle to a concatenate operation, formed by taking
  // 16 bytes from the 32 byte concatenation of the inputs. If successful, it
  // writes the byte offset. E.g. [4 5 6 7 .. 16 17 18 19] concatenates both
  // source vectors with offset 4. The shuffle should be canonicalized.
  static bool TryMatchConcat(const uint8_t* shuffle, uint8_t* offset);

  // Tries to match a byte shuffle to a blend operation, which is a shuffle
  // where no lanes change position. E.g. [0 9 2 11 .. 14 31] interleaves the
  // even lanes of the first source with the odd lanes of the second. The
  // shuffle should be canonicalized.
  static bool TryMatchBlend(const uint8_t* shuffle);

  // Packs 4 bytes of shuffle into a 32 bit immediate.
  static int32_t Pack4Lanes(const uint8_t* shuffle);
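
  // Example (illustrative; assumes the little-endian packing done in the
  // implementation file): shuffle bytes {0, 1, 2, 3} pack into the immediate
  // 0x03020100, i.e. shuffle[0] ends up in the least significant byte.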

  // ===========================================================================

  Schedule* schedule() const { return schedule_; }
  Linkage* linkage() const { return linkage_; }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  Zone* zone() const { return zone_; }

  void set_instruction_selection_failed() {
    instruction_selection_failed_ = true;
  }
  bool instruction_selection_failed() { return instruction_selection_failed_; }

  void MarkPairProjectionsAsWord32(Node* node);
  bool IsSourcePositionUsed(Node* node);
  void VisitWord32AtomicBinaryOperation(Node* node, ArchOpcode int8_op,
                                        ArchOpcode uint8_op,
                                        ArchOpcode int16_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode word32_op);
  void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
                                        ArchOpcode uint16_op,
                                        ArchOpcode uint32_op,
                                        ArchOpcode uint64_op);
  void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
                                    ArchOpcode uint16_op, ArchOpcode uint32_op);

  // ===========================================================================

  Zone* const zone_;
  Linkage* const linkage_;
  InstructionSequence* const sequence_;
  SourcePositionTable* const source_positions_;
  SourcePositionMode const source_position_mode_;
  Features features_;
  Schedule* const schedule_;
  BasicBlock* current_block_;
  ZoneVector<Instruction*> instructions_;
  InstructionOperandVector continuation_inputs_;
  InstructionOperandVector continuation_outputs_;
  BoolVector defined_;
  BoolVector used_;
  IntVector effect_level_;
  IntVector virtual_registers_;
  IntVector virtual_register_rename_;
  InstructionScheduler* scheduler_;
  EnableScheduling enable_scheduling_;
  EnableRootsRelativeAddressing enable_roots_relative_addressing_;
  EnableSwitchJumpTable enable_switch_jump_table_;

  PoisoningMitigationLevel poisoning_level_;
  Frame* frame_;
  bool instruction_selection_failed_;
  ZoneVector<std::pair<int, int>> instr_origins_;
  EnableTraceTurboJson trace_turbo_;
};

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_H_