// V8 API Reference 7.2.502.16 (for Deno 0.2.4) — instruction-scheduler.h
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
6 #define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
7 
8 #include "src/compiler/backend/instruction.h"
9 #include "src/zone/zone-containers.h"
10 
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14 
// A set of flags describing properties of the instructions so that the
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
  kNoOpcodeFlags = 0,
  kHasSideEffect = 1,    // The instruction has some side effects (memory
                         // store, function call...)
  kIsLoadOperation = 2,  // The instruction is a memory load.
  kMayNeedDeoptOrTrapCheck = 4,  // The instruction may be associated with a
                                 // deopt or trap check which must be run
                                 // before instruction e.g. div on Intel
                                 // platform which will raise an exception
                                 // when the divisor is zero.
};
28 
29 class InstructionScheduler final : public ZoneObject {
30  public:
32 
33  void StartBlock(RpoNumber rpo);
34  void EndBlock(RpoNumber rpo);
35 
36  void AddInstruction(Instruction* instr);
37  void AddTerminator(Instruction* instr);
38 
39  static bool SchedulerSupported();
40 
41  private:
42  // A scheduling graph node.
43  // Represent an instruction and their dependencies.
44  class ScheduleGraphNode : public ZoneObject {
45  public:
46  ScheduleGraphNode(Zone* zone, Instruction* instr);
47 
48  // Mark the instruction represented by 'node' as a dependecy of this one.
49  // The current instruction will be registered as an unscheduled predecessor
50  // of 'node' (i.e. it must be scheduled before 'node').
51  void AddSuccessor(ScheduleGraphNode* node);
52 
53  // Check if all the predecessors of this instruction have been scheduled.
54  bool HasUnscheduledPredecessor() {
55  return unscheduled_predecessors_count_ != 0;
56  }
57 
58  // Record that we have scheduled one of the predecessors of this node.
59  void DropUnscheduledPredecessor() {
60  DCHECK_LT(0, unscheduled_predecessors_count_);
61  unscheduled_predecessors_count_--;
62  }
63 
64  Instruction* instruction() { return instr_; }
65  ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
66  int latency() const { return latency_; }
67 
68  int total_latency() const { return total_latency_; }
69  void set_total_latency(int latency) { total_latency_ = latency; }
70 
71  int start_cycle() const { return start_cycle_; }
72  void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }
73 
74  private:
75  Instruction* instr_;
77 
78  // Number of unscheduled predecessors for this node.
79  int unscheduled_predecessors_count_;
80 
81  // Estimate of the instruction latency (the number of cycles it takes for
82  // instruction to complete).
83  int latency_;
84 
85  // The sum of all the latencies on the path from this node to the end of
86  // the graph (i.e. a node with no successor).
87  int total_latency_;
88 
89  // The scheduler keeps a nominal cycle count to keep track of when the
90  // result of an instruction is available. This field is updated by the
91  // scheduler to indicate when the value of all the operands of this
92  // instruction will be available.
93  int start_cycle_;
94  };
95 
96  // Keep track of all nodes ready to be scheduled (i.e. all their dependencies
97  // have been scheduled. Note that this class is inteded to be extended by
98  // concrete implementation of the scheduling queue which define the policy
99  // to pop node from the queue.
100  class SchedulingQueueBase {
101  public:
102  explicit SchedulingQueueBase(InstructionScheduler* scheduler)
103  : scheduler_(scheduler), nodes_(scheduler->zone()) {}
104 
105  void AddNode(ScheduleGraphNode* node);
106 
107  bool IsEmpty() const { return nodes_.empty(); }
108 
109  protected:
110  InstructionScheduler* scheduler_;
112  };
113 
114  // A scheduling queue which prioritize nodes on the critical path (we look
115  // for the instruction with the highest latency on the path to reach the end
116  // of the graph).
117  class CriticalPathFirstQueue : public SchedulingQueueBase {
118  public:
119  explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
120  : SchedulingQueueBase(scheduler) {}
121 
122  // Look for the best candidate to schedule, remove it from the queue and
123  // return it.
124  ScheduleGraphNode* PopBestCandidate(int cycle);
125  };
126 
127  // A queue which pop a random node from the queue to perform stress tests on
128  // the scheduler.
129  class StressSchedulerQueue : public SchedulingQueueBase {
130  public:
131  explicit StressSchedulerQueue(InstructionScheduler* scheduler)
132  : SchedulingQueueBase(scheduler) {}
133 
134  ScheduleGraphNode* PopBestCandidate(int cycle);
135 
136  private:
137  Isolate* isolate() { return scheduler_->isolate(); }
138  };
139 
140  // Perform scheduling for the current block specifying the queue type to
141  // use to determine the next best candidate.
142  template <typename QueueType>
143  void ScheduleBlock();
144 
145  // Return the scheduling properties of the given instruction.
146  int GetInstructionFlags(const Instruction* instr) const;
147  int GetTargetInstructionFlags(const Instruction* instr) const;
148 
149  // Check whether the given instruction has side effects (e.g. function call,
150  // memory store).
151  bool HasSideEffect(const Instruction* instr) const {
152  return (GetInstructionFlags(instr) & kHasSideEffect) != 0;
153  }
154 
155  // Return true if the instruction is a memory load.
156  bool IsLoadOperation(const Instruction* instr) const {
157  return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
158  }
159 
160  // The scheduler will not move the following instructions before the last
161  // deopt/trap check:
162  // * loads (this is conservative)
163  // * instructions with side effect
164  // * other deopts/traps
165  // Any other instruction can be moved, apart from those that raise exceptions
166  // on specific inputs - these are filtered out by the deopt/trap check.
167  bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
168  return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
169  }
170 
171  // Return true if the instruction cannot be moved before the last deopt or
172  // trap point we encountered.
173  bool DependsOnDeoptOrTrap(const Instruction* instr) const {
174  return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
175  instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
176  }
177 
178  // Identify nops used as a definition point for live-in registers at
179  // function entry.
180  bool IsFixedRegisterParameter(const Instruction* instr) const {
181  return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
182  (instr->OutputAt(0)->IsUnallocated()) &&
183  (UnallocatedOperand::cast(instr->OutputAt(0))
184  ->HasFixedRegisterPolicy() ||
185  UnallocatedOperand::cast(instr->OutputAt(0))
186  ->HasFixedFPRegisterPolicy());
187  }
188 
189  void ComputeTotalLatencies();
190 
191  static int GetInstructionLatency(const Instruction* instr);
192 
193  Zone* zone() { return zone_; }
194  InstructionSequence* sequence() { return sequence_; }
195  Isolate* isolate() { return sequence()->isolate(); }
196 
197  Zone* zone_;
198  InstructionSequence* sequence_;
200 
201  friend class InstructionSchedulerTester;
202 
203  // Last side effect instruction encountered while building the graph.
204  ScheduleGraphNode* last_side_effect_instr_;
205 
206  // Set of load instructions encountered since the last side effect instruction
207  // which will be added as predecessors of the next instruction with side
208  // effects.
209  ZoneVector<ScheduleGraphNode*> pending_loads_;
210 
211  // Live-in register markers are nop instructions which are emitted at the
212  // beginning of a basic block so that the register allocator will find a
213  // defining instruction for live-in values. They must not be moved.
214  // All these nops are chained together and added as a predecessor of every
215  // other instructions in the basic block.
216  ScheduleGraphNode* last_live_in_reg_marker_;
217 
218  // Last deoptimization or trap instruction encountered while building the
219  // graph.
220  ScheduleGraphNode* last_deopt_or_trap_;
221 
222  // Keep track of definition points for virtual registers. This is used to
223  // record operand dependencies in the scheduling graph.
225 };
226 
227 } // namespace compiler
228 } // namespace internal
229 } // namespace v8
230 
231 #endif // V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
// (doc-extraction residue: the scraped page cross-referenced "Definition: libplatform.h:13"; not part of this header.)