#include "src/compiler/scheduler.h"

#include "src/base/adapters.h"
#include "src/bit-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)                                       \
  do {                                                   \
    if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
  } while (false)

Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
                     size_t node_count_hint)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      flags_(flags),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(zone) {
  node_data_.reserve(node_count_hint);
  node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}
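
// ComputeSchedule is the main entry point: it builds the control-flow graph,
// computes the special RPO and dominator tree, prepares use counts, runs
// schedule early and schedule late, and finally seals the schedule.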
Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
  Zone* schedule_zone =
      (flags & Scheduler::kTempSchedule) ? zone : graph->zone();

  // Reserve 10% more space for nodes if node splitting is enabled to prevent
  // vector reallocation.
  float node_hint_multiplier = (flags & Scheduler::kSplitNodes) ? 1.1 : 1;
  size_t node_count_hint = node_hint_multiplier * graph->NodeCount();

  Schedule* schedule =
      new (schedule_zone) Schedule(schedule_zone, node_count_hint);
  Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);

  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateImmediateDominatorTree();

  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  scheduler.SealFinalSchedule();

  return schedule;
}
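
// Per-node data tracked during scheduling: the minimum block computed by
// schedule early, the number of unscheduled uses, and the placement state.
// Placement advances monotonically: kUnknown nodes become kFixed, kCoupled or
// kSchedulable during initialization, and schedulable nodes become kScheduled
// once they are placed in a block.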
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
  SchedulerData def = {schedule_->start(), 0, kUnknown};
  return def;
}
Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
  return &node_data_[node->id()];
}
Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kFixed) {
    // Nothing to do for control nodes that have already been fixed in the
    // schedule.
    return data->placement_;
  }
  DCHECK_EQ(kUnknown, data->placement_);
  switch (node->opcode()) {
    case IrOpcode::kParameter:
    case IrOpcode::kOsrValue:
      // Parameters and OSR values are always fixed to the start block.
      data->placement_ = kFixed;
      break;
    case IrOpcode::kPhi:
    case IrOpcode::kEffectPhi: {
      // Phis and effect phis are fixed if their control inputs are, whereas
      // otherwise they are coupled to a floating control node.
      Placement p = GetPlacement(NodeProperties::GetControlInput(node));
      data->placement_ = (p == kFixed ? kFixed : kCoupled);
      break;
    }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes that were not control-reachable from end may float.
        data->placement_ = kSchedulable;
        break;
      }
    default:
      data->placement_ = kSchedulable;
      break;
  }
  return data->placement_;
}
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  return GetData(node)->placement_;
}

bool Scheduler::IsLive(Node* node) { return GetPlacement(node) != kUnknown; }
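
// UpdatePlacement is the only way a placement advances: control nodes go from
// kUnknown to kFixed when the CFG is built, coupled phis are placed together
// with their control node, and schedulable nodes become kScheduled. Each
// transition decrements the unscheduled-use counts of the node's inputs.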
void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {
    // We only update control nodes from {kUnknown} to {kFixed}.
    DCHECK_EQ(Scheduler::kFixed, placement);
    data->placement_ = placement;
    return;
  }

  switch (node->opcode()) {
    case IrOpcode::kParameter:
      // Parameters are fixed once and for all.
      UNREACHABLE();
      break;
    case IrOpcode::kPhi:
    case IrOpcode::kEffectPhi: {
      // Phis and effect phis are coupled to their respective blocks.
      DCHECK_EQ(Scheduler::kCoupled, data->placement_);
      DCHECK_EQ(Scheduler::kFixed, placement);
      Node* control = NodeProperties::GetControlInput(node);
      BasicBlock* block = schedule_->block(control);
      schedule_->AddNode(block, node);
      break;
    }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes force coupled uses to be placed.
        for (auto use : node->uses()) {
          if (GetPlacement(use) == Scheduler::kCoupled) {
            DCHECK_EQ(node, NodeProperties::GetControlInput(use));
            UpdatePlacement(use, placement);
          }
        }
        break;
      }
    default:
      DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
      DCHECK_EQ(Scheduler::kScheduled, placement);
      break;
  }

  // Reduce the use count of the node's inputs to potentially make them
  // schedulable. If all the uses of a node have been scheduled, then the node
  // itself can be scheduled.
  for (Edge const edge : node->input_edges()) {
    DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
  }
  data->placement_ = placement;
}
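
// Unscheduled-use counting: a node becomes eligible for scheduling (and is
// pushed onto schedule_queue_) once all of its uses have been scheduled.
// Coupled nodes do not track counts of their own; their uses are summed up on
// the control node they are coupled to.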
bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
  return GetPlacement(node) == kCoupled &&
         NodeProperties::FirstControlIndex(node) == index;
}
void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return IncrementUnscheduledUseCount(control, index, from);
  }

  ++(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}
void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return DecrementUnscheduledUseCount(control, index, from);
  }

  DCHECK_LT(0, GetData(node)->unscheduled_count_);
  --(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    TRACE("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}
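
// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.


// Internal class to build a control flow graph (i.e the basic blocks and the
// edges between them within a Schedule) from the node graph. Visits control
// edges of the graph backwards from an end node in order to find the connected
// control component.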
class CFGBuilder : public ZoneObject {
 public:
  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : zone_(zone),
        scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queued_(scheduler->graph_, 2),
        queue_(zone),
        control_(zone),
        component_entry_(nullptr),
        component_start_(nullptr),
        component_end_(nullptr) {}
  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    ResetDataStructures();
    Queue(scheduler_->graph_->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }
  // Run the control flow graph construction for a minimal control-connected
  // component ending in {exit} and merge that component into an existing
  // control flow graph at the bottom of {block}.
  void Run(BasicBlock* block, Node* exit) {
    ResetDataStructures();
    Queue(exit);

    component_entry_ = nullptr;
    component_start_ = block;
    component_end_ = schedule_->block(exit);
    scheduler_->equivalence_->Run(exit);
    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();

      // Use control dependence equivalence to find a canonical single-entry
      // single-exit region that makes up a minimal component to be scheduled.
      if (IsSingleEntrySingleExitRegion(node, exit)) {
        TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
        DCHECK(!component_entry_);
        component_entry_ = node;
        continue;
      }

      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }
    DCHECK(component_entry_);

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }
  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
  }
  void Queue(Node* node) {
    // Mark the connected control nodes as they are queued.
    if (!queued_.Get(node)) {
      BuildBlocks(node);
      queue_.push(node);
      queued_.Set(node, true);
      control_.push_back(node);
    }
  }
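
  // Create the blocks required by {node}: end/start are fixed to the
  // schedule's end/start blocks, loops and merges get blocks of their own,
  // and branches, switches and exceptional calls get a block per control
  // projection.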
  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kEnd:
        FixNode(schedule_->end(), node);
        break;
      case IrOpcode::kStart:
        FixNode(schedule_->start(), node);
        break;
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kTerminate: {
        // Put Terminate in the loop to which it refers.
        Node* loop = NodeProperties::GetControlInput(node);
        BasicBlock* block = BuildBlockForNode(loop);
        FixNode(block, node);
        break;
      }
      case IrOpcode::kBranch:
      case IrOpcode::kSwitch:
        BuildBlocksForSuccessors(node);
        break;
#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
        JS_OP_LIST(BUILD_BLOCK_JS_CASE)
#undef BUILD_BLOCK_JS_CASE
      // JS opcodes are just like calls => fall through.
      case IrOpcode::kCall:
      case IrOpcode::kCallWithCallerSavedRegisters:
        if (NodeProperties::IsExceptionalCall(node)) {
          BuildBlocksForSuccessors(node);
        }
        break;
      default:
        break;
    }
  }
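
  // Wire a previously built block to its predecessors and successors, fixing
  // the placement of the connecting control node in the process.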
  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectBranch(node);
        break;
      case IrOpcode::kSwitch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectSwitch(node);
        break;
      case IrOpcode::kDeoptimize:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectDeoptimize(node);
        break;
      case IrOpcode::kTailCall:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectTailCall(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectReturn(node);
        break;
      case IrOpcode::kThrow:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectThrow(node);
        break;
#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
        JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
#undef CONNECT_BLOCK_JS_CASE
      // JS opcodes are just like calls => fall through.
      case IrOpcode::kCall:
      case IrOpcode::kCallWithCallerSavedRegisters:
        if (NodeProperties::IsExceptionalCall(node)) {
          scheduler_->UpdatePlacement(node, Scheduler::kFixed);
          ConnectCall(node);
        }
        break;
      default:
        break;
    }
  }
  BasicBlock* BuildBlockForNode(Node* node) {
    BasicBlock* block = schedule_->block(node);
    if (block == nullptr) {
      block = schedule_->NewBasicBlock();
      TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
            node->op()->mnemonic());
      FixNode(block, node);
    }
    return block;
  }
  void BuildBlocksForSuccessors(Node* node) {
    size_t const successor_cnt = node->op()->ControlOutputCount();
    Node** successors = zone_->NewArray<Node*>(successor_cnt);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      BuildBlockForNode(successors[index]);
    }
  }
  void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
                              size_t successor_cnt) {
    Node** successors = reinterpret_cast<Node**>(successor_blocks);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      successor_blocks[index] = schedule_->block(successors[index]);
    }
  }
  BasicBlock* FindPredecessorBlock(Node* node) {
    BasicBlock* predecessor_block = nullptr;
    while (true) {
      predecessor_block = schedule_->block(node);
      if (predecessor_block != nullptr) break;
      node = NodeProperties::GetControlInput(node);
    }
    return predecessor_block;
  }
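
  // Connect an exceptional call: control splits into the success and the
  // exception continuation, and the exception continuation is considered
  // deferred (off the hot path).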
  void ConnectCall(Node* call) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));

    // Consider the exception continuation to be deferred.
    successor_blocks[1]->set_deferred(true);

    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, successor_blocks[0]);
    TraceConnect(call, call_block, successor_blocks[1]);
    schedule_->AddCall(call_block, call, successor_blocks[0],
                       successor_blocks[1]);
  }
  void ConnectBranch(Node* branch) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks,
                           arraysize(successor_blocks));

    // Consider branch hints.
    switch (BranchHintOf(branch->op())) {
      case BranchHint::kNone:
        break;
      case BranchHint::kTrue:
        successor_blocks[1]->set_deferred(true);
        break;
      case BranchHint::kFalse:
        successor_blocks[0]->set_deferred(true);
        break;
    }

    if (branch == component_entry_) {
      TraceConnect(branch, component_start_, successor_blocks[0]);
      TraceConnect(branch, component_start_, successor_blocks[1]);
      schedule_->InsertBranch(component_start_, component_end_, branch,
                              successor_blocks[0], successor_blocks[1]);
    } else {
      Node* branch_control = NodeProperties::GetControlInput(branch);
      BasicBlock* branch_block = FindPredecessorBlock(branch_control);
      TraceConnect(branch, branch_block, successor_blocks[0]);
      TraceConnect(branch, branch_block, successor_blocks[1]);
      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                           successor_blocks[1]);
    }
  }
  void ConnectSwitch(Node* sw) {
    size_t const successor_count = sw->op()->ControlOutputCount();
    BasicBlock** successor_blocks =
        zone_->NewArray<BasicBlock*>(successor_count);
    CollectSuccessorBlocks(sw, successor_blocks, successor_count);

    if (sw == component_entry_) {
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, component_start_, successor_blocks[index]);
      }
      schedule_->InsertSwitch(component_start_, component_end_, sw,
                              successor_blocks, successor_count);
    } else {
      Node* switch_control = NodeProperties::GetControlInput(sw);
      BasicBlock* switch_block = FindPredecessorBlock(switch_control);
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, switch_block, successor_blocks[index]);
      }
      schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
    }
  }
  void ConnectMerge(Node* merge) {
    // Don't connect the special merge at the end to its predecessors.
    if (IsFinalMerge(merge)) return;

    BasicBlock* block = schedule_->block(merge);
    DCHECK_NOT_NULL(block);
    // For all of the merge's control inputs, add a goto at the end to the
    // current block.
    for (Node* const input : merge->inputs()) {
      BasicBlock* predecessor_block = FindPredecessorBlock(input);
      TraceConnect(merge, predecessor_block, block);
      schedule_->AddGoto(predecessor_block, block);
    }
  }
  void ConnectTailCall(Node* call) {
    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, nullptr);
    schedule_->AddTailCall(call_block, call);
  }
  void ConnectReturn(Node* ret) {
    Node* return_control = NodeProperties::GetControlInput(ret);
    BasicBlock* return_block = FindPredecessorBlock(return_control);
    TraceConnect(ret, return_block, nullptr);
    schedule_->AddReturn(return_block, ret);
  }
  void ConnectDeoptimize(Node* deopt) {
    Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
    BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
    TraceConnect(deopt, deoptimize_block, nullptr);
    schedule_->AddDeoptimize(deoptimize_block, deopt);
  }
  void ConnectThrow(Node* thr) {
    Node* throw_control = NodeProperties::GetControlInput(thr);
    BasicBlock* throw_block = FindPredecessorBlock(throw_control);
    TraceConnect(thr, throw_block, nullptr);
    schedule_->AddThrow(throw_block, thr);
  }
  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
    DCHECK_NOT_NULL(block);
    if (succ == nullptr) {
      TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt());
    } else {
      TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
    }
  }
  bool IsFinalMerge(Node* node) {
    return (node->opcode() == IrOpcode::kMerge &&
            node == scheduler_->graph_->end()->InputAt(0));
  }
  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
    return entry != exit && entry_class == exit_class;
  }
  void ResetDataStructures() {
    control_.clear();
    DCHECK(queue_.empty());
    DCHECK(control_.empty());
  }

  Zone* zone_;
  Scheduler* scheduler_;
  Schedule* schedule_;
  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
  NodeVector control_;           // List of encountered control nodes.
  Node* component_entry_;        // Component single-entry node.
  BasicBlock* component_start_;  // Component single-entry block.
  BasicBlock* component_end_;    // Component single-exit block.
};
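
// Run the CFG construction over the whole graph and size the per-block node
// lists. The extra 10% headroom avoids resizing {scheduled_nodes_} when
// floating control is fused into the schedule later on.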
void Scheduler::BuildCFG() {
  TRACE("--- CREATING CFG -------------------------------------------\n");

  // Instantiate a new control equivalence algorithm for the graph.
  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);

  // Build a control-flow graph for the main control-connected component that
  // is being spanned by the graph's start and end nodes.
  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
  control_flow_builder_->Run();

  scheduled_nodes_.reserve(schedule_->BasicBlockCount() * 1.1);
  scheduled_nodes_.resize(schedule_->BasicBlockCount());
}
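
// -----------------------------------------------------------------------------
// Phase 2: Compute special RPO and dominator tree.


// Special reverse-post-order numbering of the block graph: a reverse
// post-order with the extra guarantee that all blocks of a loop form a
// contiguous range in the order. Later phases rely on this to test loop
// membership with simple rpo_number comparisons against a loop's header and
// end block.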
class SpecialRPONumberer : public ZoneObject {
 public:
  SpecialRPONumberer(Zone* zone, Schedule* schedule)
      : zone_(zone),
        schedule_(schedule),
        order_(nullptr),
        beyond_end_(nullptr),
        loops_(zone),
        backedges_(zone),
        stack_(zone),
        previous_block_count_(0),
        empty_(0, zone) {}
  // Computes the special reverse-post-order for the main control flow graph,
  // that is for the graph spanned between the schedule's start and end blocks.
  void ComputeSpecialRPO() {
    DCHECK_EQ(0, schedule_->end()->SuccessorCount());
    DCHECK(!order_);  // Main order does not exist yet.
    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
  }

  // Computes the special reverse-post-order for a partial control flow graph,
  // that is for the graph spanned between the given {entry} and {end} blocks,
  // then updates the existing ordering with this new information.
  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    DCHECK(order_);  // Main order to be updated is present.
    ComputeAndInsertSpecialRPO(entry, end);
  }
  // Serialize the previously computed order as a special reverse-post-order
  // numbering for basic blocks into the final schedule.
  void SerializeRPOIntoSchedule() {
    int32_t number = 0;
    for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
      b->set_rpo_number(number++);
      schedule_->rpo_order()->push_back(b);
    }
    BeyondEndSentinel()->set_rpo_number(number);
  }
  // Print and verify the special reverse-post-order.
  void PrintAndVerifySpecialRPO() {
#if DEBUG
    if (FLAG_trace_turbo_scheduler) PrintRPO();
    VerifySpecialRPO();
#endif
  }
  const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
    if (HasLoopNumber(block)) {
      LoopInfo const& loop = loops_[GetLoopNumber(block)];
      if (loop.outgoing) return *loop.outgoing;
    }
    return empty_;
  }
 private:
  typedef std::pair<BasicBlock*, size_t> Backedge;

  // Numbering for BasicBlock::rpo_number during block traversal:
  static const int kBlockOnStack = -2;
  static const int kBlockVisited1 = -3;
  static const int kBlockVisited2 = -4;

  static const int kBlockUnvisited1 = -1;
  static const int kBlockUnvisited2 = kBlockVisited1;
  struct SpecialRPOStackFrame {
    BasicBlock* block;
    size_t index;
  };

  struct LoopInfo {
    BasicBlock* header;
    ZoneVector<BasicBlock*>* outgoing;
    BitVector* members;
    LoopInfo* prev;
    BasicBlock* end;
    BasicBlock* start;

    void AddOutgoing(Zone* zone, BasicBlock* block) {
      if (outgoing == nullptr) {
        outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
            ZoneVector<BasicBlock*>(zone);
      }
      outgoing->push_back(block);
    }
  };
  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
           BasicBlock* child, int unvisited) {
    if (child->rpo_number() == unvisited) {
      stack[depth].block = child;
      stack[depth].index = 0;
      child->set_rpo_number(kBlockOnStack);
      return depth + 1;
    }
    return depth;
  }

  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
    block->set_rpo_next(head);
    return block;
  }
  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }
  // A sentinel block that sits beyond the end block in the RPO chain; used as
  // the loop end for loops that extend to the end of the schedule.
  BasicBlock* BeyondEndSentinel() {
    if (beyond_end_ == nullptr) {
      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
    }
    return beyond_end_;
  }
  // Compute special RPO for the control flow graph between {entry} and {end},
  // mutating any existing order so that the result is still valid.
  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    // RPO should not have been serialized for this schedule yet.
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));

    // Find correct insertion point within existing order.
    BasicBlock* insertion_point = entry->rpo_next();
    BasicBlock* order = insertion_point;

    // Perform an iterative RPO traversal using an explicit stack, recording
    // backedges that form cycles.
    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
    previous_block_count_ = schedule_->BasicBlockCount();
    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
    int num_loops = static_cast<int>(loops_.size());
    while (stack_depth > 0) {
      int current = stack_depth - 1;
      SpecialRPOStackFrame* frame = &stack_[current];

      if (frame->block != end &&
          frame->index < frame->block->SuccessorCount()) {
        // Process the next successor.
        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
        if (succ->rpo_number() == kBlockVisited1) continue;
        if (succ->rpo_number() == kBlockOnStack) {
          // The successor is on the stack, so this is a backedge (cycle).
          backedges_.push_back(Backedge(frame->block, frame->index - 1));
          if (!HasLoopNumber(succ)) {
            // Assign a new loop number to the header if it doesn't have one.
            SetLoopNumber(succ, num_loops++);
          }
        } else {
          // Push the successor onto the stack.
          DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
        }
      } else {
        // Finished with all successors; pop the stack and add the block.
        order = PushFront(order, frame->block);
        frame->block->set_rpo_number(kBlockVisited1);
        stack_depth--;
      }
    }
    // If no loops were encountered, then the order we computed was correct.
    if (num_loops > static_cast<int>(loops_.size())) {
      // Otherwise, compute the loop information from the backedges in order
      // to perform a traversal that groups loop bodies together.
      ComputeLoopInfo(stack_, num_loops, &backedges_);

      // Initialize the "loop stack". Note the entry could be a loop header.
      LoopInfo* loop =
          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
      order = insertion_point;

      // Perform an iterative post-order traversal, visiting loop bodies
      // before edges that lead out of loops.
      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
      while (stack_depth > 0) {
        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
        BasicBlock* block = frame->block;
        BasicBlock* succ = nullptr;

        if (block != end && frame->index < block->SuccessorCount()) {
          // Process the next normal successor.
          succ = block->SuccessorAt(frame->index++);
        } else if (HasLoopNumber(block)) {
          // Process additional outgoing edges from the loop header.
          if (block->rpo_number() == kBlockOnStack) {
            // Finish the loop body the first time the header is left on the
            // stack.
            DCHECK(loop != nullptr && loop->header == block);
            loop->start = PushFront(order, block);
            order = loop->end;
            block->set_rpo_number(kBlockVisited2);
            // Pop the loop stack and continue visiting outgoing edges within
            // the context of the outer loop, if any.
            loop = loop->prev;
          }

          // Use the next outgoing edge if there are any.
          size_t outgoing_index = frame->index - block->SuccessorCount();
          LoopInfo* info = &loops_[GetLoopNumber(block)];
          DCHECK(loop != info);
          if (block != entry && info->outgoing != nullptr &&
              outgoing_index < info->outgoing->size()) {
            succ = info->outgoing->at(outgoing_index);
            frame->index++;
          }
        }

        if (succ != nullptr) {
          // Process the next successor.
          if (succ->rpo_number() == kBlockOnStack) continue;
          if (succ->rpo_number() == kBlockVisited2) continue;
          DCHECK_EQ(kBlockUnvisited2, succ->rpo_number());
          if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
            // The successor is not in the current loop or any nested loop.
            // Add it to the outgoing edges of this loop and visit it later.
            loop->AddOutgoing(zone_, succ);
          } else {
            // Push the successor onto the stack.
            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
            if (HasLoopNumber(succ)) {
              // Push the inner loop onto the loop stack.
              DCHECK(GetLoopNumber(succ) < num_loops);
              LoopInfo* next = &loops_[GetLoopNumber(succ)];
              next->end = order;
              next->prev = loop;
              loop = next;
            }
          }
        } else {
          // Finished with all successors of the current block.
          if (HasLoopNumber(block)) {
            // If we are going to pop a loop header, then add its entire body.
            LoopInfo* info = &loops_[GetLoopNumber(block)];
            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
              if (b->rpo_next() == info->end) {
                b->set_rpo_next(order);
                info->end = order;
                break;
              }
            }
            order = info->start;
          } else {
            // Pop a single node off the stack and add it to the order.
            order = PushFront(order, block);
            block->set_rpo_number(kBlockVisited2);
          }
          stack_depth--;
        }
      }
    }
    // Publish new order the first time.
    if (order_ == nullptr) order_ = order;

    // Compute the correct loop headers and set the correct loop ends.
    LoopInfo* current_loop = nullptr;
    BasicBlock* current_header = entry->loop_header();
    int32_t loop_depth = entry->loop_depth();
    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
      BasicBlock* current = b;

      // Reset BasicBlock::rpo_number again.
      current->set_rpo_number(kBlockUnvisited1);

      // Finish the previous loop(s) if we just exited them.
      while (current_header != nullptr &&
             current == current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK_NOT_NULL(current_loop);
        current_loop = current_loop->prev;
        current_header =
            current_loop == nullptr ? nullptr : current_loop->header;
        --loop_depth;
      }
      current->set_loop_header(current_header);

      // Push a new loop onto the stack if this loop is a loop header.
      if (HasLoopNumber(current)) {
        ++loop_depth;
        current_loop = &loops_[GetLoopNumber(current)];
        BasicBlock* end = current_loop->end;
        current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
        current_header = current_loop->header;
        TRACE("id:%d is a loop header, increment loop depth to %d\n",
              current->id().ToInt(), loop_depth);
      }

      current->set_loop_depth(loop_depth);

      if (current->loop_header() == nullptr) {
        TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
              current->loop_depth());
      } else {
        TRACE("id:%d has loop header id:%d, (depth == %d)\n",
              current->id().ToInt(), current->loop_header()->id().ToInt(),
              current->loop_depth());
      }
    }
  }
  // Computes loop membership from the backedges of the control flow graph.
  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
                       size_t num_loops, ZoneVector<Backedge>* backedges) {
    // Extend existing loop membership vectors.
    for (LoopInfo& loop : loops_) {
      loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
                           zone_);
    }

    // Extend loop information vector.
    loops_.resize(num_loops, LoopInfo());

    // Compute loop membership starting from backedges.
    for (size_t i = 0; i < backedges->size(); i++) {
      BasicBlock* member = backedges->at(i).first;
      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
      size_t loop_num = GetLoopNumber(header);
      if (loops_[loop_num].header == nullptr) {
        loops_[loop_num].header = header;
        loops_[loop_num].members = new (zone_)
            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      }

      int queue_length = 0;
      if (member != header) {
        // As long as the header doesn't dominate the backedge, walk the
        // backwards graph from the backedge to find all of the loop's members.
        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
          loops_[loop_num].members->Add(member->id().ToInt());
        }
        queue[queue_length++].block = member;
      }

      // Propagate loop membership backwards.
      while (queue_length > 0) {
        BasicBlock* block = queue[--queue_length].block;
        for (size_t i = 0; i < block->PredecessorCount(); i++) {
          BasicBlock* pred = block->PredecessorAt(i);
          if (pred != header) {
            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
              loops_[loop_num].members->Add(pred->id().ToInt());
              queue[queue_length++].block = pred;
            }
          }
        }
      }
    }
  }
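
  // Debugging aids below: print the computed order together with loop ranges,
  // and verify the invariants of the special RPO.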
  void PrintRPO() {
    StdoutStream os;
    os << "RPO with " << loops_.size() << " loops";
    if (loops_.size() > 0) {
      os << " (";
      for (size_t i = 0; i < loops_.size(); i++) {
        if (i > 0) os << " ";
        os << "id:" << loops_[i].header->id();
      }
      os << ")";
    }
    os << ":\n";

    for (BasicBlock* block = order_; block != nullptr;
         block = block->rpo_next()) {
      os << std::setw(5) << "B" << block->rpo_number() << ":";
      for (size_t i = 0; i < loops_.size(); i++) {
        bool range = loops_[i].header->LoopContains(block);
        bool membership = loops_[i].header != block && range;
        os << (membership ? " |" : "  ");
        os << (range ? "x" : " ");
      }
      os << "  id:" << block->id() << ": ";
      if (block->loop_end() != nullptr) {
        os << " range: [B" << block->rpo_number() << ", B"
           << block->loop_end()->rpo_number() << ")";
      }
      if (block->loop_header() != nullptr) {
        os << " header: id:" << block->loop_header()->id();
      }
      if (block->loop_depth() > 0) {
        os << " depth: " << block->loop_depth();
      }
      os << "\n";
    }
  }
  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK_LT(0, order->size());
    DCHECK_EQ(0, (*order)[0]->id().ToInt());  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK_NOT_NULL(header);
      DCHECK_LE(0, header->rpo_number());
      DCHECK_LT(header->rpo_number(), order->size());
      DCHECK_NOT_NULL(end);
      DCHECK_LE(end->rpo_number(), order->size());
      DCHECK_GT(end->rpo_number(), header->rpo_number());
      DCHECK_NE(header->loop_header(), header);

      // Verify the start ... end list relationship.
      int links = 0;
      BasicBlock* block = loop->start;
      DCHECK_EQ(header, block);
      bool end_found;
      while (true) {
        if (block == nullptr || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        // The list should be in same order as the final result.
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK_LT(0, links);
      DCHECK_EQ(links, end->rpo_number() - header->rpo_number());
      DCHECK(end_found);

      // Check the depth of the header against the enclosing loop chain.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check the contiguousness of loops.
      int count = 0;
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        BasicBlock* block = order->at(j);
        DCHECK_EQ(block->rpo_number(), j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
          count++;
        }
      }
      DCHECK_EQ(links, count);
    }
  }
  Zone* zone_;
  Schedule* schedule_;
  BasicBlock* order_;
  BasicBlock* beyond_end_;
  ZoneVector<LoopInfo> loops_;
  ZoneVector<Backedge> backedges_;
  ZoneVector<SpecialRPOStackFrame> stack_;
  size_t previous_block_count_;
  ZoneVector<BasicBlock*> const empty_;
};
BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
  SpecialRPONumberer numberer(zone, schedule);
  numberer.ComputeSpecialRPO();
  numberer.SerializeRPOIntoSchedule();
  numberer.PrintAndVerifySpecialRPO();
  return schedule->rpo_order();
}
void Scheduler::ComputeSpecialRPONumbering() {
  TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");

  // Compute the special reverse-post-order for basic blocks.
  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
  special_rpo_->ComputeSpecialRPO();
}
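
// The immediate dominator of a block is the common dominator of all of its
// already-visited predecessors; the RPO visitation order guarantees that all
// predecessors except backedges have a valid dominator_depth when a block is
// processed, and backedges are skipped.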
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
  for (; block != nullptr; block = block->rpo_next()) {
    auto pred = block->predecessors().begin();
    auto end = block->predecessors().end();
    DCHECK(pred != end);  // All blocks except start have predecessors.
    BasicBlock* dominator = *pred;
    bool deferred = dominator->deferred();
    // For multiple predecessors, walk up the dominator tree until a common
    // dominator is found.
    for (++pred; pred != end; ++pred) {
      // Don't examine backwards edges.
      if ((*pred)->dominator_depth() < 0) continue;
      dominator = BasicBlock::GetCommonDominator(dominator, *pred);
      deferred = deferred & (*pred)->deferred();
    }
    block->set_dominator(dominator);
    block->set_dominator_depth(dominator->dominator_depth() + 1);
    block->set_deferred(deferred | block->deferred());
    TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
          dominator->id().ToInt(), block->dominator_depth());
  }
}
void Scheduler::GenerateImmediateDominatorTree() {
  TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");

  // Seed start block to be the first dominator.
  schedule_->start()->set_dominator_depth(0);

  // Build the block dominator tree resulting from the above seed.
  PropagateImmediateDominators(schedule_->start()->rpo_next());
}
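
// -----------------------------------------------------------------------------
// Phase 3: Prepare use counts for nodes.


// The visitor below initializes the placement of every node, collects fixed
// nodes as roots for schedule late, and counts each edge from an unscheduled
// node as an unscheduled use of its inputs.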
class PrepareUsesVisitor {
 public:
  explicit PrepareUsesVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  void Pre(Node* node) {
    if (scheduler_->InitializePlacement(node) == Scheduler::kFixed) {
      // Fixed nodes are always roots for schedule late.
      scheduler_->schedule_root_nodes_.push_back(node);
      if (!schedule_->IsScheduled(node)) {
        // Make sure root nodes are scheduled in their respective blocks.
        TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
              node->op()->mnemonic());
        IrOpcode::Value opcode = node->opcode();
        BasicBlock* block =
            opcode == IrOpcode::kParameter
                ? schedule_->start()
                : schedule_->block(NodeProperties::GetControlInput(node));
        DCHECK_NOT_NULL(block);
        schedule_->AddNode(block, node);
      }
    }
  }

  void PostEdge(Node* from, int index, Node* to) {
    // If the edge is from an unscheduled node, then tally it in the use count
    // for all of its inputs. The same criterion will be used in ScheduleLate
    // for decrementing use counts.
    if (!schedule_->IsScheduled(from)) {
      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
      scheduler_->IncrementUnscheduledUseCount(to, index, from);
    }
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};
void Scheduler::PrepareUses() {
  TRACE("--- PREPARE USES -------------------------------------------\n");

  // Count the uses of every node, which is used to ensure that all of a
  // node's uses are scheduled before the node itself.
  PrepareUsesVisitor prepare_uses(this);

  // An iterative pre/post-order traversal of the graph, replacing recursion
  // with an explicit stack of input-edge iterators.
  BoolVector visited(graph_->NodeCount(), false, zone_);
  ZoneStack<Node::InputEdges::iterator> stack(zone_);
  Node* node = graph_->end();
  prepare_uses.Pre(node);
  visited[node->id()] = true;
  stack.push(node->input_edges().begin());
  while (!stack.empty()) {
    Edge edge = *stack.top();
    Node* node = edge.to();
    if (visited[node->id()]) {
      prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
      if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    } else {
      prepare_uses.Pre(node);
      visited[node->id()] = true;
      if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    }
  }
}
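
// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes early.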
class ScheduleEarlyNodeVisitor {
 public:
  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}

  // Run the schedule early algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      queue_.push(root);
      while (!queue_.empty()) {
        VisitNode(queue_.front());
        queue_.pop();
      }
    }
  }

 private:
  // Visits one node from the queue and propagates its current schedule early
  // position to all uses. This in turn might push more nodes onto the queue.
  void VisitNode(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // Fixed nodes already know their schedule early position.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      data->minimum_block_ = schedule_->block(node);
      TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }

    // No need to propagate unconstrained schedule early positions.
    if (data->minimum_block_ == schedule_->start()) return;

    // Propagate schedule early position.
    DCHECK_NOT_NULL(data->minimum_block_);
    for (auto use : node->uses()) {
      if (scheduler_->IsLive(use)) {
        PropagateMinimumPositionToNode(data->minimum_block_, use);
      }
    }
  }

  // Propagates {block} as another minimum position for scheduling of {node}.
  // This has the net effect of computing the minimum dominator block of
  // {node} that still post-dominates all inputs to {node} when the queue is
  // processed.
  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // No need to propagate to fixed node, it's guaranteed to be a root.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;

    // Coupled nodes influence schedule early position of their control.
    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
      Node* control = NodeProperties::GetControlInput(node);
      PropagateMinimumPositionToNode(block, control);
      return;
    }

    // Propagate new position if it is deeper down the dominator tree than
    // the current.
    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
      data->minimum_block_ = block;
      queue_.push(node);
      TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }
  }

#if DEBUG
  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
    return dominator == b1 || dominator == b2;
  }
#endif

  Scheduler* scheduler_;
  Schedule* schedule_;
  ZoneQueue<Node*> queue_;
};
void Scheduler::ScheduleEarly() {
  TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    TRACE("roots: ");
    for (Node* node : schedule_root_nodes_) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }

  // Compute the minimum block for each node thereby determining the earliest
  // position each node could be placed within a valid schedule.
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&schedule_root_nodes_);
}
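
// -----------------------------------------------------------------------------
// Phase 5: Schedule nodes late.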
class ScheduleLateNodeVisitor {
 public:
  ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
      : zone_(zone),
        scheduler_(scheduler),
        schedule_(scheduler_->schedule_),
        marked_(scheduler->zone_),
        marking_queue_(scheduler->zone_) {}

  // Run the schedule late algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      ProcessQueue(root);
    }
  }

 private:
  void ProcessQueue(Node* root) {
    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    for (Node* node : root->inputs()) {
      // Don't schedule coupled nodes on their own.
      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
        node = NodeProperties::GetControlInput(node);
      }

      // Test schedulability condition by looking at unscheduled use count.
      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;

      queue->push(node);
      do {
        Node* const node = queue->front();
        queue->pop();
        VisitNode(node);
      } while (!queue->empty());
    }
  }

  // Visits one node from the queue of schedulable nodes and determines its
  // schedule late position. Also hoists nodes out of loops to find a more
  // optimal scheduling position.
  void VisitNode(Node* node) {
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

    // Don't schedule nodes that are already scheduled.
    if (schedule_->IsScheduled(node)) return;
    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

    // Determine the dominating block for all of the uses of this node. It is
    // the latest block that this node can be scheduled in.
    TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    BasicBlock* block = GetCommonDominatorOfUses(node);
    DCHECK_NOT_NULL(block);

    // The schedule early block dominates the schedule late block.
    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    TRACE(
        "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
        node->id(), node->op()->mnemonic(), block->id().ToInt(),
        block->loop_depth(), min_block->id().ToInt());

    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
    // early position.
    BasicBlock* hoist_block = GetHoistBlock(block);
    if (hoist_block &&
        hoist_block->dominator_depth() >= min_block->dominator_depth()) {
      do {
        TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
              node->op()->mnemonic(), hoist_block->id().ToInt());
        DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
        block = hoist_block;
        hoist_block = GetHoistBlock(hoist_block);
      } while (hoist_block &&
               hoist_block->dominator_depth() >= min_block->dominator_depth());
    } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
      // Split the {node} if beneficial and return the new {block} for it.
      block = SplitNode(block, node);
    }

    // Schedule the node or a floating control structure.
    if (IrOpcode::IsMergeOpcode(node->opcode())) {
      ScheduleFloatingControl(block, node);
    } else if (node->opcode() == IrOpcode::kFinishRegion) {
      ScheduleRegion(block, node);
    } else {
      ScheduleNode(block, node);
    }
  }
  bool IsMarked(BasicBlock* block) const {
    DCHECK_LT(block->id().ToSize(), marked_.size());
    return marked_[block->id().ToSize()];
  }

  void Mark(BasicBlock* block) { marked_[block->id().ToSize()] = true; }

  // Mark {block} and push its non-marked predecessors on the marking queue.
  void MarkBlock(BasicBlock* block) {
    DCHECK_LT(block->id().ToSize(), marked_.size());
    Mark(block);
    for (BasicBlock* pred_block : block->predecessors()) {
      if (IsMarked(pred_block)) continue;
      marking_queue_.push_back(pred_block);
    }
  }
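
  // Split pure {node}s with disjoint partitions of uses: every partition that
  // can be reached without passing a use gets its own copy of the node, so
  // the value is not computed on paths that never need it.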
  BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    // For now, we limit splitting to pure nodes.
    if (!node->op()->HasProperty(Operator::kPure)) return block;
    // The special case of splitting projections is not handled.
    if (node->opcode() == IrOpcode::kProjection) return block;

    // The {block} is common dominator of all uses of {node}, so we cannot
    // split anything unless the {block} has at least two successors.
    DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    if (block->SuccessorCount() < 2) return block;

    // Clear marking bits.
    DCHECK(marking_queue_.empty());
    std::fill(marked_.begin(), marked_.end(), false);
    marked_.resize(schedule_->BasicBlockCount() + 1, false);

    // Check if the {node} has uses in {block}.
    for (Edge edge : node->use_edges()) {
      if (!scheduler_->IsLive(edge.from())) continue;
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr || IsMarked(use_block)) continue;
      if (use_block == block) {
        TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
        marking_queue_.clear();
        return block;
      }
      MarkBlock(use_block);
    }

    // Compute transitive marking closure; a block is marked if all its
    // successors are marked.
    do {
      BasicBlock* top_block = marking_queue_.front();
      marking_queue_.pop_front();
      if (IsMarked(top_block)) continue;
      bool marked = true;
      for (BasicBlock* successor : top_block->successors()) {
        if (!IsMarked(successor)) {
          marked = false;
          break;
        }
      }
      if (marked) MarkBlock(top_block);
    } while (!marking_queue_.empty());

    // If the (common dominator) {block} is marked, we know that all paths from
    // {block} to the end contain at least one use of {node}, and hence there's
    // no point in splitting the {node} in this case.
    if (IsMarked(block)) {
      TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
            node->id(), node->op()->mnemonic(), block->id().ToInt());
      return block;
    }

    // Split {node} for uses according to the previously computed marking
    // closure. Every marking partition has a unique dominator, which gets a
    // copy of the {node} with the exception of the first partition, which gets
    // the {node} itself.
    ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    for (Edge edge : node->use_edges()) {
      if (!scheduler_->IsLive(edge.from())) continue;
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr) continue;
      while (IsMarked(use_block->dominator())) {
        use_block = use_block->dominator();
      }
      auto& use_node = dominators[use_block];
      if (use_node == nullptr) {
        if (dominators.size() == 1u) {
          // Place the {node} at {use_block}.
          block = use_block;
          use_node = node;
          TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
                node->op()->mnemonic(), block->id().ToInt());
        } else {
          // Place a copy of {node} at {use_block}.
          use_node = CloneNode(node);
          TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
                use_node->op()->mnemonic(), use_block->id().ToInt());
          scheduler_->schedule_queue_.push(use_node);
        }
      }
      edge.UpdateTo(use_node);
    }
    return block;
  }
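
  // Determine the block into which {block}'s contents could be hoisted one
  // loop level out: the dominator of a loop header, or the dominator of the
  // enclosing loop's header, provided {block} dominates all of that loop's
  // outgoing blocks.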
  BasicBlock* GetHoistBlock(BasicBlock* block) {
    if (block->IsLoopHeader()) return block->dominator();
    // We have to check to make sure that the {block} dominates all of the
    // outgoing blocks. If it doesn't, then there is a path out of the loop
    // which does not execute this {block}, so we can't hoist operations from
    // this {block} out of the loop, as that would introduce additional
    // computations.
    if (BasicBlock* header_block = block->loop_header()) {
      for (BasicBlock* outgoing_block :
           scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
        if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
          return nullptr;
        }
      }
      return header_block->dominator();
    }
    return nullptr;
  }
  BasicBlock* GetCommonDominatorOfUses(Node* node) {
    BasicBlock* block = nullptr;
    for (Edge edge : node->use_edges()) {
      if (!scheduler_->IsLive(edge.from())) continue;
      BasicBlock* use_block = GetBlockForUse(edge);
      block = block == nullptr
                  ? use_block
                  : use_block == nullptr
                        ? block
                        : BasicBlock::GetCommonDominator(block, use_block);
    }
    return block;
  }
  BasicBlock* FindPredecessorBlock(Node* node) {
    return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
  }
  BasicBlock* GetBlockForUse(Edge edge) {
    // Dead uses only occur if the graph is not trimmed before scheduling.
    Node* use = edge.from();
    if (IrOpcode::IsPhiOpcode(use->opcode())) {
      // If the use is from a coupled (i.e. floating) phi, compute the common
      // dominator of its uses. This will not recurse more than one level.
      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
        TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
              use->op()->mnemonic());
        return GetCommonDominatorOfUses(use);
      }
      // If the use is from a fixed (i.e. non-floating) phi, we use the
      // predecessor block of the corresponding control input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
        Node* input = NodeProperties::GetControlInput(merge, edge.index());
        return FindPredecessorBlock(input);
      }
    } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
      // If the use is from a fixed (i.e. non-floating) merge, we use the
      // predecessor block of the current input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        return FindPredecessorBlock(edge.to());
      }
    }
    BasicBlock* result = schedule_->block(use);
    if (result == nullptr) return nullptr;
    TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }
  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    scheduler_->FuseFloatingControl(block, node);
  }
  void ScheduleRegion(BasicBlock* block, Node* region_end) {
    // We only allow regions of instructions connected into a linear effect
    // chain. The only value allowed to be produced by a node in the chain
    // must be the value consumed by the FinishRegion node.

    // We schedule back to front; we first schedule FinishRegion.
    CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
    ScheduleNode(block, region_end);

    // Schedule the chain.
    Node* node = NodeProperties::GetEffectInput(region_end);
    while (node->opcode() != IrOpcode::kBeginRegion) {
      DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
      DCHECK_EQ(1, node->op()->EffectInputCount());
      DCHECK_EQ(1, node->op()->EffectOutputCount());
      DCHECK_EQ(0, node->op()->ControlOutputCount());
      // The value output (if there is any) must be consumed by the
      // FinishRegion node.
      DCHECK(node->op()->ValueOutputCount() == 0 ||
             node == region_end->InputAt(0));
      ScheduleNode(block, node);
      node = NodeProperties::GetEffectInput(node);
    }
    // Schedule the BeginRegion node.
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    ScheduleNode(block, node);
  }
  void ScheduleNode(BasicBlock* block, Node* node) {
    schedule_->PlanNode(block, node);
    size_t block_id = block->id().ToSize();
    if (!scheduler_->scheduled_nodes_[block_id]) {
      scheduler_->scheduled_nodes_[block_id] =
          new (zone_->New(sizeof(NodeVector))) NodeVector(zone_);
    }
    scheduler_->scheduled_nodes_[block_id]->push_back(node);
    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
  }
  Node* CloneNode(Node* node) {
    int const input_count = node->InputCount();
    for (int index = 0; index < input_count; ++index) {
      Node* const input = node->InputAt(index);
      scheduler_->IncrementUnscheduledUseCount(input, index, node);
    }
    Node* const copy = scheduler_->graph_->CloneNode(node);
    TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
          copy->id());
    scheduler_->node_data_.resize(copy->id() + 1,
                                  scheduler_->DefaultSchedulerData());
    scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    return copy;
  }

  Zone* zone_;
  Scheduler* scheduler_;
  Schedule* schedule_;
  BoolVector marked_;
  ZoneDeque<BasicBlock*> marking_queue_;
};
void Scheduler::ScheduleLate() {
  TRACE("--- SCHEDULE LATE ------------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    TRACE("roots: ");
    for (Node* node : schedule_root_nodes_) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }

  // Schedule: Places nodes in dominator block of all their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
  schedule_late_visitor.Run(&schedule_root_nodes_);
}
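
// -----------------------------------------------------------------------------
// Phase 6: Seal the final schedule.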
void Scheduler::SealFinalSchedule() {
  TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");

  // Serialize the assembly order and reverse-post-order numbering.
  special_rpo_->SerializeRPOIntoSchedule();
  special_rpo_->PrintAndVerifySpecialRPO();

  // Add collected nodes for basic blocks to their blocks in the right order.
  int block_num = 0;
  for (NodeVector* nodes : scheduled_nodes_) {
    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    BasicBlock* block = schedule_->GetBlockById(id);
    if (nodes) {
      for (Node* node : base::Reversed(*nodes)) {
        schedule_->AddNode(block, node);
      }
    }
  }
}
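
// -----------------------------------------------------------------------------


// Merge a floating control structure (discovered during schedule late) into
// the existing schedule: re-run CFG construction for the component, update the
// special RPO and dominator information for the affected blocks, and re-run
// schedule early from the newly connected control nodes.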
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
  TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    StdoutStream{} << "Schedule before control flow fusion:\n" << *schedule_;
  }

  // Iterate on phase 1: Build control-flow graph.
  control_flow_builder_->Run(block, node);

  // Iterate on phase 2: Compute special RPO and dominator tree. Dominator
  // depths and immediate dominators below the insertion point are
  // invalidated and recomputed from scratch.
  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
  for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    b->set_dominator_depth(-1);
    b->set_dominator(nullptr);
  }
  PropagateImmediateDominators(block->rpo_next());

  // Iterate on phase 4: Schedule nodes early. The propagation roots are the
  // newly connected control nodes together with any live phi uses they have.
  NodeVector propagation_roots(control_flow_builder_->control_);
  for (Node* node : control_flow_builder_->control_) {
    for (Node* use : node->uses()) {
      if (NodeProperties::IsPhi(use) && IsLive(use)) {
        propagation_roots.push_back(use);
      }
    }
  }
  if (FLAG_trace_turbo_scheduler) {
    TRACE("propagation roots: ");
    for (Node* node : propagation_roots) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&propagation_roots);

  // Move previously planned nodes over to the new block.
  scheduled_nodes_.resize(schedule_->BasicBlockCount());
  MovePlannedNodes(block, schedule_->block(node));

  if (FLAG_trace_turbo_scheduler) {
    StdoutStream{} << "Schedule after control flow fusion:\n" << *schedule_;
  }
}
void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
  TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
        to->id().ToInt());
  NodeVector* from_nodes = scheduled_nodes_[from->id().ToSize()];
  NodeVector* to_nodes = scheduled_nodes_[to->id().ToSize()];
  if (!from_nodes) return;

  for (Node* const node : *from_nodes) {
    schedule_->SetBlockForNode(to, node);
  }
  if (to_nodes) {
    to_nodes->insert(to_nodes->end(), from_nodes->begin(), from_nodes->end());
    from_nodes->clear();
  } else {
    std::swap(scheduled_nodes_[from->id().ToSize()],
              scheduled_nodes_[to->id().ToSize()]);
  }
}

#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8