V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
register-allocator.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/register-allocator.h"
6 
7 #include <iomanip>
8 
9 #include "src/assembler-inl.h"
10 #include "src/base/adapters.h"
11 #include "src/compiler/linkage.h"
12 #include "src/string-stream.h"
13 
14 namespace v8 {
15 namespace internal {
16 namespace compiler {
17 
18 #define TRACE(...) \
19  do { \
20  if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
21  } while (false)
22 
23 namespace {
24 
25 static constexpr int kFloat32Bit =
26  RepresentationBit(MachineRepresentation::kFloat32);
27 static constexpr int kSimd128Bit =
28  RepresentationBit(MachineRepresentation::kSimd128);
29 
30 int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
31  return kind == FP_REGISTERS ? cfg->num_double_registers()
32  : cfg->num_general_registers();
33 }
34 
35 int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
36  RegisterKind kind) {
37  return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
38  : cfg->num_allocatable_general_registers();
39 }
40 
41 const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
42  RegisterKind kind) {
43  return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
44  : cfg->allocatable_general_codes();
45 }
46 
47 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
48  const InstructionBlock* block) {
49  RpoNumber index = block->loop_header();
50  if (!index.IsValid()) return nullptr;
51  return sequence->InstructionBlockAt(index);
52 }
53 
54 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
55  LifetimePosition pos) {
56  return code->GetInstructionBlock(pos.ToInstructionIndex());
57 }
58 
59 Instruction* GetLastInstruction(InstructionSequence* code,
60  const InstructionBlock* block) {
61  return code->InstructionAt(block->last_instruction_index());
62 }
63 
64 // TODO(dcarney): fix frame to allow frame accesses to half size location.
65 int GetByteWidth(MachineRepresentation rep) {
66  switch (rep) {
67  case MachineRepresentation::kBit:
68  case MachineRepresentation::kWord8:
69  case MachineRepresentation::kWord16:
70  case MachineRepresentation::kWord32:
71  case MachineRepresentation::kTaggedSigned:
72  case MachineRepresentation::kTaggedPointer:
73  case MachineRepresentation::kTagged:
74  case MachineRepresentation::kFloat32:
75  return kPointerSize;
76  case MachineRepresentation::kWord64:
77  case MachineRepresentation::kFloat64:
78  return kDoubleSize;
79  case MachineRepresentation::kSimd128:
80  return kSimd128Size;
81  case MachineRepresentation::kNone:
82  break;
83  }
84  UNREACHABLE();
85 }
86 
87 } // namespace
88 
89 class LiveRangeBound {
90  public:
91  explicit LiveRangeBound(LiveRange* range, bool skip)
92  : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
93  DCHECK(!range->IsEmpty());
94  }
95 
96  bool CanCover(LifetimePosition position) {
97  return start_ <= position && position < end_;
98  }
99 
100  LiveRange* const range_;
101  const LifetimePosition start_;
102  const LifetimePosition end_;
103  const bool skip_;
104 
105  private:
106  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
107 };
108 
109 struct FindResult {
110  LiveRange* cur_cover_;
111  LiveRange* pred_cover_;
112 };
113 
114 class LiveRangeBoundArray {
115  public:
116  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
117 
118  bool ShouldInitialize() { return start_ == nullptr; }
119 
120  void Initialize(Zone* zone, TopLevelLiveRange* range) {
121  length_ = range->GetChildCount();
122 
123  start_ = zone->NewArray<LiveRangeBound>(length_);
124  LiveRangeBound* curr = start_;
125  // Normally, spilled ranges do not need connecting moves, because the spill
126  // location has been assigned at definition. For ranges spilled in deferred
127  // blocks, that is not the case, so we need to connect the spilled children.
128  for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
129  new (curr) LiveRangeBound(i, i->spilled());
130  }
131  }
132 
133  LiveRangeBound* Find(const LifetimePosition position) const {
134  size_t left_index = 0;
135  size_t right_index = length_;
136  while (true) {
137  size_t current_index = left_index + (right_index - left_index) / 2;
138  DCHECK(right_index > current_index);
139  LiveRangeBound* bound = &start_[current_index];
140  if (bound->start_ <= position) {
141  if (position < bound->end_) return bound;
142  DCHECK(left_index < current_index);
143  left_index = current_index;
144  } else {
145  right_index = current_index;
146  }
147  }
148  }
149 
150  LiveRangeBound* FindPred(const InstructionBlock* pred) {
151  LifetimePosition pred_end =
152  LifetimePosition::InstructionFromInstructionIndex(
153  pred->last_instruction_index());
154  return Find(pred_end);
155  }
156 
157  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
158  LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
159  succ->first_instruction_index());
160  return Find(succ_start);
161  }
162 
163  bool FindConnectableSubranges(const InstructionBlock* block,
164  const InstructionBlock* pred,
165  FindResult* result) const {
166  LifetimePosition pred_end =
167  LifetimePosition::InstructionFromInstructionIndex(
168  pred->last_instruction_index());
169  LiveRangeBound* bound = Find(pred_end);
170  result->pred_cover_ = bound->range_;
171  LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
172  block->first_instruction_index());
173 
174  if (bound->CanCover(cur_start)) {
175  // Both blocks are covered by the same range, so there is nothing to
176  // connect.
177  return false;
178  }
179  bound = Find(cur_start);
180  if (bound->skip_) {
181  return false;
182  }
183  result->cur_cover_ = bound->range_;
184  DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
185  return (result->cur_cover_ != result->pred_cover_);
186  }
187 
188  private:
189  size_t length_;
190  LiveRangeBound* start_;
191 
192  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
193 };
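
A standalone sketch, not part of register-allocator.cc: the binary search in LiveRangeBoundArray::Find above operates on sorted, non-overlapping, half-open bounds. The same idea, with illustrative names (Bound, FindBound) and plain ints standing in for LifetimePosition, looks like this:

#include <cassert>
#include <cstddef>
#include <vector>

struct Bound {
  int start;  // inclusive
  int end;    // exclusive
};

// Returns the index of the bound whose [start, end) contains position.
// Assumes such a bound exists, mirroring the DCHECKs in the original.
std::size_t FindBound(const std::vector<Bound>& bounds, int position) {
  std::size_t left = 0;
  std::size_t right = bounds.size();
  while (true) {
    std::size_t mid = left + (right - left) / 2;
    assert(mid < right);
    const Bound& b = bounds[mid];
    if (b.start <= position) {
      if (position < b.end) return mid;  // covering bound found
      assert(left < mid);
      left = mid;  // continue in the upper half
    } else {
      right = mid;  // continue in the lower half
    }
  }
}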
194 
195 class LiveRangeFinder {
196  public:
197  explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
198  : data_(data),
199  bounds_length_(static_cast<int>(data_->live_ranges().size())),
200  bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
201  zone_(zone) {
202  for (int i = 0; i < bounds_length_; ++i) {
203  new (&bounds_[i]) LiveRangeBoundArray();
204  }
205  }
206 
207  LiveRangeBoundArray* ArrayFor(int operand_index) {
208  DCHECK(operand_index < bounds_length_);
209  TopLevelLiveRange* range = data_->live_ranges()[operand_index];
210  DCHECK(range != nullptr && !range->IsEmpty());
211  LiveRangeBoundArray* array = &bounds_[operand_index];
212  if (array->ShouldInitialize()) {
213  array->Initialize(zone_, range);
214  }
215  return array;
216  }
217 
218  private:
219  const RegisterAllocationData* const data_;
220  const int bounds_length_;
221  LiveRangeBoundArray* const bounds_;
222  Zone* const zone_;
223 
224  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
225 };
226 
227 typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
228 
229 struct DelayedInsertionMapCompare {
230  bool operator()(const DelayedInsertionMapKey& a,
231  const DelayedInsertionMapKey& b) const {
232  if (a.first == b.first) {
233  return a.second.Compare(b.second);
234  }
235  return a.first < b.first;
236  }
237 };
238 
239 typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
240  DelayedInsertionMapCompare>
241  DelayedInsertionMap;
242 
243 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
244  void* hint, UsePositionHintType hint_type)
245  : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
246  DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
247  bool register_beneficial = true;
248  UsePositionType type = UsePositionType::kRegisterOrSlot;
249  if (operand_ != nullptr && operand_->IsUnallocated()) {
250  const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
251  if (unalloc->HasRegisterPolicy()) {
252  type = UsePositionType::kRequiresRegister;
253  } else if (unalloc->HasSlotPolicy()) {
254  type = UsePositionType::kRequiresSlot;
255  register_beneficial = false;
256  } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
257  type = UsePositionType::kRegisterOrSlotOrConstant;
258  register_beneficial = false;
259  } else {
260  register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
261  }
262  }
263  flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
264  RegisterBeneficialField::encode(register_beneficial) |
265  AssignedRegisterField::encode(kUnassignedRegister);
266  DCHECK(pos_.IsValid());
267 }
268 
269 bool UsePosition::HasHint() const {
270  int hint_register;
271  return HintRegister(&hint_register);
272 }
273 
274 bool UsePosition::HintRegister(int* register_code) const {
275  if (hint_ == nullptr) return false;
276  switch (HintTypeField::decode(flags_)) {
277  case UsePositionHintType::kNone:
278  case UsePositionHintType::kUnresolved:
279  return false;
280  case UsePositionHintType::kUsePos: {
281  UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
282  int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
283  if (assigned_register == kUnassignedRegister) return false;
284  *register_code = assigned_register;
285  return true;
286  }
287  case UsePositionHintType::kOperand: {
288  InstructionOperand* operand =
289  reinterpret_cast<InstructionOperand*>(hint_);
290  *register_code = LocationOperand::cast(operand)->register_code();
291  return true;
292  }
293  case UsePositionHintType::kPhi: {
294  RegisterAllocationData::PhiMapValue* phi =
295  reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
296  int assigned_register = phi->assigned_register();
297  if (assigned_register == kUnassignedRegister) return false;
298  *register_code = assigned_register;
299  return true;
300  }
301  }
302  UNREACHABLE();
303 }
304 
305 UsePositionHintType UsePosition::HintTypeForOperand(
306  const InstructionOperand& op) {
307  switch (op.kind()) {
308  case InstructionOperand::CONSTANT:
309  case InstructionOperand::IMMEDIATE:
310  case InstructionOperand::EXPLICIT:
311  return UsePositionHintType::kNone;
312  case InstructionOperand::UNALLOCATED:
313  return UsePositionHintType::kUnresolved;
314  case InstructionOperand::ALLOCATED:
315  if (op.IsRegister() || op.IsFPRegister()) {
316  return UsePositionHintType::kOperand;
317  } else {
318  DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
319  return UsePositionHintType::kNone;
320  }
321  case InstructionOperand::INVALID:
322  break;
323  }
324  UNREACHABLE();
325 }
326 
327 void UsePosition::SetHint(UsePosition* use_pos) {
328  DCHECK_NOT_NULL(use_pos);
329  hint_ = use_pos;
330  flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
331 }
332 
333 void UsePosition::ResolveHint(UsePosition* use_pos) {
334  DCHECK_NOT_NULL(use_pos);
335  if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
336  hint_ = use_pos;
337  flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
338 }
339 
340 void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
341  DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
342  DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
343  flags_ = TypeField::encode(type) |
344  RegisterBeneficialField::encode(register_beneficial) |
345  HintTypeField::encode(HintTypeField::decode(flags_)) |
346  AssignedRegisterField::encode(kUnassignedRegister);
347 }
348 
349 UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
350  DCHECK(Contains(pos) && pos != start());
351  UseInterval* after = new (zone) UseInterval(pos, end_);
352  after->next_ = next_;
353  next_ = nullptr;
354  end_ = pos;
355  return after;
356 }
357 
358 void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
359 
360 std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
361  os << '@' << pos.ToInstructionIndex();
362  if (pos.IsGapPosition()) {
363  os << 'g';
364  } else {
365  os << 'i';
366  }
367  if (pos.IsStart()) {
368  os << 's';
369  } else {
370  os << 'e';
371  }
372  return os;
373 }
374 
375 LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
376  TopLevelLiveRange* top_level)
377  : relative_id_(relative_id),
378  bits_(0),
379  last_interval_(nullptr),
380  first_interval_(nullptr),
381  first_pos_(nullptr),
382  top_level_(top_level),
383  next_(nullptr),
384  current_interval_(nullptr),
385  last_processed_use_(nullptr),
386  current_hint_position_(nullptr),
387  splitting_pointer_(nullptr) {
388  DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
389  bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
390  RepresentationField::encode(rep);
391 }
392 
393 void LiveRange::VerifyPositions() const {
394  // Walk the positions, verifying that each is in an interval.
395  UseInterval* interval = first_interval_;
396  for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
397  CHECK(Start() <= pos->pos());
398  CHECK(pos->pos() <= End());
399  CHECK_NOT_NULL(interval);
400  while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
401  interval = interval->next();
402  CHECK_NOT_NULL(interval);
403  }
404  }
405 }
406 
407 void LiveRange::VerifyIntervals() const {
408  DCHECK(first_interval()->start() == Start());
409  LifetimePosition last_end = first_interval()->end();
410  for (UseInterval* interval = first_interval()->next(); interval != nullptr;
411  interval = interval->next()) {
412  DCHECK(last_end <= interval->start());
413  last_end = interval->end();
414  }
415  DCHECK(last_end == End());
416 }
417 
418 void LiveRange::set_assigned_register(int reg) {
419  DCHECK(!HasRegisterAssigned() && !spilled());
420  bits_ = AssignedRegisterField::update(bits_, reg);
421 }
422 
423 void LiveRange::UnsetAssignedRegister() {
424  DCHECK(HasRegisterAssigned() && !spilled());
425  bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
426 }
427 
428 void LiveRange::Spill() {
429  DCHECK(!spilled());
430  DCHECK(!TopLevel()->HasNoSpillType());
431  set_spilled(true);
432  bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
433 }
434 
435 RegisterKind LiveRange::kind() const {
436  return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
437 }
438 
439 UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
440  for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
441  if (pos->HintRegister(register_index)) return pos;
442  }
443  return nullptr;
444 }
445 
446 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
447  UsePosition* use_pos = last_processed_use_;
448  if (use_pos == nullptr || use_pos->pos() > start) {
449  use_pos = first_pos();
450  }
451  while (use_pos != nullptr && use_pos->pos() < start) {
452  use_pos = use_pos->next();
453  }
454  last_processed_use_ = use_pos;
455  return use_pos;
456 }
457 
458 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
459  LifetimePosition start) const {
460  UsePosition* pos = NextUsePosition(start);
461  while (pos != nullptr && !pos->RegisterIsBeneficial()) {
462  pos = pos->next();
463  }
464  return pos;
465 }
466 
467 LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
468  const LifetimePosition& start) const {
469  UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
470  if (next_use == nullptr) return End();
471  return next_use->pos();
472 }
473 
474 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
475  LifetimePosition start) const {
476  UsePosition* pos = first_pos();
477  UsePosition* prev = nullptr;
478  while (pos != nullptr && pos->pos() < start) {
479  if (pos->RegisterIsBeneficial()) prev = pos;
480  pos = pos->next();
481  }
482  return prev;
483 }
484 
485 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
486  UsePosition* pos = NextUsePosition(start);
487  while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
488  pos = pos->next();
489  }
490  return pos;
491 }
492 
493 UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
494  for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
495  pos = pos->next()) {
496  if (pos->type() != UsePositionType::kRequiresSlot) continue;
497  return pos;
498  }
499  return nullptr;
500 }
501 
502 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
503  // We cannot spill a live range that has a use requiring a register
504  // at the current or the immediate next position.
505  UsePosition* use_pos = NextRegisterPosition(pos);
506  if (use_pos == nullptr) return true;
507  return use_pos->pos() > pos.NextStart().End();
508 }
509 
510 bool LiveRange::IsTopLevel() const { return top_level_ == this; }
511 
512 InstructionOperand LiveRange::GetAssignedOperand() const {
513  if (HasRegisterAssigned()) {
514  DCHECK(!spilled());
515  return AllocatedOperand(LocationOperand::REGISTER, representation(),
516  assigned_register());
517  }
518  DCHECK(spilled());
519  DCHECK(!HasRegisterAssigned());
520  if (TopLevel()->HasSpillOperand()) {
521  InstructionOperand* op = TopLevel()->GetSpillOperand();
522  DCHECK(!op->IsUnallocated());
523  return *op;
524  }
525  return TopLevel()->GetSpillRangeOperand();
526 }
527 
528 UseInterval* LiveRange::FirstSearchIntervalForPosition(
529  LifetimePosition position) const {
530  if (current_interval_ == nullptr) return first_interval_;
531  if (current_interval_->start() > position) {
532  current_interval_ = nullptr;
533  return first_interval_;
534  }
535  return current_interval_;
536 }
537 
538 void LiveRange::AdvanceLastProcessedMarker(
539  UseInterval* to_start_of, LifetimePosition but_not_past) const {
540  if (to_start_of == nullptr) return;
541  if (to_start_of->start() > but_not_past) return;
542  LifetimePosition start = current_interval_ == nullptr
543  ? LifetimePosition::Invalid()
544  : current_interval_->start();
545  if (to_start_of->start() > start) {
546  current_interval_ = to_start_of;
547  }
548 }
549 
550 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
551  int new_id = TopLevel()->GetNextChildId();
552  LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
553  // If we split, we do so because we're about to switch registers or move
554  // to/from a slot, so there's no value in connecting hints.
555  DetachAt(position, child, zone, DoNotConnectHints);
556 
557  child->top_level_ = TopLevel();
558  child->next_ = next_;
559  next_ = child;
560  return child;
561 }
562 
563 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
564  Zone* zone,
565  HintConnectionOption connect_hints) {
566  DCHECK(Start() < position);
567  DCHECK(End() > position);
568  DCHECK(result->IsEmpty());
569  // Find the last interval that ends before the position. If the
570  // position is contained in one of the intervals in the chain, we
571  // split that interval and use the first part.
572  UseInterval* current = FirstSearchIntervalForPosition(position);
573 
574  // If the split position coincides with the beginning of a use interval,
575  // we need to split use positions in a special way.
576  bool split_at_start = false;
577 
578  if (current->start() == position) {
579  // When splitting at start we need to locate the previous use interval.
580  current = first_interval_;
581  }
582 
583  UseInterval* after = nullptr;
584  while (current != nullptr) {
585  if (current->Contains(position)) {
586  after = current->SplitAt(position, zone);
587  break;
588  }
589  UseInterval* next = current->next();
590  if (next->start() >= position) {
591  split_at_start = (next->start() == position);
592  after = next;
593  current->set_next(nullptr);
594  break;
595  }
596  current = next;
597  }
598  DCHECK_NOT_NULL(after);
599 
600  // Partition original use intervals to the two live ranges.
601  UseInterval* before = current;
602  result->last_interval_ =
603  (last_interval_ == before)
604  ? after // Only interval in the range after split.
605  : last_interval_; // Last interval of the original range.
606  result->first_interval_ = after;
607  last_interval_ = before;
608 
609  // Find the last use position before the split and the first use
610  // position after it.
611  UsePosition* use_after =
612  splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
613  ? first_pos()
614  : splitting_pointer_;
615  UsePosition* use_before = nullptr;
616  if (split_at_start) {
617  // The split position coincides with the beginning of a use interval (the
618  // end of a lifetime hole). A use at this position should be attributed to
619  // the split child, because the split child owns the use interval covering it.
620  while (use_after != nullptr && use_after->pos() < position) {
621  use_before = use_after;
622  use_after = use_after->next();
623  }
624  } else {
625  while (use_after != nullptr && use_after->pos() <= position) {
626  use_before = use_after;
627  use_after = use_after->next();
628  }
629  }
630 
631  // Partition original use positions to the two live ranges.
632  if (use_before != nullptr) {
633  use_before->set_next(nullptr);
634  } else {
635  first_pos_ = nullptr;
636  }
637  result->first_pos_ = use_after;
638 
639  // Discard cached iteration state. It might be pointing
640  // to the use that no longer belongs to this live range.
641  last_processed_use_ = nullptr;
642  current_interval_ = nullptr;
643 
644  if (connect_hints == ConnectHints && use_before != nullptr &&
645  use_after != nullptr) {
646  use_after->SetHint(use_before);
647  }
648 #ifdef DEBUG
649  VerifyChildStructure();
650  result->VerifyChildStructure();
651 #endif
652  return use_before;
653 }
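
A standalone sketch, not part of register-allocator.cc: the comments in DetachAt above describe how uses exactly at the split position are attributed. When the split lands at the start of a use interval (the end of a lifetime hole), such uses go to the detached child; otherwise they stay with the parent. With integer positions and illustrative names, the rule is:

#include <utility>
#include <vector>

// Partitions a sorted list of use positions at split_pos. split_at_start
// mirrors the flag of the same meaning in DetachAt: when true, a use exactly
// at split_pos belongs to the child range ("after"); when false, it stays
// with the parent ("before").
std::pair<std::vector<int>, std::vector<int>> PartitionUses(
    const std::vector<int>& uses, int split_pos, bool split_at_start) {
  std::vector<int> before;
  std::vector<int> after;
  for (int use : uses) {
    bool stays_before =
        split_at_start ? (use < split_pos) : (use <= split_pos);
    (stays_before ? before : after).push_back(use);
  }
  return {before, after};
}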
654 
655 void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
656  LiveRange* child = this;
657  for (; child != nullptr; child = child->next()) {
658  child->top_level_ = new_top_level;
659  }
660 }
661 
662 void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
663  const InstructionOperand& spill_op) {
664  for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
665  DCHECK(Start() <= pos->pos() && pos->pos() <= End());
666  if (!pos->HasOperand()) continue;
667  switch (pos->type()) {
668  case UsePositionType::kRequiresSlot:
669  DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
670  InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
671  break;
672  case UsePositionType::kRequiresRegister:
673  DCHECK(op.IsRegister() || op.IsFPRegister());
674  V8_FALLTHROUGH;
675  case UsePositionType::kRegisterOrSlot:
676  case UsePositionType::kRegisterOrSlotOrConstant:
677  InstructionOperand::ReplaceWith(pos->operand(), &op);
678  break;
679  }
680  }
681 }
682 
683 // This implements an ordering on live ranges so that they are ordered by their
684 // start positions. This is needed for the correctness of the register
685 // allocation algorithm. If two live ranges start at the same offset then there
686 // is a tie breaker based on where the value is first used. This part of the
687 // ordering is merely a heuristic.
688 bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
689  LifetimePosition start = Start();
690  LifetimePosition other_start = other->Start();
691  if (start == other_start) {
692  UsePosition* pos = first_pos();
693  if (pos == nullptr) return false;
694  UsePosition* other_pos = other->first_pos();
695  if (other_pos == nullptr) return true;
696  return pos->pos() < other_pos->pos();
697  }
698  return start < other_start;
699 }
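
A standalone sketch, not part of register-allocator.cc: the ordering described in the comment above (primary key: start position; tie-breaker: position of the first use, with use-less ranges sorting last) can be written over illustrative types as a strict weak ordering suitable for std::sort:

#include <algorithm>
#include <optional>
#include <vector>

struct Range {
  int start;
  std::optional<int> first_use;  // empty if the range has no use positions
};

bool AllocateBefore(const Range& a, const Range& b) {
  if (a.start == b.start) {
    if (!a.first_use) return false;      // a has no uses: it does not go first
    if (!b.first_use) return true;       // b has no uses: a goes first
    return *a.first_use < *b.first_use;  // earlier first use wins the tie
  }
  return a.start < b.start;
}

void SortByAllocationOrder(std::vector<Range>* ranges) {
  std::sort(ranges->begin(), ranges->end(), AllocateBefore);
}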
700 
701 void LiveRange::SetUseHints(int register_index) {
702  for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
703  if (!pos->HasOperand()) continue;
704  switch (pos->type()) {
705  case UsePositionType::kRequiresSlot:
706  break;
707  case UsePositionType::kRequiresRegister:
708  case UsePositionType::kRegisterOrSlot:
709  case UsePositionType::kRegisterOrSlotOrConstant:
710  pos->set_assigned_register(register_index);
711  break;
712  }
713  }
714 }
715 
716 bool LiveRange::CanCover(LifetimePosition position) const {
717  if (IsEmpty()) return false;
718  return Start() <= position && position < End();
719 }
720 
721 bool LiveRange::Covers(LifetimePosition position) const {
722  if (!CanCover(position)) return false;
723  UseInterval* start_search = FirstSearchIntervalForPosition(position);
724  for (UseInterval* interval = start_search; interval != nullptr;
725  interval = interval->next()) {
726  DCHECK(interval->next() == nullptr ||
727  interval->next()->start() >= interval->start());
728  AdvanceLastProcessedMarker(interval, position);
729  if (interval->Contains(position)) return true;
730  if (interval->start() > position) return false;
731  }
732  return false;
733 }
734 
735 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
736  UseInterval* b = other->first_interval();
737  if (b == nullptr) return LifetimePosition::Invalid();
738  LifetimePosition advance_last_processed_up_to = b->start();
739  UseInterval* a = FirstSearchIntervalForPosition(b->start());
740  while (a != nullptr && b != nullptr) {
741  if (a->start() > other->End()) break;
742  if (b->start() > End()) break;
743  LifetimePosition cur_intersection = a->Intersect(b);
744  if (cur_intersection.IsValid()) {
745  return cur_intersection;
746  }
747  if (a->start() < b->start()) {
748  a = a->next();
749  if (a == nullptr || a->start() > other->End()) break;
750  AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
751  } else {
752  b = b->next();
753  }
754  }
755  return LifetimePosition::Invalid();
756 }
757 
758 void LiveRange::Print(const RegisterConfiguration* config,
759  bool with_children) const {
760  StdoutStream os;
761  PrintableLiveRange wrapper;
762  wrapper.register_configuration_ = config;
763  for (const LiveRange* i = this; i != nullptr; i = i->next()) {
764  wrapper.range_ = i;
765  os << wrapper << std::endl;
766  if (!with_children) break;
767  }
768 }
769 
770 void LiveRange::Print(bool with_children) const {
771  Print(RegisterConfiguration::Default(), with_children);
772 }
773 
774 struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
775  SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
776  SpillMoveInsertionList* next)
777  : gap_index(gap_index), operand(operand), next(next) {}
778  const int gap_index;
779  InstructionOperand* const operand;
780  SpillMoveInsertionList* const next;
781 };
782 
783 TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
784  : LiveRange(0, rep, this),
785  vreg_(vreg),
786  last_child_id_(0),
787  splintered_from_(nullptr),
788  spill_operand_(nullptr),
789  spill_move_insertion_locations_(nullptr),
790  spilled_in_deferred_blocks_(false),
791  spill_start_index_(kMaxInt),
792  last_pos_(nullptr),
793  splinter_(nullptr),
794  has_preassigned_slot_(false) {
795  bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
796 }
797 
798 #if DEBUG
799 int TopLevelLiveRange::debug_virt_reg() const {
800  return IsSplinter() ? splintered_from()->vreg() : vreg();
801 }
802 #endif
803 
804 void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
805  InstructionOperand* operand) {
806  DCHECK(HasNoSpillType());
807  spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
808  gap_index, operand, spill_move_insertion_locations_);
809 }
810 
811 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
812  const InstructionOperand& op,
813  bool might_be_duplicated) {
814  DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
815  Zone* zone = sequence->zone();
816 
817  for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
818  to_spill != nullptr; to_spill = to_spill->next) {
819  Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
820  ParallelMove* move =
821  instr->GetOrCreateParallelMove(Instruction::START, zone);
822  // Skip insertion if it's possible that the move exists already as a
823  // constraint move from a fixed output register to a slot.
824  if (might_be_duplicated || has_preassigned_slot()) {
825  bool found = false;
826  for (MoveOperands* move_op : *move) {
827  if (move_op->IsEliminated()) continue;
828  if (move_op->source().Equals(*to_spill->operand) &&
829  move_op->destination().Equals(op)) {
830  found = true;
831  if (has_preassigned_slot()) move_op->Eliminate();
832  break;
833  }
834  }
835  if (found) continue;
836  }
837  if (!has_preassigned_slot()) {
838  move->AddMove(*to_spill->operand, op);
839  }
840  }
841 }
842 
843 void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
844  DCHECK(HasNoSpillType());
845  DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
846  set_spill_type(SpillType::kSpillOperand);
847  spill_operand_ = operand;
848 }
849 
850 void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
851  DCHECK(!HasSpillOperand());
852  DCHECK(spill_range);
853  spill_range_ = spill_range;
854 }
855 
856 AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
857  SpillRange* spill_range = GetSpillRange();
858  int index = spill_range->assigned_slot();
859  return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
860 }
861 
862 void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
863  Zone* zone) {
864  DCHECK(start != Start() || end != End());
865  DCHECK(start < end);
866 
867  TopLevelLiveRange splinter_temp(-1, representation());
868  UsePosition* last_in_splinter = nullptr;
869  // Live ranges defined in deferred blocks stay in deferred blocks, so we
870  // don't need to splinter them. That means that start should always be
871  // after the beginning of the range.
872  DCHECK(start > Start());
873 
874  if (end >= End()) {
875  DCHECK(start > Start());
876  DetachAt(start, &splinter_temp, zone, ConnectHints);
877  next_ = nullptr;
878  } else {
879  DCHECK(start < End() && Start() < end);
880 
881  const int kInvalidId = std::numeric_limits<int>::max();
882 
883  UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
884 
885  LiveRange end_part(kInvalidId, this->representation(), nullptr);
886  // The last chunk exits the deferred region, and we don't want to connect
887  // hints here, because the non-deferred region shouldn't be affected
888  // by allocation decisions on the deferred path.
889  last_in_splinter =
890  splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
891 
892  next_ = end_part.next_;
893  last_interval_->set_next(end_part.first_interval_);
894  // The next splinter will happen either at or after the current interval.
895  // We can optimize DetachAt by setting current_interval_ accordingly,
896  // which will then be picked up by FirstSearchIntervalForPosition.
897  current_interval_ = last_interval_;
898  last_interval_ = end_part.last_interval_;
899 
900  if (first_pos_ == nullptr) {
901  first_pos_ = end_part.first_pos_;
902  } else {
903  splitting_pointer_ = last;
904  if (last != nullptr) last->set_next(end_part.first_pos_);
905  }
906  }
907 
908  if (splinter()->IsEmpty()) {
909  splinter()->first_interval_ = splinter_temp.first_interval_;
910  splinter()->last_interval_ = splinter_temp.last_interval_;
911  } else {
912  splinter()->last_interval_->set_next(splinter_temp.first_interval_);
913  splinter()->last_interval_ = splinter_temp.last_interval_;
914  }
915  if (splinter()->first_pos() == nullptr) {
916  splinter()->first_pos_ = splinter_temp.first_pos_;
917  } else {
918  splinter()->last_pos_->set_next(splinter_temp.first_pos_);
919  }
920  if (last_in_splinter != nullptr) {
921  splinter()->last_pos_ = last_in_splinter;
922  } else {
923  if (splinter()->first_pos() != nullptr &&
924  splinter()->last_pos_ == nullptr) {
925  splinter()->last_pos_ = splinter()->first_pos();
926  for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
927  pos = pos->next()) {
928  splinter()->last_pos_ = pos;
929  }
930  }
931  }
932 #if DEBUG
933  Verify();
934  splinter()->Verify();
935 #endif
936 }
937 
938 void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
939  splintered_from_ = splinter_parent;
940  if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
941  SetSpillRange(splinter_parent->spill_range_);
942  }
943 }
944 
945 void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
946  DCHECK(merged->TopLevel() == this);
947 
948  if (HasNoSpillType() && merged->HasSpillRange()) {
949  set_spill_type(merged->spill_type());
950  DCHECK_LT(0, GetSpillRange()->live_ranges().size());
951  merged->spill_range_ = nullptr;
952  merged->bits_ =
953  SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
954  }
955 }
956 
957 void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
958  DCHECK(Start() < other->Start());
959  DCHECK(other->splintered_from() == this);
960 
961  LiveRange* first = this;
962  LiveRange* second = other;
963  DCHECK(first->Start() < second->Start());
964  while (first != nullptr && second != nullptr) {
965  DCHECK(first != second);
966  // Make sure the ranges are in order each time we iterate.
967  if (second->Start() < first->Start()) {
968  LiveRange* tmp = second;
969  second = first;
970  first = tmp;
971  continue;
972  }
973 
974  if (first->End() <= second->Start()) {
975  if (first->next() == nullptr ||
976  first->next()->Start() > second->Start()) {
977  // First is in order before second.
978  LiveRange* temp = first->next();
979  first->next_ = second;
980  first = temp;
981  } else {
982  // First is in order before its successor (or second), so advance first.
983  first = first->next();
984  }
985  continue;
986  }
987 
988  DCHECK(first->Start() < second->Start());
989  // If first and second intersect, split first.
990  if (first->Start() < second->End() && second->Start() < first->End()) {
991  LiveRange* temp = first->SplitAt(second->Start(), zone);
992  CHECK(temp != first);
993  temp->set_spilled(first->spilled());
994  if (!temp->spilled())
995  temp->set_assigned_register(first->assigned_register());
996 
997  first->next_ = second;
998  first = temp;
999  continue;
1000  }
1001  DCHECK(first->End() <= second->Start());
1002  }
1003 
1004  TopLevel()->UpdateParentForAllChildren(TopLevel());
1005  TopLevel()->UpdateSpillRangePostMerge(other);
1006  TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() ||
1007  other->has_slot_use());
1008 
1009 #if DEBUG
1010  Verify();
1011 #endif
1012 }
1013 
1014 void TopLevelLiveRange::VerifyChildrenInOrder() const {
1015  LifetimePosition last_end = End();
1016  for (const LiveRange* child = this->next(); child != nullptr;
1017  child = child->next()) {
1018  DCHECK(last_end <= child->Start());
1019  last_end = child->End();
1020  }
1021 }
1022 
1023 void TopLevelLiveRange::Verify() const {
1024  VerifyChildrenInOrder();
1025  for (const LiveRange* child = this; child != nullptr; child = child->next()) {
1026  VerifyChildStructure();
1027  }
1028 }
1029 
1030 void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
1031  TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
1032  DCHECK_NOT_NULL(first_interval_);
1033  DCHECK(first_interval_->start() <= start);
1034  DCHECK(start < first_interval_->end());
1035  first_interval_->set_start(start);
1036 }
1037 
1038 void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
1039  LifetimePosition end, Zone* zone) {
1040  TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
1041  end.value());
1042  LifetimePosition new_end = end;
1043  while (first_interval_ != nullptr && first_interval_->start() <= end) {
1044  if (first_interval_->end() > end) {
1045  new_end = first_interval_->end();
1046  }
1047  first_interval_ = first_interval_->next();
1048  }
1049 
1050  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
1051  new_interval->set_next(first_interval_);
1052  first_interval_ = new_interval;
1053  if (new_interval->next() == nullptr) {
1054  last_interval_ = new_interval;
1055  }
1056 }
1057 
1058 void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
1059  LifetimePosition end, Zone* zone) {
1060  TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
1061  end.value());
1062  if (first_interval_ == nullptr) {
1063  UseInterval* interval = new (zone) UseInterval(start, end);
1064  first_interval_ = interval;
1065  last_interval_ = interval;
1066  } else {
1067  if (end == first_interval_->start()) {
1068  first_interval_->set_start(start);
1069  } else if (end < first_interval_->start()) {
1070  UseInterval* interval = new (zone) UseInterval(start, end);
1071  interval->set_next(first_interval_);
1072  first_interval_ = interval;
1073  } else {
1074  // The order of instruction processing (see ProcessInstructions) guarantees
1075  // that each new use interval either precedes, intersects with, or touches
1076  // the last added interval.
1077  DCHECK(start <= first_interval_->end());
1078  first_interval_->set_start(Min(start, first_interval_->start()));
1079  first_interval_->set_end(Max(end, first_interval_->end()));
1080  }
1081  }
1082 }
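
A standalone sketch, not part of register-allocator.cc: because use intervals are added in reverse order (see the comment above), AddUseInterval only ever has to prepend a disjoint interval, extend the head when the new interval touches it, or widen the head when they overlap. With plain ints and an illustrative std::list in place of the intrusive UseInterval chain:

#include <algorithm>
#include <cassert>
#include <list>

struct Iv {
  int start;
  int end;  // half-open: [start, end)
};

void AddInterval(std::list<Iv>* intervals, int start, int end) {
  if (intervals->empty() || end < intervals->front().start) {
    intervals->push_front({start, end});  // disjoint: becomes the new head
  } else if (end == intervals->front().start) {
    intervals->front().start = start;     // touches the head: extend it left
  } else {
    // Overlaps the head: widen it in both directions.
    assert(start <= intervals->front().end);
    intervals->front().start = std::min(start, intervals->front().start);
    intervals->front().end = std::max(end, intervals->front().end);
  }
}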
1083 
1084 void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
1085  LifetimePosition pos = use_pos->pos();
1086  TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
1087  UsePosition* prev_hint = nullptr;
1088  UsePosition* prev = nullptr;
1089  UsePosition* current = first_pos_;
1090  while (current != nullptr && current->pos() < pos) {
1091  prev_hint = current->HasHint() ? current : prev_hint;
1092  prev = current;
1093  current = current->next();
1094  }
1095 
1096  if (prev == nullptr) {
1097  use_pos->set_next(first_pos_);
1098  first_pos_ = use_pos;
1099  } else {
1100  use_pos->set_next(prev->next());
1101  prev->set_next(use_pos);
1102  }
1103 
1104  if (prev_hint == nullptr && use_pos->HasHint()) {
1105  current_hint_position_ = use_pos;
1106  }
1107 }
1108 
1109 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
1110  UseInterval* interval2) {
1111  while (interval1 != nullptr && interval2 != nullptr) {
1112  if (interval1->start() < interval2->start()) {
1113  if (interval1->end() > interval2->start()) {
1114  return true;
1115  }
1116  interval1 = interval1->next();
1117  } else {
1118  if (interval2->end() > interval1->start()) {
1119  return true;
1120  }
1121  interval2 = interval2->next();
1122  }
1123  }
1124  return false;
1125 }
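
A standalone sketch, not part of register-allocator.cc: AreUseIntervalsIntersecting above is the classic two-cursor walk over two sorted lists of half-open intervals, always advancing the cursor whose interval starts earlier. Over illustrative vectors:

#include <cstddef>
#include <vector>

struct Span {
  int start;
  int end;  // half-open: [start, end)
};

bool Intersecting(const std::vector<Span>& a, const std::vector<Span>& b) {
  std::size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i].start < b[j].start) {
      if (a[i].end > b[j].start) return true;  // a[i] reaches into b[j]
      ++i;
    } else {
      if (b[j].end > a[i].start) return true;  // b[j] reaches into a[i]
      ++j;
    }
  }
  return false;
}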
1126 
1127 std::ostream& operator<<(std::ostream& os,
1128  const PrintableLiveRange& printable_range) {
1129  const LiveRange* range = printable_range.range_;
1130  os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
1131  << " ";
1132  if (range->TopLevel()->is_phi()) os << "phi ";
1133  if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
1134 
1135  os << "{" << std::endl;
1136  UseInterval* interval = range->first_interval();
1137  UsePosition* use_pos = range->first_pos();
1138  while (use_pos != nullptr) {
1139  if (use_pos->HasOperand()) {
1140  os << *use_pos->operand() << use_pos->pos() << " ";
1141  }
1142  use_pos = use_pos->next();
1143  }
1144  os << std::endl;
1145 
1146  while (interval != nullptr) {
1147  os << '[' << interval->start() << ", " << interval->end() << ')'
1148  << std::endl;
1149  interval = interval->next();
1150  }
1151  os << "}";
1152  return os;
1153 }
1154 
1155 namespace {
1156 void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
1157  os << " ";
1158  for (auto block : blocks) {
1159  LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
1160  block->first_instruction_index());
1161  LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
1162  block->last_instruction_index())
1163  .NextFullStart();
1164  int length = end_pos.value() - start_pos.value();
1165  constexpr int kMaxPrefixLength = 32;
1166  char buffer[kMaxPrefixLength];
1167  int rpo_number = block->rpo_number().ToInt();
1168  const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
1169  int max_prefix_length = std::min(length, kMaxPrefixLength);
1170  int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
1171  deferred_marker);
1172  os << buffer;
1173  int remaining = length - std::min(prefix, max_prefix_length) - 1;
1174  for (int i = 0; i < remaining; ++i) os << '-';
1175  os << ']';
1176  }
1177  os << '\n';
1178 }
1179 } // namespace
1180 
1181 void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
1182  int rowcount = 0;
1183  for (auto toplevel : data()->live_ranges()) {
1184  if (!CanProcessRange(toplevel)) continue;
1185  if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
1186  int position = 0;
1187  os << std::setw(3) << toplevel->vreg()
1188  << (toplevel->IsSplinter() ? "s:" : ": ");
1189  for (LiveRange* range = toplevel; range != nullptr; range = range->next()) {
1190  for (UseInterval* interval = range->first_interval(); interval != nullptr;
1191  interval = interval->next()) {
1192  LifetimePosition start = interval->start();
1193  LifetimePosition end = interval->end();
1194  CHECK_GE(start.value(), position);
1195  for (; start.value() > position; position++) {
1196  os << ' ';
1197  }
1198  int length = end.value() - start.value();
1199  constexpr int kMaxPrefixLength = 32;
1200  char buffer[kMaxPrefixLength];
1201  int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
1202  int prefix;
1203  if (range->spilled()) {
1204  prefix = snprintf(buffer, max_prefix_length, "|ss");
1205  } else {
1206  const char* reg_name;
1207  if (range->assigned_register() == kUnassignedRegister) {
1208  reg_name = "???";
1209  } else {
1210  reg_name = RegisterName(range->assigned_register());
1211  }
1212  prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name);
1213  }
1214  os << buffer;
1215  position += std::min(prefix, max_prefix_length - 1);
1216  CHECK_GE(end.value(), position);
1217  const char line_style = range->spilled() ? '-' : '=';
1218  for (; end.value() > position; position++) {
1219  os << line_style;
1220  }
1221  }
1222  }
1223  os << '\n';
1224  }
1225 }
1226 
1227 SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
1228  : live_ranges_(zone),
1229  assigned_slot_(kUnassignedSlot),
1230  byte_width_(GetByteWidth(parent->representation())) {
1231  // Spill ranges are created for top level, non-splintered ranges. This is so
1232  // that, when merging decisions are made, we consider the full extent of the
1233  // virtual register, and avoid clobbering it.
1234  DCHECK(!parent->IsSplinter());
1235  UseInterval* result = nullptr;
1236  UseInterval* node = nullptr;
1237  // Copy the intervals for all ranges.
1238  for (LiveRange* range = parent; range != nullptr; range = range->next()) {
1239  UseInterval* src = range->first_interval();
1240  while (src != nullptr) {
1241  UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
1242  if (result == nullptr) {
1243  result = new_node;
1244  } else {
1245  node->set_next(new_node);
1246  }
1247  node = new_node;
1248  src = src->next();
1249  }
1250  }
1251  use_interval_ = result;
1252  live_ranges().push_back(parent);
1253  end_position_ = node->end();
1254  parent->SetSpillRange(this);
1255 }
1256 
1257 bool SpillRange::IsIntersectingWith(SpillRange* other) const {
1258  if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
1259  this->End() <= other->use_interval_->start() ||
1260  other->End() <= this->use_interval_->start()) {
1261  return false;
1262  }
1263  return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
1264 }
1265 
1266 bool SpillRange::TryMerge(SpillRange* other) {
1267  if (HasSlot() || other->HasSlot()) return false;
1268  if (byte_width() != other->byte_width() || IsIntersectingWith(other))
1269  return false;
1270 
1271  LifetimePosition max = LifetimePosition::MaxPosition();
1272  if (End() < other->End() && other->End() != max) {
1273  end_position_ = other->End();
1274  }
1275  other->end_position_ = max;
1276 
1277  MergeDisjointIntervals(other->use_interval_);
1278  other->use_interval_ = nullptr;
1279 
1280  for (TopLevelLiveRange* range : other->live_ranges()) {
1281  DCHECK(range->GetSpillRange() == other);
1282  range->SetSpillRange(this);
1283  }
1284 
1285  live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
1286  other->live_ranges().end());
1287  other->live_ranges().clear();
1288 
1289  return true;
1290 }
1291 
1292 void SpillRange::MergeDisjointIntervals(UseInterval* other) {
1293  UseInterval* tail = nullptr;
1294  UseInterval* current = use_interval_;
1295  while (other != nullptr) {
1296  // Make sure the 'current' list starts first
1297  if (current == nullptr || current->start() > other->start()) {
1298  std::swap(current, other);
1299  }
1300  // Check disjointness
1301  DCHECK(other == nullptr || current->end() <= other->start());
1302  // Append the 'current' node to the result accumulator and move forward
1303  if (tail == nullptr) {
1304  use_interval_ = current;
1305  } else {
1306  tail->set_next(current);
1307  }
1308  tail = current;
1309  current = current->next();
1310  }
1311  // Other list is empty => we are done
1312 }
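
A standalone sketch, not part of register-allocator.cc: MergeDisjointIntervals above is the merge step of merge sort applied to two sorted, mutually disjoint interval lists. The same operation over illustrative vectors, with the disjointness DCHECK kept as an assert:

#include <cassert>
#include <cstddef>
#include <vector>

struct Chunk {
  int start;
  int end;  // half-open: [start, end)
};

std::vector<Chunk> MergeDisjoint(const std::vector<Chunk>& current,
                                 const std::vector<Chunk>& other) {
  std::vector<Chunk> merged;
  std::size_t i = 0, j = 0;
  while (i < current.size() || j < other.size()) {
    bool take_current =
        j == other.size() ||
        (i < current.size() && current[i].start <= other[j].start);
    const Chunk& next = take_current ? current[i++] : other[j++];
    // The inputs never overlap, so appended chunks stay sorted and disjoint.
    assert(merged.empty() || merged.back().end <= next.start);
    merged.push_back(next);
  }
  return merged;
}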
1313 
1314 void SpillRange::Print() const {
1315  StdoutStream os;
1316  os << "{" << std::endl;
1317  for (TopLevelLiveRange* range : live_ranges()) {
1318  os << range->vreg() << " ";
1319  }
1320  os << std::endl;
1321 
1322  for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
1323  os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
1324  }
1325  os << "}" << std::endl;
1326 }
1327 
1328 RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
1329  const InstructionBlock* block,
1330  Zone* zone)
1331  : phi_(phi),
1332  block_(block),
1333  incoming_operands_(zone),
1334  assigned_register_(kUnassignedRegister) {
1335  incoming_operands_.reserve(phi->operands().size());
1336 }
1337 
1338 void RegisterAllocationData::PhiMapValue::AddOperand(
1339  InstructionOperand* operand) {
1340  incoming_operands_.push_back(operand);
1341 }
1342 
1343 void RegisterAllocationData::PhiMapValue::CommitAssignment(
1344  const InstructionOperand& assigned) {
1345  for (InstructionOperand* operand : incoming_operands_) {
1346  InstructionOperand::ReplaceWith(operand, &assigned);
1347  }
1348 }
1349 
1350 RegisterAllocationData::RegisterAllocationData(
1351  const RegisterConfiguration* config, Zone* zone, Frame* frame,
1352  InstructionSequence* code, const char* debug_name)
1353  : allocation_zone_(zone),
1354  frame_(frame),
1355  code_(code),
1356  debug_name_(debug_name),
1357  config_(config),
1358  phi_map_(allocation_zone()),
1359  live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1360  live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
1361  live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
1362  allocation_zone()),
1363  fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
1364  allocation_zone()),
1365  fixed_float_live_ranges_(allocation_zone()),
1366  fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
1367  allocation_zone()),
1368  fixed_simd128_live_ranges_(allocation_zone()),
1369  spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
1370  delayed_references_(allocation_zone()),
1371  assigned_registers_(nullptr),
1372  assigned_double_registers_(nullptr),
1373  virtual_register_count_(code->VirtualRegisterCount()),
1374  preassigned_slot_ranges_(zone) {
1375  if (!kSimpleFPAliasing) {
1376  fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
1377  nullptr);
1378  fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(),
1379  nullptr);
1380  }
1381 
1382  assigned_registers_ = new (code_zone())
1383  BitVector(this->config()->num_general_registers(), code_zone());
1384  assigned_double_registers_ = new (code_zone())
1385  BitVector(this->config()->num_double_registers(), code_zone());
1386  this->frame()->SetAllocatedRegisters(assigned_registers_);
1387  this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
1388 }
1389 
1390 MoveOperands* RegisterAllocationData::AddGapMove(
1391  int index, Instruction::GapPosition position,
1392  const InstructionOperand& from, const InstructionOperand& to) {
1393  Instruction* instr = code()->InstructionAt(index);
1394  ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
1395  return moves->AddMove(from, to);
1396 }
1397 
1398 MachineRepresentation RegisterAllocationData::RepresentationFor(
1399  int virtual_register) {
1400  DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
1401  return code()->GetRepresentation(virtual_register);
1402 }
1403 
1404 TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
1405  if (index >= static_cast<int>(live_ranges().size())) {
1406  live_ranges().resize(index + 1, nullptr);
1407  }
1408  TopLevelLiveRange* result = live_ranges()[index];
1409  if (result == nullptr) {
1410  result = NewLiveRange(index, RepresentationFor(index));
1411  live_ranges()[index] = result;
1412  }
1413  return result;
1414 }
1415 
1416 TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
1417  int index, MachineRepresentation rep) {
1418  return new (allocation_zone()) TopLevelLiveRange(index, rep);
1419 }
1420 
1421 int RegisterAllocationData::GetNextLiveRangeId() {
1422  int vreg = virtual_register_count_++;
1423  if (vreg >= static_cast<int>(live_ranges().size())) {
1424  live_ranges().resize(vreg + 1, nullptr);
1425  }
1426  return vreg;
1427 }
1428 
1429 TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
1430  MachineRepresentation rep) {
1431  int vreg = GetNextLiveRangeId();
1432  TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
1433  return ret;
1434 }
1435 
1436 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1437  const InstructionBlock* block, PhiInstruction* phi) {
1438  RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
1439  RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
1440  auto res =
1441  phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1442  DCHECK(res.second);
1443  USE(res);
1444  return map_value;
1445 }
1446 
1447 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1448  int virtual_register) {
1449  auto it = phi_map_.find(virtual_register);
1450  DCHECK(it != phi_map_.end());
1451  return it->second;
1452 }
1453 
1454 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1455  TopLevelLiveRange* top_range) {
1456  return GetPhiMapValueFor(top_range->vreg());
1457 }
1458 
1459 bool RegisterAllocationData::ExistsUseWithoutDefinition() {
1460  bool found = false;
1461  BitVector::Iterator iterator(live_in_sets()[0]);
1462  while (!iterator.Done()) {
1463  found = true;
1464  int operand_index = iterator.Current();
1465  PrintF("Register allocator error: live v%d reached first block.\n",
1466  operand_index);
1467  LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
1468  PrintF(" (first use is at %d)\n", range->first_pos()->pos().value());
1469  if (debug_name() == nullptr) {
1470  PrintF("\n");
1471  } else {
1472  PrintF(" (function: %s)\n", debug_name());
1473  }
1474  iterator.Advance();
1475  }
1476  return found;
1477 }
1478 
1479 // If a range is defined in a deferred block, we can expect the entire range
1480 // to cover only positions in deferred blocks. Otherwise, a block on the
1481 // hot path would be dominated by a deferred block, meaning it is unreachable
1482 // without passing through the deferred block, which is contradictory.
1483 // In particular, when such a range contributes a result back on the hot
1484 // path, it will be as one of the inputs of a phi. In that case, the value
1485 // will be transferred via a move in the Gap::END position of the last
1486 // instruction of a deferred block.
1487 bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
1488  const size_t live_ranges_size = live_ranges().size();
1489  for (const TopLevelLiveRange* range : live_ranges()) {
1490  CHECK_EQ(live_ranges_size,
1491  live_ranges().size()); // TODO(neis): crbug.com/831822
1492  if (range == nullptr || range->IsEmpty() ||
1493  !code()
1494  ->GetInstructionBlock(range->Start().ToInstructionIndex())
1495  ->IsDeferred()) {
1496  continue;
1497  }
1498  for (const UseInterval* i = range->first_interval(); i != nullptr;
1499  i = i->next()) {
1500  int first = i->FirstGapIndex();
1501  int last = i->LastGapIndex();
1502  for (int instr = first; instr <= last;) {
1503  const InstructionBlock* block = code()->GetInstructionBlock(instr);
1504  if (!block->IsDeferred()) return false;
1505  instr = block->last_instruction_index() + 1;
1506  }
1507  }
1508  }
1509  return true;
1510 }
1511 
1512 SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
1513  TopLevelLiveRange* range) {
1514  DCHECK(!range->HasSpillOperand());
1515 
1516  SpillRange* spill_range = range->GetAllocatedSpillRange();
1517  if (spill_range == nullptr) {
1518  DCHECK(!range->IsSplinter());
1519  spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
1520  }
1521  range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
1522 
1523  int spill_range_index =
1524  range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
1525 
1526  spill_ranges()[spill_range_index] = spill_range;
1527 
1528  return spill_range;
1529 }
1530 
1531 SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
1532  TopLevelLiveRange* range) {
1533  DCHECK(!range->HasSpillOperand());
1534  DCHECK(!range->IsSplinter());
1535  SpillRange* spill_range =
1536  new (allocation_zone()) SpillRange(range, allocation_zone());
1537  return spill_range;
1538 }
1539 
1540 void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
1541  int index) {
1542  switch (rep) {
1543  case MachineRepresentation::kFloat32:
1544  case MachineRepresentation::kSimd128:
1545  if (kSimpleFPAliasing) {
1546  assigned_double_registers_->Add(index);
1547  } else {
1548  int alias_base_index = -1;
1549  int aliases = config()->GetAliases(
1550  rep, index, MachineRepresentation::kFloat64, &alias_base_index);
1551  DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
1552  while (aliases--) {
1553  int aliased_reg = alias_base_index + aliases;
1554  assigned_double_registers_->Add(aliased_reg);
1555  }
1556  }
1557  break;
1558  case MachineRepresentation::kFloat64:
1559  assigned_double_registers_->Add(index);
1560  break;
1561  default:
1562  DCHECK(!IsFloatingPoint(rep));
1563  assigned_registers_->Add(index);
1564  break;
1565  }
1566 }
1567 
1568 bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
1569  return pos.IsFullStart() &&
1570  code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
1571  pos.ToInstructionIndex();
1572 }
1573 
1574 ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
1575  : data_(data) {}
1576 
1577 InstructionOperand* ConstraintBuilder::AllocateFixed(
1578  UnallocatedOperand* operand, int pos, bool is_tagged) {
1579  TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
1580  DCHECK(operand->HasFixedPolicy());
1581  InstructionOperand allocated;
1582  MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1583  int virtual_register = operand->virtual_register();
1584  if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
1585  rep = data()->RepresentationFor(virtual_register);
1586  }
1587  if (operand->HasFixedSlotPolicy()) {
1588  allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
1589  operand->fixed_slot_index());
1590  } else if (operand->HasFixedRegisterPolicy()) {
1591  DCHECK(!IsFloatingPoint(rep));
1592  DCHECK(data()->config()->IsAllocatableGeneralCode(
1593  operand->fixed_register_index()));
1594  allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1595  operand->fixed_register_index());
1596  } else if (operand->HasFixedFPRegisterPolicy()) {
1597  DCHECK(IsFloatingPoint(rep));
1598  DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
1599  allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
1600  operand->fixed_register_index());
1601  } else {
1602  UNREACHABLE();
1603  }
1604  InstructionOperand::ReplaceWith(operand, &allocated);
1605  if (is_tagged) {
1606  TRACE("Fixed reg is tagged at %d\n", pos);
1607  Instruction* instr = code()->InstructionAt(pos);
1608  if (instr->HasReferenceMap()) {
1609  instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
1610  }
1611  }
1612  return operand;
1613 }
1614 
1615 void ConstraintBuilder::MeetRegisterConstraints() {
1616  for (InstructionBlock* block : code()->instruction_blocks()) {
1617  MeetRegisterConstraints(block);
1618  }
1619 }
1620 
1621 void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
1622  int start = block->first_instruction_index();
1623  int end = block->last_instruction_index();
1624  DCHECK_NE(-1, start);
1625  for (int i = start; i <= end; ++i) {
1626  MeetConstraintsBefore(i);
1627  if (i != end) MeetConstraintsAfter(i);
1628  }
1629  // Meet register constraints for the last instruction in the block.
1630  MeetRegisterConstraintsForLastInstructionInBlock(block);
1631 }
1632 
1633 void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
1634  const InstructionBlock* block) {
1635  int end = block->last_instruction_index();
1636  Instruction* last_instruction = code()->InstructionAt(end);
1637  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
1638  InstructionOperand* output_operand = last_instruction->OutputAt(i);
1639  DCHECK(!output_operand->IsConstant());
1640  UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
1641  int output_vreg = output->virtual_register();
1642  TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1643  bool assigned = false;
1644  if (output->HasFixedPolicy()) {
1645  AllocateFixed(output, -1, false);
1646  // This value is produced on the stack; we never need to spill it.
1647  if (output->IsStackSlot()) {
1648  DCHECK(LocationOperand::cast(output)->index() <
1649  data()->frame()->GetSpillSlotCount());
1650  range->SetSpillOperand(LocationOperand::cast(output));
1651  range->SetSpillStartIndex(end);
1652  assigned = true;
1653  }
1654 
1655  for (const RpoNumber& succ : block->successors()) {
1656  const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1657  DCHECK_EQ(1, successor->PredecessorCount());
1658  int gap_index = successor->first_instruction_index();
1659  // Create an unconstrained operand for the same virtual register
1660  // and insert a gap move from the fixed output to the operand.
1661  UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1662  output_vreg);
1663  data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
1664  }
1665  }
1666 
1667  if (!assigned) {
1668  for (const RpoNumber& succ : block->successors()) {
1669  const InstructionBlock* successor = code()->InstructionBlockAt(succ);
1670  DCHECK_EQ(1, successor->PredecessorCount());
1671  int gap_index = successor->first_instruction_index();
1672  range->RecordSpillLocation(allocation_zone(), gap_index, output);
1673  range->SetSpillStartIndex(gap_index);
1674  }
1675  }
1676  }
1677 }
1678 
1679 void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
1680  Instruction* first = code()->InstructionAt(instr_index);
1681  // Handle fixed temporaries.
1682  for (size_t i = 0; i < first->TempCount(); i++) {
1683  UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
1684  if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
1685  }
1686  // Handle constant/fixed output operands.
1687  for (size_t i = 0; i < first->OutputCount(); i++) {
1688  InstructionOperand* output = first->OutputAt(i);
1689  if (output->IsConstant()) {
1690  int output_vreg = ConstantOperand::cast(output)->virtual_register();
1691  TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
1692  range->SetSpillStartIndex(instr_index + 1);
1693  range->SetSpillOperand(output);
1694  continue;
1695  }
1696  UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
1697  TopLevelLiveRange* range =
1698  data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
1699  bool assigned = false;
1700  if (first_output->HasFixedPolicy()) {
1701  int output_vreg = first_output->virtual_register();
1702  UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1703  output_vreg);
1704  bool is_tagged = code()->IsReference(output_vreg);
1705  if (first_output->HasSecondaryStorage()) {
1706  range->MarkHasPreassignedSlot();
1707  data()->preassigned_slot_ranges().push_back(
1708  std::make_pair(range, first_output->GetSecondaryStorage()));
1709  }
1710  AllocateFixed(first_output, instr_index, is_tagged);
1711 
1712  // This value is produced on the stack; we never need to spill it.
1713  if (first_output->IsStackSlot()) {
1714  DCHECK(LocationOperand::cast(first_output)->index() <
1715  data()->frame()->GetTotalFrameSlotCount());
1716  range->SetSpillOperand(LocationOperand::cast(first_output));
1717  range->SetSpillStartIndex(instr_index + 1);
1718  assigned = true;
1719  }
1720  data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
1721  output_copy);
1722  }
1723  // Make sure we add a gap move for spilling (if we have not done
1724  // so already).
1725  if (!assigned) {
1726  range->RecordSpillLocation(allocation_zone(), instr_index + 1,
1727  first_output);
1728  range->SetSpillStartIndex(instr_index + 1);
1729  }
1730  }
1731 }
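
// What the fixed-output path above produces, reduced to a standalone sketch
// (illustrative names and types only; the real operand and move classes are
// far richer): the output operand itself is pinned to its fixed location,
// and a parallel move in the gap of the next instruction copies that
// location into an unconstrained operand for the same virtual register, so
// later uses are not tied to the fixed location.
#include <string>
#include <utility>

struct FixedOutputLowering {
  std::string rewritten_output;                  // e.g. "r2"
  std::pair<std::string, std::string> gap_move;  // {to, from}, e.g. {"v7", "r2"}
};

FixedOutputLowering LowerFixedOutput(const std::string& vreg,
                                     const std::string& fixed_location) {
  return {fixed_location, {vreg, fixed_location}};
}
// LowerFixedOutput("v7", "r2") pins the output to r2 and records the move
// v7 <- r2 for the START gap of the following instruction.
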
1732 
1733 void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
1734  Instruction* second = code()->InstructionAt(instr_index);
1735  // Handle fixed input operands of second instruction.
1736  for (size_t i = 0; i < second->InputCount(); i++) {
1737  InstructionOperand* input = second->InputAt(i);
1738  if (input->IsImmediate() || input->IsExplicit()) {
1739  continue; // Ignore immediates and explicitly reserved registers.
1740  }
1741  UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
1742  if (cur_input->HasFixedPolicy()) {
1743  int input_vreg = cur_input->virtual_register();
1744  UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1745  input_vreg);
1746  bool is_tagged = code()->IsReference(input_vreg);
1747  AllocateFixed(cur_input, instr_index, is_tagged);
1748  data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
1749  }
1750  }
1751  // Handle "output same as input" for second instruction.
1752  for (size_t i = 0; i < second->OutputCount(); i++) {
1753  InstructionOperand* output = second->OutputAt(i);
1754  if (!output->IsUnallocated()) continue;
1755  UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
1756  if (!second_output->HasSameAsInputPolicy()) continue;
1757  DCHECK_EQ(0, i); // Only valid for first output.
1758  UnallocatedOperand* cur_input =
1759  UnallocatedOperand::cast(second->InputAt(0));
1760  int output_vreg = second_output->virtual_register();
1761  int input_vreg = cur_input->virtual_register();
1762  UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
1763  input_vreg);
1764  *cur_input =
1765  UnallocatedOperand(*cur_input, second_output->virtual_register());
1766  MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
1767  input_copy, *cur_input);
1768  if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
1769  if (second->HasReferenceMap()) {
1770  RegisterAllocationData::DelayedReference delayed_reference = {
1771  second->reference_map(), &gap_move->source()};
1772  data()->delayed_references().push_back(delayed_reference);
1773  }
1774  } else if (!code()->IsReference(input_vreg) &&
1775  code()->IsReference(output_vreg)) {
1776  // The input is assumed to immediately have a tagged representation,
1777  // before the pointer map can be used. I.e. the pointer map at the
1778  // instruction will include the output operand (whose value at the
1779  // beginning of the instruction is equal to the input operand). If
1780  // this is not desired, then the pointer map at this instruction needs
1781  // to be adjusted manually.
1782  }
1783  }
1784 }
1785 
1786 void ConstraintBuilder::ResolvePhis() {
1787  // Process the blocks in reverse order.
1788  for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
1789  ResolvePhis(block);
1790  }
1791 }
1792 
1793 void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
1794  for (PhiInstruction* phi : block->phis()) {
1795  int phi_vreg = phi->virtual_register();
1796  RegisterAllocationData::PhiMapValue* map_value =
1797  data()->InitializePhiMap(block, phi);
1798  InstructionOperand& output = phi->output();
1799  // Map the destination operands, so the commitment phase can find them.
1800  for (size_t i = 0; i < phi->operands().size(); ++i) {
1801  InstructionBlock* cur_block =
1802  code()->InstructionBlockAt(block->predecessors()[i]);
1803  UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
1804  phi->operands()[i]);
1805  MoveOperands* move = data()->AddGapMove(
1806  cur_block->last_instruction_index(), Instruction::END, input, output);
1807  map_value->AddOperand(&move->destination());
1808  DCHECK(!code()
1809  ->InstructionAt(cur_block->last_instruction_index())
1810  ->HasReferenceMap());
1811  }
1812  TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
1813  int gap_index = block->first_instruction_index();
1814  live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
1815  live_range->SetSpillStartIndex(gap_index);
1816  // We use the phi-ness of some nodes in some later heuristics.
1817  live_range->set_is_phi(true);
1818  live_range->set_is_non_loop_phi(!block->IsLoopHeader());
1819  }
1820 }
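
// Roughly, the loop above lowers a phi such as
//     B3: v9 = phi(v5 from B1, v6 from B2)
// into one gap move per predecessor edge, attached to the END gap of each
// predecessor's last instruction:
//     end of B1: v9 <- v5        end of B2: v9 <- v6
// A standalone sketch of that lowering (illustrative names only, not part
// of this file):
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

struct SketchPhi {
  std::string output;               // e.g. "v9"
  std::vector<std::string> inputs;  // one entry per predecessor
};

// Returns (predecessor index, {to, from}) pairs describing the gap moves.
std::vector<std::pair<size_t, std::pair<std::string, std::string>>> LowerPhi(
    const SketchPhi& phi) {
  std::vector<std::pair<size_t, std::pair<std::string, std::string>>> moves;
  for (size_t i = 0; i < phi.inputs.size(); ++i) {
    moves.push_back({i, {phi.output, phi.inputs[i]}});
  }
  return moves;
}
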
1821 
1822 LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
1823  Zone* local_zone)
1824  : data_(data), phi_hints_(local_zone) {}
1825 
1826 BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
1827  RegisterAllocationData* data) {
1828  size_t block_index = block->rpo_number().ToSize();
1829  BitVector* live_out = data->live_out_sets()[block_index];
1830  if (live_out == nullptr) {
1831  // Compute live out for the given block, ignoring contributions from
1832  // backward successor edges.
1833  Zone* zone = data->allocation_zone();
1834  const InstructionSequence* code = data->code();
1835 
1836  live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
1837 
1838  // Process all successor blocks.
1839  for (const RpoNumber& succ : block->successors()) {
1840  // Add values live on entry to the successor.
1841  if (succ <= block->rpo_number()) continue;
1842  BitVector* live_in = data->live_in_sets()[succ.ToSize()];
1843  if (live_in != nullptr) live_out->Union(*live_in);
1844 
1845  // All phi input operands corresponding to this successor edge are live
1846  // out from this block.
1847  const InstructionBlock* successor = code->InstructionBlockAt(succ);
1848  size_t index = successor->PredecessorIndexOf(block->rpo_number());
1849  DCHECK(index < successor->PredecessorCount());
1850  for (PhiInstruction* phi : successor->phis()) {
1851  live_out->Add(phi->operands()[index]);
1852  }
1853  }
1854  data->live_out_sets()[block_index] = live_out;
1855  }
1856  return live_out;
1857 }
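
// In dataflow terms, the function above computes, over forward edges only,
//   live_out(B) = union over forward successors S of B of
//                 ( live_in(S) + phi inputs in S that flow in from B ),
// leaving back-edge contributions to ProcessLoopHeader. Below is a
// standalone sketch of that equation using std::set in place of BitVector;
// it is illustrative only and the parameter shapes are hypothetical.
#include <set>
#include <vector>

// |forward_successors[b]| lists the successors of block b with a larger rpo
// number; |phi_inputs_from[b]| holds the virtual registers consumed by
// successor phis along edges leaving b.
std::set<int> SketchLiveOut(
    int block, const std::vector<std::vector<int>>& forward_successors,
    const std::vector<std::set<int>>& live_in,
    const std::vector<std::set<int>>& phi_inputs_from) {
  std::set<int> out;
  for (int succ : forward_successors[block]) {
    out.insert(live_in[succ].begin(), live_in[succ].end());
  }
  out.insert(phi_inputs_from[block].begin(), phi_inputs_from[block].end());
  return out;
}
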
1858 
1859 void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
1860  BitVector* live_out) {
1861  // Add an interval that includes the entire block to the live range for
1862  // each live_out value.
1863  LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
1864  block->first_instruction_index());
1865  LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
1866  block->last_instruction_index())
1867  .NextStart();
1868  BitVector::Iterator iterator(live_out);
1869  while (!iterator.Done()) {
1870  int operand_index = iterator.Current();
1871  TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
1872  range->AddUseInterval(start, end, allocation_zone());
1873  iterator.Advance();
1874  }
1875 }
1876 
1877 int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
1878  int result = -index - 1;
1879  switch (rep) {
1880  case MachineRepresentation::kSimd128:
1881  result -= config()->num_float_registers();
1882  V8_FALLTHROUGH;
1883  case MachineRepresentation::kFloat32:
1884  result -= config()->num_double_registers();
1885  V8_FALLTHROUGH;
1886  case MachineRepresentation::kFloat64:
1887  result -= config()->num_general_registers();
1888  break;
1889  default:
1890  UNREACHABLE();
1891  break;
1892  }
1893  return result;
1894 }
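
// Worked example for the ID bands produced above, using hypothetical counts
// of 16 general, 16 double, 32 float and 16 simd128 registers, and assuming
// the general-purpose encoding used by FixedLiveRangeID is simply -index - 1:
//   general  index i:  -i - 1                  ->  -1  .. -16
//   kFloat64 index i:  -i - 1 - 16             ->  -17 .. -32
//   kFloat32 index i:  -i - 1 - 16 - 16        ->  -33 .. -64
//   kSimd128 index i:  -i - 1 - 32 - 16 - 16   ->  -65 .. -80
// Every fixed range therefore gets a unique negative ID, and the bands for
// the different register kinds never collide.
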
1895 
1896 TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
1897  DCHECK(index < config()->num_general_registers());
1898  TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
1899  if (result == nullptr) {
1900  MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
1901  result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
1902  DCHECK(result->IsFixed());
1903  result->set_assigned_register(index);
1904  data()->MarkAllocated(rep, index);
1905  data()->fixed_live_ranges()[index] = result;
1906  }
1907  return result;
1908 }
1909 
1910 TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
1911  int index, MachineRepresentation rep) {
1912  int num_regs = config()->num_double_registers();
1913  ZoneVector<TopLevelLiveRange*>* live_ranges =
1914  &data()->fixed_double_live_ranges();
1915  if (!kSimpleFPAliasing) {
1916  switch (rep) {
1917  case MachineRepresentation::kFloat32:
1918  num_regs = config()->num_float_registers();
1919  live_ranges = &data()->fixed_float_live_ranges();
1920  break;
1921  case MachineRepresentation::kSimd128:
1922  num_regs = config()->num_simd128_registers();
1923  live_ranges = &data()->fixed_simd128_live_ranges();
1924  break;
1925  default:
1926  break;
1927  }
1928  }
1929 
1930  DCHECK(index < num_regs);
1931  USE(num_regs);
1932  TopLevelLiveRange* result = (*live_ranges)[index];
1933  if (result == nullptr) {
1934  result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
1935  DCHECK(result->IsFixed());
1936  result->set_assigned_register(index);
1937  data()->MarkAllocated(rep, index);
1938  (*live_ranges)[index] = result;
1939  }
1940  return result;
1941 }
1942 
1943 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
1944  if (operand->IsUnallocated()) {
1945  return data()->GetOrCreateLiveRangeFor(
1946  UnallocatedOperand::cast(operand)->virtual_register());
1947  } else if (operand->IsConstant()) {
1948  return data()->GetOrCreateLiveRangeFor(
1949  ConstantOperand::cast(operand)->virtual_register());
1950  } else if (operand->IsRegister()) {
1951  return FixedLiveRangeFor(
1952  LocationOperand::cast(operand)->GetRegister().code());
1953  } else if (operand->IsFPRegister()) {
1954  LocationOperand* op = LocationOperand::cast(operand);
1955  return FixedFPLiveRangeFor(op->register_code(), op->representation());
1956  } else {
1957  return nullptr;
1958  }
1959 }
1960 
1961 UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
1962  InstructionOperand* operand,
1963  void* hint,
1964  UsePositionHintType hint_type) {
1965  return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type);
1966 }
1967 
1968 UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
1969  InstructionOperand* operand, void* hint,
1970  UsePositionHintType hint_type) {
1971  TopLevelLiveRange* range = LiveRangeFor(operand);
1972  if (range == nullptr) return nullptr;
1973 
1974  if (range->IsEmpty() || range->Start() > position) {
1975  // Can happen if there is a definition without a use.
1976  range->AddUseInterval(position, position.NextStart(), allocation_zone());
1977  range->AddUsePosition(NewUsePosition(position.NextStart()));
1978  } else {
1979  range->ShortenTo(position);
1980  }
1981  if (!operand->IsUnallocated()) return nullptr;
1982  UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
1983  UsePosition* use_pos =
1984  NewUsePosition(position, unalloc_operand, hint, hint_type);
1985  range->AddUsePosition(use_pos);
1986  return use_pos;
1987 }
1988 
1989 UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
1990  LifetimePosition position,
1991  InstructionOperand* operand, void* hint,
1992  UsePositionHintType hint_type) {
1993  TopLevelLiveRange* range = LiveRangeFor(operand);
1994  if (range == nullptr) return nullptr;
1995  UsePosition* use_pos = nullptr;
1996  if (operand->IsUnallocated()) {
1997  UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
1998  use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
1999  range->AddUsePosition(use_pos);
2000  }
2001  range->AddUseInterval(block_start, position, allocation_zone());
2002  return use_pos;
2003 }
2004 
2005 void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
2006  BitVector* live) {
2007  int block_start = block->first_instruction_index();
2008  LifetimePosition block_start_position =
2009  LifetimePosition::GapFromInstructionIndex(block_start);
2010  bool fixed_float_live_ranges = false;
2011  bool fixed_simd128_live_ranges = false;
2012  if (!kSimpleFPAliasing) {
2013  int mask = data()->code()->representation_mask();
2014  fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
2015  fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
2016  }
2017 
2018  for (int index = block->last_instruction_index(); index >= block_start;
2019  index--) {
2020  LifetimePosition curr_position =
2021  LifetimePosition::InstructionFromInstructionIndex(index);
2022  Instruction* instr = code()->InstructionAt(index);
2023  DCHECK_NOT_NULL(instr);
2024  DCHECK(curr_position.IsInstructionPosition());
2025  // Process output, inputs, and temps of this instruction.
2026  for (size_t i = 0; i < instr->OutputCount(); i++) {
2027  InstructionOperand* output = instr->OutputAt(i);
2028  if (output->IsUnallocated()) {
2029  // Unsupported.
2030  DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
2031  int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
2032  live->Remove(out_vreg);
2033  } else if (output->IsConstant()) {
2034  int out_vreg = ConstantOperand::cast(output)->virtual_register();
2035  live->Remove(out_vreg);
2036  }
2037  if (block->IsHandler() && index == block_start && output->IsAllocated() &&
2038  output->IsRegister() &&
2039  AllocatedOperand::cast(output)->GetRegister() ==
2040  v8::internal::kReturnRegister0) {
2041  // The register defined here is blocked from gap start - it is the
2042  // exception value.
2043  // TODO(mtrofin): should we explore an explicit opcode for
2044  // the first instruction in the handler?
2045  Define(LifetimePosition::GapFromInstructionIndex(index), output);
2046  } else {
2047  Define(curr_position, output);
2048  }
2049  }
2050 
2051  if (instr->ClobbersRegisters()) {
2052  for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
2053  // Create a UseInterval at this instruction for all fixed registers,
2054  // (including the instruction outputs). Adding another UseInterval here
2055  // is OK because AddUseInterval will just merge it with the existing
2056  // one at the end of the range.
2057  int code = config()->GetAllocatableGeneralCode(i);
2058  TopLevelLiveRange* range = FixedLiveRangeFor(code);
2059  range->AddUseInterval(curr_position, curr_position.End(),
2060  allocation_zone());
2061  }
2062  }
2063 
2064  if (instr->ClobbersDoubleRegisters()) {
2065  for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
2066  // Add a UseInterval for all DoubleRegisters. See comment above for
2067  // general registers.
2068  int code = config()->GetAllocatableDoubleCode(i);
2069  TopLevelLiveRange* range =
2070  FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
2071  range->AddUseInterval(curr_position, curr_position.End(),
2072  allocation_zone());
2073  }
2074  // Clobber fixed float registers on archs with non-simple aliasing.
2075  if (!kSimpleFPAliasing) {
2076  if (fixed_float_live_ranges) {
2077  for (int i = 0; i < config()->num_allocatable_float_registers();
2078  ++i) {
2079  // Add a UseInterval for all FloatRegisters. See comment above for
2080  // general registers.
2081  int code = config()->GetAllocatableFloatCode(i);
2082  TopLevelLiveRange* range =
2083  FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
2084  range->AddUseInterval(curr_position, curr_position.End(),
2085  allocation_zone());
2086  }
2087  }
2088  if (fixed_simd128_live_ranges) {
2089  for (int i = 0; i < config()->num_allocatable_simd128_registers();
2090  ++i) {
2091  int code = config()->GetAllocatableSimd128Code(i);
2092  TopLevelLiveRange* range =
2093  FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
2094  range->AddUseInterval(curr_position, curr_position.End(),
2095  allocation_zone());
2096  }
2097  }
2098  }
2099  }
2100 
2101  for (size_t i = 0; i < instr->InputCount(); i++) {
2102  InstructionOperand* input = instr->InputAt(i);
2103  if (input->IsImmediate() || input->IsExplicit()) {
2104  continue; // Ignore immediates and explicitly reserved registers.
2105  }
2106  LifetimePosition use_pos;
2107  if (input->IsUnallocated() &&
2108  UnallocatedOperand::cast(input)->IsUsedAtStart()) {
2109  use_pos = curr_position;
2110  } else {
2111  use_pos = curr_position.End();
2112  }
2113 
2114  if (input->IsUnallocated()) {
2115  UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
2116  int vreg = unalloc->virtual_register();
2117  live->Add(vreg);
2118  if (unalloc->HasSlotPolicy()) {
2119  data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
2120  }
2121  }
2122  Use(block_start_position, use_pos, input);
2123  }
2124 
2125  for (size_t i = 0; i < instr->TempCount(); i++) {
2126  InstructionOperand* temp = instr->TempAt(i);
2127  // Unsupported.
2128  DCHECK_IMPLIES(temp->IsUnallocated(),
2129  !UnallocatedOperand::cast(temp)->HasSlotPolicy());
2130  if (instr->ClobbersTemps()) {
2131  if (temp->IsRegister()) continue;
2132  if (temp->IsUnallocated()) {
2133  UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
2134  if (temp_unalloc->HasFixedPolicy()) {
2135  continue;
2136  }
2137  }
2138  }
2139  Use(block_start_position, curr_position.End(), temp);
2140  Define(curr_position, temp);
2141  }
2142 
2143  // Process the moves of the instruction's gaps, making their sources live.
2144  const Instruction::GapPosition kPositions[] = {Instruction::END,
2145  Instruction::START};
2146  curr_position = curr_position.PrevStart();
2147  DCHECK(curr_position.IsGapPosition());
2148  for (const Instruction::GapPosition& position : kPositions) {
2149  ParallelMove* move = instr->GetParallelMove(position);
2150  if (move == nullptr) continue;
2151  if (position == Instruction::END) {
2152  curr_position = curr_position.End();
2153  } else {
2154  curr_position = curr_position.Start();
2155  }
2156  for (MoveOperands* cur : *move) {
2157  InstructionOperand& from = cur->source();
2158  InstructionOperand& to = cur->destination();
2159  void* hint = &to;
2160  UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
2161  UsePosition* to_use = nullptr;
2162  int phi_vreg = -1;
2163  if (to.IsUnallocated()) {
2164  int to_vreg = UnallocatedOperand::cast(to).virtual_register();
2165  TopLevelLiveRange* to_range =
2166  data()->GetOrCreateLiveRangeFor(to_vreg);
2167  if (to_range->is_phi()) {
2168  phi_vreg = to_vreg;
2169  if (to_range->is_non_loop_phi()) {
2170  hint = to_range->current_hint_position();
2171  hint_type = hint == nullptr ? UsePositionHintType::kNone
2172  : UsePositionHintType::kUsePos;
2173  } else {
2174  hint_type = UsePositionHintType::kPhi;
2175  hint = data()->GetPhiMapValueFor(to_vreg);
2176  }
2177  } else {
2178  if (live->Contains(to_vreg)) {
2179  to_use = Define(curr_position, &to, &from,
2180  UsePosition::HintTypeForOperand(from));
2181  live->Remove(to_vreg);
2182  } else {
2183  cur->Eliminate();
2184  continue;
2185  }
2186  }
2187  } else {
2188  Define(curr_position, &to);
2189  }
2190  UsePosition* from_use =
2191  Use(block_start_position, curr_position, &from, hint, hint_type);
2192  // Mark range live.
2193  if (from.IsUnallocated()) {
2194  live->Add(UnallocatedOperand::cast(from).virtual_register());
2195  }
2196  // Resolve use position hints just created.
2197  if (to_use != nullptr && from_use != nullptr) {
2198  to_use->ResolveHint(from_use);
2199  from_use->ResolveHint(to_use);
2200  }
2201  DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
2202  DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
2203  // Potentially resolve phi hint.
2204  if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
2205  }
2206  }
2207  }
2208 }
2209 
2210 void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
2211  BitVector* live) {
2212  for (PhiInstruction* phi : block->phis()) {
2213  // The live range interval already ends at the first instruction of the
2214  // block.
2215  int phi_vreg = phi->virtual_register();
2216  live->Remove(phi_vreg);
2217  // Select a hint from a predecessor block that precedes this block in the
2218  // rpo order. In order of priority:
2219  // - Avoid hints from deferred blocks.
2220  // - Prefer hints from allocated (or explicit) operands.
2221  // - Prefer hints from empty blocks (containing just parallel moves and a
2222  // jump). In these cases, if we can elide the moves, the jump threader
2223  // is likely to be able to elide the jump.
2224  // The enforcement of hinting in rpo order is required because hint
2225  // resolution that happens later in the compiler pipeline visits
2226  // instructions in reverse rpo order, relying on the fact that phis are
2227  // encountered before their hints.
2228  InstructionOperand* hint = nullptr;
2229  int hint_preference = 0;
2230 
2231  // The cost of hinting increases with the number of predecessors. At the
2232  // same time, the typical benefit decreases, since this hinting only
2233  // optimises the execution path through one predecessor. A limit of 2 is
2234  // sufficient to hit the common if/else pattern.
2235  int predecessor_limit = 2;
2236 
2237  for (RpoNumber predecessor : block->predecessors()) {
2238  const InstructionBlock* predecessor_block =
2239  code()->InstructionBlockAt(predecessor);
2240  DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
2241 
2242  // Only take hints from earlier rpo numbers.
2243  if (predecessor >= block->rpo_number()) continue;
2244 
2245  // Look up the predecessor instruction.
2246  const Instruction* predecessor_instr =
2247  GetLastInstruction(code(), predecessor_block);
2248  InstructionOperand* predecessor_hint = nullptr;
2249  // Phis are assigned in the END position of the last instruction in each
2250  // predecessor block.
2251  for (MoveOperands* move :
2252  *predecessor_instr->GetParallelMove(Instruction::END)) {
2253  InstructionOperand& to = move->destination();
2254  if (to.IsUnallocated() &&
2255  UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
2256  predecessor_hint = &move->source();
2257  break;
2258  }
2259  }
2260  DCHECK_NOT_NULL(predecessor_hint);
2261 
2262  // For each predecessor, generate a score according to the priorities
2263  // described above, and pick the best one. Flags in higher-order bits have
2264  // a higher priority than those in lower-order bits.
2265  int predecessor_hint_preference = 0;
2266  const int kNotDeferredBlockPreference = (1 << 2);
2267  const int kMoveIsAllocatedPreference = (1 << 1);
2268  const int kBlockIsEmptyPreference = (1 << 0);
2269 
2270  // - Avoid hints from deferred blocks.
2271  if (!predecessor_block->IsDeferred()) {
2272  predecessor_hint_preference |= kNotDeferredBlockPreference;
2273  }
2274 
2275  // - Prefer hints from allocated (or explicit) operands.
2276  //
2277  // Already-allocated or explicit operands are typically assigned using
2278  // the parallel moves on the last instruction. For example:
2279  //
2280  // gap (v101 = [x0|R|w32]) (v100 = v101)
2281  // ArchJmp
2282  // ...
2283  // phi: v100 = v101 v102
2284  //
2285  // We have already found the END move, so look for a matching START move
2286  // from an allocated (or explicit) operand.
2287  //
2288  // Note that we cannot simply look up data()->live_ranges()[vreg] here
2289  // because the live ranges are still being built when this function is
2290  // called.
2291  // TODO(v8): Find a way to separate hinting from live range analysis in
2292  // BuildLiveRanges so that we can use the O(1) live-range look-up.
2293  auto moves = predecessor_instr->GetParallelMove(Instruction::START);
2294  if (moves != nullptr) {
2295  for (MoveOperands* move : *moves) {
2296  InstructionOperand& to = move->destination();
2297  if (predecessor_hint->Equals(to)) {
2298  if (move->source().IsAllocated() || move->source().IsExplicit()) {
2299  predecessor_hint_preference |= kMoveIsAllocatedPreference;
2300  }
2301  break;
2302  }
2303  }
2304  }
2305 
2306  // - Prefer hints from empty blocks.
2307  if (predecessor_block->last_instruction_index() ==
2308  predecessor_block->first_instruction_index()) {
2309  predecessor_hint_preference |= kBlockIsEmptyPreference;
2310  }
2311 
2312  if ((hint == nullptr) ||
2313  (predecessor_hint_preference > hint_preference)) {
2314  // Take the hint from this predecessor.
2315  hint = predecessor_hint;
2316  hint_preference = predecessor_hint_preference;
2317  }
2318 
2319  if (--predecessor_limit <= 0) break;
2320  }
2321  DCHECK_NOT_NULL(hint);
2322 
2323  LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
2324  block->first_instruction_index());
2325  UsePosition* use_pos = Define(block_start, &phi->output(), hint,
2326  UsePosition::HintTypeForOperand(*hint));
2327  MapPhiHint(hint, use_pos);
2328  }
2329 }
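
// The preference scoring above is an ordinary lexicographic comparison
// packed into bit flags, so "not deferred" dominates "allocated move source",
// which in turn dominates "empty block". A standalone sketch (illustrative
// only, not part of this file):
int SketchPhiHintScore(bool predecessor_is_deferred,
                       bool move_source_is_allocated,
                       bool predecessor_is_empty) {
  const int kNotDeferredBlockPreference = 1 << 2;
  const int kMoveIsAllocatedPreference = 1 << 1;
  const int kBlockIsEmptyPreference = 1 << 0;
  int score = 0;
  if (!predecessor_is_deferred) score |= kNotDeferredBlockPreference;
  if (move_source_is_allocated) score |= kMoveIsAllocatedPreference;
  if (predecessor_is_empty) score |= kBlockIsEmptyPreference;
  return score;
}
// E.g. a non-deferred, non-empty block with an unallocated source scores 4
// and still beats a deferred, empty block with an allocated source (3).
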
2330 
2331 void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
2332  BitVector* live) {
2333  DCHECK(block->IsLoopHeader());
2334  // Add a live range stretching from the first loop instruction to the last
2335  // for each value live on entry to the header.
2336  BitVector::Iterator iterator(live);
2337  LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
2338  block->first_instruction_index());
2339  LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
2340  code()->LastLoopInstructionIndex(block))
2341  .NextFullStart();
2342  while (!iterator.Done()) {
2343  int operand_index = iterator.Current();
2344  TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
2345  range->EnsureInterval(start, end, allocation_zone());
2346  iterator.Advance();
2347  }
2348  // Insert all values into the live in sets of all blocks in the loop.
2349  for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
2350  ++i) {
2351  live_in_sets()[i]->Union(*live);
2352  }
2353 }
2354 
2355 void LiveRangeBuilder::BuildLiveRanges() {
2356  // Process the blocks in reverse order.
2357  for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
2358  --block_id) {
2359  InstructionBlock* block =
2360  code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
2361  BitVector* live = ComputeLiveOut(block, data());
2362  // Initially consider all live_out values live for the entire block. We
2363  // will shorten these intervals if necessary.
2364  AddInitialIntervals(block, live);
2365  // Process the instructions in reverse order, generating and killing
2366  // live values.
2367  ProcessInstructions(block, live);
2368  // All phi output operands are killed by this block.
2369  ProcessPhis(block, live);
2370  // Now live holds the live_in set for this block, except that it does not
2371  // yet include values that are live out on backward successor edges.
2372  if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
2373  live_in_sets()[block_id] = live;
2374  }
2375  // Postprocess the ranges.
2376  const size_t live_ranges_size = data()->live_ranges().size();
2377  for (TopLevelLiveRange* range : data()->live_ranges()) {
2378  CHECK_EQ(live_ranges_size,
2379  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
2380  if (range == nullptr) continue;
2381  // Give slots to all ranges with a non-fixed slot use.
2382  if (range->has_slot_use() && range->HasNoSpillType()) {
2383  data()->AssignSpillRangeToLiveRange(range);
2384  }
2385  // TODO(bmeurer): This is a horrible hack to make sure that for constant
2386  // live ranges, every use requires the constant to be in a register.
2387  // Without this hack, all uses with "any" policy would get the constant
2388  // operand assigned.
2389  if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
2390  for (UsePosition* pos = range->first_pos(); pos != nullptr;
2391  pos = pos->next()) {
2392  if (pos->type() == UsePositionType::kRequiresSlot ||
2393  pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
2394  continue;
2395  }
2396  UsePositionType new_type = UsePositionType::kRegisterOrSlot;
2397  // Can't mark phis as needing a register.
2398  if (!pos->pos().IsGapPosition()) {
2399  new_type = UsePositionType::kRequiresRegister;
2400  }
2401  pos->set_type(new_type, true);
2402  }
2403  }
2404  }
2405  for (auto preassigned : data()->preassigned_slot_ranges()) {
2406  TopLevelLiveRange* range = preassigned.first;
2407  int slot_id = preassigned.second;
2408  SpillRange* spill = range->HasSpillRange()
2409  ? range->GetSpillRange()
2410  : data()->AssignSpillRangeToLiveRange(range);
2411  spill->set_assigned_slot(slot_id);
2412  }
2413 #ifdef DEBUG
2414  Verify();
2415 #endif
2416 }
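
// The interval bookkeeping driven by the pass above, reduced to a toy,
// standalone sketch (illustrative only; the real TopLevelLiveRange keeps a
// list of UseIntervals and UsePositions rather than a single interval):
// AddInitialIntervals makes every live-out value cover the whole block, a
// definition then shortens the range to start at the defining position, and
// a use re-extends it from the block entry up to the use.
#include <algorithm>

struct ToyRange {
  int start;  // inclusive
  int end;    // exclusive
};

// Mirrors ShortenTo: nothing before the definition is live.
void ToyDefine(ToyRange* r, int def_pos) { r->start = def_pos; }

// Mirrors AddUseInterval(block_start, use_pos): the value must be live from
// the block entry up to the use.
void ToyUse(ToyRange* r, int block_start, int use_pos) {
  r->start = std::min(r->start, block_start);
  r->end = std::max(r->end, use_pos);
}
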
2417 
2418 void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
2419  UsePosition* use_pos) {
2420  DCHECK(!use_pos->IsResolved());
2421  auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
2422  DCHECK(res.second);
2423  USE(res);
2424 }
2425 
2426 void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
2427  UsePosition* use_pos) {
2428  auto it = phi_hints_.find(operand);
2429  if (it == phi_hints_.end()) return;
2430  DCHECK(!it->second->IsResolved());
2431  it->second->ResolveHint(use_pos);
2432 }
2433 
2434 void LiveRangeBuilder::Verify() const {
2435  for (auto& hint : phi_hints_) {
2436  CHECK(hint.second->IsResolved());
2437  }
2438  for (const TopLevelLiveRange* current : data()->live_ranges()) {
2439  if (current != nullptr && !current->IsEmpty()) {
2440  // New LiveRanges should not be split.
2441  CHECK_NULL(current->next());
2442  // General integrity check.
2443  current->Verify();
2444  const UseInterval* first = current->first_interval();
2445  if (first->next() == nullptr) continue;
2446 
2447  // Consecutive intervals should not end and start in the same block,
2448  // otherwise the intervals should have been joined, because the
2449  // variable is live throughout that block.
2450  CHECK(NextIntervalStartsInDifferentBlocks(first));
2451 
2452  for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
2453  // Except for the first interval, the other intervals must start at
2454  // a block boundary, otherwise data wouldn't flow to them.
2455  CHECK(IntervalStartsAtBlockBoundary(i));
2456  // The last instruction of each predecessor of the block in which the
2457  // interval starts must be covered by the range.
2458  CHECK(IntervalPredecessorsCoveredByRange(i, current));
2459  if (i->next() != nullptr) {
2460  // Check the consecutive intervals property, except for the last
2461  // interval, where it doesn't apply.
2462  CHECK(NextIntervalStartsInDifferentBlocks(i));
2463  }
2464  }
2465  }
2466  }
2467 }
2468 
2469 bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
2470  const UseInterval* interval) const {
2471  LifetimePosition start = interval->start();
2472  if (!start.IsFullStart()) return false;
2473  int instruction_index = start.ToInstructionIndex();
2474  const InstructionBlock* block =
2475  data()->code()->GetInstructionBlock(instruction_index);
2476  return block->first_instruction_index() == instruction_index;
2477 }
2478 
2479 bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
2480  const UseInterval* interval, const TopLevelLiveRange* range) const {
2481  LifetimePosition start = interval->start();
2482  int instruction_index = start.ToInstructionIndex();
2483  const InstructionBlock* block =
2484  data()->code()->GetInstructionBlock(instruction_index);
2485  for (RpoNumber pred_index : block->predecessors()) {
2486  const InstructionBlock* predecessor =
2487  data()->code()->InstructionBlockAt(pred_index);
2488  LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
2489  predecessor->last_instruction_index());
2490  last_pos = last_pos.NextStart().End();
2491  if (!range->Covers(last_pos)) return false;
2492  }
2493  return true;
2494 }
2495 
2496 bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
2497  const UseInterval* interval) const {
2498  DCHECK_NOT_NULL(interval->next());
2499  LifetimePosition end = interval->end();
2500  LifetimePosition next_start = interval->next()->start();
2501  // Since end is not covered, but the previous position is, move back a
2502  // position.
2503  end = end.IsStart() ? end.PrevStart().End() : end.Start();
2504  int last_covered_index = end.ToInstructionIndex();
2505  const InstructionBlock* block =
2506  data()->code()->GetInstructionBlock(last_covered_index);
2507  const InstructionBlock* next_block =
2508  data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
2509  return block->rpo_number() < next_block->rpo_number();
2510 }
2511 
2512 RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
2513  RegisterKind kind)
2514  : data_(data),
2515  mode_(kind),
2516  num_registers_(GetRegisterCount(data->config(), kind)),
2517  num_allocatable_registers_(
2518  GetAllocatableRegisterCount(data->config(), kind)),
2519  allocatable_register_codes_(
2520  GetAllocatableRegisterCodes(data->config(), kind)),
2521  check_fp_aliasing_(false) {
2522  if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
2523  check_fp_aliasing_ = (data->code()->representation_mask() &
2524  (kFloat32Bit | kSimd128Bit)) != 0;
2525  }
2526 }
2527 
2528 LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
2529  const LiveRange* range, int instruction_index) {
2530  LifetimePosition ret = LifetimePosition::Invalid();
2531 
2532  ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
2533  if (range->Start() >= ret || ret >= range->End()) {
2534  return LifetimePosition::Invalid();
2535  }
2536  return ret;
2537 }
2538 
2539 void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
2540  size_t initial_range_count = data()->live_ranges().size();
2541  for (size_t i = 0; i < initial_range_count; ++i) {
2542  CHECK_EQ(initial_range_count,
2543  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
2544  TopLevelLiveRange* range = data()->live_ranges()[i];
2545  if (!CanProcessRange(range)) continue;
2546  if (range->HasNoSpillType() ||
2547  (range->HasSpillRange() && !range->has_slot_use())) {
2548  continue;
2549  }
2550  LifetimePosition start = range->Start();
2551  TRACE("Live range %d:%d is defined by a spill operand.\n",
2552  range->TopLevel()->vreg(), range->relative_id());
2553  LifetimePosition next_pos = start;
2554  if (next_pos.IsGapPosition()) {
2555  next_pos = next_pos.NextStart();
2556  }
2557 
2558  // With splinters, we can be more strict and skip over positions
2559  // not strictly needing registers.
2560  UsePosition* pos =
2561  range->IsSplinter()
2562  ? range->NextRegisterPosition(next_pos)
2563  : range->NextUsePositionRegisterIsBeneficial(next_pos);
2564  // If the range already has a spill operand and it doesn't need a
2565  // register immediately, split it and spill the first part of the range.
2566  if (pos == nullptr) {
2567  Spill(range);
2568  } else if (pos->pos() > range->Start().NextStart()) {
2569  // Do not spill the live range eagerly if the use position that can benefit
2570  // from the register is too close to the start of the live range.
2571  LifetimePosition split_pos = GetSplitPositionForInstruction(
2572  range, pos->pos().ToInstructionIndex());
2573  // There is no place to split, so we can't split and spill.
2574  if (!split_pos.IsValid()) continue;
2575 
2576  split_pos =
2577  FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
2578 
2579  SplitRangeAt(range, split_pos);
2580  Spill(range);
2581  }
2582  }
2583 }
2584 
2585 LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
2586  LifetimePosition pos) {
2587  DCHECK(!range->TopLevel()->IsFixed());
2588  TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
2589  range->relative_id(), pos.value());
2590 
2591  if (pos <= range->Start()) return range;
2592 
2593  // We can't properly connect live ranges if splitting occurred at the end
2594  // of a block.
2595  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
2596  (GetInstructionBlock(code(), pos)->last_instruction_index() !=
2597  pos.ToInstructionIndex()));
2598 
2599  LiveRange* result = range->SplitAt(pos, allocation_zone());
2600  return result;
2601 }
2602 
2603 LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
2604  LifetimePosition start,
2605  LifetimePosition end) {
2606  DCHECK(!range->TopLevel()->IsFixed());
2607  TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
2608  range->TopLevel()->vreg(), range->relative_id(), start.value(),
2609  end.value());
2610 
2611  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
2612  DCHECK(split_pos >= start);
2613  return SplitRangeAt(range, split_pos);
2614 }
2615 
2616 LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
2617  LifetimePosition end) {
2618  int start_instr = start.ToInstructionIndex();
2619  int end_instr = end.ToInstructionIndex();
2620  DCHECK_LE(start_instr, end_instr);
2621 
2622  // We have no choice.
2623  if (start_instr == end_instr) return end;
2624 
2625  const InstructionBlock* start_block = GetInstructionBlock(code(), start);
2626  const InstructionBlock* end_block = GetInstructionBlock(code(), end);
2627 
2628  if (end_block == start_block) {
2629  // The interval is split in the same basic block. Split at the latest
2630  // possible position.
2631  return end;
2632  }
2633 
2634  const InstructionBlock* block = end_block;
2635  // Find header of outermost loop.
2636  do {
2637  const InstructionBlock* loop = GetContainingLoop(code(), block);
2638  if (loop == nullptr ||
2639  loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
2640  // No more loops or loop starts before the lifetime start.
2641  break;
2642  }
2643  block = loop;
2644  } while (true);
2645 
2646  // We did not find any suitable outer loop. Split at the latest possible
2647  // position unless end_block is a loop header itself.
2648  if (block == end_block && !end_block->IsLoopHeader()) return end;
2649 
2650  return LifetimePosition::GapFromInstructionIndex(
2651  block->first_instruction_index());
2652 }
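
// A standalone sketch of the loop-hoisting rule above (illustrative only):
// blocks are identified by rpo number and |loop_header_of[b]| gives the rpo
// number of the innermost loop header containing b, or -1 when b is not in
// a loop. The split is moved to the outermost loop header that still lies
// after the range start, so it does not land inside a hot loop body.
#include <vector>

int SketchHoistSplit(int start_block, int end_block,
                     const std::vector<int>& loop_header_of) {
  int block = end_block;
  for (int loop = loop_header_of[block]; loop != -1 && loop > start_block;
       loop = loop_header_of[loop]) {
    block = loop;
  }
  return block;  // Split at this block's first gap position.
}
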
2653 
2654 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
2655  LiveRange* range, LifetimePosition pos) {
2656  const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
2657  const InstructionBlock* loop_header =
2658  block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2659 
2660  if (loop_header == nullptr) return pos;
2661 
2662  const UsePosition* prev_use =
2663  range->PreviousUsePositionRegisterIsBeneficial(pos);
2664 
2665  while (loop_header != nullptr) {
2666  // We are going to spill the live range inside the loop.
2667  // If possible, try to move the spilling position backwards to the loop
2668  // header. This will reduce the number of memory moves on the back edge.
2669  LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
2670  loop_header->first_instruction_index());
2671 
2672  if (range->Covers(loop_start)) {
2673  if (prev_use == nullptr || prev_use->pos() < loop_start) {
2674  // No register-beneficial use inside the loop before this position.
2675  pos = loop_start;
2676  }
2677  }
2678 
2679  // Try hoisting out to an outer loop.
2680  loop_header = GetContainingLoop(code(), loop_header);
2681  }
2682 
2683  return pos;
2684 }
2685 
2686 void RegisterAllocator::Spill(LiveRange* range) {
2687  DCHECK(!range->spilled());
2688  TopLevelLiveRange* first = range->TopLevel();
2689  TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
2690 
2691  if (first->HasNoSpillType()) {
2692  data()->AssignSpillRangeToLiveRange(first);
2693  }
2694  range->Spill();
2695 }
2696 
2697 const char* RegisterAllocator::RegisterName(int register_code) const {
2698  return mode() == GENERAL_REGISTERS
2699  ? i::RegisterName(Register::from_code(register_code))
2700  : i::RegisterName(DoubleRegister::from_code(register_code));
2701 }
2702 
2703 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
2704  RegisterKind kind, Zone* local_zone)
2705  : RegisterAllocator(data, kind),
2706  unhandled_live_ranges_(local_zone),
2707  active_live_ranges_(local_zone),
2708  inactive_live_ranges_(local_zone) {
2709  active_live_ranges().reserve(8);
2710  inactive_live_ranges().reserve(8);
2711  // TryAllocateFreeReg and AllocateBlockedReg assume this
2712  // when allocating local arrays.
2713  DCHECK_GE(RegisterConfiguration::kMaxFPRegisters,
2714  this->data()->config()->num_general_registers());
2715 }
2716 
2717 void LinearScanAllocator::AllocateRegisters() {
2718  DCHECK(unhandled_live_ranges().empty());
2719  DCHECK(active_live_ranges().empty());
2720  DCHECK(inactive_live_ranges().empty());
2721 
2722  SplitAndSpillRangesDefinedByMemoryOperand();
2723 
2724  const size_t live_ranges_size = data()->live_ranges().size();
2725  for (TopLevelLiveRange* range : data()->live_ranges()) {
2726  CHECK_EQ(live_ranges_size,
2727  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
2728  if (!CanProcessRange(range)) continue;
2729  for (LiveRange* to_add = range; to_add != nullptr;
2730  to_add = to_add->next()) {
2731  if (!to_add->spilled()) {
2732  AddToUnhandled(to_add);
2733  }
2734  }
2735  }
2736 
2737  if (mode() == GENERAL_REGISTERS) {
2738  for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
2739  if (current != nullptr) AddToInactive(current);
2740  }
2741  } else {
2742  for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
2743  if (current != nullptr) AddToInactive(current);
2744  }
2745  if (!kSimpleFPAliasing && check_fp_aliasing()) {
2746  for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
2747  if (current != nullptr) AddToInactive(current);
2748  }
2749  for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
2750  if (current != nullptr) AddToInactive(current);
2751  }
2752  }
2753  }
2754 
2755  while (!unhandled_live_ranges().empty()) {
2756  LiveRange* current = *unhandled_live_ranges().begin();
2757  unhandled_live_ranges().erase(unhandled_live_ranges().begin());
2758  LifetimePosition position = current->Start();
2759 #ifdef DEBUG
2760  allocation_finger_ = position;
2761 #endif
2762  TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
2763  current->relative_id(), position.value());
2764 
2765  if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
2766  continue;
2767 
2768  for (auto it = active_live_ranges().begin();
2769  it != active_live_ranges().end();) {
2770  LiveRange* cur_active = *it;
2771  if (cur_active->End() <= position) {
2772  it = ActiveToHandled(it);
2773  } else if (!cur_active->Covers(position)) {
2774  it = ActiveToInactive(it);
2775  } else {
2776  ++it;
2777  }
2778  }
2779 
2780  for (auto it = inactive_live_ranges().begin();
2781  it != inactive_live_ranges().end();) {
2782  LiveRange* cur_inactive = *it;
2783  if (cur_inactive->End() <= position) {
2784  it = InactiveToHandled(it);
2785  } else if (cur_inactive->Covers(position)) {
2786  it = InactiveToActive(it);
2787  } else {
2788  ++it;
2789  }
2790  }
2791 
2792  DCHECK(!current->HasRegisterAssigned() && !current->spilled());
2793 
2794  ProcessCurrentRange(current);
2795  }
2796 
2797  if (FLAG_trace_alloc) {
2798  PrintRangeOverview(std::cout);
2799  }
2800 }
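
// The state transitions in the dispatch loop above, reduced to a standalone
// sketch (illustrative types only, not part of this file). A range is
// "active" when it currently covers the allocation position while holding a
// register, and "inactive" when it holds a register but has a lifetime hole
// at that position.
#include <algorithm>
#include <utility>
#include <vector>

struct LinearScanRange {
  int start;
  int end;                                      // half-open [start, end)
  std::vector<std::pair<int, int>> intervals;   // disjoint [from, to) pieces
  bool Covers(int pos) const {
    for (const auto& i : intervals) {
      if (i.first <= pos && pos < i.second) return true;
    }
    return false;
  }
};

void UpdateActiveInactive(int position, std::vector<LinearScanRange*>* active,
                          std::vector<LinearScanRange*>* inactive) {
  auto ended = [&](LinearScanRange* r) { return r->end <= position; };
  // Retire everything that ended before |position| ("handled").
  active->erase(std::remove_if(active->begin(), active->end(), ended),
                active->end());
  inactive->erase(std::remove_if(inactive->begin(), inactive->end(), ended),
                  inactive->end());
  // Swap ranges between the two sets according to whether they cover
  // |position|; the real code does this with explicit iterator helpers.
  std::vector<LinearScanRange*> new_active, new_inactive;
  for (LinearScanRange* r : *active)
    (r->Covers(position) ? new_active : new_inactive).push_back(r);
  for (LinearScanRange* r : *inactive)
    (r->Covers(position) ? new_active : new_inactive).push_back(r);
  *active = std::move(new_active);
  *inactive = std::move(new_inactive);
}
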
2801 
2802 bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
2803  DCHECK(range->TopLevel()->IsSplinter());
2804  // If we can spill the whole range, great. Otherwise, split above the
2805  // first use needing a register and spill the top part.
2806  const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
2807  if (next_reg == nullptr) {
2808  Spill(range);
2809  return true;
2810  } else if (range->FirstHintPosition() == nullptr) {
2811  // If there was no hint, but we have a use position requiring a
2812  // register, apply the hot path heuristics.
2813  return false;
2814  } else if (next_reg->pos().PrevStart() > range->Start()) {
2815  LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
2816  AddToUnhandled(tail);
2817  Spill(range);
2818  return true;
2819  }
2820  return false;
2821 }
2822 
2823 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
2824  int reg) {
2825  data()->MarkAllocated(range->representation(), reg);
2826  range->set_assigned_register(reg);
2827  range->SetUseHints(reg);
2828  if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
2829  data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
2830  }
2831 }
2832 
2833 void LinearScanAllocator::AddToActive(LiveRange* range) {
2834  TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
2835  range->relative_id());
2836  active_live_ranges().push_back(range);
2837 }
2838 
2839 void LinearScanAllocator::AddToInactive(LiveRange* range) {
2840  TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
2841  range->relative_id());
2842  inactive_live_ranges().push_back(range);
2843 }
2844 
2845 void LinearScanAllocator::AddToUnhandled(LiveRange* range) {
2846  if (range == nullptr || range->IsEmpty()) return;
2847  DCHECK(!range->HasRegisterAssigned() && !range->spilled());
2848  DCHECK(allocation_finger_ <= range->Start());
2849 
2850  TRACE("Add live range %d:%d to unhandled\n", range->TopLevel()->vreg(),
2851  range->relative_id());
2852  unhandled_live_ranges().insert(range);
2853 }
2854 
2855 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToHandled(
2856  const ZoneVector<LiveRange*>::iterator it) {
2857  TRACE("Moving live range %d:%d from active to handled\n",
2858  (*it)->TopLevel()->vreg(), (*it)->relative_id());
2859  return active_live_ranges().erase(it);
2860 }
2861 
2862 ZoneVector<LiveRange*>::iterator LinearScanAllocator::ActiveToInactive(
2863  const ZoneVector<LiveRange*>::iterator it) {
2864  LiveRange* range = *it;
2865  inactive_live_ranges().push_back(range);
2866  TRACE("Moving live range %d:%d from active to inactive\n",
2867  (range)->TopLevel()->vreg(), range->relative_id());
2868  return active_live_ranges().erase(it);
2869 }
2870 
2871 ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToHandled(
2872  ZoneVector<LiveRange*>::iterator it) {
2873  TRACE("Moving live range %d:%d from inactive to handled\n",
2874  (*it)->TopLevel()->vreg(), (*it)->relative_id());
2875  return inactive_live_ranges().erase(it);
2876 }
2877 
2878 ZoneVector<LiveRange*>::iterator LinearScanAllocator::InactiveToActive(
2879  ZoneVector<LiveRange*>::iterator it) {
2880  LiveRange* range = *it;
2881  active_live_ranges().push_back(range);
2882  TRACE("Moving live range %d:%d from inactive to active\n",
2883  range->TopLevel()->vreg(), range->relative_id());
2884  return inactive_live_ranges().erase(it);
2885 }
2886 
2887 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
2888  int* num_regs, int* num_codes,
2889  const int** codes) const {
2890  DCHECK(!kSimpleFPAliasing);
2891  if (rep == MachineRepresentation::kFloat32) {
2892  *num_regs = data()->config()->num_float_registers();
2893  *num_codes = data()->config()->num_allocatable_float_registers();
2894  *codes = data()->config()->allocatable_float_codes();
2895  } else if (rep == MachineRepresentation::kSimd128) {
2896  *num_regs = data()->config()->num_simd128_registers();
2897  *num_codes = data()->config()->num_allocatable_simd128_registers();
2898  *codes = data()->config()->allocatable_simd128_codes();
2899  } else {
2900  UNREACHABLE();
2901  }
2902 }
2903 
2904 void LinearScanAllocator::FindFreeRegistersForRange(
2905  LiveRange* range, Vector<LifetimePosition> positions) {
2906  int num_regs = num_registers();
2907  int num_codes = num_allocatable_registers();
2908  const int* codes = allocatable_register_codes();
2909  MachineRepresentation rep = range->representation();
2910  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
2911  rep == MachineRepresentation::kSimd128))
2912  GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
2913  DCHECK_GE(positions.length(), num_regs);
2914 
2915  for (int i = 0; i < num_regs; ++i) {
2916  positions[i] = LifetimePosition::MaxPosition();
2917  }
2918 
2919  for (LiveRange* cur_active : active_live_ranges()) {
2920  int cur_reg = cur_active->assigned_register();
2921  if (kSimpleFPAliasing || !check_fp_aliasing()) {
2922  positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
2923  TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
2924  LifetimePosition::GapFromInstructionIndex(0).value());
2925  } else {
2926  int alias_base_index = -1;
2927  int aliases = data()->config()->GetAliases(
2928  cur_active->representation(), cur_reg, rep, &alias_base_index);
2929  DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
2930  while (aliases--) {
2931  int aliased_reg = alias_base_index + aliases;
2932  positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
2933  }
2934  }
2935  }
2936 
2937  for (LiveRange* cur_inactive : inactive_live_ranges()) {
2938  DCHECK(cur_inactive->End() > range->Start());
2939  int cur_reg = cur_inactive->assigned_register();
2940  // No need to carry out intersections when this register is not of
2941  // interest to this range anyway.
2942  // TODO(mtrofin): extend to aliased ranges, too.
2943  if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
2944  positions[cur_reg] < range->Start()) {
2945  continue;
2946  }
2947 
2948  LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
2949  if (!next_intersection.IsValid()) continue;
2950  if (kSimpleFPAliasing || !check_fp_aliasing()) {
2951  positions[cur_reg] = Min(positions[cur_reg], next_intersection);
2952  TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
2953  Min(positions[cur_reg], next_intersection).value());
2954  } else {
2955  int alias_base_index = -1;
2956  int aliases = data()->config()->GetAliases(
2957  cur_inactive->representation(), cur_reg, rep, &alias_base_index);
2958  DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
2959  while (aliases--) {
2960  int aliased_reg = alias_base_index + aliases;
2961  positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
2962  }
2963  }
2964  }
2965 }
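
// The essence of the computation above, ignoring FP aliasing (illustrative
// only, not part of this file): active ranges make their register
// unavailable immediately, while inactive ranges only block it from their
// first intersection with the current range onwards.
#include <algorithm>
#include <limits>
#include <vector>

struct InactiveUse {
  int assigned_register;
  int first_intersection;  // with the current range; INT_MAX if none
};

std::vector<int> SketchFreeUntilPositions(
    int num_registers, const std::vector<int>& active_registers,
    const std::vector<InactiveUse>& inactive) {
  std::vector<int> free_until(num_registers, std::numeric_limits<int>::max());
  for (int reg : active_registers) free_until[reg] = 0;
  for (const InactiveUse& r : inactive) {
    free_until[r.assigned_register] =
        std::min(free_until[r.assigned_register], r.first_intersection);
  }
  return free_until;
}
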
2966 
2967 // High-level register allocation summary:
2968 //
2969 // For regular, or hot (i.e. not splinter) ranges, we attempt first to
2970 // allocate the preferred (hint) register. If that is not possible,
2971 // we find a register that's free, and allocate that. If that's not possible,
2972 // we search for a register to steal from a range that was allocated. The
2973 // goal is to optimize for throughput by avoiding register-to-memory
2974 // moves, which are expensive.
2975 //
2976 // For splinters, the goal is to minimize the number of moves. First we try
2977 // to allocate the preferred register (more discussion follows). Failing that,
2978 // we bail out and spill as far as we can, unless the first use is at start,
2979 // in which case we apply the same behavior as we do for regular ranges.
2980 // If there is no hint, we apply the hot-path behavior.
2981 //
2982 // For the splinter, the hint register may come from:
2983 //
2984 // - the hot path (we set it at splintering time with SetHint). In this case, if
2985 // we cannot offer the hint register, spilling is better because it's at most
2986 // 1 move, while trying to find and offer another register is at least 1 move.
2987 //
2988 // - a constraint. If we cannot offer that register, it's because there is some
2989 // interference. So offering the hint register up to the interference would
2990 // result in a move at the interference, plus a move to satisfy the
2991 // constraint. This is
2992 // also the number of moves if we spill, with the potential of the range being
2993 // already spilled and thus saving a move (the spill).
2994 // Note that this can only be an input constraint; if it were an output one,
2995 // the range wouldn't be a splinter because it means it'd be defined in a
2996 // deferred block, and we don't mark those as splinters (they live in
2997 // deferred blocks only).
2999 //
3000 // - a phi. The same analysis as in the case of the input constraint applies.
3001 //
3002 void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
3003  LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
3004  Vector<LifetimePosition> free_until_pos(
3005  free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
3006  FindFreeRegistersForRange(current, free_until_pos);
3007  if (!TryAllocatePreferredReg(current, free_until_pos)) {
3008  if (current->TopLevel()->IsSplinter()) {
3009  if (TrySplitAndSpillSplinter(current)) return;
3010  }
3011  if (!TryAllocateFreeReg(current, free_until_pos)) {
3012  AllocateBlockedReg(current);
3013  }
3014  }
3015  if (current->HasRegisterAssigned()) {
3016  AddToActive(current);
3017  }
3018 }
3019 
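// Tries to give |current| the register suggested by its hint. This succeeds
// only if the hinted register stays free until the end of the range.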
3020 bool LinearScanAllocator::TryAllocatePreferredReg(
3021  LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3022  int hint_register;
3023  if (current->FirstHintPosition(&hint_register) != nullptr) {
3024  TRACE(
3025  "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
3026  RegisterName(hint_register), free_until_pos[hint_register].value(),
3027  current->TopLevel()->vreg(), current->relative_id(),
3028  current->End().value());
3029 
3030  // The desired register is free until the end of the current live range.
3031  if (free_until_pos[hint_register] >= current->End()) {
3032  TRACE("Assigning preferred reg %s to live range %d:%d\n",
3033  RegisterName(hint_register), current->TopLevel()->vreg(),
3034  current->relative_id());
3035  SetLiveRangeAssignedRegister(current, hint_register);
3036  return true;
3037  }
3038  }
3039  return false;
3040 }
3041 
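// Finds the register that stays free the longest, checking the hinted
// register first. Fails if no register is free at the start of |current|.
// If the chosen register is free only for a prefix of the range, |current|
// is split at that point and the tail is re-queued as unhandled.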
3042 bool LinearScanAllocator::TryAllocateFreeReg(
3043  LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
3044  int num_regs = 0; // used only for the call to GetFPRegisterSet.
3045  int num_codes = num_allocatable_registers();
3046  const int* codes = allocatable_register_codes();
3047  MachineRepresentation rep = current->representation();
3048  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3049  rep == MachineRepresentation::kSimd128)) {
3050  GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3051  }
3052 
3053  DCHECK_GE(free_until_pos.length(), num_codes);
3054 
3055  // Find the register which stays free for the longest time. Check for
3056  // the hinted register first, as we might want to use that one. Only
3057  // count full instructions for free ranges, as an instruction's internal
3058  // positions do not help but might shadow a hinted register. This is
3059  // typically the case for function calls, where all registers are
3060  // clobbered after the call except for the argument registers, which are
3061  // set before the call. Hence, the argument registers always get ignored,
3062  // as their available time is shorter.
3063  int reg;
3064  if (current->FirstHintPosition(&reg) == nullptr) {
3065  reg = codes[0];
3066  }
3067  for (int i = 0; i < num_codes; ++i) {
3068  int code = codes[i];
3069  if (free_until_pos[code].ToInstructionIndex() >
3070  free_until_pos[reg].ToInstructionIndex()) {
3071  reg = code;
3072  }
3073  }
3074 
3075  LifetimePosition pos = free_until_pos[reg];
3076 
3077  if (pos <= current->Start()) {
3078  // All registers are blocked.
3079  return false;
3080  }
3081 
3082  if (pos < current->End()) {
3083  // Register reg is available at the range start but becomes blocked before
3084  // the range end. Split current at position where it becomes blocked.
3085  LiveRange* tail = SplitRangeAt(current, pos);
3086  AddToUnhandled(tail);
3087 
3088  // Try to allocate preferred register once more.
3089  if (TryAllocatePreferredReg(current, free_until_pos)) return true;
3090  }
3091 
3092  // Register reg is available at the range start and is free until the range
3093  // end.
3094  DCHECK(pos >= current->End());
3095  TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
3096  current->TopLevel()->vreg(), current->relative_id());
3097  SetLiveRangeAssignedRegister(current, reg);
3098 
3099  return true;
3100 }
3101 
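// Called when no register is free for all of |current|. For every register,
// track where it is next used and where it becomes blocked, then pick the
// register whose next use is furthest away. If that use still comes before
// |current|'s first use needing a register, spill |current| until that first
// use; otherwise assign the register (splitting |current| before the register
// becomes blocked, if necessary) and evict intersecting ranges.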
3102 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
3103  UsePosition* register_use = current->NextRegisterPosition(current->Start());
3104  if (register_use == nullptr) {
3105  // There is no use in the current live range that requires a register.
3106  // We can just spill it.
3107  Spill(current);
3108  return;
3109  }
3110 
3111  int num_regs = num_registers();
3112  int num_codes = num_allocatable_registers();
3113  const int* codes = allocatable_register_codes();
3114  MachineRepresentation rep = current->representation();
3115  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
3116  rep == MachineRepresentation::kSimd128))
3117  GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
3118 
3119  // use_pos keeps track of the positions at which a register/alias is used.
3120  // block_pos keeps track of the positions from which a register/alias is
3121  // blocked.
3122  LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
3123  LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
3124  for (int i = 0; i < num_regs; i++) {
3125  use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
3126  }
3127 
3128  for (LiveRange* range : active_live_ranges()) {
3129  int cur_reg = range->assigned_register();
3130  bool is_fixed_or_cant_spill =
3131  range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
3132  if (kSimpleFPAliasing || !check_fp_aliasing()) {
3133  if (is_fixed_or_cant_spill) {
3134  block_pos[cur_reg] = use_pos[cur_reg] =
3135  LifetimePosition::GapFromInstructionIndex(0);
3136  } else {
3137  DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
3138  block_pos[cur_reg]);
3139  use_pos[cur_reg] =
3140  range->NextLifetimePositionRegisterIsBeneficial(current->Start());
3141  }
3142  } else {
3143  int alias_base_index = -1;
3144  int aliases = data()->config()->GetAliases(
3145  range->representation(), cur_reg, rep, &alias_base_index);
3146  DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3147  while (aliases--) {
3148  int aliased_reg = alias_base_index + aliases;
3149  if (is_fixed_or_cant_spill) {
3150  block_pos[aliased_reg] = use_pos[aliased_reg] =
3151  LifetimePosition::GapFromInstructionIndex(0);
3152  } else {
3153  use_pos[aliased_reg] =
3154  Min(block_pos[aliased_reg],
3155  range->NextLifetimePositionRegisterIsBeneficial(
3156  current->Start()));
3157  }
3158  }
3159  }
3160  }
3161 
3162  for (LiveRange* range : inactive_live_ranges()) {
3163  DCHECK(range->End() > current->Start());
3164  int cur_reg = range->assigned_register();
3165  bool is_fixed = range->TopLevel()->IsFixed();
3166 
3167  // Don't perform costly intersections if they are guaranteed to not update
3168  // block_pos or use_pos.
3169  // TODO(mtrofin): extend to aliased ranges, too.
3170  if ((kSimpleFPAliasing || !check_fp_aliasing())) {
3171  if (is_fixed) {
3172  if (block_pos[cur_reg] < range->Start()) continue;
3173  } else {
3174  if (use_pos[cur_reg] < range->Start()) continue;
3175  }
3176  }
3177 
3178  LifetimePosition next_intersection = range->FirstIntersection(current);
3179  if (!next_intersection.IsValid()) continue;
3180 
3181  if (kSimpleFPAliasing || !check_fp_aliasing()) {
3182  if (is_fixed) {
3183  block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
3184  use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
3185  } else {
3186  use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
3187  }
3188  } else {
3189  int alias_base_index = -1;
3190  int aliases = data()->config()->GetAliases(
3191  range->representation(), cur_reg, rep, &alias_base_index);
3192  DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
3193  while (aliases--) {
3194  int aliased_reg = alias_base_index + aliases;
3195  if (is_fixed) {
3196  block_pos[aliased_reg] =
3197  Min(block_pos[aliased_reg], next_intersection);
3198  use_pos[aliased_reg] =
3199  Min(block_pos[aliased_reg], use_pos[aliased_reg]);
3200  } else {
3201  use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
3202  }
3203  }
3204  }
3205  }
3206 
3207  int reg = codes[0];
3208  for (int i = 1; i < num_codes; ++i) {
3209  int code = codes[i];
3210  if (use_pos[code] > use_pos[reg]) {
3211  reg = code;
3212  }
3213  }
3214 
3215  if (use_pos[reg] < register_use->pos()) {
3216  // If there is a gap position before the next register use, we can
3217  // spill until there. The gap position will then fit the fill move.
3218  if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
3219  register_use->pos())) {
3220  SpillBetween(current, current->Start(), register_use->pos());
3221  return;
3222  }
3223  }
3224 
3225  // We couldn't spill until the next register use. Split before the register
3226  // is blocked, if applicable.
3227  if (block_pos[reg] < current->End()) {
3228  // Register becomes blocked before the current range end. Split before that
3229  // position.
3230  LiveRange* tail =
3231  SplitBetween(current, current->Start(), block_pos[reg].Start());
3232  AddToUnhandled(tail);
3233  }
3234 
3235  // Register reg is not blocked for the whole range.
3236  DCHECK(block_pos[reg] >= current->End());
3237  TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
3238  current->TopLevel()->vreg(), current->relative_id());
3239  SetLiveRangeAssignedRegister(current, reg);
3240 
3241  // This register was not free. Thus we need to find and spill
3242  // parts of active and inactive live regions that use the same register
3243  // at the same lifetime positions as current.
3244  SplitAndSpillIntersecting(current);
3245 }
3246 
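// Evicts other ranges from the register just assigned to |current|: every
// active or inactive range using the same (or an aliasing) register is split
// and spilled around |current|'s start.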
3247 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
3248  DCHECK(current->HasRegisterAssigned());
3249  int reg = current->assigned_register();
3250  LifetimePosition split_pos = current->Start();
3251  for (auto it = active_live_ranges().begin();
3252  it != active_live_ranges().end();) {
3253  LiveRange* range = *it;
3254  if (kSimpleFPAliasing || !check_fp_aliasing()) {
3255  if (range->assigned_register() != reg) {
3256  ++it;
3257  continue;
3258  }
3259  } else {
3260  if (!data()->config()->AreAliases(current->representation(), reg,
3261  range->representation(),
3262  range->assigned_register())) {
3263  ++it;
3264  continue;
3265  }
3266  }
3267 
3268  UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3269  LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
3270  if (next_pos == nullptr) {
3271  SpillAfter(range, spill_pos);
3272  } else {
3273  // When spilling between spill_pos and next_pos ensure that the range
3274  // remains spilled at least until the start of the current live range.
3275  // This guarantees that we will not introduce new unhandled ranges that
3276  // start before the current range as this violates allocation invariants
3277  // and will lead to an inconsistent state of active and inactive
3278  // live-ranges: ranges are allocated in order of their start positions,
3279  // ranges are retired from active/inactive when the start of the
3280  // current live-range is larger than their end.
3281  DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
3282  next_pos->pos()));
3283  SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
3284  }
3285  it = ActiveToHandled(it);
3286  }
3287 
3288  for (auto it = inactive_live_ranges().begin();
3289  it != inactive_live_ranges().end();) {
3290  LiveRange* range = *it;
3291  DCHECK(range->End() > current->Start());
3292  if (range->TopLevel()->IsFixed()) {
3293  ++it;
3294  continue;
3295  }
3296  if (kSimpleFPAliasing || !check_fp_aliasing()) {
3297  if (range->assigned_register() != reg) {
3298  ++it;
3299  continue;
3300  }
3301  } else {
3302  if (!data()->config()->AreAliases(current->representation(), reg,
3303  range->representation(),
3304  range->assigned_register())) {
3305  ++it;
3306  continue;
3307  }
3308  }
3309 
3310  LifetimePosition next_intersection = range->FirstIntersection(current);
3311  if (next_intersection.IsValid()) {
3312  UsePosition* next_pos = range->NextRegisterPosition(current->Start());
3313  if (next_pos == nullptr) {
3314  SpillAfter(range, split_pos);
3315  } else {
3316  next_intersection = Min(next_intersection, next_pos->pos());
3317  SpillBetween(range, split_pos, next_intersection);
3318  }
3319  it = InactiveToHandled(it);
3320  } else {
3321  ++it;
3322  }
3323  }
3324 }
3325 
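// Spill heuristic for phis: if more than half of the phi's operands are
// already spilled and their spill ranges can be merged into a single slot,
// spill the phi to that slot too, provided it does not need a register soon.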
3326 bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
3327  if (!range->is_phi()) return false;
3328 
3329  DCHECK(!range->HasSpillOperand());
3330  RegisterAllocationData::PhiMapValue* phi_map_value =
3331  data()->GetPhiMapValueFor(range);
3332  const PhiInstruction* phi = phi_map_value->phi();
3333  const InstructionBlock* block = phi_map_value->block();
3334  // Count the number of spilled operands.
3335  size_t spilled_count = 0;
3336  LiveRange* first_op = nullptr;
3337  for (size_t i = 0; i < phi->operands().size(); i++) {
3338  int op = phi->operands()[i];
3339  LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
3340  if (!op_range->TopLevel()->HasSpillRange()) continue;
3341  const InstructionBlock* pred =
3342  code()->InstructionBlockAt(block->predecessors()[i]);
3343  LifetimePosition pred_end =
3344  LifetimePosition::InstructionFromInstructionIndex(
3345  pred->last_instruction_index());
3346  while (op_range != nullptr && !op_range->CanCover(pred_end)) {
3347  op_range = op_range->next();
3348  }
3349  if (op_range != nullptr && op_range->spilled()) {
3350  spilled_count++;
3351  if (first_op == nullptr) {
3352  first_op = op_range->TopLevel();
3353  }
3354  }
3355  }
3356 
3357  // Only continue if more than half of the operands are spilled.
3358  if (spilled_count * 2 <= phi->operands().size()) {
3359  return false;
3360  }
3361 
3362  // Try to merge the spilled operands and count the number of merged spilled
3363  // operands.
3364  DCHECK_NOT_NULL(first_op);
3365  SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
3366  size_t num_merged = 1;
3367  for (size_t i = 1; i < phi->operands().size(); i++) {
3368  int op = phi->operands()[i];
3369  TopLevelLiveRange* op_range = data()->live_ranges()[op];
3370  if (!op_range->HasSpillRange()) continue;
3371  SpillRange* op_spill = op_range->GetSpillRange();
3372  if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
3373  num_merged++;
3374  }
3375  }
3376 
3377  // Only continue if enough operands could be merged to the
3378  // same spill slot.
3379  if (num_merged * 2 <= phi->operands().size() ||
3380  AreUseIntervalsIntersecting(first_op_spill->interval(),
3381  range->first_interval())) {
3382  return false;
3383  }
3384 
3385  // If the range does not need a register soon, spill it to the merged
3386  // spill range.
3387  LifetimePosition next_pos = range->Start();
3388  if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
3389  UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
3390  if (pos == nullptr) {
3391  SpillRange* spill_range =
3392  range->TopLevel()->HasSpillRange()
3393  ? range->TopLevel()->GetSpillRange()
3394  : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3395  bool merged = first_op_spill->TryMerge(spill_range);
3396  if (!merged) return false;
3397  Spill(range);
3398  return true;
3399  } else if (pos->pos() > range->Start().NextStart()) {
3400  SpillRange* spill_range =
3401  range->TopLevel()->HasSpillRange()
3402  ? range->TopLevel()->GetSpillRange()
3403  : data()->AssignSpillRangeToLiveRange(range->TopLevel());
3404  bool merged = first_op_spill->TryMerge(spill_range);
3405  if (!merged) return false;
3406  SpillBetween(range, range->Start(), pos->pos());
3407  return true;
3408  }
3409  return false;
3410 }
3411 
3412 void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
3413  LiveRange* second_part = SplitRangeAt(range, pos);
3414  Spill(second_part);
3415 }
3416 
3417 void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
3418  LifetimePosition end) {
3419  SpillBetweenUntil(range, start, start, end);
3420 }
3421 
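// Splits |range| at |start|. If the resulting tail still overlaps
// [start, end[, it is split again no earlier than |until|; the middle piece
// is spilled and the final piece is re-queued as unhandled. Otherwise the
// tail is re-queued as a whole.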
3422 void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
3423  LifetimePosition start,
3424  LifetimePosition until,
3425  LifetimePosition end) {
3426  CHECK(start < end);
3427  LiveRange* second_part = SplitRangeAt(range, start);
3428 
3429  if (second_part->Start() < end) {
3430  // The split result intersects with [start, end[.
3431  // Split it at a position in ]start+1, end[, spill the middle part,
3432  // and add the rest to the unhandled queue.
3433  LifetimePosition third_part_end = end.PrevStart().End();
3434  if (data()->IsBlockBoundary(end.Start())) {
3435  third_part_end = end.Start();
3436  }
3437  LiveRange* third_part = SplitBetween(
3438  second_part, Max(second_part->Start().End(), until), third_part_end);
3439 
3440  DCHECK(third_part != second_part);
3441 
3442  Spill(second_part);
3443  AddToUnhandled(third_part);
3444  } else {
3445  // The split result does not intersect with [start, end[.
3446  // Nothing to spill. Just add it to the unhandled queue as a whole.
3447  AddToUnhandled(second_part);
3448  }
3449 }
3450 
3451 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
3452  : data_(data) {}
3453 
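// Marks every block that contains a spill move for a range spilled in the
// frame as requiring a frame.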
3454 void SpillSlotLocator::LocateSpillSlots() {
3455  const InstructionSequence* code = data()->code();
3456  const size_t live_ranges_size = data()->live_ranges().size();
3457  for (TopLevelLiveRange* range : data()->live_ranges()) {
3458  CHECK_EQ(live_ranges_size,
3459  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
3460  if (range == nullptr || range->IsEmpty()) continue;
3461  // We care only about ranges which spill in the frame.
3462  if (!range->HasSpillRange() || range->IsSpilledOnlyInDeferredBlocks()) {
3463  continue;
3464  }
3465  TopLevelLiveRange::SpillMoveInsertionList* spills =
3466  range->GetSpillMoveInsertionLocations();
3467  DCHECK_NOT_NULL(spills);
3468  for (; spills != nullptr; spills = spills->next) {
3469  code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
3470  }
3471  }
3472 }
3473 
3474 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
3475 
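// Merges disjoint spill ranges pairwise, then allocates one stack slot per
// remaining merged spill range.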
3476 void OperandAssigner::AssignSpillSlots() {
3477  ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
3478  // Merge disjoint spill ranges
3479  for (size_t i = 0; i < spill_ranges.size(); ++i) {
3480  SpillRange* range = spill_ranges[i];
3481  if (range == nullptr) continue;
3482  if (range->IsEmpty()) continue;
3483  for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
3484  SpillRange* other = spill_ranges[j];
3485  if (other != nullptr && !other->IsEmpty()) {
3486  range->TryMerge(other);
3487  }
3488  }
3489  }
3490  // Allocate slots for the merged spill ranges.
3491  for (SpillRange* range : spill_ranges) {
3492  if (range == nullptr || range->IsEmpty()) continue;
3493  // Allocate a new operand referring to the spill slot.
3494  if (!range->HasSlot()) {
3495  int index = data()->frame()->AllocateSpillSlot(range->byte_width());
3496  range->set_assigned_slot(index);
3497  }
3498  }
3499 }
3500 
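// Rewrites every use of every live range with its finally assigned operand
// (register or spill slot) and commits spill moves at the definition, unless
// the range is spilled only in deferred blocks.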
3501 void OperandAssigner::CommitAssignment() {
3502  const size_t live_ranges_size = data()->live_ranges().size();
3503  for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3504  CHECK_EQ(live_ranges_size,
3505  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
3506  if (top_range == nullptr || top_range->IsEmpty()) continue;
3507  InstructionOperand spill_operand;
3508  if (top_range->HasSpillOperand()) {
3509  spill_operand = *top_range->TopLevel()->GetSpillOperand();
3510  } else if (top_range->TopLevel()->HasSpillRange()) {
3511  spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
3512  }
3513  if (top_range->is_phi()) {
3514  data()->GetPhiMapValueFor(top_range)->CommitAssignment(
3515  top_range->GetAssignedOperand());
3516  }
3517  for (LiveRange* range = top_range; range != nullptr;
3518  range = range->next()) {
3519  InstructionOperand assigned = range->GetAssignedOperand();
3520  DCHECK(!assigned.IsUnallocated());
3521  range->ConvertUsesToOperand(assigned, spill_operand);
3522  }
3523 
3524  if (!spill_operand.IsInvalid()) {
3525  // If this top level range has a child spilled in a deferred block, we use
3526  // the range and control flow connection mechanism instead of spilling at
3527  // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
3528  // phases. Normally, when we spill at definition, we do not insert a
3529  // connecting move when a successor child range is spilled - because the
3530  // spilled range picks up its value from the slot which was assigned at
3531  // definition. For ranges that are determined to spill only in deferred
3532  // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
3533  // where a spill operand is expected, and then finalize by inserting the
3534  // spills in the deferred blocks' dominators.
3535  if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
3536  // Spill at definition if the range isn't spilled only in deferred
3537  // blocks.
3538  top_range->CommitSpillMoves(
3539  data()->code(), spill_operand,
3540  top_range->has_slot_use() || top_range->spilled());
3541  }
3542  }
3543  }
3544 }
3545 
3546 ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
3547  : data_(data) {}
3548 
3549 bool ReferenceMapPopulator::SafePointsAreInOrder() const {
3550  int safe_point = 0;
3551  for (ReferenceMap* map : *data()->code()->reference_maps()) {
3552  if (safe_point > map->instruction_position()) return false;
3553  safe_point = map->instruction_position();
3554  }
3555  return true;
3556 }
3557 
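// Walks all safe points and records, for each one, the stack slots and
// registers that hold tagged (reference) values of live ranges covering
// that point.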
3558 void ReferenceMapPopulator::PopulateReferenceMaps() {
3559  DCHECK(SafePointsAreInOrder());
3560  // Map all delayed references.
3561  for (RegisterAllocationData::DelayedReference& delayed_reference :
3562  data()->delayed_references()) {
3563  delayed_reference.map->RecordReference(
3564  AllocatedOperand::cast(*delayed_reference.operand));
3565  }
3566  // Iterate over all safe point positions and record a pointer
3567  // for all spilled live ranges at this point.
3568  int last_range_start = 0;
3569  const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
3570  ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
3571  const size_t live_ranges_size = data()->live_ranges().size();
3572  for (TopLevelLiveRange* range : data()->live_ranges()) {
3573  CHECK_EQ(live_ranges_size,
3574  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
3575  if (range == nullptr) continue;
3576  // Skip non-reference values.
3577  if (!data()->IsReference(range)) continue;
3578  // Skip empty live ranges.
3579  if (range->IsEmpty()) continue;
3580  if (range->has_preassigned_slot()) continue;
3581 
3582  // Find the extent of the range and its children.
3583  int start = range->Start().ToInstructionIndex();
3584  int end = 0;
3585  for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
3586  LifetimePosition this_end = cur->End();
3587  if (this_end.ToInstructionIndex() > end)
3588  end = this_end.ToInstructionIndex();
3589  DCHECK(cur->Start().ToInstructionIndex() >= start);
3590  }
3591 
3592  // Most of the ranges are in order, but not all. Keep an eye on when they
3593  // step backwards and reset the first_it so we don't miss any safe points.
3594  if (start < last_range_start) first_it = reference_maps->begin();
3595  last_range_start = start;
3596 
3597  // Step across all the safe points that are before the start of this range,
3598  // recording how far we step in order to save doing this for the next range.
3599  for (; first_it != reference_maps->end(); ++first_it) {
3600  ReferenceMap* map = *first_it;
3601  if (map->instruction_position() >= start) break;
3602  }
3603 
3604  InstructionOperand spill_operand;
3605  if (((range->HasSpillOperand() &&
3606  !range->GetSpillOperand()->IsConstant()) ||
3607  range->HasSpillRange())) {
3608  if (range->HasSpillOperand()) {
3609  spill_operand = *range->GetSpillOperand();
3610  } else {
3611  spill_operand = range->GetSpillRangeOperand();
3612  }
3613  DCHECK(spill_operand.IsStackSlot());
3614  DCHECK(CanBeTaggedPointer(
3615  AllocatedOperand::cast(spill_operand).representation()));
3616  }
3617 
3618  LiveRange* cur = range;
3619  // Step through the safe points to see whether they are in the range.
3620  for (auto it = first_it; it != reference_maps->end(); ++it) {
3621  ReferenceMap* map = *it;
3622  int safe_point = map->instruction_position();
3623 
3624  // The safe points are sorted so we can stop searching here.
3625  if (safe_point - 1 > end) break;
3626 
3627  // Advance to the next active range that covers the current
3628  // safe point position.
3629  LifetimePosition safe_point_pos =
3630  LifetimePosition::InstructionFromInstructionIndex(safe_point);
3631 
3632  // Search for the child range (cur) that covers safe_point_pos. If we
3633  // don't find it before the children pass safe_point_pos, keep cur at
3634  // the last child, because the next safe_point_pos may be covered by cur.
3635  // This may happen if cur has more than one interval, and the current
3636  // safe_point_pos is in between intervals.
3637  // For that reason, cur may be at most the last child.
3638  DCHECK_NOT_NULL(cur);
3639  DCHECK(safe_point_pos >= cur->Start() || range == cur);
3640  bool found = false;
3641  while (!found) {
3642  if (cur->Covers(safe_point_pos)) {
3643  found = true;
3644  } else {
3645  LiveRange* next = cur->next();
3646  if (next == nullptr || next->Start() > safe_point_pos) {
3647  break;
3648  }
3649  cur = next;
3650  }
3651  }
3652 
3653  if (!found) {
3654  continue;
3655  }
3656 
3657  // Check if the live range is spilled and the safe point is after
3658  // the spill position.
3659  int spill_index = range->IsSpilledOnlyInDeferredBlocks()
3660  ? cur->Start().ToInstructionIndex()
3661  : range->spill_start_index();
3662 
3663  if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
3664  TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
3665  range->vreg(), spill_index, safe_point);
3666  map->RecordReference(AllocatedOperand::cast(spill_operand));
3667  }
3668 
3669  if (!cur->spilled()) {
3670  TRACE(
3671  "Pointer in register for range %d:%d (start at %d) "
3672  "at safe point %d\n",
3673  range->vreg(), cur->relative_id(), cur->Start().value(),
3674  safe_point);
3675  InstructionOperand operand = cur->GetAssignedOperand();
3676  DCHECK(!operand.IsStackSlot());
3677  DCHECK(CanBeTaggedPointer(
3678  AllocatedOperand::cast(operand).representation()));
3679  map->RecordReference(AllocatedOperand::cast(operand));
3680  }
3681  }
3682  }
3683 }
3684 
3685 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
3686  : data_(data) {}
3687 
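// Control flow into |block| can be resolved eagerly when it has a single
// predecessor that immediately precedes it in reverse post-order.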
3688 bool LiveRangeConnector::CanEagerlyResolveControlFlow(
3689  const InstructionBlock* block) const {
3690  if (block->PredecessorCount() != 1) return false;
3691  return block->predecessors()[0].IsNext(block->rpo_number());
3692 }
3693 
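// Inserts moves on control-flow edges where a live range is assigned
// different locations at the start of a block and at the end of one of its
// predecessors; also records the deferred blocks that must define a spill
// operand for ranges spilled only in deferred blocks.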
3694 void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
3695  // Lazily linearize live ranges in memory for fast lookup.
3696  LiveRangeFinder finder(data(), local_zone);
3697  ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
3698  for (const InstructionBlock* block : code()->instruction_blocks()) {
3699  if (CanEagerlyResolveControlFlow(block)) continue;
3700  BitVector* live = live_in_sets[block->rpo_number().ToInt()];
3701  BitVector::Iterator iterator(live);
3702  while (!iterator.Done()) {
3703  int vreg = iterator.Current();
3704  LiveRangeBoundArray* array = finder.ArrayFor(vreg);
3705  for (const RpoNumber& pred : block->predecessors()) {
3706  FindResult result;
3707  const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
3708  if (!array->FindConnectableSubranges(block, pred_block, &result)) {
3709  continue;
3710  }
3711  InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
3712  InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
3713  if (pred_op.Equals(cur_op)) continue;
3714  if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
3715  // We're doing a reload.
3716  // We don't need to, if:
3717  // 1) there's no register use in this block, and
3718  // 2) the range ends before the block does, and
3719  // 3) we don't have a successor, or the successor is spilled.
3720  LifetimePosition block_start =
3721  LifetimePosition::GapFromInstructionIndex(block->code_start());
3722  LifetimePosition block_end =
3723  LifetimePosition::GapFromInstructionIndex(block->code_end());
3724  const LiveRange* current = result.cur_cover_;
3725  const LiveRange* successor = current->next();
3726  if (current->End() < block_end &&
3727  (successor == nullptr || successor->spilled())) {
3728  // verify point 1: no register use. We can go to the end of the
3729  // range, since it's all within the block.
3730 
3731  bool uses_reg = false;
3732  for (const UsePosition* use = current->NextUsePosition(block_start);
3733  use != nullptr; use = use->next()) {
3734  if (use->operand()->IsAnyRegister()) {
3735  uses_reg = true;
3736  break;
3737  }
3738  }
3739  if (!uses_reg) continue;
3740  }
3741  if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3742  pred_block->IsDeferred()) {
3743  // The spill location should be defined in pred_block, so add
3744  // pred_block to the list of blocks requiring a spill operand.
3745  current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
3746  pred_block->rpo_number().ToInt());
3747  }
3748  }
3749  int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
3750  USE(move_loc);
3751  DCHECK_IMPLIES(
3752  result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
3753  !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
3754  code()->GetInstructionBlock(move_loc)->IsDeferred());
3755  }
3756  iterator.Advance();
3757  }
3758  }
3759 
3760  // At this stage, we collected blocks needing a spill operand from
3761  // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
3762  // deferred blocks.
3763  const size_t live_ranges_size = data()->live_ranges().size();
3764  for (TopLevelLiveRange* top : data()->live_ranges()) {
3765  CHECK_EQ(live_ranges_size,
3766  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
3767  if (top == nullptr || top->IsEmpty() ||
3768  !top->IsSpilledOnlyInDeferredBlocks())
3769  continue;
3770  CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
3771  }
3772 }
3773 
3774 int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
3775  const InstructionOperand& cur_op,
3776  const InstructionBlock* pred,
3777  const InstructionOperand& pred_op) {
3778  DCHECK(!pred_op.Equals(cur_op));
3779  int gap_index;
3780  Instruction::GapPosition position;
3781  if (block->PredecessorCount() == 1) {
3782  gap_index = block->first_instruction_index();
3783  position = Instruction::START;
3784  } else {
3785  DCHECK_EQ(1, pred->SuccessorCount());
3786  DCHECK(!code()
3787  ->InstructionAt(pred->last_instruction_index())
3788  ->HasReferenceMap());
3789  gap_index = pred->last_instruction_index();
3790  position = Instruction::END;
3791  }
3792  data()->AddGapMove(gap_index, position, pred_op, cur_op);
3793  return gap_index;
3794 }
3795 
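// Connects adjacent child ranges of the same top-level range with gap moves
// when they are assigned different operands and do not meet at a block
// boundary handled by ResolveControlFlow. Moves that must be ordered after
// the existing moves in a gap are buffered and inserted afterwards.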
3796 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3797  DelayedInsertionMap delayed_insertion_map(local_zone);
3798  const size_t live_ranges_size = data()->live_ranges().size();
3799  for (TopLevelLiveRange* top_range : data()->live_ranges()) {
3800  CHECK_EQ(live_ranges_size,
3801  data()->live_ranges().size()); // TODO(neis): crbug.com/831822
3802  if (top_range == nullptr) continue;
3803  bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
3804  LiveRange* first_range = top_range;
3805  for (LiveRange *second_range = first_range->next(); second_range != nullptr;
3806  first_range = second_range, second_range = second_range->next()) {
3807  LifetimePosition pos = second_range->Start();
3808  // Add gap move if the two live ranges touch and there is no block
3809  // boundary.
3810  if (second_range->spilled()) continue;
3811  if (first_range->End() != pos) continue;
3812  if (data()->IsBlockBoundary(pos) &&
3813  !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
3814  continue;
3815  }
3816  InstructionOperand prev_operand = first_range->GetAssignedOperand();
3817  InstructionOperand cur_operand = second_range->GetAssignedOperand();
3818  if (prev_operand.Equals(cur_operand)) continue;
3819  bool delay_insertion = false;
3820  Instruction::GapPosition gap_pos;
3821  int gap_index = pos.ToInstructionIndex();
3822  if (connect_spilled && !prev_operand.IsAnyRegister() &&
3823  cur_operand.IsAnyRegister()) {
3824  const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
3825  DCHECK(block->IsDeferred());
3826  // Performing a reload in this block, meaning the spill operand must
3827  // be defined here.
3828  top_range->GetListOfBlocksRequiringSpillOperands()->Add(
3829  block->rpo_number().ToInt());
3830  }
3831 
3832  if (pos.IsGapPosition()) {
3833  gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
3834  } else {
3835  if (pos.IsStart()) {
3836  delay_insertion = true;
3837  } else {
3838  gap_index++;
3839  }
3840  gap_pos = delay_insertion ? Instruction::END : Instruction::START;
3841  }
3842  // Reloads or spills for ranges spilled only in deferred blocks must
3843  // happen in deferred blocks.
3844  DCHECK_IMPLIES(connect_spilled && !(prev_operand.IsAnyRegister() &&
3845  cur_operand.IsAnyRegister()),
3846  code()->GetInstructionBlock(gap_index)->IsDeferred());
3847 
3848  ParallelMove* move =
3849  code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
3850  gap_pos, code_zone());
3851  if (!delay_insertion) {
3852  move->AddMove(prev_operand, cur_operand);
3853  } else {
3854  delayed_insertion_map.insert(
3855  std::make_pair(std::make_pair(move, prev_operand), cur_operand));
3856  }
3857  }
3858  }
3859  if (delayed_insertion_map.empty()) return;
3860  // Insert all the moves which should occur after the stored move.
3861  ZoneVector<MoveOperands*> to_insert(local_zone);
3862  ZoneVector<MoveOperands*> to_eliminate(local_zone);
3863  to_insert.reserve(4);
3864  to_eliminate.reserve(4);
3865  ParallelMove* moves = delayed_insertion_map.begin()->first.first;
3866  for (auto it = delayed_insertion_map.begin();; ++it) {
3867  bool done = it == delayed_insertion_map.end();
3868  if (done || it->first.first != moves) {
3869  // Commit the MoveOperands for current ParallelMove.
3870  for (MoveOperands* move : to_eliminate) {
3871  move->Eliminate();
3872  }
3873  for (MoveOperands* move : to_insert) {
3874  moves->push_back(move);
3875  }
3876  if (done) break;
3877  // Reset state.
3878  to_eliminate.clear();
3879  to_insert.clear();
3880  moves = it->first.first;
3881  }
3882  // Gather all MoveOperands for a single ParallelMove.
3883  MoveOperands* move =
3884  new (code_zone()) MoveOperands(it->first.second, it->second);
3885  moves->PrepareInsertAfter(move, &to_eliminate);
3886  to_insert.push_back(move);
3887  }
3888 }
3889 
3890 void LiveRangeConnector::CommitSpillsInDeferredBlocks(
3891  TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
3892  DCHECK(range->IsSpilledOnlyInDeferredBlocks());
3893  DCHECK(!range->spilled());
3894 
3895  InstructionSequence* code = data()->code();
3896  InstructionOperand spill_operand = range->GetSpillRangeOperand();
3897 
3898  TRACE("Live Range %d will be spilled only in deferred blocks.\n",
3899  range->vreg());
3900  // If we have ranges that aren't spilled but require the operand on the stack,
3901  // make sure we insert the spill.
3902  for (const LiveRange* child = range; child != nullptr;
3903  child = child->next()) {
3904  for (const UsePosition* pos = child->first_pos(); pos != nullptr;
3905  pos = pos->next()) {
3906  if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
3907  continue;
3908  range->AddBlockRequiringSpillOperand(
3909  code->GetInstructionBlock(pos->pos().ToInstructionIndex())
3910  ->rpo_number());
3911  }
3912  }
3913 
3914  ZoneQueue<int> worklist(temp_zone);
3915 
3916  for (BitVector::Iterator iterator(
3917  range->GetListOfBlocksRequiringSpillOperands());
3918  !iterator.Done(); iterator.Advance()) {
3919  worklist.push(iterator.Current());
3920  }
3921 
3922  ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
3923  // Seek the deferred blocks that dominate locations requiring spill operands,
3924  // and spill there. We only need to spill at the start of such blocks.
3925  BitVector done_blocks(
3926  range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
3927  while (!worklist.empty()) {
3928  int block_id = worklist.front();
3929  worklist.pop();
3930  if (done_blocks.Contains(block_id)) continue;
3931  done_blocks.Add(block_id);
3932  InstructionBlock* spill_block =
3933  code->InstructionBlockAt(RpoNumber::FromInt(block_id));
3934 
3935  for (const RpoNumber& pred : spill_block->predecessors()) {
3936  const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
3937 
3938  if (pred_block->IsDeferred()) {
3939  worklist.push(pred_block->rpo_number().ToInt());
3940  } else {
3941  LifetimePosition pred_end =
3942  LifetimePosition::InstructionFromInstructionIndex(
3943  pred_block->last_instruction_index());
3944 
3945  LiveRangeBound* bound = array->Find(pred_end);
3946 
3947  InstructionOperand pred_op = bound->range_->GetAssignedOperand();
3948 
3949  RpoNumber spill_block_number = spill_block->rpo_number();
3950  if (done_moves.find(std::make_pair(
3951  spill_block_number, range->vreg())) == done_moves.end()) {
3952  data()->AddGapMove(spill_block->first_instruction_index(),
3953  Instruction::GapPosition::START, pred_op,
3954  spill_operand);
3955  done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
3956  spill_block->mark_needs_frame();
3957  }
3958  }
3959  }
3960  }
3961 }
3962 
3963 #undef TRACE
3964 
3965 } // namespace compiler
3966 } // namespace internal
3967 } // namespace v8