V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instruction.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/instruction.h"
6 
7 #include <iomanip>
8 
9 #include "src/compiler/common-operator.h"
10 #include "src/compiler/graph.h"
11 #include "src/compiler/schedule.h"
12 #include "src/compiler/state-values-utils.h"
13 #include "src/register-configuration.h"
14 #include "src/source-position.h"
15 
16 namespace v8 {
17 namespace internal {
18 namespace compiler {
19 
// Indirection over the register configuration so tests can substitute a
// different configuration; defaults to the target's standard configuration.
const RegisterConfiguration* (*GetRegConfig)() = RegisterConfiguration::Default;
21 
// Returns the condition that holds for (b OP a) given that `condition` holds
// for (a OP b), i.e. the condition to use after swapping the two operands of
// a comparison. Symmetric conditions (equality, overflow) map to themselves.
FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
  switch (condition) {
    // Signed orderings: swap < with > and <= with >=.
    case kSignedLessThan:
      return kSignedGreaterThan;
    case kSignedGreaterThanOrEqual:
      return kSignedLessThanOrEqual;
    case kSignedLessThanOrEqual:
      return kSignedGreaterThanOrEqual;
    case kSignedGreaterThan:
      return kSignedLessThan;
    // Unsigned orderings, same mirroring.
    case kUnsignedLessThan:
      return kUnsignedGreaterThan;
    case kUnsignedGreaterThanOrEqual:
      return kUnsignedLessThanOrEqual;
    case kUnsignedLessThanOrEqual:
      return kUnsignedGreaterThanOrEqual;
    case kUnsignedGreaterThan:
      return kUnsignedLessThan;
    // Floating-point orderings: the "unordered" attribute stays attached to
    // the mirrored relation (NaN comparisons are symmetric).
    case kFloatLessThanOrUnordered:
      return kFloatGreaterThanOrUnordered;
    case kFloatGreaterThanOrEqual:
      return kFloatLessThanOrEqual;
    case kFloatLessThanOrEqual:
      return kFloatGreaterThanOrEqual;
    case kFloatGreaterThanOrUnordered:
      return kFloatLessThanOrUnordered;
    case kFloatLessThan:
      return kFloatGreaterThan;
    case kFloatGreaterThanOrEqualOrUnordered:
      return kFloatLessThanOrEqualOrUnordered;
    case kFloatLessThanOrEqualOrUnordered:
      return kFloatGreaterThanOrEqualOrUnordered;
    case kFloatGreaterThan:
      return kFloatLessThan;
    // Sign tests have a single operand; commuting them is meaningless.
    case kPositiveOrZero:
    case kNegative:
      UNREACHABLE();
      break;
    // Symmetric conditions are unchanged by operand swap.
    case kEqual:
    case kNotEqual:
    case kOverflow:
    case kNotOverflow:
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      return condition;
  }
  UNREACHABLE();
}
70 
// Returns true when this operand and `other` occupy overlapping storage, so
// that writing one can clobber (part of) the other.
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
  // Fast path: with simple FP aliasing, or when either operand is not an FP
  // location, interference reduces to canonicalized equality.
  if (kSimpleFPAliasing || !this->IsFPLocationOperand() ||
      !other.IsFPLocationOperand())
    return EqualsCanonicalized(other);
  // Aliasing is complex and both operands are fp locations.
  const LocationOperand& loc = *LocationOperand::cast(this);
  const LocationOperand& other_loc = LocationOperand::cast(other);
  LocationOperand::LocationKind kind = loc.location_kind();
  LocationOperand::LocationKind other_kind = other_loc.location_kind();
  // A register can never alias a stack slot.
  if (kind != other_kind) return false;
  MachineRepresentation rep = loc.representation();
  MachineRepresentation other_rep = other_loc.representation();
  // Same representation: back to canonicalized equality.
  if (rep == other_rep) return EqualsCanonicalized(other);
  if (kind == LocationOperand::REGISTER) {
    // FP register-register interference.
    return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
                                      other_loc.register_code());
  } else {
    // FP slot-slot interference. Slots of different FP reps can alias because
    // the gap resolver may break a move into 2 or 4 equivalent smaller moves.
    DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
    // Compute the inclusive [lo, hi] range of pointer-sized slots each
    // operand covers, then test the two ranges for overlap.
    int index_hi = loc.index();
    int index_lo = index_hi - (1 << ElementSizeLog2Of(rep)) / kPointerSize + 1;
    int other_index_hi = other_loc.index();
    int other_index_lo =
        other_index_hi - (1 << ElementSizeLog2Of(other_rep)) / kPointerSize + 1;
    return other_index_hi >= index_lo && index_hi >= other_index_lo;
  }
  // Unreachable: both branches above return.
  return false;
}
101 
// Returns true when a value may be moved between this location and `op`
// (same storage class: general, float, double, or SIMD).
bool LocationOperand::IsCompatible(LocationOperand* op) {
  if (IsRegister() || IsStackSlot()) {
    // General-purpose locations are interchangeable with one another.
    return op->IsRegister() || op->IsStackSlot();
  } else if (kSimpleFPAliasing) {
    // A backend may choose to generate the same instruction sequence regardless
    // of the FP representation. As a result, we can relax the compatibility and
    // allow a Double to be moved in a Float for example. However, this is only
    // allowed if registers do not overlap.
    return (IsFPRegister() || IsFPStackSlot()) &&
           (op->IsFPRegister() || op->IsFPStackSlot());
  } else if (IsFloatRegister() || IsFloatStackSlot()) {
    // With complex aliasing, FP reps must match exactly.
    return op->IsFloatRegister() || op->IsFloatStackSlot();
  } else if (IsDoubleRegister() || IsDoubleStackSlot()) {
    return op->IsDoubleRegister() || op->IsDoubleStackSlot();
  } else {
    return (IsSimd128Register() || IsSimd128StackSlot()) &&
           (op->IsSimd128Register() || op->IsSimd128StackSlot());
  }
}
121 
// Debug helper: dump this operand to stdout using operator<< below.
void InstructionOperand::Print() const { StdoutStream{} << *this << std::endl; }
123 
// Pretty-prints an operand. Formats by kind:
//   UNALLOCATED: "vN" plus a policy suffix such as "(R)", "(=rax)", "(-)".
//   CONSTANT:    "[constant:N]".
//   IMMEDIATE:   "#value" (inline) or "[immediate:N]" (indexed).
//   EXPLICIT/ALLOCATED: "[location|R|rep]" with |E for explicit operands.
//   INVALID:     "(x)".
std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
  switch (op.kind()) {
    case InstructionOperand::UNALLOCATED: {
      const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
      os << "v" << unalloc->virtual_register();
      if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
        return os << "(=" << unalloc->fixed_slot_index() << "S)";
      }
      switch (unalloc->extended_policy()) {
        case UnallocatedOperand::NONE:
          return os;
        case UnallocatedOperand::FIXED_REGISTER:
          return os << "(="
                    << Register::from_code(unalloc->fixed_register_index())
                    << ")";
        case UnallocatedOperand::FIXED_FP_REGISTER:
          return os << "(="
                    << DoubleRegister::from_code(
                           unalloc->fixed_register_index())
                    << ")";
        case UnallocatedOperand::MUST_HAVE_REGISTER:
          return os << "(R)";
        case UnallocatedOperand::MUST_HAVE_SLOT:
          return os << "(S)";
        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
          return os << "(1)";
        case UnallocatedOperand::REGISTER_OR_SLOT:
          return os << "(-)";
        case UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
          return os << "(*)";
      }
      // Every extended policy returns above, so control never falls through
      // into the CONSTANT case below.
    }
    case InstructionOperand::CONSTANT:
      return os << "[constant:" << ConstantOperand::cast(op).virtual_register()
                << "]";
    case InstructionOperand::IMMEDIATE: {
      ImmediateOperand imm = ImmediateOperand::cast(op);
      switch (imm.type()) {
        case ImmediateOperand::INLINE:
          return os << "#" << imm.inline_value();
        case ImmediateOperand::INDEXED:
          return os << "[immediate:" << imm.indexed_value() << "]";
      }
      // Both immediate types return above; no fall-through.
    }
    case InstructionOperand::EXPLICIT:
    case InstructionOperand::ALLOCATED: {
      LocationOperand allocated = LocationOperand::cast(op);
      if (op.IsStackSlot()) {
        os << "[stack:" << allocated.index();
      } else if (op.IsFPStackSlot()) {
        os << "[fp_stack:" << allocated.index();
      } else if (op.IsRegister()) {
        // Codes beyond kNumRegisters name target-specific special registers.
        const char* name =
            allocated.register_code() < Register::kNumRegisters
                ? RegisterName(Register::from_code(allocated.register_code()))
                : Assembler::GetSpecialRegisterName(allocated.register_code());
        os << "[" << name << "|R";
      } else if (op.IsDoubleRegister()) {
        os << "[" << DoubleRegister::from_code(allocated.register_code())
           << "|R";
      } else if (op.IsFloatRegister()) {
        os << "[" << FloatRegister::from_code(allocated.register_code())
           << "|R";
      } else {
        DCHECK(op.IsSimd128Register());
        os << "[" << Simd128Register::from_code(allocated.register_code())
           << "|R";
      }
      if (allocated.IsExplicit()) {
        os << "|E";
      }
      // Append a short tag for the machine representation.
      switch (allocated.representation()) {
        case MachineRepresentation::kNone:
          os << "|-";
          break;
        case MachineRepresentation::kBit:
          os << "|b";
          break;
        case MachineRepresentation::kWord8:
          os << "|w8";
          break;
        case MachineRepresentation::kWord16:
          os << "|w16";
          break;
        case MachineRepresentation::kWord32:
          os << "|w32";
          break;
        case MachineRepresentation::kWord64:
          os << "|w64";
          break;
        case MachineRepresentation::kFloat32:
          os << "|f32";
          break;
        case MachineRepresentation::kFloat64:
          os << "|f64";
          break;
        case MachineRepresentation::kSimd128:
          os << "|s128";
          break;
        case MachineRepresentation::kTaggedSigned:
          os << "|ts";
          break;
        case MachineRepresentation::kTaggedPointer:
          os << "|tp";
          break;
        case MachineRepresentation::kTagged:
          os << "|t";
          break;
      }
      return os << "]";
    }
    case InstructionOperand::INVALID:
      return os << "(x)";
  }
  UNREACHABLE();
}
240 
// Debug helper: dump this move as "destination = source" to stdout.
void MoveOperands::Print() const {
  StdoutStream{} << destination() << " = " << source() << std::endl;
}
244 
245 std::ostream& operator<<(std::ostream& os, const MoveOperands& mo) {
246  os << mo.destination();
247  if (!mo.source().Equals(mo.destination())) {
248  os << " = " << mo.source();
249  }
250  return os << ";";
251 }
252 
253 bool ParallelMove::IsRedundant() const {
254  for (MoveOperands* move : *this) {
255  if (!move->IsRedundant()) return false;
256  }
257  return true;
258 }
259 
// Prepares `move` for insertion after this ParallelMove: rewrites its source
// through any move in this ParallelMove that defines it, and collects into
// `to_eliminate` the moves whose destinations `move` will overwrite.
void ParallelMove::PrepareInsertAfter(
    MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
  // With no FP aliasing, at most one replacement and one eliminated move can
  // exist, allowing an early exit once both are found.
  bool no_aliasing =
      kSimpleFPAliasing || !move->destination().IsFPLocationOperand();
  MoveOperands* replacement = nullptr;
  MoveOperands* eliminated = nullptr;
  for (MoveOperands* curr : *this) {
    if (curr->IsEliminated()) continue;
    if (curr->destination().EqualsCanonicalized(move->source())) {
      // We must replace move's source with curr's destination in order to
      // insert it into this ParallelMove.
      DCHECK(!replacement);
      replacement = curr;
      if (no_aliasing && eliminated != nullptr) break;
    } else if (curr->destination().InterferesWith(move->destination())) {
      // We can eliminate curr, since move overwrites at least a part of its
      // destination, implying its value is no longer live.
      eliminated = curr;
      to_eliminate->push_back(curr);
      if (no_aliasing && replacement != nullptr) break;
    }
  }
  // Note: curr's destination == move's source in the replacement case, so the
  // rewrite below forwards the value through the existing move.
  if (replacement != nullptr) move->set_source(replacement->source());
}
284 
// Constructs an explicit (pre-assigned) location operand and asserts that a
// register choice is actually allocatable for the given representation.
ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
                                 int index)
    : LocationOperand(EXPLICIT, kind, rep, index) {
  DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
                 GetRegConfig()->IsAllocatableGeneralCode(index));
  DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32,
                 GetRegConfig()->IsAllocatableFloatCode(index));
  DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64),
                 GetRegConfig()->IsAllocatableDoubleCode(index));
}
295 
// Constructs an operand-less, non-call instruction with empty gap moves.
Instruction::Instruction(InstructionCode opcode)
    : opcode_(opcode),
      bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
                 TempCountField::encode(0) | IsCallField::encode(false)),
      reference_map_(nullptr),
      block_(nullptr) {
  parallel_moves_[0] = nullptr;
  parallel_moves_[1] = nullptr;
}
305 
// Constructs an instruction and copies its operands into the trailing
// operands_ array in the fixed order: outputs, then inputs, then temps.
// The counts are packed into bit_field_ for compact storage.
Instruction::Instruction(InstructionCode opcode, size_t output_count,
                         InstructionOperand* outputs, size_t input_count,
                         InstructionOperand* inputs, size_t temp_count,
                         InstructionOperand* temps)
    : opcode_(opcode),
      bit_field_(OutputCountField::encode(output_count) |
                 InputCountField::encode(input_count) |
                 TempCountField::encode(temp_count) |
                 IsCallField::encode(false)),
      reference_map_(nullptr),
      block_(nullptr) {
  parallel_moves_[0] = nullptr;
  parallel_moves_[1] = nullptr;
  size_t offset = 0;
  for (size_t i = 0; i < output_count; ++i) {
    DCHECK(!outputs[i].IsInvalid());
    operands_[offset++] = outputs[i];
  }
  for (size_t i = 0; i < input_count; ++i) {
    DCHECK(!inputs[i].IsInvalid());
    operands_[offset++] = inputs[i];
  }
  for (size_t i = 0; i < temp_count; ++i) {
    DCHECK(!temps[i].IsInvalid());
    operands_[offset++] = temps[i];
  }
}
333 
334 bool Instruction::AreMovesRedundant() const {
335  for (int i = Instruction::FIRST_GAP_POSITION;
336  i <= Instruction::LAST_GAP_POSITION; i++) {
337  if (parallel_moves_[i] != nullptr && !parallel_moves_[i]->IsRedundant()) {
338  return false;
339  }
340  }
341  return true;
342 }
343 
// Debug helper: dump this instruction to stdout using operator<< below.
void Instruction::Print() const { StdoutStream{} << *this << std::endl; }
345 
346 std::ostream& operator<<(std::ostream& os, const ParallelMove& pm) {
347  const char* space = "";
348  for (MoveOperands* move : pm) {
349  if (move->IsEliminated()) continue;
350  os << space << *move;
351  space = " ";
352  }
353  return os;
354 }
355 
// Records a tagged-pointer location for the GC. Negative stack-slot indices
// refer to incoming arguments and are deliberately skipped; FP locations can
// never hold tagged values.
void ReferenceMap::RecordReference(const AllocatedOperand& op) {
  // Do not record arguments as pointers.
  if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
  DCHECK(!op.IsFPRegister() && !op.IsFPStackSlot());
  reference_operands_.push_back(op);
}
362 
363 std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
364  os << "{";
365  const char* separator = "";
366  for (const InstructionOperand& op : pm.reference_operands_) {
367  os << separator << op;
368  separator = ";";
369  }
370  return os << "}";
371 }
372 
// Prints the symbolic name of an architecture opcode; the case list is
// generated from the target's ARCH_OPCODE_LIST macro.
std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao) {
  switch (ao) {
#define CASE(Name)     \
  case k##Name:        \
    return os << #Name;
    ARCH_OPCODE_LIST(CASE)
#undef CASE
  }
  UNREACHABLE();
}
383 
// Prints the symbolic name of an addressing mode; kMode_None prints nothing.
// The case list is generated from the target's addressing-mode macro.
std::ostream& operator<<(std::ostream& os, const AddressingMode& am) {
  switch (am) {
    case kMode_None:
      return os;
#define CASE(Name)       \
  case kMode_##Name:     \
    return os << #Name;
    TARGET_ADDRESSING_MODE_LIST(CASE)
#undef CASE
  }
  UNREACHABLE();
}
396 
// Prints a human-readable name for a flags mode; kFlags_none prints nothing.
std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
  switch (fm) {
    case kFlags_none:
      return os;
    case kFlags_branch:
      return os << "branch";
    case kFlags_branch_and_poison:
      return os << "branch_and_poison";
    case kFlags_deoptimize:
      return os << "deoptimize";
    case kFlags_deoptimize_and_poison:
      return os << "deoptimize_and_poison";
    case kFlags_set:
      return os << "set";
    case kFlags_trap:
      return os << "trap";
  }
  UNREACHABLE();
}
416 
// Prints a human-readable description of a flags condition.
std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
  switch (fc) {
    case kEqual:
      return os << "equal";
    case kNotEqual:
      return os << "not equal";
    case kSignedLessThan:
      return os << "signed less than";
    case kSignedGreaterThanOrEqual:
      return os << "signed greater than or equal";
    case kSignedLessThanOrEqual:
      return os << "signed less than or equal";
    case kSignedGreaterThan:
      return os << "signed greater than";
    case kUnsignedLessThan:
      return os << "unsigned less than";
    case kUnsignedGreaterThanOrEqual:
      return os << "unsigned greater than or equal";
    case kUnsignedLessThanOrEqual:
      return os << "unsigned less than or equal";
    case kUnsignedGreaterThan:
      return os << "unsigned greater than";
    case kFloatLessThanOrUnordered:
      return os << "less than or unordered (FP)";
    case kFloatGreaterThanOrEqual:
      return os << "greater than or equal (FP)";
    case kFloatLessThanOrEqual:
      return os << "less than or equal (FP)";
    case kFloatGreaterThanOrUnordered:
      return os << "greater than or unordered (FP)";
    case kFloatLessThan:
      return os << "less than (FP)";
    case kFloatGreaterThanOrEqualOrUnordered:
      return os << "greater than, equal or unordered (FP)";
    case kFloatLessThanOrEqualOrUnordered:
      return os << "less than, equal or unordered (FP)";
    case kFloatGreaterThan:
      return os << "greater than (FP)";
    case kUnorderedEqual:
      return os << "unordered equal";
    case kUnorderedNotEqual:
      return os << "unordered not equal";
    case kOverflow:
      return os << "overflow";
    case kNotOverflow:
      return os << "not overflow";
    case kPositiveOrZero:
      return os << "positive or zero";
    case kNegative:
      return os << "negative";
  }
  UNREACHABLE();
}
470 
471 std::ostream& operator<<(std::ostream& os, const Instruction& instr) {
472  os << "gap ";
473  for (int i = Instruction::FIRST_GAP_POSITION;
474  i <= Instruction::LAST_GAP_POSITION; i++) {
475  os << "(";
476  if (instr.parallel_moves()[i] != nullptr) {
477  os << *instr.parallel_moves()[i];
478  }
479  os << ") ";
480  }
481  os << "\n ";
482 
483  if (instr.OutputCount() == 1) {
484  os << *instr.OutputAt(0) << " = ";
485  } else if (instr.OutputCount() > 1) {
486  os << "(" << *instr.OutputAt(0);
487  for (size_t i = 1; i < instr.OutputCount(); i++) {
488  os << ", " << *instr.OutputAt(i);
489  }
490  os << ") = ";
491  }
492 
493  os << ArchOpcodeField::decode(instr.opcode());
494  AddressingMode am = AddressingModeField::decode(instr.opcode());
495  if (am != kMode_None) {
496  os << " : " << AddressingModeField::decode(instr.opcode());
497  }
498  FlagsMode fm = FlagsModeField::decode(instr.opcode());
499  if (fm != kFlags_none) {
500  os << " && " << fm << " if " << FlagsConditionField::decode(instr.opcode());
501  }
502  for (size_t i = 0; i < instr.InputCount(); i++) {
503  os << " " << *instr.InputAt(i);
504  }
505  return os;
506 }
507 
// Wraps a 32-bit integer as a kInt32 constant.
Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
509 
// Builds a relocatable integer constant, preserving the relocation mode so
// the value can be patched after code generation.
Constant::Constant(RelocatablePtrConstantInfo info) {
  if (info.type() == RelocatablePtrConstantInfo::kInt32) {
    type_ = kInt32;
  } else if (info.type() == RelocatablePtrConstantInfo::kInt64) {
    type_ = kInt64;
  } else {
    // Only 32/64-bit relocatable constants exist.
    UNREACHABLE();
  }
  value_ = info.value();
  rmode_ = info.rmode();
}
521 
// Reinterprets the stored integer payload as a heap-object handle location.
// Only valid for kHeapObject constants.
Handle<HeapObject> Constant::ToHeapObject() const {
  DCHECK_EQ(kHeapObject, type());
  Handle<HeapObject> value(
      reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
  return value;
}
528 
// Reinterprets the stored integer payload as a Code handle location.
// Note: guarded by kHeapObject, not a separate kCode type.
Handle<Code> Constant::ToCode() const {
  DCHECK_EQ(kHeapObject, type());
  Handle<Code> value(reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
  return value;
}
534 
// Recovers the StringConstantBase pointer stored in the integer payload.
// Only valid for kDelayedStringConstant constants.
const StringConstantBase* Constant::ToDelayedStringConstant() const {
  DCHECK_EQ(kDelayedStringConstant, type());
  const StringConstantBase* value =
      bit_cast<StringConstantBase*>(static_cast<intptr_t>(value_));
  return value;
}
541 
// Prints a constant with a type-specific format ("l" suffix for int64,
// "f" for float32, "RPO" prefix for RPO numbers, etc.).
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
  switch (constant.type()) {
    case Constant::kInt32:
      return os << constant.ToInt32();
    case Constant::kInt64:
      return os << constant.ToInt64() << "l";
    case Constant::kFloat32:
      return os << constant.ToFloat32() << "f";
    case Constant::kFloat64:
      return os << constant.ToFloat64().value();
    case Constant::kExternalReference:
      return os << constant.ToExternalReference().address();
    case Constant::kHeapObject:
      return os << Brief(*constant.ToHeapObject());
    case Constant::kRpoNumber:
      return os << "RPO" << constant.ToRpoNumber().ToInt();
    case Constant::kDelayedStringConstant:
      return os << "DelayedStringConstant: "
                << constant.ToDelayedStringConstant();
  }
  UNREACHABLE();
}
564 
// Creates a phi for `virtual_register` with `input_count` inputs, all
// initialized to the invalid virtual register until SetInput() fills them.
PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
                               size_t input_count)
    : virtual_register_(virtual_register),
      output_(UnallocatedOperand(UnallocatedOperand::NONE, virtual_register)),
      operands_(input_count, InstructionOperand::kInvalidVirtualRegister,
                zone) {}
571 
// Sets the input at `offset`; the slot must still be unset.
void PhiInstruction::SetInput(size_t offset, int virtual_register) {
  DCHECK_EQ(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
  operands_[offset] = virtual_register;
}
576 
// Replaces an already-set input at `offset` with a new virtual register.
void PhiInstruction::RenameInput(size_t offset, int virtual_register) {
  DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
  operands_[offset] = virtual_register;
}
581 
// Constructs an instruction block; the assembly-order number starts invalid
// and is assigned later by ComputeAssemblyOrder().
InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
                                   RpoNumber loop_header, RpoNumber loop_end,
                                   bool deferred, bool handler)
    : successors_(zone),
      predecessors_(zone),
      phis_(zone),
      ao_number_(RpoNumber::Invalid()),
      rpo_number_(rpo_number),
      loop_header_(loop_header),
      loop_end_(loop_end),
      deferred_(deferred),
      handler_(handler) {}
594 
595 size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
596  size_t j = 0;
597  for (InstructionBlock::Predecessors::const_iterator i = predecessors_.begin();
598  i != predecessors_.end(); ++i, ++j) {
599  if (*i == rpo_number) break;
600  }
601  return j;
602 }
603 
604 static RpoNumber GetRpo(const BasicBlock* block) {
605  if (block == nullptr) return RpoNumber::Invalid();
606  return RpoNumber::FromInt(block->rpo_number());
607 }
608 
609 static RpoNumber GetLoopEndRpo(const BasicBlock* block) {
610  if (!block->IsLoopHeader()) return RpoNumber::Invalid();
611  return RpoNumber::FromInt(block->loop_end()->rpo_number());
612 }
613 
// Translates a scheduler BasicBlock into a zone-allocated InstructionBlock,
// carrying over RPO numbering, loop structure, deferredness, and the CFG
// edges (as RPO numbers).
static InstructionBlock* InstructionBlockFor(Zone* zone,
                                             const BasicBlock* block) {
  // A block starting with IfException is an exception handler entry.
  bool is_handler =
      !block->empty() && block->front()->opcode() == IrOpcode::kIfException;
  InstructionBlock* instr_block = new (zone)
      InstructionBlock(zone, GetRpo(block), GetRpo(block->loop_header()),
                       GetLoopEndRpo(block), block->deferred(), is_handler);
  // Map successors and predecessors.
  instr_block->successors().reserve(block->SuccessorCount());
  for (BasicBlock* successor : block->successors()) {
    instr_block->successors().push_back(GetRpo(successor));
  }
  instr_block->predecessors().reserve(block->PredecessorCount());
  for (BasicBlock* predecessor : block->predecessors()) {
    instr_block->predecessors().push_back(GetRpo(predecessor));
  }
  return instr_block;
}
632 
// Prints a block header (RPO/AO numbers, frame and loop attributes), its
// predecessors, phis, numbered instructions, and successors.
std::ostream& operator<<(std::ostream& os,
                         const PrintableInstructionBlock& printable_block) {
  const InstructionBlock* block = printable_block.block_;
  const InstructionSequence* code = printable_block.code_;

  os << "B" << block->rpo_number();
  os << ": AO#" << block->ao_number();
  if (block->IsDeferred()) os << " (deferred)";
  if (!block->needs_frame()) os << " (no frame)";
  if (block->must_construct_frame()) os << " (construct frame)";
  if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
  if (block->IsLoopHeader()) {
    // Half-open RPO interval covered by the loop.
    os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
       << ")";
  }
  os << " instructions: [" << block->code_start() << ", " << block->code_end()
     << ")" << std::endl
     << " predecessors:";

  for (RpoNumber pred : block->predecessors()) {
    os << " B" << pred.ToInt();
  }
  os << std::endl;

  for (const PhiInstruction* phi : block->phis()) {
    os << " phi: " << phi->output() << " =";
    for (int input : phi->operands()) {
      os << " v" << input;
    }
    os << std::endl;
  }

  // Instructions, right-aligned by index via std::setw.
  for (int j = block->first_instruction_index();
       j <= block->last_instruction_index(); j++) {
    os << " " << std::setw(5) << j << ": " << *code->InstructionAt(j)
       << std::endl;
  }

  os << " successors:";
  for (RpoNumber succ : block->successors()) {
    os << " B" << succ.ToInt();
  }
  os << std::endl;
  return os;
}
678 
// Builds the InstructionBlocks container for a schedule: one InstructionBlock
// per basic block, indexed by RPO number.
InstructionBlocks* InstructionSequence::InstructionBlocksFor(
    Zone* zone, const Schedule* schedule) {
  // Zone-allocate raw storage, then placement-new the container into it so
  // its elements also live in the zone.
  InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
  new (blocks) InstructionBlocks(
      static_cast<int>(schedule->rpo_order()->size()), nullptr, zone);
  size_t rpo_number = 0;
  for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
       it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
    DCHECK(!(*blocks)[rpo_number]);
    // Blocks must arrive in RPO order for the index to be their RPO number.
    DCHECK(GetRpo(*it).ToSize() == rpo_number);
    (*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
  }
  return blocks;
}
693 
694 void InstructionSequence::ValidateEdgeSplitForm() const {
695  // Validate blocks are in edge-split form: no block with multiple successors
696  // has an edge to a block (== a successor) with more than one predecessors.
697  for (const InstructionBlock* block : instruction_blocks()) {
698  if (block->SuccessorCount() > 1) {
699  for (const RpoNumber& successor_id : block->successors()) {
700  const InstructionBlock* successor = InstructionBlockAt(successor_id);
701  // Expect precisely one predecessor: "block".
702  CHECK(successor->PredecessorCount() == 1 &&
703  successor->predecessors()[0] == block->rpo_number());
704  }
705  }
706  }
707 }
708 
709 void InstructionSequence::ValidateDeferredBlockExitPaths() const {
710  // A deferred block with more than one successor must have all its successors
711  // deferred.
712  for (const InstructionBlock* block : instruction_blocks()) {
713  if (!block->IsDeferred() || block->SuccessorCount() <= 1) continue;
714  for (RpoNumber successor_id : block->successors()) {
715  CHECK(InstructionBlockAt(successor_id)->IsDeferred());
716  }
717  }
718 }
719 
// If a deferred block has multiple predecessors, they have to
// all be deferred. Otherwise, we can run into a situation where a range
// that spills only in deferred blocks inserts its spill in the block, but
// other ranges need moves inserted by ResolveControlFlow in the predecessors,
// which may clobber the register of this range.
void InstructionSequence::ValidateDeferredBlockEntryPaths() const {
  for (const InstructionBlock* block : instruction_blocks()) {
    if (!block->IsDeferred() || block->PredecessorCount() <= 1) continue;
    for (RpoNumber predecessor_id : block->predecessors()) {
      CHECK(InstructionBlockAt(predecessor_id)->IsDeferred());
    }
  }
}
733 
// Checks the SSA property over the whole sequence: every virtual register is
// defined (written as an output) at most once.
void InstructionSequence::ValidateSSA() const {
  // TODO(mtrofin): We could use a local zone here instead.
  BitVector definitions(VirtualRegisterCount(), zone());
  for (const Instruction* instruction : *this) {
    for (size_t i = 0; i < instruction->OutputCount(); ++i) {
      const InstructionOperand* output = instruction->OutputAt(i);
      // Outputs are either constants or unallocated operands; both carry a
      // virtual register number.
      int vreg = (output->IsConstant())
                     ? ConstantOperand::cast(output)->virtual_register()
                     : UnallocatedOperand::cast(output)->virtual_register();
      CHECK(!definitions.Contains(vreg));
      definitions.Add(vreg);
    }
  }
}
748 
// Assigns an assembly-order (AO) number to every block and fills ao_blocks_
// with blocks in emission order: non-deferred blocks first (with optional
// loop rotation), then all deferred blocks at the end.
void InstructionSequence::ComputeAssemblyOrder() {
  int ao = 0;
  RpoNumber invalid = RpoNumber::Invalid();

  // Zone-allocate storage and placement-new the AO block list into it.
  ao_blocks_ = zone()->NewArray<InstructionBlocks>(1);
  new (ao_blocks_) InstructionBlocks(zone());
  ao_blocks_->reserve(instruction_blocks_->size());

  // Place non-deferred blocks.
  for (InstructionBlock* const block : *instruction_blocks_) {
    DCHECK_NOT_NULL(block);
    if (block->IsDeferred()) continue;            // skip deferred blocks.
    if (block->ao_number() != invalid) continue;  // loop rotated.
    if (block->IsLoopHeader()) {
      bool header_align = true;
      if (FLAG_turbo_loop_rotation) {
        // Perform loop rotation for non-deferred loops.
        InstructionBlock* loop_end =
            instruction_blocks_->at(block->loop_end().ToSize() - 1);
        if (loop_end->SuccessorCount() == 1 && /* ends with goto */
            loop_end != block /* not a degenerate infinite loop */) {
          // If the last block has an unconditional jump back to the header,
          // then move it to be in front of the header in the assembly order.
          DCHECK_EQ(block->rpo_number(), loop_end->successors()[0]);
          loop_end->set_ao_number(RpoNumber::FromInt(ao++));
          ao_blocks_->push_back(loop_end);
          // This block will be the new machine-level loop header, so align
          // this block instead of the loop header block.
          loop_end->set_alignment(true);
          header_align = false;
        }
      }
      block->set_alignment(header_align);
    }
    block->set_ao_number(RpoNumber::FromInt(ao++));
    ao_blocks_->push_back(block);
  }
  // Add all leftover (deferred) blocks.
  for (InstructionBlock* const block : *instruction_blocks_) {
    if (block->ao_number() == invalid) {
      block->set_ao_number(RpoNumber::FromInt(ao++));
      ao_blocks_->push_back(block);
    }
  }
  // Every block must have received exactly one AO number.
  DCHECK_EQ(instruction_blocks_->size(), ao);
}
795 
796 void InstructionSequence::RecomputeAssemblyOrderForTesting() {
797  RpoNumber invalid = RpoNumber::Invalid();
798  for (InstructionBlock* block : *instruction_blocks_) {
799  block->set_ao_number(invalid);
800  }
801  ComputeAssemblyOrder();
802 }
803 
// Constructs an instruction sequence over pre-built blocks; all containers
// are zone-allocated. The assembly order is computed immediately.
InstructionSequence::InstructionSequence(Isolate* isolate,
                                         Zone* instruction_zone,
                                         InstructionBlocks* instruction_blocks)
    : isolate_(isolate),
      zone_(instruction_zone),
      instruction_blocks_(instruction_blocks),
      ao_blocks_(nullptr),
      source_positions_(zone()),
      constants_(ConstantMap::key_compare(),
                 ConstantMap::allocator_type(zone())),
      immediates_(zone()),
      instructions_(zone()),
      next_virtual_register_(0),
      reference_maps_(zone()),
      representations_(zone()),
      representation_mask_(0),
      deoptimization_entries_(zone()),
      current_block_(nullptr) {
  ComputeAssemblyOrder();
}
824 
// Hands out the next fresh virtual register number; aborts if the counter
// ever wraps into the reserved invalid sentinel.
int InstructionSequence::NextVirtualRegister() {
  int virtual_register = next_virtual_register_++;
  CHECK_NE(virtual_register, InstructionOperand::kInvalidVirtualRegister);
  return virtual_register;
}
830 
831 Instruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
832  const InstructionBlock* block = InstructionBlockAt(rpo);
833  return InstructionAt(block->code_start());
834 }
835 
// Opens the block `rpo` for instruction emission; records the current
// instruction index as the block's code start. No block may already be open.
void InstructionSequence::StartBlock(RpoNumber rpo) {
  DCHECK_NULL(current_block_);
  current_block_ = InstructionBlockAt(rpo);
  int code_start = static_cast<int>(instructions_.size());
  current_block_->set_code_start(code_start);
}
842 
// Closes the currently open block; `rpo` must match it, and at least one
// instruction must have been emitted since StartBlock().
void InstructionSequence::EndBlock(RpoNumber rpo) {
  int end = static_cast<int>(instructions_.size());
  DCHECK_EQ(current_block_->rpo_number(), rpo);
  CHECK(current_block_->code_start() >= 0 &&
        current_block_->code_start() < end);
  current_block_->set_code_end(end);
  current_block_ = nullptr;
}
851 
// Appends `instr` to the currently open block and returns its index. Lazily
// attaches a ReferenceMap when the instruction needs one for the GC.
int InstructionSequence::AddInstruction(Instruction* instr) {
  DCHECK_NOT_NULL(current_block_);
  int index = static_cast<int>(instructions_.size());
  instr->set_block(current_block_);
  instructions_.push_back(instr);
  if (instr->NeedsReferenceMap()) {
    DCHECK_NULL(instr->reference_map());
    ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
    reference_map->set_instruction_position(index);
    instr->set_reference_map(reference_map);
    reference_maps_.push_back(reference_map);
  }
  return index;
}
866 
// Returns the block that owns the instruction at `instruction_index`.
InstructionBlock* InstructionSequence::GetInstructionBlock(
    int instruction_index) const {
  return instructions()[instruction_index]->block();
}
871 
// Normalizes a machine representation for register allocation: sub-word
// integer reps are widened to the default word representation; kNone is
// never a valid virtual-register representation.
static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
  switch (rep) {
    case MachineRepresentation::kBit:
    case MachineRepresentation::kWord8:
    case MachineRepresentation::kWord16:
      return InstructionSequence::DefaultRepresentation();
    case MachineRepresentation::kWord32:
    case MachineRepresentation::kWord64:
    case MachineRepresentation::kTaggedSigned:
    case MachineRepresentation::kTaggedPointer:
    case MachineRepresentation::kTagged:
    case MachineRepresentation::kFloat32:
    case MachineRepresentation::kFloat64:
    case MachineRepresentation::kSimd128:
      return rep;
    case MachineRepresentation::kNone:
      break;
  }

  UNREACHABLE();
}
893 
// Returns the representation recorded for `virtual_register`; registers
// beyond the (lazily grown) table implicitly carry the default one.
MachineRepresentation InstructionSequence::GetRepresentation(
    int virtual_register) const {
  DCHECK_LE(0, virtual_register);
  DCHECK_LT(virtual_register, VirtualRegisterCount());
  if (virtual_register >= static_cast<int>(representations_.size())) {
    return DefaultRepresentation();
  }
  return representations_[virtual_register];
}
903 
904 void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
905  int virtual_register) {
906  DCHECK_LE(0, virtual_register);
907  DCHECK_LT(virtual_register, VirtualRegisterCount());
908  if (virtual_register >= static_cast<int>(representations_.size())) {
909  representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
910  }
911  rep = FilterRepresentation(rep);
912  DCHECK_IMPLIES(representations_[virtual_register] != rep,
913  representations_[virtual_register] == DefaultRepresentation());
914  representations_[virtual_register] = rep;
915  representation_mask_ |= RepresentationBit(rep);
916 }
917 
918 int InstructionSequence::AddDeoptimizationEntry(
919  FrameStateDescriptor* descriptor, DeoptimizeKind kind,
920  DeoptimizeReason reason, VectorSlotPair const& feedback) {
921  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
922  deoptimization_entries_.push_back(
923  DeoptimizationEntry(descriptor, kind, reason, feedback));
924  return deoptimization_id;
925 }
926 
927 DeoptimizationEntry const& InstructionSequence::GetDeoptimizationEntry(
928  int state_id) {
929  return deoptimization_entries_[state_id];
930 }
931 
932 RpoNumber InstructionSequence::InputRpo(Instruction* instr, size_t index) {
933  InstructionOperand* operand = instr->InputAt(index);
934  Constant constant =
935  operand->IsImmediate()
936  ? GetImmediate(ImmediateOperand::cast(operand))
937  : GetConstant(ConstantOperand::cast(operand)->virtual_register());
938  return constant.ToRpoNumber();
939 }
940 
941 bool InstructionSequence::GetSourcePosition(const Instruction* instr,
942  SourcePosition* result) const {
943  auto it = source_positions_.find(instr);
944  if (it == source_positions_.end()) return false;
945  *result = it->second;
946  return true;
947 }
948 
949 void InstructionSequence::SetSourcePosition(const Instruction* instr,
950  SourcePosition value) {
951  source_positions_.insert(std::make_pair(instr, value));
952 }
953 
954 void InstructionSequence::Print() const {
955  StdoutStream{} << *this << std::endl;
956 }
957 
958 void InstructionSequence::PrintBlock(int block_id) const {
959  RpoNumber rpo = RpoNumber::FromInt(block_id);
960  const InstructionBlock* block = InstructionBlockAt(rpo);
961  CHECK(block->rpo_number() == rpo);
962  StdoutStream{} << PrintableInstructionBlock{block, this} << std::endl;
963 }
964 
// Test-only register-configuration override; stays nullptr until
// SetRegisterConfigurationForTesting() installs one.
const RegisterConfiguration*
    InstructionSequence::registerConfigurationForTesting_ = nullptr;
967 
// Returns the test-only register configuration. Must only be called after
// SetRegisterConfigurationForTesting() has installed one.
const RegisterConfiguration*
InstructionSequence::RegisterConfigurationForTesting() {
  DCHECK_NOT_NULL(registerConfigurationForTesting_);
  return registerConfigurationForTesting_;
}
973 
// Installs |regConfig| as the register configuration used by tests and
// redirects the file-global GetRegConfig hook to the testing accessor so
// subsequent GetRegConfig() calls observe the override.
void InstructionSequence::SetRegisterConfigurationForTesting(
    const RegisterConfiguration* regConfig) {
  registerConfigurationForTesting_ = regConfig;
  GetRegConfig = InstructionSequence::RegisterConfigurationForTesting;
}
979 
// Describes one frame of deoptimization state. |outer_state| links to the
// enclosing (caller) frame's descriptor, or nullptr for the outermost
// frame; the Get*Count()/Get*Size() accessors below walk this chain.
// |zone| only provides storage for the values_ tree.
FrameStateDescriptor::FrameStateDescriptor(
    Zone* zone, FrameStateType type, BailoutId bailout_id,
    OutputFrameStateCombine state_combine, size_t parameters_count,
    size_t locals_count, size_t stack_count,
    MaybeHandle<SharedFunctionInfo> shared_info,
    FrameStateDescriptor* outer_state)
    : type_(type),
      bailout_id_(bailout_id),
      frame_state_combine_(state_combine),
      parameters_count_(parameters_count),
      locals_count_(locals_count),
      stack_count_(stack_count),
      values_(zone),
      shared_info_(shared_info),
      outer_state_(outer_state) {}
995 
996 size_t FrameStateDescriptor::GetSize() const {
997  return 1 + parameters_count() + locals_count() + stack_count() +
998  (HasContext() ? 1 : 0);
999 }
1000 
1001 size_t FrameStateDescriptor::GetTotalSize() const {
1002  size_t total_size = 0;
1003  for (const FrameStateDescriptor* iter = this; iter != nullptr;
1004  iter = iter->outer_state_) {
1005  total_size += iter->GetSize();
1006  }
1007  return total_size;
1008 }
1009 
1010 size_t FrameStateDescriptor::GetFrameCount() const {
1011  size_t count = 0;
1012  for (const FrameStateDescriptor* iter = this; iter != nullptr;
1013  iter = iter->outer_state_) {
1014  ++count;
1015  }
1016  return count;
1017 }
1018 
1019 size_t FrameStateDescriptor::GetJSFrameCount() const {
1020  size_t count = 0;
1021  for (const FrameStateDescriptor* iter = this; iter != nullptr;
1022  iter = iter->outer_state_) {
1023  if (FrameStateFunctionInfo::IsJSFunctionType(iter->type_)) {
1024  ++count;
1025  }
1026  }
1027  return count;
1028 }
1029 
1030 std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
1031  return os << rpo.ToSize();
1032 }
1033 
1034 std::ostream& operator<<(std::ostream& os, const InstructionSequence& code) {
1035  for (size_t i = 0; i < code.immediates_.size(); ++i) {
1036  Constant constant = code.immediates_[i];
1037  os << "IMM#" << i << ": " << constant << "\n";
1038  }
1039  int i = 0;
1040  for (ConstantMap::const_iterator it = code.constants_.begin();
1041  it != code.constants_.end(); ++i, ++it) {
1042  os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
1043  }
1044  for (int i = 0; i < code.InstructionBlockCount(); i++) {
1045  auto* block = code.InstructionBlockAt(RpoNumber::FromInt(i));
1046  os << PrintableInstructionBlock{block, &code};
1047  }
1048  return os;
1049 }
1050 
1051 } // namespace compiler
1052 } // namespace internal
1053 } // namespace v8
Definition: libplatform.h:13