V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instruction-selector.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/instruction-selector.h"
6 
7 #include <limits>
8 
9 #include "src/assembler-inl.h"
10 #include "src/base/adapters.h"
11 #include "src/compiler/backend/instruction-selector-impl.h"
12 #include "src/compiler/compiler-source-position-table.h"
13 #include "src/compiler/node-matchers.h"
14 #include "src/compiler/pipeline.h"
15 #include "src/compiler/schedule.h"
16 #include "src/compiler/state-values-utils.h"
17 #include "src/deoptimizer.h"
18 
19 namespace v8 {
20 namespace internal {
21 namespace compiler {
22 
23 InstructionSelector::InstructionSelector(
24  Zone* zone, size_t node_count, Linkage* linkage,
25  InstructionSequence* sequence, Schedule* schedule,
26  SourcePositionTable* source_positions, Frame* frame,
27  EnableSwitchJumpTable enable_switch_jump_table,
28  SourcePositionMode source_position_mode, Features features,
29  EnableScheduling enable_scheduling,
30  EnableRootsRelativeAddressing enable_roots_relative_addressing,
31  PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
32  : zone_(zone),
33  linkage_(linkage),
34  sequence_(sequence),
35  source_positions_(source_positions),
36  source_position_mode_(source_position_mode),
37  features_(features),
38  schedule_(schedule),
39  current_block_(nullptr),
40  instructions_(zone),
41  continuation_inputs_(sequence->zone()),
42  continuation_outputs_(sequence->zone()),
43  defined_(node_count, false, zone),
44  used_(node_count, false, zone),
45  effect_level_(node_count, 0, zone),
46  virtual_registers_(node_count,
47  InstructionOperand::kInvalidVirtualRegister, zone),
48  virtual_register_rename_(zone),
49  scheduler_(nullptr),
50  enable_scheduling_(enable_scheduling),
51  enable_roots_relative_addressing_(enable_roots_relative_addressing),
52  enable_switch_jump_table_(enable_switch_jump_table),
53  poisoning_level_(poisoning_level),
54  frame_(frame),
55  instruction_selection_failed_(false),
56  instr_origins_(sequence->zone()),
57  trace_turbo_(trace_turbo) {
58  instructions_.reserve(node_count);
59  continuation_inputs_.reserve(5);
60  continuation_outputs_.reserve(2);
61 
62  if (trace_turbo_ == kEnableTraceTurboJson) {
63  instr_origins_.assign(node_count, {-1, 0});
64  }
65 }
66 
67 bool InstructionSelector::SelectInstructions() {
68  // Mark the inputs of all phis in loop headers as used.
69  BasicBlockVector* blocks = schedule()->rpo_order();
70  for (auto const block : *blocks) {
71  if (!block->IsLoopHeader()) continue;
72  DCHECK_LE(2u, block->PredecessorCount());
73  for (Node* const phi : *block) {
74  if (phi->opcode() != IrOpcode::kPhi) continue;
75 
76  // Mark all inputs as used.
77  for (Node* const input : phi->inputs()) {
78  MarkAsUsed(input);
79  }
80  }
81  }
82 
83  // Visit each basic block in post order.
84  for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
85  VisitBlock(*i);
86  if (instruction_selection_failed()) return false;
87  }
88 
89  // Schedule the selected instructions.
90  if (UseInstructionScheduling()) {
91  scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
92  }
93 
94  for (auto const block : *blocks) {
95  InstructionBlock* instruction_block =
96  sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
97  for (size_t i = 0; i < instruction_block->phis().size(); i++) {
98  UpdateRenamesInPhi(instruction_block->PhiAt(i));
99  }
100  size_t end = instruction_block->code_end();
101  size_t start = instruction_block->code_start();
102  DCHECK_LE(end, start);
103  StartBlock(RpoNumber::FromInt(block->rpo_number()));
104  if (end != start) {
105  while (start-- > end + 1) {
106  UpdateRenames(instructions_[start]);
107  AddInstruction(instructions_[start]);
108  }
109  UpdateRenames(instructions_[end]);
110  AddTerminator(instructions_[end]);
111  }
112  EndBlock(RpoNumber::FromInt(block->rpo_number()));
113  }
114 #if DEBUG
115  sequence()->ValidateSSA();
116 #endif
117  return true;
118 }
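
An editorial note on the replay loop above: VisitBlock (defined later in this file) emits each block's instructions bottom-up, so the range [code_end, code_start) holds the block's code in reverse, with the terminator stored first. A minimal sketch, with hypothetical indices, of how the loop restores forward order:

// Sketch only; the indices are made up, not taken from this file.
// Suppose a block recorded code_end = 4 and code_start = 8, so
// instructions_[4..7] hold its code in reverse and instructions_[4]
// is the terminator.
size_t end = 4;
size_t start = 8;
while (start-- > end + 1) {
  // Visits start = 7, 6, 5: the non-terminator instructions, replayed
  // last-emitted-first, i.e. in forward program order.
}
// instructions_[end] (index 4) is appended last, via AddTerminator.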
119 
120 void InstructionSelector::StartBlock(RpoNumber rpo) {
121  if (UseInstructionScheduling()) {
122  DCHECK_NOT_NULL(scheduler_);
123  scheduler_->StartBlock(rpo);
124  } else {
125  sequence()->StartBlock(rpo);
126  }
127 }
128 
129 void InstructionSelector::EndBlock(RpoNumber rpo) {
130  if (UseInstructionScheduling()) {
131  DCHECK_NOT_NULL(scheduler_);
132  scheduler_->EndBlock(rpo);
133  } else {
134  sequence()->EndBlock(rpo);
135  }
136 }
137 
138 void InstructionSelector::AddTerminator(Instruction* instr) {
139  if (UseInstructionScheduling()) {
140  DCHECK_NOT_NULL(scheduler_);
141  scheduler_->AddTerminator(instr);
142  } else {
143  sequence()->AddInstruction(instr);
144  }
145 }
146 
147 void InstructionSelector::AddInstruction(Instruction* instr) {
148  if (UseInstructionScheduling()) {
149  DCHECK_NOT_NULL(scheduler_);
150  scheduler_->AddInstruction(instr);
151  } else {
152  sequence()->AddInstruction(instr);
153  }
154 }
155 
156 Instruction* InstructionSelector::Emit(InstructionCode opcode,
157  InstructionOperand output,
158  size_t temp_count,
159  InstructionOperand* temps) {
160  size_t output_count = output.IsInvalid() ? 0 : 1;
161  return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
162 }
163 
164 Instruction* InstructionSelector::Emit(InstructionCode opcode,
165  InstructionOperand output,
166  InstructionOperand a, size_t temp_count,
167  InstructionOperand* temps) {
168  size_t output_count = output.IsInvalid() ? 0 : 1;
169  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
170 }
171 
172 Instruction* InstructionSelector::Emit(InstructionCode opcode,
173  InstructionOperand output,
174  InstructionOperand a,
175  InstructionOperand b, size_t temp_count,
176  InstructionOperand* temps) {
177  size_t output_count = output.IsInvalid() ? 0 : 1;
178  InstructionOperand inputs[] = {a, b};
179  size_t input_count = arraysize(inputs);
180  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
181  temps);
182 }
183 
184 Instruction* InstructionSelector::Emit(InstructionCode opcode,
185  InstructionOperand output,
186  InstructionOperand a,
187  InstructionOperand b,
188  InstructionOperand c, size_t temp_count,
189  InstructionOperand* temps) {
190  size_t output_count = output.IsInvalid() ? 0 : 1;
191  InstructionOperand inputs[] = {a, b, c};
192  size_t input_count = arraysize(inputs);
193  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
194  temps);
195 }
196 
197 Instruction* InstructionSelector::Emit(
198  InstructionCode opcode, InstructionOperand output, InstructionOperand a,
199  InstructionOperand b, InstructionOperand c, InstructionOperand d,
200  size_t temp_count, InstructionOperand* temps) {
201  size_t output_count = output.IsInvalid() ? 0 : 1;
202  InstructionOperand inputs[] = {a, b, c, d};
203  size_t input_count = arraysize(inputs);
204  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
205  temps);
206 }
207 
208 Instruction* InstructionSelector::Emit(
209  InstructionCode opcode, InstructionOperand output, InstructionOperand a,
210  InstructionOperand b, InstructionOperand c, InstructionOperand d,
211  InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
212  size_t output_count = output.IsInvalid() ? 0 : 1;
213  InstructionOperand inputs[] = {a, b, c, d, e};
214  size_t input_count = arraysize(inputs);
215  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
216  temps);
217 }
218 
219 Instruction* InstructionSelector::Emit(
220  InstructionCode opcode, InstructionOperand output, InstructionOperand a,
221  InstructionOperand b, InstructionOperand c, InstructionOperand d,
222  InstructionOperand e, InstructionOperand f, size_t temp_count,
223  InstructionOperand* temps) {
224  size_t output_count = output.IsInvalid() ? 0 : 1;
225  InstructionOperand inputs[] = {a, b, c, d, e, f};
226  size_t input_count = arraysize(inputs);
227  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
228  temps);
229 }
230 
231 Instruction* InstructionSelector::Emit(
232  InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
233  size_t input_count, InstructionOperand* inputs, size_t temp_count,
234  InstructionOperand* temps) {
235  if (output_count >= Instruction::kMaxOutputCount ||
236  input_count >= Instruction::kMaxInputCount ||
237  temp_count >= Instruction::kMaxTempCount) {
238  set_instruction_selection_failed();
239  return nullptr;
240  }
241 
242  Instruction* instr =
243  Instruction::New(instruction_zone(), opcode, output_count, outputs,
244  input_count, inputs, temp_count, temps);
245  return Emit(instr);
246 }
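
All of the fixed-arity Emit overloads above funnel into the two methods here. A hedged usage sketch follows; the visitor function is hypothetical, while OperandGenerator, its Define/Use helpers, and kArchNop are names that exist in this codebase:

// Hypothetical backend visitor: emit one instruction that defines {node}
// in a register and consumes its two inputs from registers.
void VisitFakeBinop(InstructionSelector* selector, Node* node) {
  OperandGenerator g(selector);
  selector->Emit(kArchNop,  // stand-in for a real arch-specific opcode
                 g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}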
247 
248 Instruction* InstructionSelector::Emit(Instruction* instr) {
249  instructions_.push_back(instr);
250  return instr;
251 }
252 
253 bool InstructionSelector::CanCover(Node* user, Node* node) const {
254  // 1. Both {user} and {node} must be in the same basic block.
255  if (schedule()->block(node) != schedule()->block(user)) {
256  return false;
257  }
258  // 2. Pure {node}s must be owned by the {user}.
259  if (node->op()->HasProperty(Operator::kPure)) {
260  return node->OwnedBy(user);
261  }
262  // 3. Impure {node}s must match the effect level of {user}.
263  if (GetEffectLevel(node) != GetEffectLevel(user)) {
264  return false;
265  }
266  // 4. Only {node} must have value edges pointing to {user}.
267  for (Edge const edge : node->use_edges()) {
268  if (edge.from() != user && NodeProperties::IsValueEdge(edge)) {
269  return false;
270  }
271  }
272  return true;
273 }
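
In practice this predicate gates instruction fusion. A hedged illustration, assuming an x64-style load fold; the helper itself is hypothetical:

// May user = Int32Add(load, y) absorb {load} as a memory operand
// (e.g. add reg, [mem])? The load must have no other value uses and no
// effectful operation may separate it from the add, which is exactly
// what CanCover checks.
bool CanFoldLoadIntoUser(InstructionSelector* selector, Node* user,
                         Node* load) {
  return selector->CanCover(user, load);
}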
274 
275 bool InstructionSelector::CanCoverTransitively(Node* user, Node* node,
276  Node* node_input) const {
277  if (CanCover(user, node) && CanCover(node, node_input)) {
278  // If {node} is pure, transitivity might not hold.
279  if (node->op()->HasProperty(Operator::kPure)) {
280  // If {node_input} is pure, the effect levels do not matter.
281  if (node_input->op()->HasProperty(Operator::kPure)) return true;
282  // Otherwise, {user} and {node_input} must have the same effect level.
283  return GetEffectLevel(user) == GetEffectLevel(node_input);
284  }
285  return true;
286  }
287  return false;
288 }
289 
290 bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
291  Node* node) const {
292  BasicBlock* bb_user = schedule()->block(user);
293  BasicBlock* bb_node = schedule()->block(node);
294  if (bb_user != bb_node) return false;
295  for (Edge const edge : node->use_edges()) {
296  Node* from = edge.from();
297  if ((from != user) && (schedule()->block(from) == bb_user)) {
298  return false;
299  }
300  }
301  return true;
302 }
303 
304 void InstructionSelector::UpdateRenames(Instruction* instruction) {
305  for (size_t i = 0; i < instruction->InputCount(); i++) {
306  TryRename(instruction->InputAt(i));
307  }
308 }
309 
310 void InstructionSelector::UpdateRenamesInPhi(PhiInstruction* phi) {
311  for (size_t i = 0; i < phi->operands().size(); i++) {
312  int vreg = phi->operands()[i];
313  int renamed = GetRename(vreg);
314  if (vreg != renamed) {
315  phi->RenameInput(i, renamed);
316  }
317  }
318 }
319 
320 int InstructionSelector::GetRename(int virtual_register) {
321  int rename = virtual_register;
322  while (true) {
323  if (static_cast<size_t>(rename) >= virtual_register_rename_.size()) break;
324  int next = virtual_register_rename_[rename];
325  if (next == InstructionOperand::kInvalidVirtualRegister) {
326  break;
327  }
328  rename = next;
329  }
330  return rename;
331 }
332 
333 void InstructionSelector::TryRename(InstructionOperand* op) {
334  if (!op->IsUnallocated()) return;
335  UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
336  int vreg = unalloc->virtual_register();
337  int rename = GetRename(vreg);
338  if (rename != vreg) {
339  *unalloc = UnallocatedOperand(*unalloc, rename);
340  }
341 }
342 
343 void InstructionSelector::SetRename(const Node* node, const Node* rename) {
344  int vreg = GetVirtualRegister(node);
345  if (static_cast<size_t>(vreg) >= virtual_register_rename_.size()) {
346  int invalid = InstructionOperand::kInvalidVirtualRegister;
347  virtual_register_rename_.resize(vreg + 1, invalid);
348  }
349  virtual_register_rename_[vreg] = GetVirtualRegister(rename);
350 }
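
Taken together, SetRename records one link and GetRename follows the whole chain, so renames compose. A minimal standalone model of that chain walk (simplified, not V8 code):

#include <vector>

// Follow rename links until an unmapped register is reached; e.g. after
// mapping 7 -> 9 and 9 -> 12, FollowRenameChain(table, 7, -1) returns 12.
int FollowRenameChain(const std::vector<int>& table, int vreg,
                      int invalid_marker) {
  while (static_cast<size_t>(vreg) < table.size() &&
         table[vreg] != invalid_marker) {
    vreg = table[vreg];
  }
  return vreg;
}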
351 
352 int InstructionSelector::GetVirtualRegister(const Node* node) {
353  DCHECK_NOT_NULL(node);
354  size_t const id = node->id();
355  DCHECK_LT(id, virtual_registers_.size());
356  int virtual_register = virtual_registers_[id];
357  if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
358  virtual_register = sequence()->NextVirtualRegister();
359  virtual_registers_[id] = virtual_register;
360  }
361  return virtual_register;
362 }
363 
364 const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
365  const {
366  std::map<NodeId, int> virtual_registers;
367  for (size_t n = 0; n < virtual_registers_.size(); ++n) {
368  if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
369  NodeId const id = static_cast<NodeId>(n);
370  virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
371  }
372  }
373  return virtual_registers;
374 }
375 
376 bool InstructionSelector::IsDefined(Node* node) const {
377  DCHECK_NOT_NULL(node);
378  size_t const id = node->id();
379  DCHECK_LT(id, defined_.size());
380  return defined_[id];
381 }
382 
383 void InstructionSelector::MarkAsDefined(Node* node) {
384  DCHECK_NOT_NULL(node);
385  size_t const id = node->id();
386  DCHECK_LT(id, defined_.size());
387  defined_[id] = true;
388 }
389 
390 bool InstructionSelector::IsUsed(Node* node) const {
391  DCHECK_NOT_NULL(node);
392  // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
393  // that the Retain is actually emitted, otherwise the GC will mess up.
394  if (node->opcode() == IrOpcode::kRetain) return true;
395  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
396  size_t const id = node->id();
397  DCHECK_LT(id, used_.size());
398  return used_[id];
399 }
400 
401 void InstructionSelector::MarkAsUsed(Node* node) {
402  DCHECK_NOT_NULL(node);
403  size_t const id = node->id();
404  DCHECK_LT(id, used_.size());
405  used_[id] = true;
406 }
407 
408 int InstructionSelector::GetEffectLevel(Node* node) const {
409  DCHECK_NOT_NULL(node);
410  size_t const id = node->id();
411  DCHECK_LT(id, effect_level_.size());
412  return effect_level_[id];
413 }
414 
415 void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
416  DCHECK_NOT_NULL(node);
417  size_t const id = node->id();
418  DCHECK_LT(id, effect_level_.size());
419  effect_level_[id] = effect_level;
420 }
421 
422 bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
423  return enable_roots_relative_addressing_ == kEnableRootsRelativeAddressing &&
424  CanUseRootsRegister();
425 }
426 
427 bool InstructionSelector::CanUseRootsRegister() const {
428  return linkage()->GetIncomingDescriptor()->flags() &
429  CallDescriptor::kCanUseRoots;
430 }
431 
432 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
433  const InstructionOperand& op) {
434  UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
435  sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
436 }
437 
438 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
439  Node* node) {
440  sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
441 }
442 
443 namespace {
444 
445 InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
446  Node* input, FrameStateInputKind kind,
447  MachineRepresentation rep) {
448  if (rep == MachineRepresentation::kNone) {
449  return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
450  }
451 
452  switch (input->opcode()) {
453  case IrOpcode::kInt32Constant:
454  case IrOpcode::kInt64Constant:
455  case IrOpcode::kNumberConstant:
456  case IrOpcode::kFloat32Constant:
457  case IrOpcode::kFloat64Constant:
458  case IrOpcode::kDelayedStringConstant:
459  return g->UseImmediate(input);
460  case IrOpcode::kHeapConstant: {
461  if (!CanBeTaggedPointer(rep)) {
462  // If we have inconsistent static and dynamic types, e.g. if we
463  // smi-check a string, we can get here with a heap object that
464  // says it is a smi. In that case, we return an invalid instruction
465  // operand, which will be interpreted as an optimized-out value.
466 
467  // TODO(jarin) Ideally, we should turn the current instruction
468  // into an abort (we should never execute it).
469  return InstructionOperand();
470  }
471 
472  Handle<HeapObject> constant = HeapConstantOf(input->op());
473  RootIndex root_index;
474  if (isolate->roots_table().IsRootHandle(constant, &root_index) &&
475  root_index == RootIndex::kOptimizedOut) {
476  // For an optimized-out object we return an invalid instruction
477  // operand, so that we take the fast path for optimized-out values.
478  return InstructionOperand();
479  }
480 
481  return g->UseImmediate(input);
482  }
483  case IrOpcode::kArgumentsElementsState:
484  case IrOpcode::kArgumentsLengthState:
485  case IrOpcode::kObjectState:
486  case IrOpcode::kTypedObjectState:
487  UNREACHABLE();
488  break;
489  default:
490  switch (kind) {
491  case FrameStateInputKind::kStackSlot:
492  return g->UseUniqueSlot(input);
493  case FrameStateInputKind::kAny:
494  // Currently deopts "wrap" other operations, so the deopt's inputs
495  // are potentially needed until the end of the deoptimising code.
496  return g->UseAnyAtEnd(input);
497  }
498  }
499  UNREACHABLE();
500 }
501 
502 } // namespace
503 
504 class StateObjectDeduplicator {
505  public:
506  explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
507  static const size_t kNotDuplicated = SIZE_MAX;
508 
509  size_t GetObjectId(Node* node) {
510  DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
511  node->opcode() == IrOpcode::kObjectId ||
512  node->opcode() == IrOpcode::kArgumentsElementsState);
513  for (size_t i = 0; i < objects_.size(); ++i) {
514  if (objects_[i] == node) return i;
515  // ObjectId nodes are the Turbofan way to express objects with the same
516  // identity in the deopt info. So they should always be mapped to
517  // previously appearing TypedObjectState nodes.
518  if (HasObjectId(objects_[i]) && HasObjectId(node) &&
519  ObjectIdOf(objects_[i]->op()) == ObjectIdOf(node->op())) {
520  return i;
521  }
522  }
523  DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
524  node->opcode() == IrOpcode::kArgumentsElementsState);
525  return kNotDuplicated;
526  }
527 
528  size_t InsertObject(Node* node) {
529  DCHECK(node->opcode() == IrOpcode::kTypedObjectState ||
530  node->opcode() == IrOpcode::kObjectId ||
531  node->opcode() == IrOpcode::kArgumentsElementsState);
532  size_t id = objects_.size();
533  objects_.push_back(node);
534  return id;
535  }
536 
537  private:
538  static bool HasObjectId(Node* node) {
539  return node->opcode() == IrOpcode::kTypedObjectState ||
540  node->opcode() == IrOpcode::kObjectId;
541  }
542 
543  ZoneVector<Node*> objects_;
544 };
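
The deduplicator exists so that an object captured several times by one frame state is serialized once and back-referenced afterwards. A hedged sketch of the intended pattern; it mirrors AddOperandToStateValueDescriptor below, but the helper itself is hypothetical:

void RecordCapturedObject(StateValueList* values,
                          StateObjectDeduplicator* deduplicator,
                          Node* object_state) {
  size_t id = deduplicator->GetObjectId(object_state);
  if (id == StateObjectDeduplicator::kNotDuplicated) {
    id = deduplicator->InsertObject(object_state);
    // First occurrence: recurse into the object's fields (elided here).
  } else {
    // Keep the running id in sync, then record a back-reference instead
    // of serializing the fields again.
    deduplicator->InsertObject(object_state);
    values->PushDuplicate(id);
  }
}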
545 
546 // Returns the number of instruction operands added to inputs.
547 size_t InstructionSelector::AddOperandToStateValueDescriptor(
548  StateValueList* values, InstructionOperandVector* inputs,
549  OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
550  MachineType type, FrameStateInputKind kind, Zone* zone) {
551  if (input == nullptr) {
552  values->PushOptimizedOut();
553  return 0;
554  }
555 
556  switch (input->opcode()) {
557  case IrOpcode::kArgumentsElementsState: {
558  values->PushArgumentsElements(ArgumentsStateTypeOf(input->op()));
559  // The elements backing store of an arguments object participates in the
560  // duplicate object counting, but can itself never appear duplicated.
561  DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
562  deduplicator->GetObjectId(input));
563  deduplicator->InsertObject(input);
564  return 0;
565  }
566  case IrOpcode::kArgumentsLengthState: {
567  values->PushArgumentsLength(ArgumentsStateTypeOf(input->op()));
568  return 0;
569  }
570  case IrOpcode::kObjectState: {
571  UNREACHABLE();
572  }
573  case IrOpcode::kTypedObjectState:
574  case IrOpcode::kObjectId: {
575  size_t id = deduplicator->GetObjectId(input);
576  if (id == StateObjectDeduplicator::kNotDuplicated) {
577  DCHECK_EQ(IrOpcode::kTypedObjectState, input->opcode());
578  size_t entries = 0;
579  id = deduplicator->InsertObject(input);
580  StateValueList* nested = values->PushRecursiveField(zone, id);
581  int const input_count = input->op()->ValueInputCount();
582  ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
583  for (int i = 0; i < input_count; ++i) {
584  entries += AddOperandToStateValueDescriptor(
585  nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
586  kind, zone);
587  }
588  return entries;
589  } else {
590  // Deoptimizer counts duplicate objects for the running id, so we have
591  // to push the input again.
592  deduplicator->InsertObject(input);
593  values->PushDuplicate(id);
594  return 0;
595  }
596  }
597  default: {
598  InstructionOperand op =
599  OperandForDeopt(isolate(), g, input, kind, type.representation());
600  if (op.kind() == InstructionOperand::INVALID) {
601  // Invalid operand means the value is impossible or optimized-out.
602  values->PushOptimizedOut();
603  return 0;
604  } else {
605  inputs->push_back(op);
606  values->PushPlain(type);
607  return 1;
608  }
609  }
610  }
611 }
612 
613 // Returns the number of instruction operands added to inputs.
614 size_t InstructionSelector::AddInputsToFrameStateDescriptor(
615  FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
616  StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
617  FrameStateInputKind kind, Zone* zone) {
618  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
619 
620  size_t entries = 0;
621  size_t initial_size = inputs->size();
622  USE(initial_size); // initial_size is only used for debug.
623 
624  if (descriptor->outer_state()) {
625  entries += AddInputsToFrameStateDescriptor(
626  descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
627  g, deduplicator, inputs, kind, zone);
628  }
629 
630  Node* parameters = state->InputAt(kFrameStateParametersInput);
631  Node* locals = state->InputAt(kFrameStateLocalsInput);
632  Node* stack = state->InputAt(kFrameStateStackInput);
633  Node* context = state->InputAt(kFrameStateContextInput);
634  Node* function = state->InputAt(kFrameStateFunctionInput);
635 
636  DCHECK_EQ(descriptor->parameters_count(),
637  StateValuesAccess(parameters).size());
638  DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
639  DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
640 
641  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
642 
643  DCHECK_EQ(values_descriptor->size(), 0u);
644  values_descriptor->ReserveSize(descriptor->GetSize());
645 
646  entries += AddOperandToStateValueDescriptor(
647  values_descriptor, inputs, g, deduplicator, function,
648  MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
649  for (StateValuesAccess::TypedNode input_node :
650  StateValuesAccess(parameters)) {
651  entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
652  deduplicator, input_node.node,
653  input_node.type, kind, zone);
654  }
655  if (descriptor->HasContext()) {
656  entries += AddOperandToStateValueDescriptor(
657  values_descriptor, inputs, g, deduplicator, context,
658  MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
659  }
660  for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
661  entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
662  deduplicator, input_node.node,
663  input_node.type, kind, zone);
664  }
665  for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
666  entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
667  deduplicator, input_node.node,
668  input_node.type, kind, zone);
669  }
670  DCHECK_EQ(initial_size + entries, inputs->size());
671  return entries;
672 }
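
For reference, the operand order this function produces, read off the code above (parameter, local, and stack counts come from the descriptor):

// [inputs of outer frame states, outermost first]
// function                        // always FrameStateInputKind::kStackSlot
// parameter 0 .. parameter p-1    // kind as requested by the caller
// context                         // kStackSlot, if descriptor->HasContext()
// local 0 .. local l-1
// stack 0 .. stack s-1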
673 
674 Instruction* InstructionSelector::EmitWithContinuation(
675  InstructionCode opcode, FlagsContinuation* cont) {
676  return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
677 }
678 
679 Instruction* InstructionSelector::EmitWithContinuation(
680  InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
681  return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont);
682 }
683 
684 Instruction* InstructionSelector::EmitWithContinuation(
685  InstructionCode opcode, InstructionOperand a, InstructionOperand b,
686  FlagsContinuation* cont) {
687  InstructionOperand inputs[] = {a, b};
688  return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
689  cont);
690 }
691 
692 Instruction* InstructionSelector::EmitWithContinuation(
693  InstructionCode opcode, InstructionOperand a, InstructionOperand b,
694  InstructionOperand c, FlagsContinuation* cont) {
695  InstructionOperand inputs[] = {a, b, c};
696  return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
697  cont);
698 }
699 
700 Instruction* InstructionSelector::EmitWithContinuation(
701  InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
702  size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
703  OperandGenerator g(this);
704 
705  opcode = cont->Encode(opcode);
706 
707  continuation_inputs_.resize(0);
708  for (size_t i = 0; i < input_count; i++) {
709  continuation_inputs_.push_back(inputs[i]);
710  }
711 
712  continuation_outputs_.resize(0);
713  for (size_t i = 0; i < output_count; i++) {
714  continuation_outputs_.push_back(outputs[i]);
715  }
716 
717  if (cont->IsBranch()) {
718  continuation_inputs_.push_back(g.Label(cont->true_block()));
719  continuation_inputs_.push_back(g.Label(cont->false_block()));
720  } else if (cont->IsDeoptimize()) {
721  opcode |= MiscField::encode(static_cast<int>(input_count));
722  AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
723  cont->reason(), cont->feedback(),
724  cont->frame_state());
725  } else if (cont->IsSet()) {
726  continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
727  } else if (cont->IsTrap()) {
728  int trap_id = static_cast<int>(cont->trap_id());
729  continuation_inputs_.push_back(g.UseImmediate(trap_id));
730  } else {
731  DCHECK(cont->IsNone());
732  }
733 
734  size_t const emit_inputs_size = continuation_inputs_.size();
735  auto* emit_inputs =
736  emit_inputs_size ? &continuation_inputs_.front() : nullptr;
737  size_t const emit_outputs_size = continuation_outputs_.size();
738  auto* emit_outputs =
739  emit_outputs_size ? &continuation_outputs_.front() : nullptr;
740  return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
741  emit_inputs, 0, nullptr);
742 }
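
Depending on the continuation kind, the method above appends branch labels, deoptimization arguments, a boolean result register, or a trap id. A hedged usage sketch; the visitor and the opcode stand-in are hypothetical, while FlagsContinuation::ForSet and kEqual exist in this codebase:

void VisitFakeWord32Equal(InstructionSelector* selector, Node* node) {
  OperandGenerator g(selector);
  // A "set" continuation materializes the comparison result as a boolean
  // register, which EmitWithContinuation adds as an extra output.
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  selector->EmitWithContinuation(kArchNop,  // stand-in for a compare opcode
                                 g.UseRegister(node->InputAt(0)),
                                 g.UseRegister(node->InputAt(1)), &cont);
}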
743 
744 void InstructionSelector::AppendDeoptimizeArguments(
745  InstructionOperandVector* args, DeoptimizeKind kind,
746  DeoptimizeReason reason, VectorSlotPair const& feedback,
747  Node* frame_state) {
748  OperandGenerator g(this);
749  FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
750  DCHECK_NE(DeoptimizeKind::kLazy, kind);
751  int const state_id =
752  sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
753  args->push_back(g.TempImmediate(state_id));
754  StateObjectDeduplicator deduplicator(instruction_zone());
755  AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
756  args, FrameStateInputKind::kAny,
757  instruction_zone());
758 }
759 
760 Instruction* InstructionSelector::EmitDeoptimize(
761  InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
762  size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
763  DeoptimizeReason reason, VectorSlotPair const& feedback,
764  Node* frame_state) {
765  InstructionOperandVector args(instruction_zone());
766  for (size_t i = 0; i < input_count; ++i) {
767  args.push_back(inputs[i]);
768  }
769  opcode |= MiscField::encode(static_cast<int>(input_count));
770  AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
771  return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
772  nullptr);
773 }
774 
775 // An internal helper class for generating the operands to calls.
776 // TODO(bmeurer): Get rid of the CallBuffer business and make
777 // InstructionSelector::VisitCall platform independent instead.
778 struct CallBuffer {
779  CallBuffer(Zone* zone, const CallDescriptor* call_descriptor,
780  FrameStateDescriptor* frame_state)
781  : descriptor(call_descriptor),
782  frame_state_descriptor(frame_state),
783  output_nodes(zone),
784  outputs(zone),
785  instruction_args(zone),
786  pushed_nodes(zone) {
787  output_nodes.reserve(call_descriptor->ReturnCount());
788  outputs.reserve(call_descriptor->ReturnCount());
789  pushed_nodes.reserve(input_count());
790  instruction_args.reserve(input_count() + frame_state_value_count());
791  }
792 
793  const CallDescriptor* descriptor;
794  FrameStateDescriptor* frame_state_descriptor;
795  ZoneVector<PushParameter> output_nodes;
796  InstructionOperandVector outputs;
797  InstructionOperandVector instruction_args;
798  ZoneVector<PushParameter> pushed_nodes;
799 
800  size_t input_count() const { return descriptor->InputCount(); }
801 
802  size_t frame_state_count() const { return descriptor->FrameStateCount(); }
803 
804  size_t frame_state_value_count() const {
805  return (frame_state_descriptor == nullptr)
806  ? 0
807  : (frame_state_descriptor->GetTotalSize() +
808  1); // Include deopt id.
809  }
810 };
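
A hedged sketch of how the platform-specific VisitCall implementations are expected to use this buffer (member-function context assumed, flag choice illustrative):

// Inside a platform InstructionSelector::VisitCall(Node* call, ...):
//   FrameStateDescriptor* frame_state_descriptor = ...;  // may be nullptr
//   CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
//   InitializeCallBuffer(call, &buffer, kCallCodeImmediate,
//                        /*is_tail_call=*/false, /*stack_param_delta=*/0);
//   // buffer.pushed_nodes -> explicit stack pushes before the call;
//   // buffer.outputs / buffer.instruction_args -> one Emit() for the call.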
811 
812 // TODO(bmeurer): Get rid of the CallBuffer business and make
813 // InstructionSelector::VisitCall platform independent instead.
814 void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
815  CallBufferFlags flags,
816  bool is_tail_call,
817  int stack_param_delta) {
818  OperandGenerator g(this);
819  size_t ret_count = buffer->descriptor->ReturnCount();
820  DCHECK_LE(call->op()->ValueOutputCount(), static_cast<int>(ret_count));
821  DCHECK_EQ(
822  call->op()->ValueInputCount(),
823  static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
824 
825  if (ret_count > 0) {
826  // Collect the projections that represent multiple outputs from this call.
827  if (ret_count == 1) {
828  PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
829  buffer->output_nodes.push_back(result);
830  } else {
831  buffer->output_nodes.resize(ret_count);
832  int stack_count = 0;
833  for (size_t i = 0; i < ret_count; ++i) {
834  LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
835  buffer->output_nodes[i] = PushParameter(nullptr, location);
836  if (location.IsCallerFrameSlot()) {
837  stack_count += location.GetSizeInPointers();
838  }
839  }
840  for (Edge const edge : call->use_edges()) {
841  if (!NodeProperties::IsValueEdge(edge)) continue;
842  Node* node = edge.from();
843  DCHECK_EQ(IrOpcode::kProjection, node->opcode());
844  size_t const index = ProjectionIndexOf(node->op());
845 
846  DCHECK_LT(index, buffer->output_nodes.size());
847  DCHECK(!buffer->output_nodes[index].node);
848  buffer->output_nodes[index].node = node;
849  }
850  frame_->EnsureReturnSlots(stack_count);
851  }
852 
853  // Filter out the outputs that aren't live because no projection uses them.
854  size_t outputs_needed_by_framestate =
855  buffer->frame_state_descriptor == nullptr
856  ? 0
857  : buffer->frame_state_descriptor->state_combine()
858  .ConsumedOutputCount();
859  for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
860  bool output_is_live = buffer->output_nodes[i].node != nullptr ||
861  i < outputs_needed_by_framestate;
862  if (output_is_live) {
863  LinkageLocation location = buffer->output_nodes[i].location;
864  MachineRepresentation rep = location.GetType().representation();
865 
866  Node* output = buffer->output_nodes[i].node;
867  InstructionOperand op = output == nullptr
868  ? g.TempLocation(location)
869  : g.DefineAsLocation(output, location);
870  MarkAsRepresentation(rep, op);
871 
872  if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
873  buffer->outputs.push_back(op);
874  buffer->output_nodes[i].node = nullptr;
875  }
876  }
877  }
878  }
879 
880  // The first argument is always the callee code.
881  Node* callee = call->InputAt(0);
882  bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
883  bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
884  bool call_use_fixed_target_reg = (flags & kCallFixedTargetRegister) != 0;
885  bool call_through_slot = (flags & kAllowCallThroughSlot) != 0;
886  switch (buffer->descriptor->kind()) {
887  case CallDescriptor::kCallCodeObject:
888  // TODO(jgruber, v8:7449): The below is a hack to support tail-calls from
889  // JS-linkage callers with a register code target. The problem is that the
890  // code target register may be clobbered before the final jmp by
891  // AssemblePopArgumentsAdaptorFrame. As a more permanent fix we could
892  // entirely remove support for tail-calls from JS-linkage callers.
893  buffer->instruction_args.push_back(
894  (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
895  ? g.UseImmediate(callee)
896  : call_use_fixed_target_reg
897  ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
898  : is_tail_call ? g.UseUniqueRegister(callee)
899  : call_through_slot ? g.UseUniqueSlot(callee)
900  : g.UseRegister(callee));
901  break;
902  case CallDescriptor::kCallAddress:
903  buffer->instruction_args.push_back(
904  (call_address_immediate &&
905  callee->opcode() == IrOpcode::kExternalConstant)
906  ? g.UseImmediate(callee)
907  : call_use_fixed_target_reg
908  ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
909  : g.UseRegister(callee));
910  break;
911  case CallDescriptor::kCallWasmFunction:
912  case CallDescriptor::kCallWasmImportWrapper:
913  buffer->instruction_args.push_back(
914  (call_address_immediate &&
915  (callee->opcode() == IrOpcode::kRelocatableInt64Constant ||
916  callee->opcode() == IrOpcode::kRelocatableInt32Constant))
917  ? g.UseImmediate(callee)
918  : call_use_fixed_target_reg
919  ? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
920  : g.UseRegister(callee));
921  break;
922  case CallDescriptor::kCallJSFunction:
923  buffer->instruction_args.push_back(
924  g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
925  break;
926  }
927  DCHECK_EQ(1u, buffer->instruction_args.size());
928 
929  // Argument 1 is used for the poison-alias index (encoded in a word-sized
930  // immediate): the index of the operand that aliases with the poison
931  // register, or -1 if there is no aliasing.
932  buffer->instruction_args.push_back(g.TempImmediate(-1));
933  const size_t poison_alias_index = 1;
934  DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
935 
936  // If the call needs a frame state, we insert the state information as
937  // follows (n is the number of value inputs to the frame state):
938  // arg 2 : deoptimization id.
939  // arg 3 - arg (n + 2) : value inputs to the frame state.
940  size_t frame_state_entries = 0;
941  USE(frame_state_entries); // frame_state_entries is only used for debug.
942  if (buffer->frame_state_descriptor != nullptr) {
943  Node* frame_state =
944  call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
945 
946  // If it was a syntactic tail call we need to drop the current frame and
947  // all the frames on top of it that are either an arguments adaptor frame
948  // or a tail caller frame.
949  if (is_tail_call) {
950  frame_state = NodeProperties::GetFrameStateInput(frame_state);
951  buffer->frame_state_descriptor =
952  buffer->frame_state_descriptor->outer_state();
953  while (buffer->frame_state_descriptor != nullptr &&
954  buffer->frame_state_descriptor->type() ==
955  FrameStateType::kArgumentsAdaptor) {
956  frame_state = NodeProperties::GetFrameStateInput(frame_state);
957  buffer->frame_state_descriptor =
958  buffer->frame_state_descriptor->outer_state();
959  }
960  }
961 
962  int const state_id = sequence()->AddDeoptimizationEntry(
963  buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
964  DeoptimizeReason::kUnknown, VectorSlotPair());
965  buffer->instruction_args.push_back(g.TempImmediate(state_id));
966 
967  StateObjectDeduplicator deduplicator(instruction_zone());
968 
969  frame_state_entries =
970  1 + AddInputsToFrameStateDescriptor(
971  buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
972  &buffer->instruction_args, FrameStateInputKind::kStackSlot,
973  instruction_zone());
974 
975  DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
976  }
977 
978  size_t input_count = static_cast<size_t>(buffer->input_count());
979 
980  // Split the arguments into pushed_nodes and instruction_args. Pushed
981  // arguments require an explicit push instruction before the call and do
982  // not appear as arguments to the call. Everything else ends up
983  // as an InstructionOperand argument to the call.
984  auto iter(call->inputs().begin());
985  size_t pushed_count = 0;
986  bool call_tail = (flags & kCallTail) != 0;
987  for (size_t index = 0; index < input_count; ++iter, ++index) {
988  DCHECK(iter != call->inputs().end());
989  DCHECK_NE(IrOpcode::kFrameState, (*iter)->op()->opcode());
990  if (index == 0) continue; // The first argument (callee) is already done.
991 
992  LinkageLocation location = buffer->descriptor->GetInputLocation(index);
993  if (call_tail) {
994  location = LinkageLocation::ConvertToTailCallerLocation(
995  location, stack_param_delta);
996  }
997  InstructionOperand op = g.UseLocation(*iter, location);
998  UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
999  if (unallocated.HasFixedSlotPolicy() && !call_tail) {
1000  int stack_index = -unallocated.fixed_slot_index() - 1;
1001  if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
1002  buffer->pushed_nodes.resize(stack_index + 1);
1003  }
1004  PushParameter param = {*iter, location};
1005  buffer->pushed_nodes[stack_index] = param;
1006  pushed_count++;
1007  } else {
1008  // If we do load poisoning and the linkage uses the poisoning register,
1009  // then we request the input in memory location, and during code
1010  // generation, we move the input to the register.
1011  if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
1012  unallocated.HasFixedRegisterPolicy()) {
1013  int reg = unallocated.fixed_register_index();
1014  if (Register::from_code(reg) == kSpeculationPoisonRegister) {
1015  buffer->instruction_args[poison_alias_index] = g.TempImmediate(
1016  static_cast<int32_t>(buffer->instruction_args.size()));
1017  op = g.UseRegisterOrSlotOrConstant(*iter);
1018  }
1019  }
1020  buffer->instruction_args.push_back(op);
1021  }
1022  }
1023  DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
1024  frame_state_entries - 1);
1025  if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
1026  stack_param_delta != 0) {
1027  // For tail calls that change the size of their parameter list and keep
1028  // their return address on the stack, move the return address to just above
1029  // the parameters.
1030  LinkageLocation saved_return_location =
1031  LinkageLocation::ForSavedCallerReturnAddress();
1032  InstructionOperand return_address =
1033  g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
1034  saved_return_location, stack_param_delta),
1035  saved_return_location);
1036  buffer->instruction_args.push_back(return_address);
1037  }
1038 }
1039 
1040 bool InstructionSelector::IsSourcePositionUsed(Node* node) {
1041  return (source_position_mode_ == kAllSourcePositions ||
1042  node->opcode() == IrOpcode::kCall ||
1043  node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
1044  node->opcode() == IrOpcode::kTrapIf ||
1045  node->opcode() == IrOpcode::kTrapUnless ||
1046  node->opcode() == IrOpcode::kProtectedLoad ||
1047  node->opcode() == IrOpcode::kProtectedStore);
1048 }
1049 
1050 void InstructionSelector::VisitBlock(BasicBlock* block) {
1051  DCHECK(!current_block_);
1052  current_block_ = block;
1053  auto current_num_instructions = [&] {
1054  DCHECK_GE(kMaxInt, instructions_.size());
1055  return static_cast<int>(instructions_.size());
1056  };
1057  int current_block_end = current_num_instructions();
1058 
1059  int effect_level = 0;
1060  for (Node* const node : *block) {
1061  SetEffectLevel(node, effect_level);
1062  if (node->opcode() == IrOpcode::kStore ||
1063  node->opcode() == IrOpcode::kUnalignedStore ||
1064  node->opcode() == IrOpcode::kCall ||
1065  node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
1066  node->opcode() == IrOpcode::kProtectedLoad ||
1067  node->opcode() == IrOpcode::kProtectedStore) {
1068  ++effect_level;
1069  }
1070  }
1071 
1072  // We visit the control first, then the nodes in the block, so the block's
1073  // control input should be on the same effect level as the last node.
1074  if (block->control_input() != nullptr) {
1075  SetEffectLevel(block->control_input(), effect_level);
1076  }
1077 
1078  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
1079  if (instruction_selection_failed()) return false;
1080  if (current_num_instructions() == instruction_start) return true;
1081  std::reverse(instructions_.begin() + instruction_start,
1082  instructions_.end());
1083  if (!node) return true;
1084  if (!source_positions_) return true;
1085  SourcePosition source_position = source_positions_->GetSourcePosition(node);
1086  if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
1087  sequence()->SetSourcePosition(instructions_[instruction_start],
1088  source_position);
1089  }
1090  return true;
1091  };
1092 
1093  // Generate code for the block control "top down", but schedule the code
1094  // "bottom up".
1095  VisitControl(block);
1096  if (!FinishEmittedInstructions(block->control_input(), current_block_end))
1097  return;
1098 
1099  // Visit code in reverse control flow order, because architecture-specific
1100  // matching may cover more than one node at a time.
1101  for (auto node : base::Reversed(*block)) {
1102  int current_node_end = current_num_instructions();
1103  // Skip nodes that are unused or already defined.
1104  if (IsUsed(node) && !IsDefined(node)) {
1105  // Generate code for this node "top down", but schedule the code "bottom
1106  // up".
1107  VisitNode(node);
1108  if (!FinishEmittedInstructions(node, current_node_end)) return;
1109  }
1110  if (trace_turbo_ == kEnableTraceTurboJson) {
1111  instr_origins_[node->id()] = {current_num_instructions(),
1112  current_node_end};
1113  }
1114  }
1115 
1116  // We're done with the block.
1117  InstructionBlock* instruction_block =
1118  sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
1119  if (current_num_instructions() == current_block_end) {
1120  // Avoid empty block: insert a {kArchNop} instruction.
1121  Emit(Instruction::New(sequence()->zone(), kArchNop));
1122  }
1123  instruction_block->set_code_start(current_num_instructions());
1124  instruction_block->set_code_end(current_block_end);
1125  current_block_ = nullptr;
1126 }
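
The effect-level bookkeeping at the top of this function is what CanCover consults. A worked example with a hypothetical block:

// Node order in the block       assigned effect level
//   Load A                        0
//   Store X                       0   (the level is bumped after the store)
//   Load B                        1
//   Load C                        1
//
// A user of Load A that sits at effect level 1 cannot cover the load:
// Store X may have clobbered the loaded location, so CanCover bails out.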
1127 
1128 void InstructionSelector::VisitControl(BasicBlock* block) {
1129 #ifdef DEBUG
1130  // SSA deconstruction requires targets of branches not to have phis.
1131  // Edge split form guarantees this property, but is more strict.
1132  if (block->SuccessorCount() > 1) {
1133  for (BasicBlock* const successor : block->successors()) {
1134  for (Node* const node : *successor) {
1135  if (IrOpcode::IsPhiOpcode(node->opcode())) {
1136  std::ostringstream str;
1137  str << "You might have specified merged variables for a label with "
1138  << "only one predecessor." << std::endl
1139  << "# Current Block: " << *successor << std::endl
1140  << "# Node: " << *node;
1141  FATAL("%s", str.str().c_str());
1142  }
1143  }
1144  }
1145  }
1146 #endif
1147 
1148  Node* input = block->control_input();
1149  int instruction_end = static_cast<int>(instructions_.size());
1150  switch (block->control()) {
1151  case BasicBlock::kGoto:
1152  VisitGoto(block->SuccessorAt(0));
1153  break;
1154  case BasicBlock::kCall: {
1155  DCHECK_EQ(IrOpcode::kCall, input->opcode());
1156  BasicBlock* success = block->SuccessorAt(0);
1157  BasicBlock* exception = block->SuccessorAt(1);
1158  VisitCall(input, exception);
1159  VisitGoto(success);
1160  break;
1161  }
1162  case BasicBlock::kTailCall: {
1163  DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
1164  VisitTailCall(input);
1165  break;
1166  }
1167  case BasicBlock::kBranch: {
1168  DCHECK_EQ(IrOpcode::kBranch, input->opcode());
1169  BasicBlock* tbranch = block->SuccessorAt(0);
1170  BasicBlock* fbranch = block->SuccessorAt(1);
1171  if (tbranch == fbranch) {
1172  VisitGoto(tbranch);
1173  } else {
1174  VisitBranch(input, tbranch, fbranch);
1175  }
1176  break;
1177  }
1178  case BasicBlock::kSwitch: {
1179  DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
1180  // Last successor must be {IfDefault}.
1181  BasicBlock* default_branch = block->successors().back();
1182  DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
1183  // All other successors must be {IfValue}s.
1184  int32_t min_value = std::numeric_limits<int32_t>::max();
1185  int32_t max_value = std::numeric_limits<int32_t>::min();
1186  size_t case_count = block->SuccessorCount() - 1;
1187  ZoneVector<CaseInfo> cases(case_count, zone());
1188  for (size_t i = 0; i < case_count; ++i) {
1189  BasicBlock* branch = block->SuccessorAt(i);
1190  const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
1191  cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
1192  if (min_value > p.value()) min_value = p.value();
1193  if (max_value < p.value()) max_value = p.value();
1194  }
1195  SwitchInfo sw(cases, min_value, max_value, default_branch);
1196  VisitSwitch(input, sw);
1197  break;
1198  }
1199  case BasicBlock::kReturn: {
1200  DCHECK_EQ(IrOpcode::kReturn, input->opcode());
1201  VisitReturn(input);
1202  break;
1203  }
1204  case BasicBlock::kDeoptimize: {
1205  DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
1206  Node* value = input->InputAt(0);
1207  VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
1208  break;
1209  }
1210  case BasicBlock::kThrow:
1211  DCHECK_EQ(IrOpcode::kThrow, input->opcode());
1212  VisitThrow(input);
1213  break;
1214  case BasicBlock::kNone: {
1215  // Exit block doesn't have control.
1216  DCHECK_NULL(input);
1217  break;
1218  }
1219  default:
1220  UNREACHABLE();
1221  break;
1222  }
1223  if (trace_turbo_ == kEnableTraceTurboJson && input) {
1224  int instruction_start = static_cast<int>(instructions_.size());
1225  instr_origins_[input->id()] = {instruction_start, instruction_end};
1226  }
1227 }
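
For kSwitch above, every IfValue case is collected along with the overall value range; a backend can use that range to choose between a jump table and a comparison tree. A worked example with hypothetical case values:

// Cases {1, 3, 4} give min_value = 1 and max_value = 4, so a jump table
// would need max_value - min_value + 1 = 4 slots, with the missing
// value 2 routed to default_branch.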
1228 
1229 void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
1230  Node* projection0 = NodeProperties::FindProjection(node, 0);
1231  if (projection0) {
1232  MarkAsWord32(projection0);
1233  }
1234  Node* projection1 = NodeProperties::FindProjection(node, 1);
1235  if (projection1) {
1236  MarkAsWord32(projection1);
1237  }
1238 }
1239 
1240 void InstructionSelector::VisitNode(Node* node) {
1241  DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
1242  switch (node->opcode()) {
1243  case IrOpcode::kStart:
1244  case IrOpcode::kLoop:
1245  case IrOpcode::kEnd:
1246  case IrOpcode::kBranch:
1247  case IrOpcode::kIfTrue:
1248  case IrOpcode::kIfFalse:
1249  case IrOpcode::kIfSuccess:
1250  case IrOpcode::kSwitch:
1251  case IrOpcode::kIfValue:
1252  case IrOpcode::kIfDefault:
1253  case IrOpcode::kEffectPhi:
1254  case IrOpcode::kMerge:
1255  case IrOpcode::kTerminate:
1256  case IrOpcode::kBeginRegion:
1257  // No code needed for these graph artifacts.
1258  return;
1259  case IrOpcode::kIfException:
1260  return MarkAsReference(node), VisitIfException(node);
1261  case IrOpcode::kFinishRegion:
1262  return MarkAsReference(node), VisitFinishRegion(node);
1263  case IrOpcode::kParameter: {
1264  MachineType type =
1265  linkage()->GetParameterType(ParameterIndexOf(node->op()));
1266  MarkAsRepresentation(type.representation(), node);
1267  return VisitParameter(node);
1268  }
1269  case IrOpcode::kOsrValue:
1270  return MarkAsReference(node), VisitOsrValue(node);
1271  case IrOpcode::kPhi: {
1272  MachineRepresentation rep = PhiRepresentationOf(node->op());
1273  if (rep == MachineRepresentation::kNone) return;
1274  MarkAsRepresentation(rep, node);
1275  return VisitPhi(node);
1276  }
1277  case IrOpcode::kProjection:
1278  return VisitProjection(node);
1279  case IrOpcode::kInt32Constant:
1280  case IrOpcode::kInt64Constant:
1281  case IrOpcode::kExternalConstant:
1282  case IrOpcode::kRelocatableInt32Constant:
1283  case IrOpcode::kRelocatableInt64Constant:
1284  return VisitConstant(node);
1285  case IrOpcode::kFloat32Constant:
1286  return MarkAsFloat32(node), VisitConstant(node);
1287  case IrOpcode::kFloat64Constant:
1288  return MarkAsFloat64(node), VisitConstant(node);
1289  case IrOpcode::kHeapConstant:
1290  return MarkAsReference(node), VisitConstant(node);
1291  case IrOpcode::kNumberConstant: {
1292  double value = OpParameter<double>(node->op());
1293  if (!IsSmiDouble(value)) MarkAsReference(node);
1294  return VisitConstant(node);
1295  }
1296  case IrOpcode::kDelayedStringConstant:
1297  return MarkAsReference(node), VisitConstant(node);
1298  case IrOpcode::kCall:
1299  return VisitCall(node);
1300  case IrOpcode::kCallWithCallerSavedRegisters:
1301  return VisitCallWithCallerSavedRegisters(node);
1302  case IrOpcode::kDeoptimizeIf:
1303  return VisitDeoptimizeIf(node);
1304  case IrOpcode::kDeoptimizeUnless:
1305  return VisitDeoptimizeUnless(node);
1306  case IrOpcode::kTrapIf:
1307  return VisitTrapIf(node, TrapIdOf(node->op()));
1308  case IrOpcode::kTrapUnless:
1309  return VisitTrapUnless(node, TrapIdOf(node->op()));
1310  case IrOpcode::kFrameState:
1311  case IrOpcode::kStateValues:
1312  case IrOpcode::kObjectState:
1313  return;
1314  case IrOpcode::kDebugAbort:
1315  VisitDebugAbort(node);
1316  return;
1317  case IrOpcode::kDebugBreak:
1318  VisitDebugBreak(node);
1319  return;
1320  case IrOpcode::kUnreachable:
1321  VisitUnreachable(node);
1322  return;
1323  case IrOpcode::kDeadValue:
1324  VisitDeadValue(node);
1325  return;
1326  case IrOpcode::kComment:
1327  VisitComment(node);
1328  return;
1329  case IrOpcode::kRetain:
1330  VisitRetain(node);
1331  return;
1332  case IrOpcode::kLoad: {
1333  LoadRepresentation type = LoadRepresentationOf(node->op());
1334  MarkAsRepresentation(type.representation(), node);
1335  return VisitLoad(node);
1336  }
1337  case IrOpcode::kPoisonedLoad: {
1338  LoadRepresentation type = LoadRepresentationOf(node->op());
1339  MarkAsRepresentation(type.representation(), node);
1340  return VisitPoisonedLoad(node);
1341  }
1342  case IrOpcode::kStore:
1343  return VisitStore(node);
1344  case IrOpcode::kProtectedStore:
1345  return VisitProtectedStore(node);
1346  case IrOpcode::kWord32And:
1347  return MarkAsWord32(node), VisitWord32And(node);
1348  case IrOpcode::kWord32Or:
1349  return MarkAsWord32(node), VisitWord32Or(node);
1350  case IrOpcode::kWord32Xor:
1351  return MarkAsWord32(node), VisitWord32Xor(node);
1352  case IrOpcode::kWord32Shl:
1353  return MarkAsWord32(node), VisitWord32Shl(node);
1354  case IrOpcode::kWord32Shr:
1355  return MarkAsWord32(node), VisitWord32Shr(node);
1356  case IrOpcode::kWord32Sar:
1357  return MarkAsWord32(node), VisitWord32Sar(node);
1358  case IrOpcode::kWord32Ror:
1359  return MarkAsWord32(node), VisitWord32Ror(node);
1360  case IrOpcode::kWord32Equal:
1361  return VisitWord32Equal(node);
1362  case IrOpcode::kWord32Clz:
1363  return MarkAsWord32(node), VisitWord32Clz(node);
1364  case IrOpcode::kWord32Ctz:
1365  return MarkAsWord32(node), VisitWord32Ctz(node);
1366  case IrOpcode::kWord32ReverseBits:
1367  return MarkAsWord32(node), VisitWord32ReverseBits(node);
1368  case IrOpcode::kWord32ReverseBytes:
1369  return MarkAsWord32(node), VisitWord32ReverseBytes(node);
1370  case IrOpcode::kInt32AbsWithOverflow:
1371  return MarkAsWord32(node), VisitInt32AbsWithOverflow(node);
1372  case IrOpcode::kWord32Popcnt:
1373  return MarkAsWord32(node), VisitWord32Popcnt(node);
1374  case IrOpcode::kWord64Popcnt:
1375  return MarkAsWord32(node), VisitWord64Popcnt(node);
1376  case IrOpcode::kWord64And:
1377  return MarkAsWord64(node), VisitWord64And(node);
1378  case IrOpcode::kWord64Or:
1379  return MarkAsWord64(node), VisitWord64Or(node);
1380  case IrOpcode::kWord64Xor:
1381  return MarkAsWord64(node), VisitWord64Xor(node);
1382  case IrOpcode::kWord64Shl:
1383  return MarkAsWord64(node), VisitWord64Shl(node);
1384  case IrOpcode::kWord64Shr:
1385  return MarkAsWord64(node), VisitWord64Shr(node);
1386  case IrOpcode::kWord64Sar:
1387  return MarkAsWord64(node), VisitWord64Sar(node);
1388  case IrOpcode::kWord64Ror:
1389  return MarkAsWord64(node), VisitWord64Ror(node);
1390  case IrOpcode::kWord64Clz:
1391  return MarkAsWord64(node), VisitWord64Clz(node);
1392  case IrOpcode::kWord64Ctz:
1393  return MarkAsWord64(node), VisitWord64Ctz(node);
1394  case IrOpcode::kWord64ReverseBits:
1395  return MarkAsWord64(node), VisitWord64ReverseBits(node);
1396  case IrOpcode::kWord64ReverseBytes:
1397  return MarkAsWord64(node), VisitWord64ReverseBytes(node);
1398  case IrOpcode::kInt64AbsWithOverflow:
1399  return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
1400  case IrOpcode::kWord64Equal:
1401  return VisitWord64Equal(node);
1402  case IrOpcode::kInt32Add:
1403  return MarkAsWord32(node), VisitInt32Add(node);
1404  case IrOpcode::kInt32AddWithOverflow:
1405  return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
1406  case IrOpcode::kInt32Sub:
1407  return MarkAsWord32(node), VisitInt32Sub(node);
1408  case IrOpcode::kInt32SubWithOverflow:
1409  return VisitInt32SubWithOverflow(node);
1410  case IrOpcode::kInt32Mul:
1411  return MarkAsWord32(node), VisitInt32Mul(node);
1412  case IrOpcode::kInt32MulWithOverflow:
1413  return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
1414  case IrOpcode::kInt32MulHigh:
1415  return VisitInt32MulHigh(node);
1416  case IrOpcode::kInt32Div:
1417  return MarkAsWord32(node), VisitInt32Div(node);
1418  case IrOpcode::kInt32Mod:
1419  return MarkAsWord32(node), VisitInt32Mod(node);
1420  case IrOpcode::kInt32LessThan:
1421  return VisitInt32LessThan(node);
1422  case IrOpcode::kInt32LessThanOrEqual:
1423  return VisitInt32LessThanOrEqual(node);
1424  case IrOpcode::kUint32Div:
1425  return MarkAsWord32(node), VisitUint32Div(node);
1426  case IrOpcode::kUint32LessThan:
1427  return VisitUint32LessThan(node);
1428  case IrOpcode::kUint32LessThanOrEqual:
1429  return VisitUint32LessThanOrEqual(node);
1430  case IrOpcode::kUint32Mod:
1431  return MarkAsWord32(node), VisitUint32Mod(node);
1432  case IrOpcode::kUint32MulHigh:
1433  return VisitUint32MulHigh(node);
1434  case IrOpcode::kInt64Add:
1435  return MarkAsWord64(node), VisitInt64Add(node);
1436  case IrOpcode::kInt64AddWithOverflow:
1437  return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
1438  case IrOpcode::kInt64Sub:
1439  return MarkAsWord64(node), VisitInt64Sub(node);
1440  case IrOpcode::kInt64SubWithOverflow:
1441  return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
1442  case IrOpcode::kInt64Mul:
1443  return MarkAsWord64(node), VisitInt64Mul(node);
1444  case IrOpcode::kInt64Div:
1445  return MarkAsWord64(node), VisitInt64Div(node);
1446  case IrOpcode::kInt64Mod:
1447  return MarkAsWord64(node), VisitInt64Mod(node);
1448  case IrOpcode::kInt64LessThan:
1449  return VisitInt64LessThan(node);
1450  case IrOpcode::kInt64LessThanOrEqual:
1451  return VisitInt64LessThanOrEqual(node);
1452  case IrOpcode::kUint64Div:
1453  return MarkAsWord64(node), VisitUint64Div(node);
1454  case IrOpcode::kUint64LessThan:
1455  return VisitUint64LessThan(node);
1456  case IrOpcode::kUint64LessThanOrEqual:
1457  return VisitUint64LessThanOrEqual(node);
1458  case IrOpcode::kUint64Mod:
1459  return MarkAsWord64(node), VisitUint64Mod(node);
1460  case IrOpcode::kBitcastTaggedToWord:
1461  return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
1462  VisitBitcastTaggedToWord(node);
1463  case IrOpcode::kBitcastWordToTagged:
1464  return MarkAsReference(node), VisitBitcastWordToTagged(node);
1465  case IrOpcode::kBitcastWordToTaggedSigned:
1466  return MarkAsRepresentation(MachineRepresentation::kTaggedSigned, node),
1467  EmitIdentity(node);
1468  case IrOpcode::kChangeFloat32ToFloat64:
1469  return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
1470  case IrOpcode::kChangeInt32ToFloat64:
1471  return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
1472  case IrOpcode::kChangeInt64ToFloat64:
1473  return MarkAsFloat64(node), VisitChangeInt64ToFloat64(node);
1474  case IrOpcode::kChangeUint32ToFloat64:
1475  return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
1476  case IrOpcode::kChangeFloat64ToInt32:
1477  return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
1478  case IrOpcode::kChangeFloat64ToInt64:
1479  return MarkAsWord64(node), VisitChangeFloat64ToInt64(node);
1480  case IrOpcode::kChangeFloat64ToUint32:
1481  return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
1482  case IrOpcode::kChangeFloat64ToUint64:
1483  return MarkAsWord64(node), VisitChangeFloat64ToUint64(node);
1484  case IrOpcode::kFloat64SilenceNaN:
1485  MarkAsFloat64(node);
1486  if (CanProduceSignalingNaN(node->InputAt(0))) {
1487  return VisitFloat64SilenceNaN(node);
1488  } else {
1489  return EmitIdentity(node);
1490  }
1491  case IrOpcode::kTruncateFloat64ToInt64:
1492  return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node);
1493  case IrOpcode::kTruncateFloat64ToUint32:
1494  return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
1495  case IrOpcode::kTruncateFloat32ToInt32:
1496  return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
1497  case IrOpcode::kTruncateFloat32ToUint32:
1498  return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
1499  case IrOpcode::kTryTruncateFloat32ToInt64:
1500  return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
1501  case IrOpcode::kTryTruncateFloat64ToInt64:
1502  return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
1503  case IrOpcode::kTryTruncateFloat32ToUint64:
1504  return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
1505  case IrOpcode::kTryTruncateFloat64ToUint64:
1506  return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
1507  case IrOpcode::kChangeInt32ToInt64:
1508  return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
1509  case IrOpcode::kChangeUint32ToUint64:
1510  return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
1511  case IrOpcode::kTruncateFloat64ToFloat32:
1512  return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
1513  case IrOpcode::kTruncateFloat64ToWord32:
1514  return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
1515  case IrOpcode::kTruncateInt64ToInt32:
1516  return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
1517  case IrOpcode::kRoundFloat64ToInt32:
1518  return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
1519  case IrOpcode::kRoundInt64ToFloat32:
1520  return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
1521  case IrOpcode::kRoundInt32ToFloat32:
1522  return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
1523  case IrOpcode::kRoundInt64ToFloat64:
1524  return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
1525  case IrOpcode::kBitcastFloat32ToInt32:
1526  return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
1527  case IrOpcode::kRoundUint32ToFloat32:
1528  return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
1529  case IrOpcode::kRoundUint64ToFloat32:
1530  return MarkAsFloat32(node), VisitRoundUint64ToFloat32(node);
1531  case IrOpcode::kRoundUint64ToFloat64:
1532  return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
1533  case IrOpcode::kBitcastFloat64ToInt64:
1534  return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
1535  case IrOpcode::kBitcastInt32ToFloat32:
1536  return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
1537  case IrOpcode::kBitcastInt64ToFloat64:
1538  return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
1539  case IrOpcode::kFloat32Add:
1540  return MarkAsFloat32(node), VisitFloat32Add(node);
1541  case IrOpcode::kFloat32Sub:
1542  return MarkAsFloat32(node), VisitFloat32Sub(node);
1543  case IrOpcode::kFloat32Neg:
1544  return MarkAsFloat32(node), VisitFloat32Neg(node);
1545  case IrOpcode::kFloat32Mul:
1546  return MarkAsFloat32(node), VisitFloat32Mul(node);
1547  case IrOpcode::kFloat32Div:
1548  return MarkAsFloat32(node), VisitFloat32Div(node);
1549  case IrOpcode::kFloat32Abs:
1550  return MarkAsFloat32(node), VisitFloat32Abs(node);
1551  case IrOpcode::kFloat32Sqrt:
1552  return MarkAsFloat32(node), VisitFloat32Sqrt(node);
1553  case IrOpcode::kFloat32Equal:
1554  return VisitFloat32Equal(node);
1555  case IrOpcode::kFloat32LessThan:
1556  return VisitFloat32LessThan(node);
1557  case IrOpcode::kFloat32LessThanOrEqual:
1558  return VisitFloat32LessThanOrEqual(node);
1559  case IrOpcode::kFloat32Max:
1560  return MarkAsFloat32(node), VisitFloat32Max(node);
1561  case IrOpcode::kFloat32Min:
1562  return MarkAsFloat32(node), VisitFloat32Min(node);
1563  case IrOpcode::kFloat64Add:
1564  return MarkAsFloat64(node), VisitFloat64Add(node);
1565  case IrOpcode::kFloat64Sub:
1566  return MarkAsFloat64(node), VisitFloat64Sub(node);
1567  case IrOpcode::kFloat64Neg:
1568  return MarkAsFloat64(node), VisitFloat64Neg(node);
1569  case IrOpcode::kFloat64Mul:
1570  return MarkAsFloat64(node), VisitFloat64Mul(node);
1571  case IrOpcode::kFloat64Div:
1572  return MarkAsFloat64(node), VisitFloat64Div(node);
1573  case IrOpcode::kFloat64Mod:
1574  return MarkAsFloat64(node), VisitFloat64Mod(node);
1575  case IrOpcode::kFloat64Min:
1576  return MarkAsFloat64(node), VisitFloat64Min(node);
1577  case IrOpcode::kFloat64Max:
1578  return MarkAsFloat64(node), VisitFloat64Max(node);
1579  case IrOpcode::kFloat64Abs:
1580  return MarkAsFloat64(node), VisitFloat64Abs(node);
1581  case IrOpcode::kFloat64Acos:
1582  return MarkAsFloat64(node), VisitFloat64Acos(node);
1583  case IrOpcode::kFloat64Acosh:
1584  return MarkAsFloat64(node), VisitFloat64Acosh(node);
1585  case IrOpcode::kFloat64Asin:
1586  return MarkAsFloat64(node), VisitFloat64Asin(node);
1587  case IrOpcode::kFloat64Asinh:
1588  return MarkAsFloat64(node), VisitFloat64Asinh(node);
1589  case IrOpcode::kFloat64Atan:
1590  return MarkAsFloat64(node), VisitFloat64Atan(node);
1591  case IrOpcode::kFloat64Atanh:
1592  return MarkAsFloat64(node), VisitFloat64Atanh(node);
1593  case IrOpcode::kFloat64Atan2:
1594  return MarkAsFloat64(node), VisitFloat64Atan2(node);
1595  case IrOpcode::kFloat64Cbrt:
1596  return MarkAsFloat64(node), VisitFloat64Cbrt(node);
1597  case IrOpcode::kFloat64Cos:
1598  return MarkAsFloat64(node), VisitFloat64Cos(node);
1599  case IrOpcode::kFloat64Cosh:
1600  return MarkAsFloat64(node), VisitFloat64Cosh(node);
1601  case IrOpcode::kFloat64Exp:
1602  return MarkAsFloat64(node), VisitFloat64Exp(node);
1603  case IrOpcode::kFloat64Expm1:
1604  return MarkAsFloat64(node), VisitFloat64Expm1(node);
1605  case IrOpcode::kFloat64Log:
1606  return MarkAsFloat64(node), VisitFloat64Log(node);
1607  case IrOpcode::kFloat64Log1p:
1608  return MarkAsFloat64(node), VisitFloat64Log1p(node);
1609  case IrOpcode::kFloat64Log10:
1610  return MarkAsFloat64(node), VisitFloat64Log10(node);
1611  case IrOpcode::kFloat64Log2:
1612  return MarkAsFloat64(node), VisitFloat64Log2(node);
1613  case IrOpcode::kFloat64Pow:
1614  return MarkAsFloat64(node), VisitFloat64Pow(node);
1615  case IrOpcode::kFloat64Sin:
1616  return MarkAsFloat64(node), VisitFloat64Sin(node);
1617  case IrOpcode::kFloat64Sinh:
1618  return MarkAsFloat64(node), VisitFloat64Sinh(node);
1619  case IrOpcode::kFloat64Sqrt:
1620  return MarkAsFloat64(node), VisitFloat64Sqrt(node);
1621  case IrOpcode::kFloat64Tan:
1622  return MarkAsFloat64(node), VisitFloat64Tan(node);
1623  case IrOpcode::kFloat64Tanh:
1624  return MarkAsFloat64(node), VisitFloat64Tanh(node);
1625  case IrOpcode::kFloat64Equal:
1626  return VisitFloat64Equal(node);
1627  case IrOpcode::kFloat64LessThan:
1628  return VisitFloat64LessThan(node);
1629  case IrOpcode::kFloat64LessThanOrEqual:
1630  return VisitFloat64LessThanOrEqual(node);
1631  case IrOpcode::kFloat32RoundDown:
1632  return MarkAsFloat32(node), VisitFloat32RoundDown(node);
1633  case IrOpcode::kFloat64RoundDown:
1634  return MarkAsFloat64(node), VisitFloat64RoundDown(node);
1635  case IrOpcode::kFloat32RoundUp:
1636  return MarkAsFloat32(node), VisitFloat32RoundUp(node);
1637  case IrOpcode::kFloat64RoundUp:
1638  return MarkAsFloat64(node), VisitFloat64RoundUp(node);
1639  case IrOpcode::kFloat32RoundTruncate:
1640  return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
1641  case IrOpcode::kFloat64RoundTruncate:
1642  return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
1643  case IrOpcode::kFloat64RoundTiesAway:
1644  return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
1645  case IrOpcode::kFloat32RoundTiesEven:
1646  return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
1647  case IrOpcode::kFloat64RoundTiesEven:
1648  return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
1649  case IrOpcode::kFloat64ExtractLowWord32:
1650  return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
1651  case IrOpcode::kFloat64ExtractHighWord32:
1652  return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
1653  case IrOpcode::kFloat64InsertLowWord32:
1654  return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
1655  case IrOpcode::kFloat64InsertHighWord32:
1656  return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
1657  case IrOpcode::kTaggedPoisonOnSpeculation:
1658  return MarkAsReference(node), VisitTaggedPoisonOnSpeculation(node);
1659  case IrOpcode::kWord32PoisonOnSpeculation:
1660  return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
1661  case IrOpcode::kWord64PoisonOnSpeculation:
1662  return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
1663  case IrOpcode::kStackSlot:
1664  return VisitStackSlot(node);
1665  case IrOpcode::kLoadStackPointer:
1666  return VisitLoadStackPointer(node);
1667  case IrOpcode::kLoadFramePointer:
1668  return VisitLoadFramePointer(node);
1669  case IrOpcode::kLoadParentFramePointer:
1670  return VisitLoadParentFramePointer(node);
1671  case IrOpcode::kUnalignedLoad: {
1672  LoadRepresentation type = LoadRepresentationOf(node->op());
1673  MarkAsRepresentation(type.representation(), node);
1674  return VisitUnalignedLoad(node);
1675  }
1676  case IrOpcode::kUnalignedStore:
1677  return VisitUnalignedStore(node);
1678  case IrOpcode::kInt32PairAdd:
1679  MarkAsWord32(node);
1680  MarkPairProjectionsAsWord32(node);
1681  return VisitInt32PairAdd(node);
1682  case IrOpcode::kInt32PairSub:
1683  MarkAsWord32(node);
1684  MarkPairProjectionsAsWord32(node);
1685  return VisitInt32PairSub(node);
1686  case IrOpcode::kInt32PairMul:
1687  MarkAsWord32(node);
1688  MarkPairProjectionsAsWord32(node);
1689  return VisitInt32PairMul(node);
1690  case IrOpcode::kWord32PairShl:
1691  MarkAsWord32(node);
1692  MarkPairProjectionsAsWord32(node);
1693  return VisitWord32PairShl(node);
1694  case IrOpcode::kWord32PairShr:
1695  MarkAsWord32(node);
1696  MarkPairProjectionsAsWord32(node);
1697  return VisitWord32PairShr(node);
1698  case IrOpcode::kWord32PairSar:
1699  MarkAsWord32(node);
1700  MarkPairProjectionsAsWord32(node);
1701  return VisitWord32PairSar(node);
1702  case IrOpcode::kWord32AtomicLoad: {
1703  LoadRepresentation type = LoadRepresentationOf(node->op());
1704  MarkAsRepresentation(type.representation(), node);
1705  return VisitWord32AtomicLoad(node);
1706  }
1707  case IrOpcode::kWord64AtomicLoad: {
1708  LoadRepresentation type = LoadRepresentationOf(node->op());
1709  MarkAsRepresentation(type.representation(), node);
1710  return VisitWord64AtomicLoad(node);
1711  }
1712  case IrOpcode::kWord32AtomicStore:
1713  return VisitWord32AtomicStore(node);
1714  case IrOpcode::kWord64AtomicStore:
1715  return VisitWord64AtomicStore(node);
1716  case IrOpcode::kWord32AtomicPairStore:
1717  return VisitWord32AtomicPairStore(node);
1718  case IrOpcode::kWord32AtomicPairLoad: {
1719  MarkAsWord32(node);
1720  MarkPairProjectionsAsWord32(node);
1721  return VisitWord32AtomicPairLoad(node);
1722  }
1723 #define ATOMIC_CASE(name, rep) \
1724  case IrOpcode::k##rep##Atomic##name: { \
1725  MachineType type = AtomicOpType(node->op()); \
1726  MarkAsRepresentation(type.representation(), node); \
1727  return Visit##rep##Atomic##name(node); \
1728  }
1729  ATOMIC_CASE(Add, Word32)
1730  ATOMIC_CASE(Add, Word64)
1731  ATOMIC_CASE(Sub, Word32)
1732  ATOMIC_CASE(Sub, Word64)
1733  ATOMIC_CASE(And, Word32)
1734  ATOMIC_CASE(And, Word64)
1735  ATOMIC_CASE(Or, Word32)
1736  ATOMIC_CASE(Or, Word64)
1737  ATOMIC_CASE(Xor, Word32)
1738  ATOMIC_CASE(Xor, Word64)
1739  ATOMIC_CASE(Exchange, Word32)
1740  ATOMIC_CASE(Exchange, Word64)
1741  ATOMIC_CASE(CompareExchange, Word32)
1742  ATOMIC_CASE(CompareExchange, Word64)
1743 #undef ATOMIC_CASE
1744 #define ATOMIC_CASE(name) \
1745  case IrOpcode::kWord32AtomicPair##name: { \
1746  MarkAsWord32(node); \
1747  MarkPairProjectionsAsWord32(node); \
1748  return VisitWord32AtomicPair##name(node); \
1749  }
1750  ATOMIC_CASE(Add)
1751  ATOMIC_CASE(Sub)
1752  ATOMIC_CASE(And)
1753  ATOMIC_CASE(Or)
1754  ATOMIC_CASE(Xor)
1755  ATOMIC_CASE(Exchange)
1756  ATOMIC_CASE(CompareExchange)
1757 #undef ATOMIC_CASE
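// For reference, ATOMIC_CASE(Add) in the block directly above expands to:
//   case IrOpcode::kWord32AtomicPairAdd: {
//     MarkAsWord32(node);
//     MarkPairProjectionsAsWord32(node);
//     return VisitWord32AtomicPairAdd(node);
//   }
// Pair atomics produce two word32 results (the low and high halves of a
// 64-bit value) that consumers read through Projection nodes, which is why
// the projections are marked alongside the node itself.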
1758  case IrOpcode::kSpeculationFence:
1759  return VisitSpeculationFence(node);
1760  case IrOpcode::kProtectedLoad: {
1761  LoadRepresentation type = LoadRepresentationOf(node->op());
1762  MarkAsRepresentation(type.representation(), node);
1763  return VisitProtectedLoad(node);
1764  }
1765  case IrOpcode::kSignExtendWord8ToInt32:
1766  return MarkAsWord32(node), VisitSignExtendWord8ToInt32(node);
1767  case IrOpcode::kSignExtendWord16ToInt32:
1768  return MarkAsWord32(node), VisitSignExtendWord16ToInt32(node);
1769  case IrOpcode::kSignExtendWord8ToInt64:
1770  return MarkAsWord64(node), VisitSignExtendWord8ToInt64(node);
1771  case IrOpcode::kSignExtendWord16ToInt64:
1772  return MarkAsWord64(node), VisitSignExtendWord16ToInt64(node);
1773  case IrOpcode::kSignExtendWord32ToInt64:
1774  return MarkAsWord64(node), VisitSignExtendWord32ToInt64(node);
1775  case IrOpcode::kUnsafePointerAdd:
1776  MarkAsRepresentation(MachineType::PointerRepresentation(), node);
1777  return VisitUnsafePointerAdd(node);
1778  case IrOpcode::kF32x4Splat:
1779  return MarkAsSimd128(node), VisitF32x4Splat(node);
1780  case IrOpcode::kF32x4ExtractLane:
1781  return MarkAsFloat32(node), VisitF32x4ExtractLane(node);
1782  case IrOpcode::kF32x4ReplaceLane:
1783  return MarkAsSimd128(node), VisitF32x4ReplaceLane(node);
1784  case IrOpcode::kF32x4SConvertI32x4:
1785  return MarkAsSimd128(node), VisitF32x4SConvertI32x4(node);
1786  case IrOpcode::kF32x4UConvertI32x4:
1787  return MarkAsSimd128(node), VisitF32x4UConvertI32x4(node);
1788  case IrOpcode::kF32x4Abs:
1789  return MarkAsSimd128(node), VisitF32x4Abs(node);
1790  case IrOpcode::kF32x4Neg:
1791  return MarkAsSimd128(node), VisitF32x4Neg(node);
1792  case IrOpcode::kF32x4RecipApprox:
1793  return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
1794  case IrOpcode::kF32x4RecipSqrtApprox:
1795  return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
1796  case IrOpcode::kF32x4Add:
1797  return MarkAsSimd128(node), VisitF32x4Add(node);
1798  case IrOpcode::kF32x4AddHoriz:
1799  return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
1800  case IrOpcode::kF32x4Sub:
1801  return MarkAsSimd128(node), VisitF32x4Sub(node);
1802  case IrOpcode::kF32x4Mul:
1803  return MarkAsSimd128(node), VisitF32x4Mul(node);
1804  case IrOpcode::kF32x4Min:
1805  return MarkAsSimd128(node), VisitF32x4Min(node);
1806  case IrOpcode::kF32x4Max:
1807  return MarkAsSimd128(node), VisitF32x4Max(node);
1808  case IrOpcode::kF32x4Eq:
1809  return MarkAsSimd128(node), VisitF32x4Eq(node);
1810  case IrOpcode::kF32x4Ne:
1811  return MarkAsSimd128(node), VisitF32x4Ne(node);
1812  case IrOpcode::kF32x4Lt:
1813  return MarkAsSimd128(node), VisitF32x4Lt(node);
1814  case IrOpcode::kF32x4Le:
1815  return MarkAsSimd128(node), VisitF32x4Le(node);
1816  case IrOpcode::kI32x4Splat:
1817  return MarkAsSimd128(node), VisitI32x4Splat(node);
1818  case IrOpcode::kI32x4ExtractLane:
1819  return MarkAsWord32(node), VisitI32x4ExtractLane(node);
1820  case IrOpcode::kI32x4ReplaceLane:
1821  return MarkAsSimd128(node), VisitI32x4ReplaceLane(node);
1822  case IrOpcode::kI32x4SConvertF32x4:
1823  return MarkAsSimd128(node), VisitI32x4SConvertF32x4(node);
1824  case IrOpcode::kI32x4SConvertI16x8Low:
1825  return MarkAsSimd128(node), VisitI32x4SConvertI16x8Low(node);
1826  case IrOpcode::kI32x4SConvertI16x8High:
1827  return MarkAsSimd128(node), VisitI32x4SConvertI16x8High(node);
1828  case IrOpcode::kI32x4Neg:
1829  return MarkAsSimd128(node), VisitI32x4Neg(node);
1830  case IrOpcode::kI32x4Shl:
1831  return MarkAsSimd128(node), VisitI32x4Shl(node);
1832  case IrOpcode::kI32x4ShrS:
1833  return MarkAsSimd128(node), VisitI32x4ShrS(node);
1834  case IrOpcode::kI32x4Add:
1835  return MarkAsSimd128(node), VisitI32x4Add(node);
1836  case IrOpcode::kI32x4AddHoriz:
1837  return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
1838  case IrOpcode::kI32x4Sub:
1839  return MarkAsSimd128(node), VisitI32x4Sub(node);
1840  case IrOpcode::kI32x4Mul:
1841  return MarkAsSimd128(node), VisitI32x4Mul(node);
1842  case IrOpcode::kI32x4MinS:
1843  return MarkAsSimd128(node), VisitI32x4MinS(node);
1844  case IrOpcode::kI32x4MaxS:
1845  return MarkAsSimd128(node), VisitI32x4MaxS(node);
1846  case IrOpcode::kI32x4Eq:
1847  return MarkAsSimd128(node), VisitI32x4Eq(node);
1848  case IrOpcode::kI32x4Ne:
1849  return MarkAsSimd128(node), VisitI32x4Ne(node);
1850  case IrOpcode::kI32x4GtS:
1851  return MarkAsSimd128(node), VisitI32x4GtS(node);
1852  case IrOpcode::kI32x4GeS:
1853  return MarkAsSimd128(node), VisitI32x4GeS(node);
1854  case IrOpcode::kI32x4UConvertF32x4:
1855  return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
1856  case IrOpcode::kI32x4UConvertI16x8Low:
1857  return MarkAsSimd128(node), VisitI32x4UConvertI16x8Low(node);
1858  case IrOpcode::kI32x4UConvertI16x8High:
1859  return MarkAsSimd128(node), VisitI32x4UConvertI16x8High(node);
1860  case IrOpcode::kI32x4ShrU:
1861  return MarkAsSimd128(node), VisitI32x4ShrU(node);
1862  case IrOpcode::kI32x4MinU:
1863  return MarkAsSimd128(node), VisitI32x4MinU(node);
1864  case IrOpcode::kI32x4MaxU:
1865  return MarkAsSimd128(node), VisitI32x4MaxU(node);
1866  case IrOpcode::kI32x4GtU:
1867  return MarkAsSimd128(node), VisitI32x4GtU(node);
1868  case IrOpcode::kI32x4GeU:
1869  return MarkAsSimd128(node), VisitI32x4GeU(node);
1870  case IrOpcode::kI16x8Splat:
1871  return MarkAsSimd128(node), VisitI16x8Splat(node);
1872  case IrOpcode::kI16x8ExtractLane:
1873  return MarkAsWord32(node), VisitI16x8ExtractLane(node);
1874  case IrOpcode::kI16x8ReplaceLane:
1875  return MarkAsSimd128(node), VisitI16x8ReplaceLane(node);
1876  case IrOpcode::kI16x8SConvertI8x16Low:
1877  return MarkAsSimd128(node), VisitI16x8SConvertI8x16Low(node);
1878  case IrOpcode::kI16x8SConvertI8x16High:
1879  return MarkAsSimd128(node), VisitI16x8SConvertI8x16High(node);
1880  case IrOpcode::kI16x8Neg:
1881  return MarkAsSimd128(node), VisitI16x8Neg(node);
1882  case IrOpcode::kI16x8Shl:
1883  return MarkAsSimd128(node), VisitI16x8Shl(node);
1884  case IrOpcode::kI16x8ShrS:
1885  return MarkAsSimd128(node), VisitI16x8ShrS(node);
1886  case IrOpcode::kI16x8SConvertI32x4:
1887  return MarkAsSimd128(node), VisitI16x8SConvertI32x4(node);
1888  case IrOpcode::kI16x8Add:
1889  return MarkAsSimd128(node), VisitI16x8Add(node);
1890  case IrOpcode::kI16x8AddSaturateS:
1891  return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
1892  case IrOpcode::kI16x8AddHoriz:
1893  return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
1894  case IrOpcode::kI16x8Sub:
1895  return MarkAsSimd128(node), VisitI16x8Sub(node);
1896  case IrOpcode::kI16x8SubSaturateS:
1897  return MarkAsSimd128(node), VisitI16x8SubSaturateS(node);
1898  case IrOpcode::kI16x8Mul:
1899  return MarkAsSimd128(node), VisitI16x8Mul(node);
1900  case IrOpcode::kI16x8MinS:
1901  return MarkAsSimd128(node), VisitI16x8MinS(node);
1902  case IrOpcode::kI16x8MaxS:
1903  return MarkAsSimd128(node), VisitI16x8MaxS(node);
1904  case IrOpcode::kI16x8Eq:
1905  return MarkAsSimd128(node), VisitI16x8Eq(node);
1906  case IrOpcode::kI16x8Ne:
1907  return MarkAsSimd128(node), VisitI16x8Ne(node);
1908  case IrOpcode::kI16x8GtS:
1909  return MarkAsSimd128(node), VisitI16x8GtS(node);
1910  case IrOpcode::kI16x8GeS:
1911  return MarkAsSimd128(node), VisitI16x8GeS(node);
1912  case IrOpcode::kI16x8UConvertI8x16Low:
1913  return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
1914  case IrOpcode::kI16x8UConvertI8x16High:
1915  return MarkAsSimd128(node), VisitI16x8UConvertI8x16High(node);
1916  case IrOpcode::kI16x8ShrU:
1917  return MarkAsSimd128(node), VisitI16x8ShrU(node);
1918  case IrOpcode::kI16x8UConvertI32x4:
1919  return MarkAsSimd128(node), VisitI16x8UConvertI32x4(node);
1920  case IrOpcode::kI16x8AddSaturateU:
1921  return MarkAsSimd128(node), VisitI16x8AddSaturateU(node);
1922  case IrOpcode::kI16x8SubSaturateU:
1923  return MarkAsSimd128(node), VisitI16x8SubSaturateU(node);
1924  case IrOpcode::kI16x8MinU:
1925  return MarkAsSimd128(node), VisitI16x8MinU(node);
1926  case IrOpcode::kI16x8MaxU:
1927  return MarkAsSimd128(node), VisitI16x8MaxU(node);
1928  case IrOpcode::kI16x8GtU:
1929  return MarkAsSimd128(node), VisitI16x8GtU(node);
1930  case IrOpcode::kI16x8GeU:
1931  return MarkAsSimd128(node), VisitI16x8GeU(node);
1932  case IrOpcode::kI8x16Splat:
1933  return MarkAsSimd128(node), VisitI8x16Splat(node);
1934  case IrOpcode::kI8x16ExtractLane:
1935  return MarkAsWord32(node), VisitI8x16ExtractLane(node);
1936  case IrOpcode::kI8x16ReplaceLane:
1937  return MarkAsSimd128(node), VisitI8x16ReplaceLane(node);
1938  case IrOpcode::kI8x16Neg:
1939  return MarkAsSimd128(node), VisitI8x16Neg(node);
1940  case IrOpcode::kI8x16Shl:
1941  return MarkAsSimd128(node), VisitI8x16Shl(node);
1942  case IrOpcode::kI8x16ShrS:
1943  return MarkAsSimd128(node), VisitI8x16ShrS(node);
1944  case IrOpcode::kI8x16SConvertI16x8:
1945  return MarkAsSimd128(node), VisitI8x16SConvertI16x8(node);
1946  case IrOpcode::kI8x16Add:
1947  return MarkAsSimd128(node), VisitI8x16Add(node);
1948  case IrOpcode::kI8x16AddSaturateS:
1949  return MarkAsSimd128(node), VisitI8x16AddSaturateS(node);
1950  case IrOpcode::kI8x16Sub:
1951  return MarkAsSimd128(node), VisitI8x16Sub(node);
1952  case IrOpcode::kI8x16SubSaturateS:
1953  return MarkAsSimd128(node), VisitI8x16SubSaturateS(node);
1954  case IrOpcode::kI8x16Mul:
1955  return MarkAsSimd128(node), VisitI8x16Mul(node);
1956  case IrOpcode::kI8x16MinS:
1957  return MarkAsSimd128(node), VisitI8x16MinS(node);
1958  case IrOpcode::kI8x16MaxS:
1959  return MarkAsSimd128(node), VisitI8x16MaxS(node);
1960  case IrOpcode::kI8x16Eq:
1961  return MarkAsSimd128(node), VisitI8x16Eq(node);
1962  case IrOpcode::kI8x16Ne:
1963  return MarkAsSimd128(node), VisitI8x16Ne(node);
1964  case IrOpcode::kI8x16GtS:
1965  return MarkAsSimd128(node), VisitI8x16GtS(node);
1966  case IrOpcode::kI8x16GeS:
1967  return MarkAsSimd128(node), VisitI8x16GeS(node);
1968  case IrOpcode::kI8x16ShrU:
1969  return MarkAsSimd128(node), VisitI8x16ShrU(node);
1970  case IrOpcode::kI8x16UConvertI16x8:
1971  return MarkAsSimd128(node), VisitI8x16UConvertI16x8(node);
1972  case IrOpcode::kI8x16AddSaturateU:
1973  return MarkAsSimd128(node), VisitI8x16AddSaturateU(node);
1974  case IrOpcode::kI8x16SubSaturateU:
1975  return MarkAsSimd128(node), VisitI8x16SubSaturateU(node);
1976  case IrOpcode::kI8x16MinU:
1977  return MarkAsSimd128(node), VisitI8x16MinU(node);
1978  case IrOpcode::kI8x16MaxU:
1979  return MarkAsSimd128(node), VisitI8x16MaxU(node);
1980  case IrOpcode::kI8x16GtU:
1981  return MarkAsSimd128(node), VisitI8x16GtU(node);
1982  case IrOpcode::kI8x16GeU:
1983  return MarkAsSimd128(node), VisitI8x16GeU(node);
1984  case IrOpcode::kS128Zero:
1985  return MarkAsSimd128(node), VisitS128Zero(node);
1986  case IrOpcode::kS128And:
1987  return MarkAsSimd128(node), VisitS128And(node);
1988  case IrOpcode::kS128Or:
1989  return MarkAsSimd128(node), VisitS128Or(node);
1990  case IrOpcode::kS128Xor:
1991  return MarkAsSimd128(node), VisitS128Xor(node);
1992  case IrOpcode::kS128Not:
1993  return MarkAsSimd128(node), VisitS128Not(node);
1994  case IrOpcode::kS128Select:
1995  return MarkAsSimd128(node), VisitS128Select(node);
1996  case IrOpcode::kS8x16Shuffle:
1997  return MarkAsSimd128(node), VisitS8x16Shuffle(node);
1998  case IrOpcode::kS1x4AnyTrue:
1999  return MarkAsWord32(node), VisitS1x4AnyTrue(node);
2000  case IrOpcode::kS1x4AllTrue:
2001  return MarkAsWord32(node), VisitS1x4AllTrue(node);
2002  case IrOpcode::kS1x8AnyTrue:
2003  return MarkAsWord32(node), VisitS1x8AnyTrue(node);
2004  case IrOpcode::kS1x8AllTrue:
2005  return MarkAsWord32(node), VisitS1x8AllTrue(node);
2006  case IrOpcode::kS1x16AnyTrue:
2007  return MarkAsWord32(node), VisitS1x16AnyTrue(node);
2008  case IrOpcode::kS1x16AllTrue:
2009  return MarkAsWord32(node), VisitS1x16AllTrue(node);
2010  default:
2011  FATAL("Unexpected operator #%d:%s @ node #%d", node->opcode(),
2012  node->op()->mnemonic(), node->id());
2013  break;
2014  }
2015 }
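// Note on the dispatch style above: the comma operator keeps each case on a
// single line, so
//   return MarkAsWord32(node), VisitInt32Add(node);
// first records the node's result representation for its virtual register
// and then forwards to the architecture-specific visitor (both calls return
// void, so the comma expression is a valid operand of a void return).
// Comparison cases such as kInt32LessThan deliberately skip the MarkAs*
// call and leave their result in the sequence's default representation.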
2016 
2017 void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
2018  if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
2019  OperandGenerator g(this);
2020  Node* input_node = NodeProperties::GetValueInput(node, 0);
2021  InstructionOperand input = g.UseRegister(input_node);
2022  InstructionOperand output = g.DefineSameAsFirst(node);
2023  Emit(kArchWordPoisonOnSpeculation, output, input);
2024  } else {
2025  EmitIdentity(node);
2026  }
2027 }
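// Sketch of the mitigation, assuming the usual AND-based backend lowering:
// kArchWordPoisonOnSpeculation combines the value with the dedicated
// speculation poison register, roughly
//   value &= kSpeculationPoisonRegister;  // all-ones normally, zero when
//                                         // executing a misspeculated path
// so a value observed under misspeculation collapses to zero and cannot
// feed an attacker-controlled load address. With poisoning disabled,
// EmitIdentity just renames the node to its input and emits nothing.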
2028 
2029 void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
2030  EmitWordPoisonOnSpeculation(node);
2031 }
2032 
2033 void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
2034  EmitWordPoisonOnSpeculation(node);
2035 }
2036 
2037 void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
2038  EmitWordPoisonOnSpeculation(node);
2039 }
2040 
2041 void InstructionSelector::VisitLoadStackPointer(Node* node) {
2042  OperandGenerator g(this);
2043  Emit(kArchStackPointer, g.DefineAsRegister(node));
2044 }
2045 
2046 void InstructionSelector::VisitLoadFramePointer(Node* node) {
2047  OperandGenerator g(this);
2048  Emit(kArchFramePointer, g.DefineAsRegister(node));
2049 }
2050 
2051 void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
2052  OperandGenerator g(this);
2053  Emit(kArchParentFramePointer, g.DefineAsRegister(node));
2054 }
2055 
2056 void InstructionSelector::VisitFloat64Acos(Node* node) {
2057  VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
2058 }
2059 
2060 void InstructionSelector::VisitFloat64Acosh(Node* node) {
2061  VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
2062 }
2063 
2064 void InstructionSelector::VisitFloat64Asin(Node* node) {
2065  VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
2066 }
2067 
2068 void InstructionSelector::VisitFloat64Asinh(Node* node) {
2069  VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
2070 }
2071 
2072 void InstructionSelector::VisitFloat64Atan(Node* node) {
2073  VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
2074 }
2075 
2076 void InstructionSelector::VisitFloat64Atanh(Node* node) {
2077  VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
2078 }
2079 
2080 void InstructionSelector::VisitFloat64Atan2(Node* node) {
2081  VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
2082 }
2083 
2084 void InstructionSelector::VisitFloat64Cbrt(Node* node) {
2085  VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
2086 }
2087 
2088 void InstructionSelector::VisitFloat64Cos(Node* node) {
2089  VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
2090 }
2091 
2092 void InstructionSelector::VisitFloat64Cosh(Node* node) {
2093  VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
2094 }
2095 
2096 void InstructionSelector::VisitFloat64Exp(Node* node) {
2097  VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
2098 }
2099 
2100 void InstructionSelector::VisitFloat64Expm1(Node* node) {
2101  VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
2102 }
2103 
2104 void InstructionSelector::VisitFloat64Log(Node* node) {
2105  VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
2106 }
2107 
2108 void InstructionSelector::VisitFloat64Log1p(Node* node) {
2109  VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
2110 }
2111 
2112 void InstructionSelector::VisitFloat64Log2(Node* node) {
2113  VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
2114 }
2115 
2116 void InstructionSelector::VisitFloat64Log10(Node* node) {
2117  VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
2118 }
2119 
2120 void InstructionSelector::VisitFloat64Pow(Node* node) {
2121  VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
2122 }
2123 
2124 void InstructionSelector::VisitFloat64Sin(Node* node) {
2125  VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
2126 }
2127 
2128 void InstructionSelector::VisitFloat64Sinh(Node* node) {
2129  VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
2130 }
2131 
2132 void InstructionSelector::VisitFloat64Tan(Node* node) {
2133  VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
2134 }
2135 
2136 void InstructionSelector::VisitFloat64Tanh(Node* node) {
2137  VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
2138 }
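// The kIeee754Float64* opcodes used by the visitors above are lowered to
// calls into V8's software ieee754 library (via external references) rather
// than to machine instructions, keeping the transcendental functions
// bit-identical across architectures.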
2139 
2140 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
2141  InstructionOperand& index_operand) {
2142  OperandGenerator g(this);
2143  size_t input_count = 2 + sw.value_range();
2144  DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
2145  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
2146  inputs[0] = index_operand;
2147  InstructionOperand default_operand = g.Label(sw.default_branch());
2148  std::fill(&inputs[1], &inputs[input_count], default_operand);
2149  for (const CaseInfo& c : sw.CasesUnsorted()) {
2150  size_t value = c.value - sw.min_value();
2151  DCHECK_LE(0u, value);
2152  DCHECK_LT(value + 2, input_count);
2153  inputs[value + 2] = g.Label(c.branch);
2154  }
2155  Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
2156 }
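// Illustrative operand layout, assuming cases {3: B1, 5: B2}, default D,
// and min_value 3 (value_range 3); the caller has already biased the index
// operand by -min_value:
//   inputs[0] = index_operand
//   inputs[1] = Label(D)   // taken when the index is out of range
//   inputs[2] = Label(B1)  // value 3
//   inputs[3] = Label(D)   // hole at value 4 reuses the default
//   inputs[4] = Label(B2)  // value 5
// The std::fill above is what seeds every table slot, holes included, with
// the default label before the real cases overwrite their entries.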
2157 
2158 void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
2159  InstructionOperand& value_operand) {
2160  OperandGenerator g(this);
2161  std::vector<CaseInfo> cases = sw.CasesSortedByOriginalOrder();
2162  size_t input_count = 2 + sw.case_count() * 2;
2163  DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
2164  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
2165  inputs[0] = value_operand;
2166  inputs[1] = g.Label(sw.default_branch());
2167  for (size_t index = 0; index < cases.size(); ++index) {
2168  const CaseInfo& c = cases[index];
2169  inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
2170  inputs[index * 2 + 2 + 1] = g.Label(c.branch);
2171  }
2172  Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
2173 }
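// Illustrative operand layout, assuming cases {10: B1, 42: B2}, default D:
//   inputs[0] = value_operand
//   inputs[1] = Label(D)
//   inputs[2] = 10    inputs[3] = Label(B1)
//   inputs[4] = 42    inputs[5] = Label(B2)
// The code generator compares the value against each immediate in the
// original case order, so this form suits small or sparse switches where a
// dense jump table would waste space.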
2174 
2175 void InstructionSelector::EmitBinarySearchSwitch(
2176  const SwitchInfo& sw, InstructionOperand& value_operand) {
2177  OperandGenerator g(this);
2178  size_t input_count = 2 + sw.case_count() * 2;
2179  DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
2180  auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
2181  inputs[0] = value_operand;
2182  inputs[1] = g.Label(sw.default_branch());
2183  // CasesSortedByValue() already returns the cases sorted by value, which
2184  // is exactly the order the binary-search code generator requires.
2185  std::vector<CaseInfo> cases = sw.CasesSortedByValue();
2186  for (size_t index = 0; index < cases.size(); ++index) {
2187  const CaseInfo& c = cases[index];
2188  inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
2189  inputs[index * 2 + 2 + 1] = g.Label(c.branch);
2190  }
2191  Emit(kArchBinarySearchSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
2192 }
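// Same (value, label) pair layout as the lookup switch above, but with the
// cases sorted by value so the code generator can emit a compare-and-branch
// tree that decides in O(log n) comparisons instead of a linear scan.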
2193 
2194 void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
2195  EmitIdentity(node);
2196 }
2197 
2198 void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
2199  OperandGenerator g(this);
2200  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
2201 }
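// Both bitcast visitors are representation-only changes: no bits move.
// Tagged-to-word just renames the virtual register (EmitIdentity), while
// word-to-tagged emits a kArchNop whose sole purpose is to give the tagged
// live range a defining instruction for the register allocator.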
2202 
2203 // 32-bit targets do not implement the following instructions.
2204 #if V8_TARGET_ARCH_32_BIT
2205 
2206 void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
2207 
2208 void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
2209 
2210 void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
2211 
2212 void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
2213 
2214 void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
2215 
2216 void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
2217 
2218 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
2219 
2220 void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
2221 
2222 void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
2223 
2224 void InstructionSelector::VisitWord64ReverseBits(Node* node) {
2225  UNIMPLEMENTED();
2226 }
2227 
2228 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
2229 
2230 void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
2231 
2232 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
2233 
2234 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2235  UNIMPLEMENTED();
2236 }
2237 
2238 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
2239 
2240 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2241  UNIMPLEMENTED();
2242 }
2243 
2244 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
2245 
2246 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
2247 
2248 void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
2249 
2250 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2251  UNIMPLEMENTED();
2252 }
2253 
2254 void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
2255 
2256 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
2257 
2258 void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
2259 
2260 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2261  UNIMPLEMENTED();
2262 }
2263 
2264 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
2265 
2266 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
2267  UNIMPLEMENTED();
2268 }
2269 
2270 void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
2271  UNIMPLEMENTED();
2272 }
2273 
2274 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
2275  UNIMPLEMENTED();
2276 }
2277 
2278 void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
2279  UNIMPLEMENTED();
2280 }
2281 
2282 void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
2283  UNIMPLEMENTED();
2284 }
2285 
2286 void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
2287  UNIMPLEMENTED();
2288 }
2289 
2290 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
2291  UNIMPLEMENTED();
2292 }
2293 
2294 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
2295  UNIMPLEMENTED();
2296 }
2297 
2298 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
2299  UNIMPLEMENTED();
2300 }
2301 
2302 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
2303  UNIMPLEMENTED();
2304 }
2305 
2306 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
2307  UNIMPLEMENTED();
2308 }
2309 
2310 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
2311  UNIMPLEMENTED();
2312 }
2313 
2314 void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
2315  UNIMPLEMENTED();
2316 }
2317 
2318 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
2319  UNIMPLEMENTED();
2320 }
2321 
2322 void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
2323  UNIMPLEMENTED();
2324 }
2325 
2326 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
2327  UNIMPLEMENTED();
2328 }
2329 
2330 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
2331  UNIMPLEMENTED();
2332 }
2333 
2334 void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
2335  UNIMPLEMENTED();
2336 }
2337 
2338 void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
2339  UNIMPLEMENTED();
2340 }
2341 
2342 void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
2343  UNIMPLEMENTED();
2344 }
2345 #endif // V8_TARGET_ARCH_32_BIT
2346 
2347 // 64-bit targets do not implement the following instructions.
2348 #if V8_TARGET_ARCH_64_BIT
2349 void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
2350 
2351 void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
2352 
2353 void InstructionSelector::VisitInt32PairMul(Node* node) { UNIMPLEMENTED(); }
2354 
2355 void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
2356 
2357 void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
2358 
2359 void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
2360 #endif // V8_TARGET_ARCH_64_BIT
2361 
2362 #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
2363 void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
2364  UNIMPLEMENTED();
2365 }
2366 
2367 void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
2368  UNIMPLEMENTED();
2369 }
2370 
2371 void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
2372  UNIMPLEMENTED();
2373 }
2374 
2375 void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
2376  UNIMPLEMENTED();
2377 }
2378 
2379 void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
2380  UNIMPLEMENTED();
2381 }
2382 
2383 void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
2384  UNIMPLEMENTED();
2385 }
2386 
2387 void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
2388  UNIMPLEMENTED();
2389 }
2390 
2391 void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
2392  UNIMPLEMENTED();
2393 }
2394 
2395 void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
2396  UNIMPLEMENTED();
2397 }
2398 #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
2399 
2400 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
2401  !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC
2402 void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
2403 
2404 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2405  UNIMPLEMENTED();
2406 }
2407 
2408 void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
2409 
2410 void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
2411 
2412 void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
2413 
2414 void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
2415 
2416 void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
2417 
2418 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2419  UNIMPLEMENTED();
2420 }
2421 
2422 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2423  UNIMPLEMENTED();
2424 }
2425 #endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64
2426  // && !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC
2427 
2428 #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
2429  !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
2430 void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
2431 #endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
2432  // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
2433 
2434 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
2435 
2436 void InstructionSelector::VisitParameter(Node* node) {
2437  OperandGenerator g(this);
2438  int index = ParameterIndexOf(node->op());
2439  InstructionOperand op =
2440  linkage()->ParameterHasSecondaryLocation(index)
2441  ? g.DefineAsDualLocation(
2442  node, linkage()->GetParameterLocation(index),
2443  linkage()->GetParameterSecondaryLocation(index))
2444  : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
2445 
2446  Emit(kArchNop, op);
2447 }
2448 
2449 namespace {
2450 LinkageLocation ExceptionLocation() {
2451  return LinkageLocation::ForRegister(kReturnRegister0.code(),
2452  MachineType::IntPtr());
2453 }
2454 } // namespace
2455 
2456 void InstructionSelector::VisitIfException(Node* node) {
2457  OperandGenerator g(this);
2458  DCHECK_EQ(IrOpcode::kCall, node->InputAt(1)->opcode());
2459  Emit(kArchNop, g.DefineAsLocation(node, ExceptionLocation()));
2460 }
2461 
2462 void InstructionSelector::VisitOsrValue(Node* node) {
2463  OperandGenerator g(this);
2464  int index = OsrValueIndexOf(node->op());
2465  Emit(kArchNop,
2466  g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
2467 }
2468 
2469 void InstructionSelector::VisitPhi(Node* node) {
2470  const int input_count = node->op()->ValueInputCount();
2471  DCHECK_EQ(input_count, current_block_->PredecessorCount());
2472  PhiInstruction* phi = new (instruction_zone())
2473  PhiInstruction(instruction_zone(), GetVirtualRegister(node),
2474  static_cast<size_t>(input_count));
2475  sequence()
2476  ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
2477  ->AddPhi(phi);
2478  for (int i = 0; i < input_count; ++i) {
2479  Node* const input = node->InputAt(i);
2480  MarkAsUsed(input);
2481  phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
2482  }
2483 }
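// Sketch with assumed virtual register numbers: for a two-predecessor merge
// where v7 = Phi(v3 from B1, v5 from B2), this builds a PhiInstruction with
// output v7 and inputs {v3, v5}, indexed by predecessor position. No code
// is emitted here; the register allocator later unifies the live ranges.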
2484 
2485 void InstructionSelector::VisitProjection(Node* node) {
2486  OperandGenerator g(this);
2487  Node* value = node->InputAt(0);
2488  switch (value->opcode()) {
2489  case IrOpcode::kInt32AddWithOverflow:
2490  case IrOpcode::kInt32SubWithOverflow:
2491  case IrOpcode::kInt32MulWithOverflow:
2492  case IrOpcode::kInt64AddWithOverflow:
2493  case IrOpcode::kInt64SubWithOverflow:
2494  case IrOpcode::kTryTruncateFloat32ToInt64:
2495  case IrOpcode::kTryTruncateFloat64ToInt64:
2496  case IrOpcode::kTryTruncateFloat32ToUint64:
2497  case IrOpcode::kTryTruncateFloat64ToUint64:
2498  case IrOpcode::kInt32PairAdd:
2499  case IrOpcode::kInt32PairSub:
2500  case IrOpcode::kInt32PairMul:
2501  case IrOpcode::kWord32PairShl:
2502  case IrOpcode::kWord32PairShr:
2503  case IrOpcode::kWord32PairSar:
2504  case IrOpcode::kInt32AbsWithOverflow:
2505  case IrOpcode::kInt64AbsWithOverflow:
2506  if (ProjectionIndexOf(node->op()) == 0u) {
2507  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
2508  } else {
2509  DCHECK_EQ(1u, ProjectionIndexOf(node->op()));
2510  MarkAsUsed(value);
2511  }
2512  break;
2513  default:
2514  break;
2515  }
2516 }
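// Example: for v = Int32AddWithOverflow(a, b), Projection(0) is the 32-bit
// sum and is defined same-as-first so it aliases the add's output, while
// Projection(1) is the overflow bit. The flag projection gets no
// instruction of its own; it is materialized by the flags continuation of
// its consumer (typically a branch or DeoptimizeIf), so marking the input
// as used is all that is needed here.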
2517 
2518 void InstructionSelector::VisitConstant(Node* node) {
2519  // We must emit a NOP here because every live range needs a defining
2520  // instruction in the register allocator.
2521  OperandGenerator g(this);
2522  Emit(kArchNop, g.DefineAsConstant(node));
2523 }
2524 
2525 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
2526  OperandGenerator g(this);
2527  auto call_descriptor = CallDescriptorOf(node->op());
2528 
2529  FrameStateDescriptor* frame_state_descriptor = nullptr;
2530  if (call_descriptor->NeedsFrameState()) {
2531  frame_state_descriptor = GetFrameStateDescriptor(
2532  node->InputAt(static_cast<int>(call_descriptor->InputCount())));
2533  }
2534 
2535  CallBuffer buffer(zone(), call_descriptor, frame_state_descriptor);
2536  CallDescriptor::Flags flags = call_descriptor->flags();
2537 
2538  // Compute InstructionOperands for inputs and outputs.
2539  // TODO(turbofan): on some architectures it's probably better to use
2540  // the code object in a register if there are multiple uses of it.
2541  // Improve constant pool and the heuristics in the register allocator
2542  // for where to emit constants.
2543  CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
2544  if (flags & CallDescriptor::kAllowCallThroughSlot) {
2545  // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative call
2546  // instead once builtins are embedded in every build configuration.
2547  call_buffer_flags |= kAllowCallThroughSlot;
2548 #ifndef V8_TARGET_ARCH_32_BIT
2549  // kAllowCallThroughSlot is only supported on ia32.
2550  UNREACHABLE();
2551 #endif
2552  }
2553  InitializeCallBuffer(node, &buffer, call_buffer_flags, false);
2554 
2555  EmitPrepareArguments(&(buffer.pushed_nodes), call_descriptor, node);
2556 
2557  // Pass label of exception handler block.
2558  if (handler) {
2559  DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
2560  flags |= CallDescriptor::kHasExceptionHandler;
2561  buffer.instruction_args.push_back(g.Label(handler));
2562  }
2563 
2564  // Select the appropriate opcode based on the call type.
2565  InstructionCode opcode = kArchNop;
2566  switch (call_descriptor->kind()) {
2567  case CallDescriptor::kCallAddress:
2568  opcode = kArchCallCFunction | MiscField::encode(static_cast<int>(
2569  call_descriptor->ParameterCount()));
2570  break;
2571  case CallDescriptor::kCallCodeObject:
2572  opcode = kArchCallCodeObject | MiscField::encode(flags);
2573  break;
2574  case CallDescriptor::kCallJSFunction:
2575  opcode = kArchCallJSFunction | MiscField::encode(flags);
2576  break;
2577  case CallDescriptor::kCallWasmFunction:
2578  case CallDescriptor::kCallWasmImportWrapper:
2579  opcode = kArchCallWasmFunction | MiscField::encode(flags);
2580  break;
2581  }
2582 
2583  // Emit the call instruction.
2584  size_t const output_count = buffer.outputs.size();
2585  auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
2586  Instruction* call_instr =
2587  Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
2588  &buffer.instruction_args.front());
2589  if (instruction_selection_failed()) return;
2590  call_instr->MarkAsCall();
2591 
2592  EmitPrepareResults(&(buffer.output_nodes), call_descriptor, node);
2593 }
2594 
2595 void InstructionSelector::VisitCallWithCallerSavedRegisters(
2596  Node* node, BasicBlock* handler) {
2597  OperandGenerator g(this);
2598  const auto fp_mode = CallDescriptorOf(node->op())->get_save_fp_mode();
2599  Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(fp_mode)),
2600  g.NoOutput());
2601  VisitCall(node, handler);
2602  Emit(kArchRestoreCallerRegisters |
2603  MiscField::encode(static_cast<int>(fp_mode)),
2604  g.NoOutput());
2605 }
2606 
2607 void InstructionSelector::VisitTailCall(Node* node) {
2608  OperandGenerator g(this);
2609  auto call_descriptor = CallDescriptorOf(node->op());
2610 
2611  CallDescriptor* caller = linkage()->GetIncomingDescriptor();
2612  DCHECK(caller->CanTailCall(node));
2613  const CallDescriptor* callee = CallDescriptorOf(node->op());
2614  int stack_param_delta = callee->GetStackParameterDelta(caller);
2615  CallBuffer buffer(zone(), call_descriptor, nullptr);
2616 
2617  // Compute InstructionOperands for inputs and outputs.
2618  CallBufferFlags flags(kCallCodeImmediate | kCallTail);
2619  if (IsTailCallAddressImmediate()) {
2620  flags |= kCallAddressImmediate;
2621  }
2622  if (callee->flags() & CallDescriptor::kFixedTargetRegister) {
2623  flags |= kCallFixedTargetRegister;
2624  }
2625  DCHECK_EQ(callee->flags() & CallDescriptor::kAllowCallThroughSlot, 0);
2626  InitializeCallBuffer(node, &buffer, flags, true, stack_param_delta);
2627 
2628  // Select the appropriate opcode based on the call type.
2629  InstructionCode opcode;
2630  InstructionOperandVector temps(zone());
2631  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
2632  switch (call_descriptor->kind()) {
2633  case CallDescriptor::kCallCodeObject:
2634  opcode = kArchTailCallCodeObjectFromJSFunction;
2635  break;
2636  default:
2637  UNREACHABLE();
2638  return;
2639  }
2640  int temps_count = GetTempsCountForTailCallFromJSFunction();
2641  for (int i = 0; i < temps_count; i++) {
2642  temps.push_back(g.TempRegister());
2643  }
2644  } else {
2645  switch (call_descriptor->kind()) {
2646  case CallDescriptor::kCallCodeObject:
2647  opcode = kArchTailCallCodeObject;
2648  break;
2649  case CallDescriptor::kCallAddress:
2650  opcode = kArchTailCallAddress;
2651  break;
2652  case CallDescriptor::kCallWasmFunction:
2653  opcode = kArchTailCallWasm;
2654  break;
2655  default:
2656  UNREACHABLE();
2657  return;
2658  }
2659  }
2660  opcode |= MiscField::encode(call_descriptor->flags());
2661 
2662  Emit(kArchPrepareTailCall, g.NoOutput());
2663 
2664  // Add an immediate operand that represents the first slot that is unused
2665  // with respect to the stack pointer that has been updated for the tail call
2666  // instruction. This is used by backends that need to pad arguments for stack
2667  // alignment, in order to store an optional slot of padding above the
2668  // arguments.
2669  int optional_padding_slot = callee->GetFirstUnusedStackSlot();
2670  buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
2671 
2672  int first_unused_stack_slot =
2673  (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
2674  stack_param_delta;
2675  buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
2676 
2677  // Emit the tailcall instruction.
2678  Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
2679  &buffer.instruction_args.front(), temps.size(),
2680  temps.empty() ? nullptr : &temps.front());
2681 }
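// Two immediates were appended above: the callee's first unused stack slot,
// where backends that pad arguments for alignment may store the padding,
// and the first slot unused relative to the adjusted stack pointer, i.e.
// the return-address slot (1 on targets that push it, else 0) plus
// stack_param_delta. Worked example, assuming such a target: with
// stack_param_delta == 2, the second immediate is 1 + 2 == 3.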
2682 
2683 void InstructionSelector::VisitGoto(BasicBlock* target) {
2684  // Jump to the target block.
2685  OperandGenerator g(this);
2686  Emit(kArchJmp, g.NoOutput(), g.Label(target));
2687 }
2688 
2689 void InstructionSelector::VisitReturn(Node* ret) {
2690  OperandGenerator g(this);
2691  const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
2692  ? 1
2693  : ret->op()->ValueInputCount();
2694  DCHECK_GE(input_count, 1);
2695  auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
2696  Node* pop_count = ret->InputAt(0);
2697  value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
2698  pop_count->opcode() == IrOpcode::kInt64Constant)
2699  ? g.UseImmediate(pop_count)
2700  : g.UseRegister(pop_count);
2701  for (int i = 1; i < input_count; ++i) {
2702  value_locations[i] =
2703  g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
2704  }
2705  Emit(kArchRet, 0, nullptr, input_count, value_locations);
2706 }
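// Illustrative kArchRet operand layout for `return x` with one return
// value:
//   value_locations[0] = pop count (immediate when constant, else register)
//   value_locations[1] = x, pinned to the linkage's return location, e.g.
//                        the architecture's first return register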
2707 
2708 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
2709  BasicBlock* fbranch) {
2710  if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
2711  FlagsContinuation cont =
2712  FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
2713  VisitWordCompareZero(branch, branch->InputAt(0), &cont);
2714  } else {
2715  FlagsContinuation cont =
2716  FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
2717  VisitWordCompareZero(branch, branch->InputAt(0), &cont);
2718  }
2719 }
2720 
2721 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
2722  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
2723  if (NeedsPoisoning(p.is_safety_check())) {
2724  FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
2725  kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2726  VisitWordCompareZero(node, node->InputAt(0), &cont);
2727  } else {
2728  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
2729  kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2730  VisitWordCompareZero(node, node->InputAt(0), &cont);
2731  }
2732 }
2733 
2734 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
2735  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
2736  if (NeedsPoisoning(p.is_safety_check())) {
2737  FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
2738  kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2739  VisitWordCompareZero(node, node->InputAt(0), &cont);
2740  } else {
2741  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
2742  kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
2743  VisitWordCompareZero(node, node->InputAt(0), &cont);
2744  }
2745 }
2746 
2747 void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
2748  FlagsContinuation cont =
2749  FlagsContinuation::ForTrap(kNotEqual, trap_id, node->InputAt(1));
2750  VisitWordCompareZero(node, node->InputAt(0), &cont);
2751 }
2752 
2753 void InstructionSelector::VisitTrapUnless(Node* node, TrapId trap_id) {
2754  FlagsContinuation cont =
2755  FlagsContinuation::ForTrap(kEqual, trap_id, node->InputAt(1));
2756  VisitWordCompareZero(node, node->InputAt(0), &cont);
2757 }
2758 
2759 void InstructionSelector::EmitIdentity(Node* node) {
2760  OperandGenerator g(this);
2761  MarkAsUsed(node->InputAt(0));
2762  SetRename(node, node->InputAt(0));
2763 }
2764 
2765 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
2766  DeoptimizeReason reason,
2767  VectorSlotPair const& feedback,
2768  Node* value) {
2769  EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
2770  feedback, value);
2771 }
2772 
2773 void InstructionSelector::VisitThrow(Node* node) {
2774  OperandGenerator g(this);
2775  Emit(kArchThrowTerminator, g.NoOutput());
2776 }
2777 
2778 void InstructionSelector::VisitDebugBreak(Node* node) {
2779  OperandGenerator g(this);
2780  Emit(kArchDebugBreak, g.NoOutput());
2781 }
2782 
2783 void InstructionSelector::VisitUnreachable(Node* node) {
2784  OperandGenerator g(this);
2785  Emit(kArchDebugBreak, g.NoOutput());
2786 }
2787 
2788 void InstructionSelector::VisitDeadValue(Node* node) {
2789  OperandGenerator g(this);
2790  MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
2791  Emit(kArchDebugBreak, g.DefineAsConstant(node));
2792 }
2793 
2794 void InstructionSelector::VisitComment(Node* node) {
2795  OperandGenerator g(this);
2796  InstructionOperand operand(g.UseImmediate(node));
2797  Emit(kArchComment, 0, nullptr, 1, &operand);
2798 }
2799 
2800 void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
2801 #if V8_TARGET_ARCH_64_BIT
2802  VisitInt64Add(node);
2803 #else // V8_TARGET_ARCH_64_BIT
2804  VisitInt32Add(node);
2805 #endif // V8_TARGET_ARCH_64_BIT
2806 }
2807 
2808 void InstructionSelector::VisitRetain(Node* node) {
2809  OperandGenerator g(this);
2810  Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
2811 }
2812 
2813 bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
2814  // TODO(jarin) Improve the heuristic here.
2815  if (node->opcode() == IrOpcode::kFloat64Add ||
2816  node->opcode() == IrOpcode::kFloat64Sub ||
2817  node->opcode() == IrOpcode::kFloat64Mul) {
2818  return false;
2819  }
2820  return true;
2821 }
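// Rationale: IEEE-754 arithmetic never produces a signaling NaN (an sNaN
// input is quieted), so the result of a Float64 Add/Sub/Mul is safe as-is
// and the kFloat64SilenceNaN case in VisitNode degenerates to EmitIdentity.
// Anything else (loads, constants, ...) is conservatively assumed to be
// able to carry a signaling bit pattern.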
2822 
2823 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
2824  Node* state) {
2825  DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
2826  DCHECK_EQ(kFrameStateInputCount, state->InputCount());
2827  FrameStateInfo state_info = FrameStateInfoOf(state->op());
2828 
2829  int parameters = static_cast<int>(
2830  StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
2831  int locals = static_cast<int>(
2832  StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
2833  int stack = static_cast<int>(
2834  StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
2835 
2836  DCHECK_EQ(parameters, state_info.parameter_count());
2837  DCHECK_EQ(locals, state_info.local_count());
2838 
2839  FrameStateDescriptor* outer_state = nullptr;
2840  Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
2841  if (outer_node->opcode() == IrOpcode::kFrameState) {
2842  outer_state = GetFrameStateDescriptor(outer_node);
2843  }
2844 
2845  return new (instruction_zone()) FrameStateDescriptor(
2846  instruction_zone(), state_info.type(), state_info.bailout_id(),
2847  state_info.state_combine(), parameters, locals, stack,
2848  state_info.shared_info(), outer_state);
2849 }
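
// Note: frame states for inlined calls are chained through their outer-state
// input, so the recursion above links each descriptor to the descriptor of
// the frame it was inlined into.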
2850 
2851 // static
2852 void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
2853  uint8_t* shuffle,
2854  bool* needs_swap,
2855  bool* is_swizzle) {
2856  *needs_swap = false;
2857  // If the inputs are equal, this is a swizzle (single-input shuffle).
2858  if (inputs_equal) {
2859  *is_swizzle = true;
2860  } else {
2861  // Inputs are distinct; check that both are required.
2862  bool src0_is_used = false;
2863  bool src1_is_used = false;
2864  for (int i = 0; i < kSimd128Size; ++i) {
2865  if (shuffle[i] < kSimd128Size) {
2866  src0_is_used = true;
2867  } else {
2868  src1_is_used = true;
2869  }
2870  }
2871  if (src0_is_used && !src1_is_used) {
2872  *is_swizzle = true;
2873  } else if (src1_is_used && !src0_is_used) {
2874  *needs_swap = true;
2875  *is_swizzle = true;
2876  } else {
2877  *is_swizzle = false;
2878  // Canonicalize general 2-input shuffles so that lanes of the first input
2879  // are encountered first. This makes architectural shuffle pattern matching
2880  // easier, since only one input ordering must be considered instead of two.
2881  if (shuffle[0] >= kSimd128Size) {
2882  // The second operand is used first. Swap inputs and adjust the shuffle.
2883  *needs_swap = true;
2884  for (int i = 0; i < kSimd128Size; ++i) {
2885  shuffle[i] ^= kSimd128Size;
2886  }
2887  }
2888  }
2889  }
2890  if (*is_swizzle) {
2891  for (int i = 0; i < kSimd128Size; ++i) shuffle[i] &= kSimd128Size - 1;
2892  }
2893 }
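
// Worked example (values chosen for illustration; kSimd128Size is 16): for
// distinct inputs and shuffle {16,17,...,31}, only the second source is used,
// so *needs_swap and *is_swizzle both become true and the final masking step
// rewrites the indices to {0,1,...,15}. For a mixed shuffle that begins with
// shuffle[0] >= 16, e.g. {16,17,0,1,...}, the inputs are swapped and every
// index is XORed with 16, yielding {0,1,16,17,...}.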
2894 
2895 void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
2896  bool* is_swizzle) {
2897  // Get raw shuffle indices.
2898  memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
2899  bool needs_swap;
2900  bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
2901  GetVirtualRegister(node->InputAt(1));
2902  CanonicalizeShuffle(inputs_equal, shuffle, &needs_swap, is_swizzle);
2903  if (needs_swap) {
2904  SwapShuffleInputs(node);
2905  }
2906  // Duplicate the first input; for some shuffles on some architectures, it's
2907  // easiest to implement a swizzle as a shuffle so it might be used.
2908  if (*is_swizzle) {
2909  node->ReplaceInput(1, node->InputAt(0));
2910  }
2911 }
2912 
2913 // static
2914 void InstructionSelector::SwapShuffleInputs(Node* node) {
2915  Node* input0 = node->InputAt(0);
2916  Node* input1 = node->InputAt(1);
2917  node->ReplaceInput(0, input1);
2918  node->ReplaceInput(1, input0);
2919 }
2920 
2921 // static
2922 bool InstructionSelector::TryMatchIdentity(const uint8_t* shuffle) {
2923  for (int i = 0; i < kSimd128Size; ++i) {
2924  if (shuffle[i] != i) return false;
2925  }
2926  return true;
2927 }
2928 
2929 // static
2930 bool InstructionSelector::TryMatch32x4Shuffle(const uint8_t* shuffle,
2931  uint8_t* shuffle32x4) {
2932  for (int i = 0; i < 4; ++i) {
2933  if (shuffle[i * 4] % 4 != 0) return false;
2934  for (int j = 1; j < 4; ++j) {
2935  if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
2936  }
2937  shuffle32x4[i] = shuffle[i * 4] / 4;
2938  }
2939  return true;
2940 }
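
// Worked example (illustrative): the byte shuffle
// {4,5,6,7, 12,13,14,15, 0,1,2,3, 8,9,10,11} selects four aligned,
// consecutive 4-byte groups, so the match succeeds with
// shuffle32x4 = {1, 3, 0, 2}.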
2941 
2942 // static
2943 bool InstructionSelector::TryMatch16x8Shuffle(const uint8_t* shuffle,
2944  uint8_t* shuffle16x8) {
2945  for (int i = 0; i < 8; ++i) {
2946  if (shuffle[i * 2] % 2 != 0) return false;
2947  for (int j = 1; j < 2; ++j) {
2948  if (shuffle[i * 2 + j] - shuffle[i * 2 + j - 1] != 1) return false;
2949  }
2950  shuffle16x8[i] = shuffle[i * 2] / 2;
2951  }
2952  return true;
2953 }
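
// Worked example (illustrative): the byte shuffle
// {2,3, 0,1, 6,7, 4,5, 10,11, 8,9, 14,15, 12,13} swaps adjacent 16-bit
// lanes, so the match succeeds with shuffle16x8 = {1,0, 3,2, 5,4, 7,6}.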
2954 
2955 // static
2956 bool InstructionSelector::TryMatchConcat(const uint8_t* shuffle,
2957  uint8_t* offset) {
2958  // Don't match the identity shuffle (e.g. [0 1 2 ... 15]).
2959  uint8_t start = shuffle[0];
2960  if (start == 0) return false;
2961  DCHECK_GT(kSimd128Size, start); // The shuffle should be canonicalized.
2962  // A concatenation is a series of consecutive indices, with at most one jump
2963  // in the middle from the last lane to the first.
2964  for (int i = 1; i < kSimd128Size; ++i) {
2965  if (shuffle[i] != shuffle[i - 1] + 1) {
2966  if (shuffle[i - 1] != 15) return false;
2967  if (shuffle[i] % kSimd128Size != 0) return false;
2968  }
2969  }
2970  *offset = start;
2971  return true;
2972 }
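
// Worked example (illustrative): the canonicalized swizzle
// {4,5,6,7,8,9,10,11,12,13,14,15, 0,1,2,3} is consecutive with a single
// wrap after lane 15, so the match succeeds with *offset = 4. This is the
// pattern a byte-concatenation instruction (e.g. ARM EXT or x86 PALIGNR)
// can implement directly.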
2973 
2974 // static
2975 bool InstructionSelector::TryMatchBlend(const uint8_t* shuffle) {
2976  for (int i = 0; i < 16; ++i) {
2977  if ((shuffle[i] & 0xF) != i) return false;
2978  }
2979  return true;
2980 }
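
// Worked example (illustrative): the shuffle
// {0,17, 2,19, 4,21, 6,23, 8,25, 10,27, 12,29, 14,31} keeps every lane in
// place but alternates between the two sources, so the match succeeds; such
// shuffles map onto per-lane blend instructions (e.g. x86 PBLENDVB).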
2981 
2982 // static
2983 int32_t InstructionSelector::Pack4Lanes(const uint8_t* shuffle) {
2984  int32_t result = 0;
2985  for (int i = 3; i >= 0; --i) {
2986  result <<= 8;
2987  result |= shuffle[i];
2988  }
2989  return result;
2990 }
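
// Worked example (illustrative): Pack4Lanes packs four lane indices
// little-endian into an immediate, so {4, 5, 6, 7} becomes 0x07060504,
// with shuffle[0] in the least significant byte.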
2991 
2992 bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
2993  switch (poisoning_level_) {
2994  case PoisoningMitigationLevel::kDontPoison:
2995  return false;
2996  case PoisoningMitigationLevel::kPoisonAll:
2997  return safety_check != IsSafetyCheck::kNoSafetyCheck;
2998  case PoisoningMitigationLevel::kPoisonCriticalOnly:
2999  return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
3000  }
3001  UNREACHABLE();
3002 }
3003 
3004 } // namespace compiler
3005 } // namespace internal
3006 } // namespace v8