V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instruction-selector-x64.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <algorithm>
6 
7 #include "src/base/adapters.h"
8 #include "src/compiler/backend/instruction-selector-impl.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties.h"
11 #include "src/roots-inl.h"
12 #include "src/turbo-assembler.h"
13 
14 namespace v8 {
15 namespace internal {
16 namespace compiler {
17 
18 // Adds X64-specific methods for generating operands.
19 class X64OperandGenerator final : public OperandGenerator {
20  public:
21  explicit X64OperandGenerator(InstructionSelector* selector)
22  : OperandGenerator(selector) {}
23 
24  bool CanBeImmediate(Node* node) {
25  switch (node->opcode()) {
26  case IrOpcode::kInt32Constant:
27  case IrOpcode::kRelocatableInt32Constant:
28  return true;
29  case IrOpcode::kInt64Constant: {
30  const int64_t value = OpParameter<int64_t>(node->op());
31  return std::numeric_limits<int32_t>::min() < value &&
32  value <= std::numeric_limits<int32_t>::max();
33  }
34  case IrOpcode::kNumberConstant: {
35  const double value = OpParameter<double>(node->op());
36  return bit_cast<int64_t>(value) == 0;
37  }
38  default:
39  return false;
40  }
41  }
42 
43  int32_t GetImmediateIntegerValue(Node* node) {
44  DCHECK(CanBeImmediate(node));
45  if (node->opcode() == IrOpcode::kInt32Constant) {
46  return OpParameter<int32_t>(node->op());
47  }
48  DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
49  return static_cast<int32_t>(OpParameter<int64_t>(node->op()));
50  }
51 
52  bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
53  int effect_level) {
54  if (input->opcode() != IrOpcode::kLoad ||
55  !selector()->CanCover(node, input)) {
56  return false;
57  }
58  if (effect_level != selector()->GetEffectLevel(input)) {
59  return false;
60  }
61  MachineRepresentation rep =
62  LoadRepresentationOf(input->op()).representation();
63  switch (opcode) {
64  case kX64And:
65  case kX64Or:
66  case kX64Xor:
67  case kX64Add:
68  case kX64Sub:
69  case kX64Push:
70  case kX64Cmp:
71  case kX64Test:
72  return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
73  case kX64And32:
74  case kX64Or32:
75  case kX64Xor32:
76  case kX64Add32:
77  case kX64Sub32:
78  case kX64Cmp32:
79  case kX64Test32:
80  return rep == MachineRepresentation::kWord32;
81  case kX64Cmp16:
82  case kX64Test16:
83  return rep == MachineRepresentation::kWord16;
84  case kX64Cmp8:
85  case kX64Test8:
86  return rep == MachineRepresentation::kWord8;
87  default:
88  break;
89  }
90  return false;
91  }
92 
93  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
94  Node* base, Node* displacement,
95  DisplacementMode displacement_mode,
96  InstructionOperand inputs[],
97  size_t* input_count) {
98  AddressingMode mode = kMode_MRI;
99  if (base != nullptr && (index != nullptr || displacement != nullptr)) {
100  if (base->opcode() == IrOpcode::kInt32Constant &&
101  OpParameter<int32_t>(base->op()) == 0) {
102  base = nullptr;
103  } else if (base->opcode() == IrOpcode::kInt64Constant &&
104  OpParameter<int64_t>(base->op()) == 0) {
105  base = nullptr;
106  }
107  }
108  if (base != nullptr) {
109  inputs[(*input_count)++] = UseRegister(base);
110  if (index != nullptr) {
111  DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
112  inputs[(*input_count)++] = UseRegister(index);
113  if (displacement != nullptr) {
114  inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
115  ? UseNegatedImmediate(displacement)
116  : UseImmediate(displacement);
117  static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
118  kMode_MR4I, kMode_MR8I};
119  mode = kMRnI_modes[scale_exponent];
120  } else {
121  static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
122  kMode_MR4, kMode_MR8};
123  mode = kMRn_modes[scale_exponent];
124  }
125  } else {
126  if (displacement == nullptr) {
127  mode = kMode_MR;
128  } else {
129  inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
130  ? UseNegatedImmediate(displacement)
131  : UseImmediate(displacement);
132  mode = kMode_MRI;
133  }
134  }
135  } else {
136  DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
137  if (displacement != nullptr) {
138  if (index == nullptr) {
139  inputs[(*input_count)++] = UseRegister(displacement);
140  mode = kMode_MR;
141  } else {
142  inputs[(*input_count)++] = UseRegister(index);
143  inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
144  ? UseNegatedImmediate(displacement)
145  : UseImmediate(displacement);
146  static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
147  kMode_M4I, kMode_M8I};
148  mode = kMnI_modes[scale_exponent];
149  }
150  } else {
151  inputs[(*input_count)++] = UseRegister(index);
152  static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
153  kMode_M4, kMode_M8};
154  mode = kMn_modes[scale_exponent];
155  if (mode == kMode_MR1) {
156  // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
157  inputs[(*input_count)++] = UseRegister(index);
158  }
159  }
160  }
161  return mode;
162  }
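// Note: the mode tables above are indexed by scale_exponent (0..3, i.e. *1,
// *2, *4, *8). For example, an operand of the form [base + index*4 + disp]
// with all three parts present selects kMode_MR4I and emits three inputs
// (base register, index register, immediate displacement); without the
// displacement it becomes kMode_MR4, and without the base it becomes
// kMode_M4I or kMode_M4.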
163 
164  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
165  InstructionOperand inputs[],
166  size_t* input_count) {
167  if (selector()->CanAddressRelativeToRootsRegister()) {
 168  LoadMatcher<ExternalReferenceMatcher> m(operand);
 169  if (m.index().HasValue() && m.object().HasValue()) {
170  ptrdiff_t const delta =
171  m.index().Value() +
172  TurboAssemblerBase::RootRegisterOffsetForExternalReference(
173  selector()->isolate(), m.object().Value());
174  if (is_int32(delta)) {
175  inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
176  return kMode_Root;
177  }
178  }
179  }
180  BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
181  DCHECK(m.matches());
182  if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
183  return GenerateMemoryOperandInputs(
184  m.index(), m.scale(), m.base(), m.displacement(),
185  m.displacement_mode(), inputs, input_count);
186  } else if (m.base() == nullptr &&
187  m.displacement_mode() == kPositiveDisplacement) {
188  // The displacement cannot be an immediate, but we can use the
189  // displacement as base instead and still benefit from addressing
190  // modes for the scale.
191  return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
192  nullptr, m.displacement_mode(), inputs,
193  input_count);
194  } else {
195  inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
196  inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
197  return kMode_MR1;
198  }
199  }
200 
201  InstructionOperand GetEffectiveIndexOperand(Node* index,
202  AddressingMode* mode) {
203  if (CanBeImmediate(index)) {
204  *mode = kMode_MRI;
205  return UseImmediate(index);
206  } else {
207  *mode = kMode_MR1;
208  return UseUniqueRegister(index);
209  }
210  }
211 
212  bool CanBeBetterLeftOperand(Node* node) const {
213  return !selector()->IsLive(node);
214  }
215 };
216 
217 namespace {
218 ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
219  ArchOpcode opcode = kArchNop;
220  switch (load_rep.representation()) {
221  case MachineRepresentation::kFloat32:
222  opcode = kX64Movss;
223  break;
224  case MachineRepresentation::kFloat64:
225  opcode = kX64Movsd;
226  break;
227  case MachineRepresentation::kBit: // Fall through.
228  case MachineRepresentation::kWord8:
229  opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
230  break;
231  case MachineRepresentation::kWord16:
232  opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
233  break;
234  case MachineRepresentation::kWord32:
235  opcode = kX64Movl;
236  break;
237 #ifdef V8_COMPRESS_POINTERS
238  case MachineRepresentation::kTaggedSigned:
239  return kX64MovqDecompressTaggedSigned;
240  case MachineRepresentation::kTaggedPointer:
241  return kX64MovqDecompressTaggedPointer;
242  case MachineRepresentation::kTagged:
243  return kX64MovqDecompressAnyTagged;
244 #else
245  case MachineRepresentation::kTaggedSigned: // Fall through.
246  case MachineRepresentation::kTaggedPointer: // Fall through.
247  case MachineRepresentation::kTagged: // Fall through.
248 #endif
249  case MachineRepresentation::kWord64:
250  opcode = kX64Movq;
251  break;
252  case MachineRepresentation::kSimd128: // Fall through.
253  opcode = kX64Movdqu;
254  break;
255  case MachineRepresentation::kNone:
256  UNREACHABLE();
257  break;
258  }
259  return opcode;
260 }
261 
262 ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
263  switch (store_rep.representation()) {
264  case MachineRepresentation::kFloat32:
265  return kX64Movss;
266  break;
267  case MachineRepresentation::kFloat64:
268  return kX64Movsd;
269  break;
270  case MachineRepresentation::kBit: // Fall through.
271  case MachineRepresentation::kWord8:
272  return kX64Movb;
273  break;
274  case MachineRepresentation::kWord16:
275  return kX64Movw;
276  break;
277  case MachineRepresentation::kWord32:
278  return kX64Movl;
279  break;
280  case MachineRepresentation::kTaggedSigned: // Fall through.
281  case MachineRepresentation::kTaggedPointer: // Fall through.
282  case MachineRepresentation::kTagged: // Fall through.
283  case MachineRepresentation::kWord64:
284  return kX64Movq;
285  break;
286  case MachineRepresentation::kSimd128: // Fall through.
287  return kX64Movdqu;
288  break;
289  case MachineRepresentation::kNone:
290  UNREACHABLE();
291  }
292  UNREACHABLE();
293 }
294 
295 } // namespace
296 
297 void InstructionSelector::VisitStackSlot(Node* node) {
298  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
299  int slot = frame_->AllocateSpillSlot(rep.size());
300  OperandGenerator g(this);
301 
302  Emit(kArchStackSlot, g.DefineAsRegister(node),
303  sequence()->AddImmediate(Constant(slot)), 0, nullptr);
304 }
305 
306 void InstructionSelector::VisitDebugAbort(Node* node) {
307  X64OperandGenerator g(this);
308  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
309 }
310 
311 void InstructionSelector::VisitSpeculationFence(Node* node) {
312  X64OperandGenerator g(this);
313  Emit(kLFence, g.NoOutput());
314 }
315 
316 void InstructionSelector::VisitLoad(Node* node) {
317  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
318  X64OperandGenerator g(this);
319 
320  ArchOpcode opcode = GetLoadOpcode(load_rep);
321  size_t temp_count = 0;
322  InstructionOperand temps[2];
323 #ifdef V8_COMPRESS_POINTERS
324  if (opcode == kX64MovqDecompressAnyTagged) {
325  temps[temp_count++] = g.TempRegister();
326  }
327 #ifdef DEBUG
328  if (opcode == kX64MovqDecompressTaggedSigned ||
329  opcode == kX64MovqDecompressTaggedPointer ||
330  opcode == kX64MovqDecompressAnyTagged) {
331  temps[temp_count++] = g.TempRegister();
332  }
333 #endif // DEBUG
334 #endif // V8_COMPRESS_POINTERS
335  DCHECK_LE(temp_count, arraysize(temps));
336  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
337  InstructionOperand inputs[3];
338  size_t input_count = 0;
339  AddressingMode mode =
340  g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
341  InstructionCode code = opcode | AddressingModeField::encode(mode);
342  if (node->opcode() == IrOpcode::kProtectedLoad) {
343  code |= MiscField::encode(kMemoryAccessProtected);
344  } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
345  CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
346  code |= MiscField::encode(kMemoryAccessPoisoned);
347  }
348  Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
349 }
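// Note: the selected addressing mode is packed into the InstructionCode via
// AddressingModeField, e.g. kX64Movl | AddressingModeField::encode(kMode_MRI)
// for a [register + immediate] load. Protected and poisoned loads
// additionally set the MiscField so the access is handled specially during
// code generation.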
350 
351 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
352 
353 void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
354 
355 void InstructionSelector::VisitStore(Node* node) {
356  X64OperandGenerator g(this);
357  Node* base = node->InputAt(0);
358  Node* index = node->InputAt(1);
359  Node* value = node->InputAt(2);
360 
361  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
362  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
363 
364  if (write_barrier_kind != kNoWriteBarrier) {
365  DCHECK(CanBeTaggedPointer(store_rep.representation()));
366  AddressingMode addressing_mode;
367  InstructionOperand inputs[] = {
368  g.UseUniqueRegister(base),
369  g.GetEffectiveIndexOperand(index, &addressing_mode),
370  g.UseUniqueRegister(value)};
371  RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
372  switch (write_barrier_kind) {
373  case kNoWriteBarrier:
374  UNREACHABLE();
375  break;
376  case kMapWriteBarrier:
377  record_write_mode = RecordWriteMode::kValueIsMap;
378  break;
379  case kPointerWriteBarrier:
380  record_write_mode = RecordWriteMode::kValueIsPointer;
381  break;
382  case kFullWriteBarrier:
383  record_write_mode = RecordWriteMode::kValueIsAny;
384  break;
385  }
386  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
387  InstructionCode code = kArchStoreWithWriteBarrier;
388  code |= AddressingModeField::encode(addressing_mode);
389  code |= MiscField::encode(static_cast<int>(record_write_mode));
390  Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
391  } else {
392  ArchOpcode opcode = GetStoreOpcode(store_rep);
393  InstructionOperand inputs[4];
394  size_t input_count = 0;
395  AddressingMode addressing_mode =
396  g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
397  InstructionCode code =
398  opcode | AddressingModeField::encode(addressing_mode);
399  if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
400  (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
401  CanCover(node, value)) {
402  value = value->InputAt(0);
403  }
404  InstructionOperand value_operand =
405  g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
406  inputs[input_count++] = value_operand;
407  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
408  inputs);
409  }
410 }
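// Note: stores that need a write barrier go through kArchStoreWithWriteBarrier
// with base, index and value in unique registers plus two temporaries; plain
// stores fold the address into the store instruction and use an immediate for
// the value when possible. A store of a 32-bit (or narrower) value also drops
// a covering TruncateInt64ToInt32 on the value, since the narrow store only
// reads the low bits anyway.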
411 
412 void InstructionSelector::VisitProtectedStore(Node* node) {
413  X64OperandGenerator g(this);
414  Node* value = node->InputAt(2);
415 
416  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
417 
418  ArchOpcode opcode = GetStoreOpcode(store_rep);
419  InstructionOperand inputs[4];
420  size_t input_count = 0;
421  AddressingMode addressing_mode =
422  g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
423  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
424  MiscField::encode(kMemoryAccessProtected);
425  InstructionOperand value_operand =
426  g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
427  inputs[input_count++] = value_operand;
428  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
429 }
430 
431 // Architecture supports unaligned access, therefore VisitLoad is used instead
432 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
433 
434 // Architecture supports unaligned access, therefore VisitStore is used instead
435 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
436 
437 // Shared routine for multiple binary operations.
438 static void VisitBinop(InstructionSelector* selector, Node* node,
439  InstructionCode opcode, FlagsContinuation* cont) {
440  X64OperandGenerator g(selector);
441  Int32BinopMatcher m(node);
442  Node* left = m.left().node();
443  Node* right = m.right().node();
444  InstructionOperand inputs[8];
445  size_t input_count = 0;
446  InstructionOperand outputs[1];
447  size_t output_count = 0;
448 
449  // TODO(turbofan): match complex addressing modes.
450  if (left == right) {
451  // If both inputs refer to the same operand, enforce allocating a register
452  // for both of them to ensure that we don't end up generating code like
453  // this:
454  //
455  // mov rax, [rbp-0x10]
456  // add rax, [rbp-0x10]
457  // jo label
458  InstructionOperand const input = g.UseRegister(left);
459  inputs[input_count++] = input;
460  inputs[input_count++] = input;
461  } else if (g.CanBeImmediate(right)) {
462  inputs[input_count++] = g.UseRegister(left);
463  inputs[input_count++] = g.UseImmediate(right);
464  } else {
465  int effect_level = selector->GetEffectLevel(node);
466  if (cont->IsBranch()) {
467  effect_level = selector->GetEffectLevel(
468  cont->true_block()->PredecessorAt(0)->control_input());
469  }
470  if (node->op()->HasProperty(Operator::kCommutative) &&
471  g.CanBeBetterLeftOperand(right) &&
472  (!g.CanBeBetterLeftOperand(left) ||
473  !g.CanBeMemoryOperand(opcode, node, right, effect_level))) {
474  std::swap(left, right);
475  }
476  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
477  inputs[input_count++] = g.UseRegister(left);
478  AddressingMode addressing_mode =
479  g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
480  opcode |= AddressingModeField::encode(addressing_mode);
481  } else {
482  inputs[input_count++] = g.UseRegister(left);
483  inputs[input_count++] = g.Use(right);
484  }
485  }
486 
487  if (cont->IsBranch()) {
488  inputs[input_count++] = g.Label(cont->true_block());
489  inputs[input_count++] = g.Label(cont->false_block());
490  }
491 
492  outputs[output_count++] = g.DefineSameAsFirst(node);
493 
494  DCHECK_NE(0u, input_count);
495  DCHECK_EQ(1u, output_count);
496  DCHECK_GE(arraysize(inputs), input_count);
497  DCHECK_GE(arraysize(outputs), output_count);
498 
499  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
500  inputs, cont);
501 }
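// Note: when the right-hand input is a load covered by this node (and at the
// same effect level), the load is folded into the arithmetic instruction as a
// memory operand, e.g. roughly "add rax, [rbx+0x10]" instead of a separate
// load followed by an add. For commutative operators the inputs may be
// swapped first so that the operand that is no longer live ends up on the
// left, which is the one that gets overwritten (see DefineSameAsFirst).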
502 
503 // Shared routine for multiple binary operations.
504 static void VisitBinop(InstructionSelector* selector, Node* node,
505  InstructionCode opcode) {
506  FlagsContinuation cont;
507  VisitBinop(selector, node, opcode, &cont);
508 }
509 
510 void InstructionSelector::VisitWord32And(Node* node) {
511  X64OperandGenerator g(this);
512  Uint32BinopMatcher m(node);
513  if (m.right().Is(0xFF)) {
514  Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
515  } else if (m.right().Is(0xFFFF)) {
516  Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
517  } else {
518  VisitBinop(this, node, kX64And32);
519  }
520 }
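// Note: masking with 0xFF or 0xFFFF is strength-reduced to a zero-extending
// move (movzxbl / movzxwl), e.g. "x & 0xFF" becomes a single movzxbl instead
// of an andl with an immediate.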
521 
522 void InstructionSelector::VisitWord64And(Node* node) {
523  VisitBinop(this, node, kX64And);
524 }
525 
526 void InstructionSelector::VisitWord32Or(Node* node) {
527  VisitBinop(this, node, kX64Or32);
528 }
529 
530 void InstructionSelector::VisitWord64Or(Node* node) {
531  VisitBinop(this, node, kX64Or);
532 }
533 
534 void InstructionSelector::VisitWord32Xor(Node* node) {
535  X64OperandGenerator g(this);
536  Uint32BinopMatcher m(node);
537  if (m.right().Is(-1)) {
538  Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
539  } else {
540  VisitBinop(this, node, kX64Xor32);
541  }
542 }
543 
544 void InstructionSelector::VisitWord64Xor(Node* node) {
545  X64OperandGenerator g(this);
546  Uint64BinopMatcher m(node);
547  if (m.right().Is(-1)) {
548  Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
549  } else {
550  VisitBinop(this, node, kX64Xor);
551  }
552 }
553 
554 namespace {
555 
556 bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
557  Node* node, Node* load) {
558  if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) {
559  LoadRepresentation load_rep = LoadRepresentationOf(load->op());
560  MachineRepresentation rep = load_rep.representation();
561  InstructionCode opcode = kArchNop;
562  switch (rep) {
563  case MachineRepresentation::kBit: // Fall through.
564  case MachineRepresentation::kWord8:
565  opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
566  break;
567  case MachineRepresentation::kWord16:
568  opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
569  break;
570  case MachineRepresentation::kWord32:
571  case MachineRepresentation::kWord64:
572  case MachineRepresentation::kTaggedSigned:
573  case MachineRepresentation::kTagged:
574  opcode = kX64Movl;
575  break;
576  default:
577  UNREACHABLE();
578  return false;
579  }
580  X64OperandGenerator g(selector);
581  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
582  size_t input_count = 0;
583  InstructionOperand inputs[3];
584  AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
585  node->InputAt(0), inputs, &input_count);
586  opcode |= AddressingModeField::encode(mode);
587  selector->Emit(opcode, 1, outputs, input_count, inputs);
588  return true;
589  }
590  return false;
591 }
592 
593 // Shared routine for multiple 32-bit shift operations.
594 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
595 void VisitWord32Shift(InstructionSelector* selector, Node* node,
596  ArchOpcode opcode) {
597  X64OperandGenerator g(selector);
598  Int32BinopMatcher m(node);
599  Node* left = m.left().node();
600  Node* right = m.right().node();
601 
602  if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
603  selector->CanCover(node, left)) {
604  left = left->InputAt(0);
605  }
606 
607  if (g.CanBeImmediate(right)) {
608  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
609  g.UseImmediate(right));
610  } else {
611  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
612  g.UseFixed(right, rcx));
613  }
614 }
615 
616 // Shared routine for multiple 64-bit shift operations.
617 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
618 void VisitWord64Shift(InstructionSelector* selector, Node* node,
619  ArchOpcode opcode) {
620  X64OperandGenerator g(selector);
621  Int64BinopMatcher m(node);
622  Node* left = m.left().node();
623  Node* right = m.right().node();
624 
625  if (g.CanBeImmediate(right)) {
626  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
627  g.UseImmediate(right));
628  } else {
629  if (m.right().IsWord64And()) {
630  Int64BinopMatcher mright(right);
631  if (mright.right().Is(0x3F)) {
632  right = mright.left().node();
633  }
634  }
635  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
636  g.UseFixed(right, rcx));
637  }
638 }
639 
640 // Shared routine for multiple shift operations with continuation.
641 template <typename BinopMatcher, int Bits>
642 bool TryVisitWordShift(InstructionSelector* selector, Node* node,
643  ArchOpcode opcode, FlagsContinuation* cont) {
644  X64OperandGenerator g(selector);
645  BinopMatcher m(node);
646  Node* left = m.left().node();
647  Node* right = m.right().node();
648 
649  // If the shift count is 0, the flags are not affected.
650  if (!g.CanBeImmediate(right) ||
651  (g.GetImmediateIntegerValue(right) & (Bits - 1)) == 0) {
652  return false;
653  }
654  InstructionOperand output = g.DefineSameAsFirst(node);
655  InstructionOperand inputs[2];
656  inputs[0] = g.UseRegister(left);
657  inputs[1] = g.UseImmediate(right);
658  selector->EmitWithContinuation(opcode, 1, &output, 2, inputs, cont);
659  return true;
660 }
661 
662 void EmitLea(InstructionSelector* selector, InstructionCode opcode,
663  Node* result, Node* index, int scale, Node* base,
664  Node* displacement, DisplacementMode displacement_mode) {
665  X64OperandGenerator g(selector);
666 
667  InstructionOperand inputs[4];
668  size_t input_count = 0;
669  AddressingMode mode =
670  g.GenerateMemoryOperandInputs(index, scale, base, displacement,
671  displacement_mode, inputs, &input_count);
672 
673  DCHECK_NE(0u, input_count);
674  DCHECK_GE(arraysize(inputs), input_count);
675 
676  InstructionOperand outputs[1];
677  outputs[0] = g.DefineAsRegister(result);
678 
679  opcode = AddressingModeField::encode(mode) | opcode;
680 
681  selector->Emit(opcode, 1, outputs, input_count, inputs);
682 }
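// Note: lea computes the effective address without touching memory or flags,
// so index*{1,2,4,8} plus an optional base and displacement folds into one
// instruction; e.g. "x * 5" can be selected as roughly
// "lea eax, [rcx + rcx*4]" via the power_of_two_plus_one pattern used by
// VisitWord32Shl and VisitInt32Mul below.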
683 
684 } // namespace
685 
686 void InstructionSelector::VisitWord32Shl(Node* node) {
687  Int32ScaleMatcher m(node, true);
688  if (m.matches()) {
689  Node* index = node->InputAt(0);
690  Node* base = m.power_of_two_plus_one() ? index : nullptr;
691  EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
692  kPositiveDisplacement);
693  return;
694  }
695  VisitWord32Shift(this, node, kX64Shl32);
696 }
697 
698 void InstructionSelector::VisitWord64Shl(Node* node) {
699  X64OperandGenerator g(this);
700  Int64ScaleMatcher m(node, true);
701  if (m.matches()) {
702  Node* index = node->InputAt(0);
703  Node* base = m.power_of_two_plus_one() ? index : nullptr;
704  EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
705  kPositiveDisplacement);
706  return;
707  } else {
708  Int64BinopMatcher m(node);
709  if ((m.left().IsChangeInt32ToInt64() ||
710  m.left().IsChangeUint32ToUint64()) &&
711  m.right().IsInRange(32, 63)) {
712  // There's no need to sign/zero-extend to 64-bit if we shift out the upper
713  // 32 bits anyway.
714  Emit(kX64Shl, g.DefineSameAsFirst(node),
715  g.UseRegister(m.left().node()->InputAt(0)),
716  g.UseImmediate(m.right().node()));
717  return;
718  }
719  }
720  VisitWord64Shift(this, node, kX64Shl);
721 }
722 
723 void InstructionSelector::VisitWord32Shr(Node* node) {
724  VisitWord32Shift(this, node, kX64Shr32);
725 }
726 
727 namespace {
728 
729 inline AddressingMode AddDisplacementToAddressingMode(AddressingMode mode) {
730  switch (mode) {
731  case kMode_MR:
732  return kMode_MRI;
733  break;
734  case kMode_MR1:
735  return kMode_MR1I;
736  break;
737  case kMode_MR2:
738  return kMode_MR2I;
739  break;
740  case kMode_MR4:
741  return kMode_MR4I;
742  break;
743  case kMode_MR8:
744  return kMode_MR8I;
745  break;
746  case kMode_M1:
747  return kMode_M1I;
748  break;
749  case kMode_M2:
750  return kMode_M2I;
751  break;
752  case kMode_M4:
753  return kMode_M4I;
754  break;
755  case kMode_M8:
756  return kMode_M8I;
757  break;
758  case kMode_None:
759  case kMode_MRI:
760  case kMode_MR1I:
761  case kMode_MR2I:
762  case kMode_MR4I:
763  case kMode_MR8I:
764  case kMode_M1I:
765  case kMode_M2I:
766  case kMode_M4I:
767  case kMode_M8I:
768  case kMode_Root:
769  UNREACHABLE();
770  }
771  UNREACHABLE();
772 }
773 
774 bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
775  InstructionCode opcode) {
776  DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
777  IrOpcode::kWord64Shr == node->opcode());
778  X64OperandGenerator g(selector);
779  Int64BinopMatcher m(node);
780  if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
781  m.right().Is(32)) {
782  DCHECK_EQ(selector->GetEffectLevel(node),
783  selector->GetEffectLevel(m.left().node()));
784  // Just load and sign-extend the interesting 4 bytes instead. This happens,
785  // for example, when we're loading and untagging SMIs.
786  BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
787  AddressOption::kAllowAll);
788  if (mleft.matches() && (mleft.displacement() == nullptr ||
789  g.CanBeImmediate(mleft.displacement()))) {
790  size_t input_count = 0;
791  InstructionOperand inputs[3];
792  AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
793  m.left().node(), inputs, &input_count);
794  if (mleft.displacement() == nullptr) {
795  // Make sure that the addressing mode indicates the presence of an
796  // immediate displacement. It seems that we never use M1 and M2, but we
797  // handle them here anyways.
798  mode = AddDisplacementToAddressingMode(mode);
799  inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
800  } else {
801  // In the case that the base address was zero, the displacement will be
802  // in a register and replacing it with an immediate is not allowed. This
803  // usually only happens in dead code anyway.
804  if (!inputs[input_count - 1].IsImmediate()) return false;
805  int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
806  inputs[input_count - 1] =
807  ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
808  }
809  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
810  InstructionCode code = opcode | AddressingModeField::encode(mode);
811  selector->Emit(code, 1, outputs, input_count, inputs);
812  return true;
813  }
814  }
815  return false;
816 }
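// Note: this relies on x64 being little-endian: the upper half of a 64-bit
// value sits at byte offset +4, so "Load64(addr) >> 32" can be selected as a
// 32-bit load (or a sign-extending load for Word64Sar) from addr+4. The code
// above achieves that by adding 4 to the displacement of the matched address.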
817 
818 } // namespace
819 
820 void InstructionSelector::VisitWord64Shr(Node* node) {
821  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
822  VisitWord64Shift(this, node, kX64Shr);
823 }
824 
825 void InstructionSelector::VisitWord32Sar(Node* node) {
826  X64OperandGenerator g(this);
827  Int32BinopMatcher m(node);
828  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
829  Int32BinopMatcher mleft(m.left().node());
830  if (mleft.right().Is(16) && m.right().Is(16)) {
831  Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
832  return;
833  } else if (mleft.right().Is(24) && m.right().Is(24)) {
834  Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
835  return;
836  }
837  }
838  VisitWord32Shift(this, node, kX64Sar32);
839 }
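// Note: "(x << 16) >> 16" with an arithmetic right shift is just a sign
// extension of the low 16 bits, so it is selected as a single movsxwl; the
// 24-bit variant likewise becomes movsxbl (sign extension of the low byte).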
840 
841 void InstructionSelector::VisitWord64Sar(Node* node) {
842  if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
843  VisitWord64Shift(this, node, kX64Sar);
844 }
845 
846 void InstructionSelector::VisitWord32Ror(Node* node) {
847  VisitWord32Shift(this, node, kX64Ror32);
848 }
849 
850 void InstructionSelector::VisitWord64Ror(Node* node) {
851  VisitWord64Shift(this, node, kX64Ror);
852 }
853 
854 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
855 
856 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
857 
858 void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
859  X64OperandGenerator g(this);
860  Emit(kX64Bswap, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
861 }
862 
863 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
864  X64OperandGenerator g(this);
865  Emit(kX64Bswap32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)));
866 }
867 
868 void InstructionSelector::VisitInt32Add(Node* node) {
869  X64OperandGenerator g(this);
870 
871  // Try to match the Add to a leal pattern
872  BaseWithIndexAndDisplacement32Matcher m(node);
873  if (m.matches() &&
874  (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
875  EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
876  m.displacement(), m.displacement_mode());
877  return;
878  }
879 
880  // No leal pattern match, use addl
881  VisitBinop(this, node, kX64Add32);
882 }
883 
884 void InstructionSelector::VisitInt64Add(Node* node) {
885  X64OperandGenerator g(this);
886 
887  // Try to match the Add to a leaq pattern
888  BaseWithIndexAndDisplacement64Matcher m(node);
889  if (m.matches() &&
890  (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
891  EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
892  m.displacement(), m.displacement_mode());
893  return;
894  }
895 
 896  // No leaq pattern match, use addq
897  VisitBinop(this, node, kX64Add);
898 }
899 
900 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
901  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
902  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
903  return VisitBinop(this, node, kX64Add, &cont);
904  }
905  FlagsContinuation cont;
906  VisitBinop(this, node, kX64Add, &cont);
907 }
908 
909 void InstructionSelector::VisitInt32Sub(Node* node) {
910  X64OperandGenerator g(this);
911  DCHECK_EQ(node->InputCount(), 2);
912  Node* input1 = node->InputAt(0);
913  Node* input2 = node->InputAt(1);
914  if (input1->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
915  g.CanBeImmediate(input2)) {
916  int32_t imm = g.GetImmediateIntegerValue(input2);
917  InstructionOperand int64_input = g.UseRegister(input1->InputAt(0));
918  if (imm == 0) {
919  // Emit "movl" for subtraction of 0.
920  Emit(kX64Movl, g.DefineAsRegister(node), int64_input);
921  } else {
922  // Omit truncation and turn subtractions of constant values into immediate
923  // "leal" instructions by negating the value.
924  Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
925  g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
926  }
927  return;
928  }
929 
930  Int32BinopMatcher m(node);
931  if (m.left().Is(0)) {
932  Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
933  } else if (m.right().Is(0)) {
934  // TODO(jarin): We should be able to use {EmitIdentity} here
935  // (https://crbug.com/v8/7947).
936  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
937  } else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
938  // Turn subtractions of constant values into immediate "leal" instructions
939  // by negating the value.
940  Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
941  g.DefineAsRegister(node), g.UseRegister(m.left().node()),
942  g.TempImmediate(-m.right().Value()));
943  } else {
944  VisitBinop(this, node, kX64Sub32);
945  }
946 }
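// Note: subtracting a constant is rewritten as "leal dst, [src + (-imm)]",
// i.e. a kX64Lea32 with the negated value as an immediate displacement. This
// avoids clobbering the source register and, for a truncated 64-bit input,
// lets the explicit truncation be dropped because the 32-bit lea only
// produces the low 32 bits anyway.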
947 
948 void InstructionSelector::VisitInt64Sub(Node* node) {
949  X64OperandGenerator g(this);
950  Int64BinopMatcher m(node);
951  if (m.left().Is(0)) {
952  Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
953  } else {
954  if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
955  // Turn subtractions of constant values into immediate "leaq" instructions
956  // by negating the value.
957  Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
958  g.DefineAsRegister(node), g.UseRegister(m.left().node()),
959  g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
960  return;
961  }
962  VisitBinop(this, node, kX64Sub);
963  }
964 }
965 
966 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
967  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
968  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
969  return VisitBinop(this, node, kX64Sub, &cont);
970  }
971  FlagsContinuation cont;
972  VisitBinop(this, node, kX64Sub, &cont);
973 }
974 
975 namespace {
976 
977 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
978  X64OperandGenerator g(selector);
979  Int32BinopMatcher m(node);
980  Node* left = m.left().node();
981  Node* right = m.right().node();
982  if (g.CanBeImmediate(right)) {
983  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
984  g.UseImmediate(right));
985  } else {
986  if (g.CanBeBetterLeftOperand(right)) {
987  std::swap(left, right);
988  }
989  selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
990  g.Use(right));
991  }
992 }
993 
994 void VisitMulHigh(InstructionSelector* selector, Node* node,
995  ArchOpcode opcode) {
996  X64OperandGenerator g(selector);
997  Node* left = node->InputAt(0);
998  Node* right = node->InputAt(1);
999  if (selector->IsLive(left) && !selector->IsLive(right)) {
1000  std::swap(left, right);
1001  }
1002  InstructionOperand temps[] = {g.TempRegister(rax)};
1003  // TODO(turbofan): We use UseUniqueRegister here to improve register
1004  // allocation.
1005  selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
1006  g.UseUniqueRegister(right), arraysize(temps), temps);
1007 }
1008 
1009 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
1010  X64OperandGenerator g(selector);
1011  InstructionOperand temps[] = {g.TempRegister(rdx)};
1012  selector->Emit(
1013  opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
1014  g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
1015 }
1016 
1017 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
1018  X64OperandGenerator g(selector);
1019  InstructionOperand temps[] = {g.TempRegister(rax)};
1020  selector->Emit(
1021  opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
1022  g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
1023 }
1024 
1025 } // namespace
1026 
1027 void InstructionSelector::VisitInt32Mul(Node* node) {
1028  Int32ScaleMatcher m(node, true);
1029  if (m.matches()) {
1030  Node* index = node->InputAt(0);
1031  Node* base = m.power_of_two_plus_one() ? index : nullptr;
1032  EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
1033  kPositiveDisplacement);
1034  return;
1035  }
1036  VisitMul(this, node, kX64Imul32);
1037 }
1038 
1039 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1040  // TODO(mvstanton): Use Int32ScaleMatcher somehow.
1041  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1042  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1043  return VisitBinop(this, node, kX64Imul32, &cont);
1044  }
1045  FlagsContinuation cont;
1046  VisitBinop(this, node, kX64Imul32, &cont);
1047 }
1048 
1049 void InstructionSelector::VisitInt64Mul(Node* node) {
1050  VisitMul(this, node, kX64Imul);
1051 }
1052 
1053 void InstructionSelector::VisitInt32MulHigh(Node* node) {
1054  VisitMulHigh(this, node, kX64ImulHigh32);
1055 }
1056 
1057 void InstructionSelector::VisitInt32Div(Node* node) {
1058  VisitDiv(this, node, kX64Idiv32);
1059 }
1060 
1061 void InstructionSelector::VisitInt64Div(Node* node) {
1062  VisitDiv(this, node, kX64Idiv);
1063 }
1064 
1065 void InstructionSelector::VisitUint32Div(Node* node) {
1066  VisitDiv(this, node, kX64Udiv32);
1067 }
1068 
1069 void InstructionSelector::VisitUint64Div(Node* node) {
1070  VisitDiv(this, node, kX64Udiv);
1071 }
1072 
1073 void InstructionSelector::VisitInt32Mod(Node* node) {
1074  VisitMod(this, node, kX64Idiv32);
1075 }
1076 
1077 void InstructionSelector::VisitInt64Mod(Node* node) {
1078  VisitMod(this, node, kX64Idiv);
1079 }
1080 
1081 void InstructionSelector::VisitUint32Mod(Node* node) {
1082  VisitMod(this, node, kX64Udiv32);
1083 }
1084 
1085 void InstructionSelector::VisitUint64Mod(Node* node) {
1086  VisitMod(this, node, kX64Udiv);
1087 }
1088 
1089 void InstructionSelector::VisitUint32MulHigh(Node* node) {
1090  VisitMulHigh(this, node, kX64UmulHigh32);
1091 }
1092 
1093 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1094  X64OperandGenerator g(this);
1095  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1096  InstructionOperand outputs[2];
1097  size_t output_count = 0;
1098  outputs[output_count++] = g.DefineAsRegister(node);
1099 
1100  Node* success_output = NodeProperties::FindProjection(node, 1);
1101  if (success_output) {
1102  outputs[output_count++] = g.DefineAsRegister(success_output);
1103  }
1104 
1105  Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
1106 }
1107 
1108 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1109  X64OperandGenerator g(this);
1110  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1111  InstructionOperand outputs[2];
1112  size_t output_count = 0;
1113  outputs[output_count++] = g.DefineAsRegister(node);
1114 
1115  Node* success_output = NodeProperties::FindProjection(node, 1);
1116  if (success_output) {
1117  outputs[output_count++] = g.DefineAsRegister(success_output);
1118  }
1119 
1120  Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
1121 }
1122 
1123 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1124  X64OperandGenerator g(this);
1125  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1126  InstructionOperand outputs[2];
1127  size_t output_count = 0;
1128  outputs[output_count++] = g.DefineAsRegister(node);
1129 
1130  Node* success_output = NodeProperties::FindProjection(node, 1);
1131  if (success_output) {
1132  outputs[output_count++] = g.DefineAsRegister(success_output);
1133  }
1134 
1135  Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
1136 }
1137 
1138 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1139  X64OperandGenerator g(this);
1140  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1141  InstructionOperand outputs[2];
1142  size_t output_count = 0;
1143  outputs[output_count++] = g.DefineAsRegister(node);
1144 
1145  Node* success_output = NodeProperties::FindProjection(node, 1);
1146  if (success_output) {
1147  outputs[output_count++] = g.DefineAsRegister(success_output);
1148  }
1149 
1150  Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
1151 }
1152 
1153 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
1154  X64OperandGenerator g(this);
1155  Node* const value = node->InputAt(0);
1156  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
1157  LoadRepresentation load_rep = LoadRepresentationOf(value->op());
1158  MachineRepresentation rep = load_rep.representation();
1159  InstructionCode opcode = kArchNop;
1160  switch (rep) {
1161  case MachineRepresentation::kBit: // Fall through.
1162  case MachineRepresentation::kWord8:
1163  opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
1164  break;
1165  case MachineRepresentation::kWord16:
1166  opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
1167  break;
1168  case MachineRepresentation::kWord32:
1169  opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
1170  break;
1171  default:
1172  UNREACHABLE();
1173  return;
1174  }
1175  InstructionOperand outputs[] = {g.DefineAsRegister(node)};
1176  size_t input_count = 0;
1177  InstructionOperand inputs[3];
1178  AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
1179  node->InputAt(0), inputs, &input_count);
1180  opcode |= AddressingModeField::encode(mode);
1181  Emit(opcode, 1, outputs, input_count, inputs);
1182  } else {
1183  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1184  }
1185 }
1186 
1187 namespace {
1188 
1189 bool ZeroExtendsWord32ToWord64(Node* node) {
1190  switch (node->opcode()) {
1191  case IrOpcode::kWord32And:
1192  case IrOpcode::kWord32Or:
1193  case IrOpcode::kWord32Xor:
1194  case IrOpcode::kWord32Shl:
1195  case IrOpcode::kWord32Shr:
1196  case IrOpcode::kWord32Sar:
1197  case IrOpcode::kWord32Ror:
1198  case IrOpcode::kWord32Equal:
1199  case IrOpcode::kInt32Add:
1200  case IrOpcode::kInt32Sub:
1201  case IrOpcode::kInt32Mul:
1202  case IrOpcode::kInt32MulHigh:
1203  case IrOpcode::kInt32Div:
1204  case IrOpcode::kInt32LessThan:
1205  case IrOpcode::kInt32LessThanOrEqual:
1206  case IrOpcode::kInt32Mod:
1207  case IrOpcode::kUint32Div:
1208  case IrOpcode::kUint32LessThan:
1209  case IrOpcode::kUint32LessThanOrEqual:
1210  case IrOpcode::kUint32Mod:
1211  case IrOpcode::kUint32MulHigh:
1212  // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
1213  // zero-extension is a no-op.
1214  return true;
1215  case IrOpcode::kProjection: {
1216  Node* const value = node->InputAt(0);
1217  switch (value->opcode()) {
1218  case IrOpcode::kInt32AddWithOverflow:
1219  case IrOpcode::kInt32SubWithOverflow:
1220  case IrOpcode::kInt32MulWithOverflow:
1221  return true;
1222  default:
1223  return false;
1224  }
1225  }
1226  case IrOpcode::kLoad:
1227  case IrOpcode::kPoisonedLoad: {
1228  // The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
1229  // zero-extend to 64-bit on x64, so the zero-extension is a no-op.
1230  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1231  switch (load_rep.representation()) {
1232  case MachineRepresentation::kWord8:
1233  case MachineRepresentation::kWord16:
1234  case MachineRepresentation::kWord32:
1235  return true;
1236  default:
1237  return false;
1238  }
1239  }
1240  default:
1241  return false;
1242  }
1243 }
1244 
1245 } // namespace
1246 
1247 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1248  X64OperandGenerator g(this);
1249  Node* value = node->InputAt(0);
1250  if (ZeroExtendsWord32ToWord64(value)) {
1251  // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
1252  // zero-extension is a no-op.
1253  return EmitIdentity(node);
1254  }
1255  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
1256 }
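// Note: every 32-bit x64 instruction writes zeros into the upper 32 bits of
// its destination register, so when the input is produced by one of the
// operations listed in ZeroExtendsWord32ToWord64 the conversion costs nothing
// and is emitted as an identity; otherwise an explicit movl performs the
// zero extension.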
1257 
1258 namespace {
1259 
1260 void VisitRO(InstructionSelector* selector, Node* node,
1261  InstructionCode opcode) {
1262  X64OperandGenerator g(selector);
1263  selector->Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
1264 }
1265 
1266 void VisitRR(InstructionSelector* selector, Node* node,
1267  InstructionCode opcode) {
1268  X64OperandGenerator g(selector);
1269  selector->Emit(opcode, g.DefineAsRegister(node),
1270  g.UseRegister(node->InputAt(0)));
1271 }
1272 
1273 void VisitRRO(InstructionSelector* selector, Node* node,
1274  InstructionCode opcode) {
1275  X64OperandGenerator g(selector);
1276  selector->Emit(opcode, g.DefineSameAsFirst(node),
1277  g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
1278 }
1279 
1280 void VisitFloatBinop(InstructionSelector* selector, Node* node,
1281  ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
1282  X64OperandGenerator g(selector);
1283  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
1284  InstructionOperand operand1 = g.Use(node->InputAt(1));
1285  if (selector->IsSupported(AVX)) {
1286  selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
1287  } else {
1288  selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
1289  }
1290 }
1291 
1292 void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
1293  ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
1294  X64OperandGenerator g(selector);
1295  if (selector->IsSupported(AVX)) {
1296  selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
1297  } else {
1298  selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
1299  }
1300 }
1301 
1302 } // namespace
1303 
1304 #define RO_OP_LIST(V) \
1305  V(Word64Clz, kX64Lzcnt) \
1306  V(Word32Clz, kX64Lzcnt32) \
1307  V(Word64Ctz, kX64Tzcnt) \
1308  V(Word32Ctz, kX64Tzcnt32) \
1309  V(Word64Popcnt, kX64Popcnt) \
1310  V(Word32Popcnt, kX64Popcnt32) \
1311  V(Float64Sqrt, kSSEFloat64Sqrt) \
1312  V(Float32Sqrt, kSSEFloat32Sqrt) \
1313  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
1314  V(ChangeFloat64ToInt64, kSSEFloat64ToInt64) \
1315  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
1316  V(TruncateFloat64ToInt64, kSSEFloat64ToInt64) \
1317  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
1318  V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
1319  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
1320  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
1321  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
1322  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
1323  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
1324  V(ChangeInt64ToFloat64, kSSEInt64ToFloat64) \
1325  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64) \
1326  V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
1327  V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
1328  V(RoundInt64ToFloat32, kSSEInt64ToFloat32) \
1329  V(RoundUint64ToFloat32, kSSEUint64ToFloat32) \
1330  V(RoundInt64ToFloat64, kSSEInt64ToFloat64) \
1331  V(RoundUint64ToFloat64, kSSEUint64ToFloat64) \
1332  V(RoundUint32ToFloat32, kSSEUint32ToFloat32) \
1333  V(BitcastFloat32ToInt32, kX64BitcastFI) \
1334  V(BitcastFloat64ToInt64, kX64BitcastDL) \
1335  V(BitcastInt32ToFloat32, kX64BitcastIF) \
1336  V(BitcastInt64ToFloat64, kX64BitcastLD) \
1337  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
1338  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
1339  V(SignExtendWord8ToInt32, kX64Movsxbl) \
1340  V(SignExtendWord16ToInt32, kX64Movsxwl) \
1341  V(SignExtendWord8ToInt64, kX64Movsxbq) \
1342  V(SignExtendWord16ToInt64, kX64Movsxwq) \
1343  V(SignExtendWord32ToInt64, kX64Movsxlq)
1344 
1345 #define RR_OP_LIST(V) \
1346  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
1347  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
1348  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
1349  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
1350  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
1351  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
1352  V(Float32RoundTiesEven, \
1353  kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
1354  V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
1355 
1356 #define RO_VISITOR(Name, opcode) \
1357  void InstructionSelector::Visit##Name(Node* node) { \
1358  VisitRO(this, node, opcode); \
1359  }
1360 RO_OP_LIST(RO_VISITOR)
1361 #undef RO_VISITOR
1362 #undef RO_OP_LIST
1363 
1364 #define RR_VISITOR(Name, opcode) \
1365  void InstructionSelector::Visit##Name(Node* node) { \
1366  VisitRR(this, node, opcode); \
1367  }
1368 RR_OP_LIST(RR_VISITOR)
1369 #undef RR_VISITOR
1370 #undef RR_OP_LIST
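// Note: the RO/RR lists above expand into one-line visitors; for example the
// V(Word64Clz, kX64Lzcnt) entry generates:
//
//   void InstructionSelector::VisitWord64Clz(Node* node) {
//     VisitRO(this, node, kX64Lzcnt);
//   }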
1371 
1372 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
1373  VisitRR(this, node, kArchTruncateDoubleToI);
1374 }
1375 
1376 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1377  X64OperandGenerator g(this);
1378  Node* value = node->InputAt(0);
1379  if (CanCover(node, value)) {
1380  switch (value->opcode()) {
1381  case IrOpcode::kWord64Sar:
1382  case IrOpcode::kWord64Shr: {
1383  Int64BinopMatcher m(value);
1384  if (m.right().Is(32)) {
1385  if (CanCoverTransitively(node, value, value->InputAt(0)) &&
1386  TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
1387  return EmitIdentity(node);
1388  }
1389  Emit(kX64Shr, g.DefineSameAsFirst(node),
1390  g.UseRegister(m.left().node()), g.TempImmediate(32));
1391  return;
1392  }
1393  break;
1394  }
1395  case IrOpcode::kLoad: {
1396  if (TryMergeTruncateInt64ToInt32IntoLoad(this, node, value)) {
1397  return;
1398  }
1399  break;
1400  }
1401  default:
1402  break;
1403  }
1404  }
1405  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
1406 }
1407 
1408 void InstructionSelector::VisitFloat32Add(Node* node) {
1409  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
1410 }
1411 
1412 void InstructionSelector::VisitFloat32Sub(Node* node) {
1413  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
1414 }
1415 
1416 void InstructionSelector::VisitFloat32Mul(Node* node) {
1417  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
1418 }
1419 
1420 void InstructionSelector::VisitFloat32Div(Node* node) {
1421  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
1422 }
1423 
1424 void InstructionSelector::VisitFloat32Abs(Node* node) {
1425  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
1426 }
1427 
1428 void InstructionSelector::VisitFloat32Max(Node* node) {
1429  VisitRRO(this, node, kSSEFloat32Max);
1430 }
1431 
1432 void InstructionSelector::VisitFloat32Min(Node* node) {
1433  VisitRRO(this, node, kSSEFloat32Min);
1434 }
1435 
1436 void InstructionSelector::VisitFloat64Add(Node* node) {
1437  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
1438 }
1439 
1440 void InstructionSelector::VisitFloat64Sub(Node* node) {
1441  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
1442 }
1443 
1444 void InstructionSelector::VisitFloat64Mul(Node* node) {
1445  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
1446 }
1447 
1448 void InstructionSelector::VisitFloat64Div(Node* node) {
1449  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
1450 }
1451 
1452 void InstructionSelector::VisitFloat64Mod(Node* node) {
1453  X64OperandGenerator g(this);
1454  InstructionOperand temps[] = {g.TempRegister(rax)};
1455  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
1456  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
1457  temps);
1458 }
1459 
1460 void InstructionSelector::VisitFloat64Max(Node* node) {
1461  VisitRRO(this, node, kSSEFloat64Max);
1462 }
1463 
1464 void InstructionSelector::VisitFloat64Min(Node* node) {
1465  VisitRRO(this, node, kSSEFloat64Min);
1466 }
1467 
1468 void InstructionSelector::VisitFloat64Abs(Node* node) {
1469  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
1470 }
1471 
1472 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1473  UNREACHABLE();
1474 }
1475 
1476 void InstructionSelector::VisitFloat32Neg(Node* node) {
1477  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
1478 }
1479 
1480 void InstructionSelector::VisitFloat64Neg(Node* node) {
1481  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
1482 }
1483 
1484 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1485  InstructionCode opcode) {
1486  X64OperandGenerator g(this);
1487  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
1488  g.UseFixed(node->InputAt(1), xmm1))
1489  ->MarkAsCall();
1490 }
1491 
1492 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1493  InstructionCode opcode) {
1494  X64OperandGenerator g(this);
1495  Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
1496  ->MarkAsCall();
1497 }
1498 
1499 void InstructionSelector::EmitPrepareArguments(
1500  ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1501  Node* node) {
1502  X64OperandGenerator g(this);
1503 
1504  // Prepare for C function call.
1505  if (call_descriptor->IsCFunctionCall()) {
1506  Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1507  call_descriptor->ParameterCount())),
1508  0, nullptr, 0, nullptr);
1509 
1510  // Poke any stack arguments.
1511  for (size_t n = 0; n < arguments->size(); ++n) {
1512  PushParameter input = (*arguments)[n];
1513  if (input.node) {
1514  int slot = static_cast<int>(n);
1515  InstructionOperand value = g.CanBeImmediate(input.node)
1516  ? g.UseImmediate(input.node)
1517  : g.UseRegister(input.node);
1518  Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
1519  }
1520  }
1521  } else {
1522  // Push any stack arguments.
1523  int effect_level = GetEffectLevel(node);
1524  for (PushParameter input : base::Reversed(*arguments)) {
1525  // Skip any alignment holes in pushed nodes. We may have one in case of a
1526  // Simd128 stack argument.
1527  if (input.node == nullptr) continue;
1528  if (g.CanBeImmediate(input.node)) {
1529  Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
1530  } else if (IsSupported(ATOM) ||
1531  sequence()->IsFP(GetVirtualRegister(input.node))) {
1532  // TODO(titzer): X64Push cannot handle stack->stack double moves
1533  // because there is no way to encode fixed double slots.
1534  Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
1535  } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
1536  effect_level)) {
1537  InstructionOperand outputs[1];
1538  InstructionOperand inputs[4];
1539  size_t input_count = 0;
1540  InstructionCode opcode = kX64Push;
1541  AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
1542  input.node, inputs, &input_count);
1543  opcode |= AddressingModeField::encode(mode);
1544  Emit(opcode, 0, outputs, input_count, inputs);
1545  } else {
1546  Emit(kX64Push, g.NoOutput(), g.UseAny(input.node));
1547  }
1548  }
1549  }
1550 }
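// Note: for C function calls the arguments are written into their fixed
// stack slots with kX64Poke after kArchPrepareCallCFunction reserves the
// space; for other calls they are pushed in reverse order, and an argument
// that is a covered load can be pushed directly from memory (push with a
// memory operand) instead of being materialized in a register first.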
1551 
1552 void InstructionSelector::EmitPrepareResults(
1553  ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1554  Node* node) {
1555  X64OperandGenerator g(this);
1556 
1557  int reverse_slot = 0;
1558  for (PushParameter output : *results) {
1559  if (!output.location.IsCallerFrameSlot()) continue;
1560  reverse_slot += output.location.GetSizeInPointers();
1561  // Skip any alignment holes in nodes.
1562  if (output.node == nullptr) continue;
1563  DCHECK(!call_descriptor->IsCFunctionCall());
1564  if (output.location.GetType() == MachineType::Float32()) {
1565  MarkAsFloat32(output.node);
1566  } else if (output.location.GetType() == MachineType::Float64()) {
1567  MarkAsFloat64(output.node);
1568  }
1569  InstructionOperand result = g.DefineAsRegister(output.node);
1570  InstructionOperand slot = g.UseImmediate(reverse_slot);
1571  Emit(kX64Peek, 1, &result, 1, &slot);
1572  }
1573 }
1574 
1575 bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
1576 
1577 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1578 
1579 namespace {
1580 
1581 void VisitCompareWithMemoryOperand(InstructionSelector* selector,
1582  InstructionCode opcode, Node* left,
1583  InstructionOperand right,
1584  FlagsContinuation* cont) {
1585  DCHECK_EQ(IrOpcode::kLoad, left->opcode());
1586  X64OperandGenerator g(selector);
1587  size_t input_count = 0;
1588  InstructionOperand inputs[4];
1589  AddressingMode addressing_mode =
1590  g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
1591  opcode |= AddressingModeField::encode(addressing_mode);
1592  inputs[input_count++] = right;
1593 
1594  selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
1595 }
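// VisitCompareWithMemoryOperand folds the load that produced the left-hand
// side into the comparison itself, so e.g. Word32Equal(Load[base+8], x) can
// be selected as a single "cmpl [base+8], x"-style instruction instead of a
// separate load followed by a register compare; the addressing mode comes
// from GetEffectiveAddressMemoryOperand.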
1596 
1597 // Shared routine for multiple compare operations.
1598 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1599  InstructionOperand left, InstructionOperand right,
1600  FlagsContinuation* cont) {
1601  selector->EmitWithContinuation(opcode, left, right, cont);
1602 }
1603 
1604 // Shared routine for multiple compare operations.
1605 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1606  Node* left, Node* right, FlagsContinuation* cont,
1607  bool commutative) {
1608  X64OperandGenerator g(selector);
1609  if (commutative && g.CanBeBetterLeftOperand(right)) {
1610  std::swap(left, right);
1611  }
1612  VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
1613 }
1614 
1615 MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
1616  if (hint_node->opcode() == IrOpcode::kLoad) {
1617  MachineType hint = LoadRepresentationOf(hint_node->op());
1618  if (node->opcode() == IrOpcode::kInt32Constant ||
1619  node->opcode() == IrOpcode::kInt64Constant) {
1620  int64_t constant = node->opcode() == IrOpcode::kInt32Constant
1621  ? OpParameter<int32_t>(node->op())
1622  : OpParameter<int64_t>(node->op());
1623  if (hint == MachineType::Int8()) {
1624  if (constant >= std::numeric_limits<int8_t>::min() &&
1625  constant <= std::numeric_limits<int8_t>::max()) {
1626  return hint;
1627  }
1628  } else if (hint == MachineType::Uint8()) {
1629  if (constant >= std::numeric_limits<uint8_t>::min() &&
1630  constant <= std::numeric_limits<uint8_t>::max()) {
1631  return hint;
1632  }
1633  } else if (hint == MachineType::Int16()) {
1634  if (constant >= std::numeric_limits<int16_t>::min() &&
1635  constant <= std::numeric_limits<int16_t>::max()) {
1636  return hint;
1637  }
1638  } else if (hint == MachineType::Uint16()) {
1639  if (constant >= std::numeric_limits<uint16_t>::min() &&
1640  constant <= std::numeric_limits<uint16_t>::max()) {
1641  return hint;
1642  }
1643  } else if (hint == MachineType::Int32()) {
1644  return hint;
1645  } else if (hint == MachineType::Uint32()) {
1646  if (constant >= 0) return hint;
1647  }
1648  }
1649  }
1650  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
1651  : MachineType::None();
1652 }
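// MachineTypeForNarrow answers "what is the narrowest type this operand can
// be compared at?": for a load it is the loaded type, and for a constant it
// is the other operand's load type, provided the constant fits that type's
// range. For example, comparing a Load<Int8> against the constant 5 can be
// narrowed, while comparing it against 1000 cannot.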
1653 
1654 // Tries to match the size of the given opcode to that of the operands, if
1655 // possible.
1656 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
1657  Node* right, FlagsContinuation* cont) {
1658  // TODO(epertoso): we can probably get some size information out of phi nodes.
1659  // If the load representations don't match, both operands will be
1660  // zero/sign-extended to 32bit.
1661  MachineType left_type = MachineTypeForNarrow(left, right);
1662  MachineType right_type = MachineTypeForNarrow(right, left);
1663  if (left_type == right_type) {
1664  switch (left_type.representation()) {
1665  case MachineRepresentation::kBit:
1666  case MachineRepresentation::kWord8: {
1667  if (opcode == kX64Test32) return kX64Test8;
1668  if (opcode == kX64Cmp32) {
1669  if (left_type.semantic() == MachineSemantic::kUint32) {
1670  cont->OverwriteUnsignedIfSigned();
1671  } else {
1672  CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
1673  }
1674  return kX64Cmp8;
1675  }
1676  break;
1677  }
1678  case MachineRepresentation::kWord16:
1679  if (opcode == kX64Test32) return kX64Test16;
1680  if (opcode == kX64Cmp32) {
1681  if (left_type.semantic() == MachineSemantic::kUint32) {
1682  cont->OverwriteUnsignedIfSigned();
1683  } else {
1684  CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
1685  }
1686  return kX64Cmp16;
1687  }
1688  break;
1689  default:
1690  break;
1691  }
1692  }
1693  return opcode;
1694 }
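// For illustration: if both inputs of a kX64Cmp32 are Uint8 loads (or
// constants in Uint8 range), TryNarrowOpcodeSize rewrites it to kX64Cmp8 and
// switches a signed continuation to its unsigned counterpart, so the emitted
// compare is byte-sized (roughly a "cmpb" with a one-byte immediate) rather
// than a full 32-bit compare.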
1695 
1696 // Shared routine for multiple word compare operations.
1697 void VisitWordCompare(InstructionSelector* selector, Node* node,
1698  InstructionCode opcode, FlagsContinuation* cont) {
1699  X64OperandGenerator g(selector);
1700  Node* left = node->InputAt(0);
1701  Node* right = node->InputAt(1);
1702 
1703  // The 32-bit comparisons automatically truncate Word64
1704  // values to Word32 range, so there is no need to do that explicitly.
1705  if (opcode == kX64Cmp32 || opcode == kX64Test32) {
1706  if (left->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
1707  selector->CanCover(node, left)) {
1708  left = left->InputAt(0);
1709  }
1710 
1711  if (right->opcode() == IrOpcode::kTruncateInt64ToInt32 &&
1712  selector->CanCover(node, right)) {
1713  right = right->InputAt(0);
1714  }
1715  }
1716 
1717  opcode = TryNarrowOpcodeSize(opcode, left, right, cont);
1718 
1719  // If one of the two inputs is an immediate, make sure it's on the right, or
1720  // if one of the two inputs is a memory operand, make sure it's on the left.
1721  int effect_level = selector->GetEffectLevel(node);
1722  if (cont->IsBranch()) {
1723  effect_level = selector->GetEffectLevel(
1724  cont->true_block()->PredecessorAt(0)->control_input());
1725  }
1726 
1727  if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
1728  (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
1729  !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
1730  if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1731  std::swap(left, right);
1732  }
1733 
1734  // Match immediates on right side of comparison.
1735  if (g.CanBeImmediate(right)) {
1736  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
1737  return VisitCompareWithMemoryOperand(selector, opcode, left,
1738  g.UseImmediate(right), cont);
1739  }
1740  return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
1741  cont);
1742  }
1743 
1744  // Match memory operands on left side of comparison.
1745  if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
1746  return VisitCompareWithMemoryOperand(selector, opcode, left,
1747  g.UseRegister(right), cont);
1748  }
1749 
1750  return VisitCompare(selector, opcode, left, right, cont,
1751  node->op()->HasProperty(Operator::kCommutative));
1752 }
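// The net effect of VisitWordCompare is the canonical x64 operand placement:
// immediates end up on the right ("cmp reg, imm" or "cmp [mem], imm"),
// memory operands end up folded on the left, and the continuation is
// commuted whenever swapping the inputs would change the meaning of a
// non-commutative comparison such as Int32LessThan.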
1753 
1754 // Shared routine for 64-bit word comparison operations.
1755 void VisitWord64Compare(InstructionSelector* selector, Node* node,
1756  FlagsContinuation* cont) {
1757  X64OperandGenerator g(selector);
1758  if (selector->CanUseRootsRegister()) {
1759  const RootsTable& roots_table = selector->isolate()->roots_table();
1760  RootIndex root_index;
1761  HeapObjectBinopMatcher m(node);
1762  if (m.right().HasValue() &&
1763  roots_table.IsRootHandle(m.right().Value(), &root_index)) {
1764  if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1765  InstructionCode opcode =
1766  kX64Cmp | AddressingModeField::encode(kMode_Root);
1767  return VisitCompare(
1768  selector, opcode,
1769  g.TempImmediate(
1770  TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
1771  g.UseRegister(m.left().node()), cont);
1772  } else if (m.left().HasValue() &&
1773  roots_table.IsRootHandle(m.left().Value(), &root_index)) {
1774  InstructionCode opcode =
1775  kX64Cmp | AddressingModeField::encode(kMode_Root);
1776  return VisitCompare(
1777  selector, opcode,
1778  g.TempImmediate(
1779  TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)),
1780  g.UseRegister(m.right().node()), cont);
1781  }
1782  }
1783  StackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> m(
1784  selector->isolate(), node);
1785  if (m.Matched()) {
1786  // Compare(Load(js_stack_limit), LoadStackPointer)
1787  if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
1788  InstructionCode opcode = cont->Encode(kX64StackCheck);
1789  CHECK(cont->IsBranch());
1790  selector->EmitWithContinuation(opcode, cont);
1791  return;
1792  }
1793  WasmStackCheckMatcher<Int64BinopMatcher, IrOpcode::kUint64LessThan> wasm_m(
1794  node);
1795  if (wasm_m.Matched()) {
1796  // This is a wasm stack check. By structure, we know that we can use the
1797  // stack pointer directly, as wasm code does not modify the stack at points
1798  // where stack checks are performed.
1799  Node* left = node->InputAt(0);
1800  LocationOperand rsp(InstructionOperand::EXPLICIT, LocationOperand::REGISTER,
1801  InstructionSequence::DefaultRepresentation(),
1802  RegisterCode::kRegCode_rsp);
1803  return VisitCompareWithMemoryOperand(selector, kX64Cmp, left, rsp, cont);
1804  }
1805  VisitWordCompare(selector, node, kX64Cmp, cont);
1806 }
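// VisitWord64Compare tries three special patterns before the generic
// kX64Cmp: comparisons against roots (a kMode_Root operand addressed off the
// root register, so the heap constant never needs to be materialized), JS
// stack checks (kX64StackCheck compares the stack limit against rsp), and
// wasm stack checks (a memory compare against an explicit rsp operand).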
1807 
1808 // Shared routine for comparison with zero.
1809 void VisitCompareZero(InstructionSelector* selector, Node* user, Node* node,
1810  InstructionCode opcode, FlagsContinuation* cont) {
1811  X64OperandGenerator g(selector);
1812  if (cont->IsBranch() &&
1813  (cont->condition() == kNotEqual || cont->condition() == kEqual)) {
1814  switch (node->opcode()) {
1815 #define FLAGS_SET_BINOP_LIST(V) \
1816  V(kInt32Add, VisitBinop, kX64Add32) \
1817  V(kInt32Sub, VisitBinop, kX64Sub32) \
1818  V(kWord32And, VisitBinop, kX64And32) \
1819  V(kWord32Or, VisitBinop, kX64Or32) \
1820  V(kInt64Add, VisitBinop, kX64Add) \
1821  V(kInt64Sub, VisitBinop, kX64Sub) \
1822  V(kWord64And, VisitBinop, kX64And) \
1823  V(kWord64Or, VisitBinop, kX64Or)
1824 #define FLAGS_SET_BINOP(opcode, Visit, archOpcode) \
1825  case IrOpcode::opcode: \
1826  if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \
1827  return Visit(selector, node, archOpcode, cont); \
1828  } \
1829  break;
1830  FLAGS_SET_BINOP_LIST(FLAGS_SET_BINOP)
1831 #undef FLAGS_SET_BINOP_LIST
1832 #undef FLAGS_SET_BINOP
1833 
1834 #define TRY_VISIT_WORD32_SHIFT TryVisitWordShift<Int32BinopMatcher, 32>
1835 #define TRY_VISIT_WORD64_SHIFT TryVisitWordShift<Int64BinopMatcher, 64>
1836 // Skip Word64Sar/Word32Sar since there is no instruction reduction in most cases.
1837 #define FLAGS_SET_SHIFT_LIST(V) \
1838  V(kWord32Shl, TRY_VISIT_WORD32_SHIFT, kX64Shl32) \
1839  V(kWord32Shr, TRY_VISIT_WORD32_SHIFT, kX64Shr32) \
1840  V(kWord64Shl, TRY_VISIT_WORD64_SHIFT, kX64Shl) \
1841  V(kWord64Shr, TRY_VISIT_WORD64_SHIFT, kX64Shr)
1842 #define FLAGS_SET_SHIFT(opcode, TryVisit, archOpcode) \
1843  case IrOpcode::opcode: \
1844  if (selector->IsOnlyUserOfNodeInSameBlock(user, node)) { \
1845  if (TryVisit(selector, node, archOpcode, cont)) return; \
1846  } \
1847  break;
1848  FLAGS_SET_SHIFT_LIST(FLAGS_SET_SHIFT)
1849 #undef TRY_VISIT_WORD32_SHIFT
1850 #undef TRY_VISIT_WORD64_SHIFT
1851 #undef FLAGS_SET_SHIFT_LIST
1852 #undef FLAGS_SET_SHIFT
1853  default:
1854  break;
1855  }
1856  }
1857  int effect_level = selector->GetEffectLevel(node);
1858  if (cont->IsBranch()) {
1859  effect_level = selector->GetEffectLevel(
1860  cont->true_block()->PredecessorAt(0)->control_input());
1861  }
1862  if (node->opcode() == IrOpcode::kLoad) {
1863  switch (LoadRepresentationOf(node->op()).representation()) {
1864  case MachineRepresentation::kWord8:
1865  if (opcode == kX64Cmp32) {
1866  opcode = kX64Cmp8;
1867  } else if (opcode == kX64Test32) {
1868  opcode = kX64Test8;
1869  }
1870  break;
1871  case MachineRepresentation::kWord16:
1872  if (opcode == kX64Cmp32) {
1873  opcode = kX64Cmp16;
1874  } else if (opcode == kX64Test32) {
1875  opcode = kX64Test16;
1876  }
1877  break;
1878  default:
1879  break;
1880  }
1881  }
1882  if (g.CanBeMemoryOperand(opcode, user, node, effect_level)) {
1883  VisitCompareWithMemoryOperand(selector, opcode, node, g.TempImmediate(0),
1884  cont);
1885  } else {
1886  VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
1887  }
1888 }
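// For illustration: a branch on the value of an Int32Add (i.e. on
// "(a + b) != 0") with an equal/not-equal continuation can reuse the flags
// set by the add itself via the FLAGS_SET_BINOP table, provided the add has
// no other users in the block; otherwise VisitCompareZero falls back to an
// explicit compare or test against zero, narrowed to 8 or 16 bits when the
// operand is a byte or word load.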
1889 
1890 // Shared routine for multiple float32 compare operations (inputs commuted).
1891 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1892  FlagsContinuation* cont) {
1893  Node* const left = node->InputAt(0);
1894  Node* const right = node->InputAt(1);
1895  InstructionCode const opcode =
1896  selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
1897  VisitCompare(selector, opcode, right, left, cont, false);
1898 }
1899 
1900 // Shared routine for multiple float64 compare operations (inputs commuted).
1901 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1902  FlagsContinuation* cont) {
1903  Node* const left = node->InputAt(0);
1904  Node* const right = node->InputAt(1);
1905  InstructionCode const opcode =
1906  selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
1907  VisitCompare(selector, opcode, right, left, cont, false);
1908 }
1909 
1910 // Shared routine for Word32/Word64 Atomic Binops
1911 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
1912  ArchOpcode opcode) {
1913  X64OperandGenerator g(selector);
1914  Node* base = node->InputAt(0);
1915  Node* index = node->InputAt(1);
1916  Node* value = node->InputAt(2);
1917  AddressingMode addressing_mode;
1918  InstructionOperand inputs[] = {
1919  g.UseUniqueRegister(value), g.UseUniqueRegister(base),
1920  g.GetEffectiveIndexOperand(index, &addressing_mode)};
1921  InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
1922  InstructionOperand temps[] = {g.TempRegister()};
1923  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1924  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
1925  arraysize(temps), temps);
1926 }
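// Sketch of the constraints above: a generic atomic binop defines its result
// in rax and reserves one scratch register, which is the shape a
// "lock cmpxchg" retry loop needs (rax holds the value last read from
// memory, the temp holds the updated value); value, base and index are kept
// in their own registers so the loop can re-read memory without clobbering
// its inputs.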
1927 
1928 // Shared routine for Word32/Word64 Atomic CmpExchg
1929 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
1930  ArchOpcode opcode) {
1931  X64OperandGenerator g(selector);
1932  Node* base = node->InputAt(0);
1933  Node* index = node->InputAt(1);
1934  Node* old_value = node->InputAt(2);
1935  Node* new_value = node->InputAt(3);
1936  AddressingMode addressing_mode;
1937  InstructionOperand inputs[] = {
1938  g.UseFixed(old_value, rax), g.UseUniqueRegister(new_value),
1939  g.UseUniqueRegister(base),
1940  g.GetEffectiveIndexOperand(index, &addressing_mode)};
1941  InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
1942  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1943  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
1944 }
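// The operand placement above mirrors the x64 "lock cmpxchg" instruction:
// the expected (old) value must be in rax and the value observed in memory
// comes back in rax, while the new value and the address components only
// need unique registers.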
1945 
1946 // Shared routine for Word32/Word64 Atomic Exchange
1947 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
1948  ArchOpcode opcode) {
1949  X64OperandGenerator g(selector);
1950  Node* base = node->InputAt(0);
1951  Node* index = node->InputAt(1);
1952  Node* value = node->InputAt(2);
1953  AddressingMode addressing_mode;
1954  InstructionOperand inputs[] = {
1955  g.UseUniqueRegister(value), g.UseUniqueRegister(base),
1956  g.GetEffectiveIndexOperand(index, &addressing_mode)};
1957  InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
1958  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1959  selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
1960 }
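// Atomic exchange maps to the x64 "xchg" instruction, which is implicitly
// locked when one operand is memory; the output is defined same-as-first
// because xchg leaves the previous memory contents in the register that
// supplied the new value.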
1961 
1962 } // namespace
1963 
1964 // Shared routine for word comparison against zero.
1965 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
1966  FlagsContinuation* cont) {
1967  // Try to combine with comparisons against 0 by simply inverting the branch.
1968  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
1969  Int32BinopMatcher m(value);
1970  if (!m.right().Is(0)) break;
1971 
1972  user = value;
1973  value = m.left().node();
1974  cont->Negate();
1975  }
1976 
1977  if (CanCover(user, value)) {
1978  switch (value->opcode()) {
1979  case IrOpcode::kWord32Equal:
1980  cont->OverwriteAndNegateIfEqual(kEqual);
1981  return VisitWordCompare(this, value, kX64Cmp32, cont);
1982  case IrOpcode::kInt32LessThan:
1983  cont->OverwriteAndNegateIfEqual(kSignedLessThan);
1984  return VisitWordCompare(this, value, kX64Cmp32, cont);
1985  case IrOpcode::kInt32LessThanOrEqual:
1986  cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
1987  return VisitWordCompare(this, value, kX64Cmp32, cont);
1988  case IrOpcode::kUint32LessThan:
1989  cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
1990  return VisitWordCompare(this, value, kX64Cmp32, cont);
1991  case IrOpcode::kUint32LessThanOrEqual:
1992  cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
1993  return VisitWordCompare(this, value, kX64Cmp32, cont);
1994  case IrOpcode::kWord64Equal: {
1995  cont->OverwriteAndNegateIfEqual(kEqual);
1996  Int64BinopMatcher m(value);
1997  if (m.right().Is(0)) {
1998  // Try to combine the branch with a comparison.
1999  Node* const user = m.node();
2000  Node* const value = m.left().node();
2001  if (CanCover(user, value)) {
2002  switch (value->opcode()) {
2003  case IrOpcode::kInt64Sub:
2004  return VisitWord64Compare(this, value, cont);
2005  case IrOpcode::kWord64And:
2006  return VisitWordCompare(this, value, kX64Test, cont);
2007  default:
2008  break;
2009  }
2010  }
2011  return VisitCompareZero(this, user, value, kX64Cmp, cont);
2012  }
2013  return VisitWord64Compare(this, value, cont);
2014  }
2015  case IrOpcode::kInt64LessThan:
2016  cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2017  return VisitWord64Compare(this, value, cont);
2018  case IrOpcode::kInt64LessThanOrEqual:
2019  cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2020  return VisitWord64Compare(this, value, cont);
2021  case IrOpcode::kUint64LessThan:
2022  cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2023  return VisitWord64Compare(this, value, cont);
2024  case IrOpcode::kUint64LessThanOrEqual:
2025  cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2026  return VisitWord64Compare(this, value, cont);
2027  case IrOpcode::kFloat32Equal:
2028  cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
2029  return VisitFloat32Compare(this, value, cont);
2030  case IrOpcode::kFloat32LessThan:
2031  cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
2032  return VisitFloat32Compare(this, value, cont);
2033  case IrOpcode::kFloat32LessThanOrEqual:
2034  cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
2035  return VisitFloat32Compare(this, value, cont);
2036  case IrOpcode::kFloat64Equal:
2037  cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
2038  return VisitFloat64Compare(this, value, cont);
2039  case IrOpcode::kFloat64LessThan: {
2040  Float64BinopMatcher m(value);
2041  if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
2042  // This matches the pattern
2043  //
2044  // Float64LessThan(#0.0, Float64Abs(x))
2045  //
2046  // which TurboFan generates for NumberToBoolean in the general case,
2047  // and which evaluates to false if x is 0, -0 or NaN. We can compile
2048  // this to a simple (v)ucomisd using not_equal flags condition, which
2049  // avoids the costly Float64Abs.
2050  cont->OverwriteAndNegateIfEqual(kNotEqual);
2051  InstructionCode const opcode =
2052  IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
2053  return VisitCompare(this, opcode, m.left().node(),
2054  m.right().InputAt(0), cont, false);
2055  }
2056  cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
2057  return VisitFloat64Compare(this, value, cont);
2058  }
2059  case IrOpcode::kFloat64LessThanOrEqual:
2060  cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
2061  return VisitFloat64Compare(this, value, cont);
2062  case IrOpcode::kProjection:
2063  // Check if this is the overflow output projection of an
2064  // <Operation>WithOverflow node.
2065  if (ProjectionIndexOf(value->op()) == 1u) {
2066  // We cannot combine the <Operation>WithOverflow with this branch
2067  // unless the 0th projection (the use of the actual value of the
2068  // <Operation>) is either nullptr, which means there's no use of the
2069  // actual value, or was already defined, which means it is scheduled
2070  // *AFTER* this branch.
2071  Node* const node = value->InputAt(0);
2072  Node* const result = NodeProperties::FindProjection(node, 0);
2073  if (result == nullptr || IsDefined(result)) {
2074  switch (node->opcode()) {
2075  case IrOpcode::kInt32AddWithOverflow:
2076  cont->OverwriteAndNegateIfEqual(kOverflow);
2077  return VisitBinop(this, node, kX64Add32, cont);
2078  case IrOpcode::kInt32SubWithOverflow:
2079  cont->OverwriteAndNegateIfEqual(kOverflow);
2080  return VisitBinop(this, node, kX64Sub32, cont);
2081  case IrOpcode::kInt32MulWithOverflow:
2082  cont->OverwriteAndNegateIfEqual(kOverflow);
2083  return VisitBinop(this, node, kX64Imul32, cont);
2084  case IrOpcode::kInt64AddWithOverflow:
2085  cont->OverwriteAndNegateIfEqual(kOverflow);
2086  return VisitBinop(this, node, kX64Add, cont);
2087  case IrOpcode::kInt64SubWithOverflow:
2088  cont->OverwriteAndNegateIfEqual(kOverflow);
2089  return VisitBinop(this, node, kX64Sub, cont);
2090  default:
2091  break;
2092  }
2093  }
2094  }
2095  break;
2096  case IrOpcode::kInt32Sub:
2097  return VisitWordCompare(this, value, kX64Cmp32, cont);
2098  case IrOpcode::kWord32And:
2099  return VisitWordCompare(this, value, kX64Test32, cont);
2100  default:
2101  break;
2102  }
2103  }
2104 
2105  // Branch could not be combined with a compare; emit a compare against 0.
2106  VisitCompareZero(this, user, value, kX64Cmp32, cont);
2107 }
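// For illustration: a chain such as Branch(Word32Equal(Word32Equal(x, 0), 0))
// is peeled by the loop at the top of VisitWordCompareZero, negating the
// continuation once per stripped Word32Equal, so the final code is just a
// test of x combined with the appropriately negated branch condition.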
2108 
2109 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
2110  X64OperandGenerator g(this);
2111  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
2112 
2113  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
2114  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
2115  static const size_t kMaxTableSwitchValueRange = 2 << 16;
2116  size_t table_space_cost = 4 + sw.value_range();
2117  size_t table_time_cost = 3;
2118  size_t lookup_space_cost = 3 + 2 * sw.case_count();
2119  size_t lookup_time_cost = sw.case_count();
2120  if (sw.case_count() > 4 &&
2121  table_space_cost + 3 * table_time_cost <=
2122  lookup_space_cost + 3 * lookup_time_cost &&
2123  sw.min_value() > std::numeric_limits<int32_t>::min() &&
2124  sw.value_range() <= kMaxTableSwitchValueRange) {
2125  InstructionOperand index_operand = g.TempRegister();
2126  if (sw.min_value()) {
2127  // The leal automatically zero extends, so the result is a valid 64-bit
2128  // index.
2129  Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
2130  value_operand, g.TempImmediate(-sw.min_value()));
2131  } else {
2132  // Zero extend, because we use it as 64-bit index into the jump table.
2133  // Zero extend, because we use it as a 64-bit index into the jump table.
2134  }
2135  // Generate a table lookup.
2136  return EmitTableSwitch(sw, index_operand);
2137  }
2138  }
2139 
2140  // Generate a tree of conditional jumps.
2141  return EmitBinarySearchSwitch(sw, value_operand);
2142 }
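// Worked example of the heuristic above (numbers are illustrative): a dense
// switch with 10 cases covering the range [0, 40) costs 4 + 40 = 44 space
// and 3 time units as a table versus 3 + 2 * 10 = 23 and 10 as a binary
// search, so 44 + 3 * 3 <= 23 + 3 * 10 holds and a jump table is emitted;
// the same 10 cases spread over a range of 1000 fail the comparison and fall
// back to the tree of conditional jumps.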
2143 
2144 void InstructionSelector::VisitWord32Equal(Node* const node) {
2145  Node* user = node;
2146  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2147  Int32BinopMatcher m(user);
2148  if (m.right().Is(0)) {
2149  return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2150  }
2151  VisitWordCompare(this, node, kX64Cmp32, &cont);
2152 }
2153 
2154 void InstructionSelector::VisitInt32LessThan(Node* node) {
2155  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2156  VisitWordCompare(this, node, kX64Cmp32, &cont);
2157 }
2158 
2159 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2160  FlagsContinuation cont =
2161  FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2162  VisitWordCompare(this, node, kX64Cmp32, &cont);
2163 }
2164 
2165 void InstructionSelector::VisitUint32LessThan(Node* node) {
2166  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2167  VisitWordCompare(this, node, kX64Cmp32, &cont);
2168 }
2169 
2170 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2171  FlagsContinuation cont =
2172  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2173  VisitWordCompare(this, node, kX64Cmp32, &cont);
2174 }
2175 
2176 void InstructionSelector::VisitWord64Equal(Node* const node) {
2177  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2178  Int64BinopMatcher m(node);
2179  if (m.right().Is(0)) {
2180  // Try to combine the equality check with a comparison.
2181  Node* const user = m.node();
2182  Node* const value = m.left().node();
2183  if (CanCover(user, value)) {
2184  switch (value->opcode()) {
2185  case IrOpcode::kInt64Sub:
2186  return VisitWord64Compare(this, value, &cont);
2187  case IrOpcode::kWord64And:
2188  return VisitWordCompare(this, value, kX64Test, &cont);
2189  default:
2190  break;
2191  }
2192  }
2193  }
2194  VisitWord64Compare(this, node, &cont);
2195 }
2196 
2197 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2198  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2199  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2200  return VisitBinop(this, node, kX64Add32, &cont);
2201  }
2202  FlagsContinuation cont;
2203  VisitBinop(this, node, kX64Add32, &cont);
2204 }
2205 
2206 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2207  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2208  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2209  return VisitBinop(this, node, kX64Sub32, &cont);
2210  }
2211  FlagsContinuation cont;
2212  VisitBinop(this, node, kX64Sub32, &cont);
2213 }
2214 
2215 void InstructionSelector::VisitInt64LessThan(Node* node) {
2216  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2217  VisitWord64Compare(this, node, &cont);
2218 }
2219 
2220 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2221  FlagsContinuation cont =
2222  FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2223  VisitWord64Compare(this, node, &cont);
2224 }
2225 
2226 void InstructionSelector::VisitUint64LessThan(Node* node) {
2227  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2228  VisitWord64Compare(this, node, &cont);
2229 }
2230 
2231 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2232  FlagsContinuation cont =
2233  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2234  VisitWord64Compare(this, node, &cont);
2235 }
2236 
2237 void InstructionSelector::VisitFloat32Equal(Node* node) {
2238  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
2239  VisitFloat32Compare(this, node, &cont);
2240 }
2241 
2242 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2243  FlagsContinuation cont =
2244  FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
2245  VisitFloat32Compare(this, node, &cont);
2246 }
2247 
2248 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2249  FlagsContinuation cont =
2250  FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
2251  VisitFloat32Compare(this, node, &cont);
2252 }
2253 
2254 void InstructionSelector::VisitFloat64Equal(Node* node) {
2255  FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
2256  VisitFloat64Compare(this, node, &cont);
2257 }
2258 
2259 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2260  Float64BinopMatcher m(node);
2261  if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
2262  // This matches the pattern
2263  //
2264  // Float64LessThan(#0.0, Float64Abs(x))
2265  //
2266  // which TurboFan generates for NumberToBoolean in the general case,
2267  // and which evaluates to false if x is 0, -0 or NaN. We can compile
2268  // this to a simple (v)ucomisd using not_equal flags condition, which
2269  // avoids the costly Float64Abs.
2270  FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
2271  InstructionCode const opcode =
2272  IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
2273  return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
2274  &cont, false);
2275  }
2276  FlagsContinuation cont =
2277  FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
2278  VisitFloat64Compare(this, node, &cont);
2279 }
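// Why not_equal works for the pattern above: (v)ucomisd sets ZF both when
// the operands compare equal and when the comparison is unordered (NaN), so
// "not equal" is true exactly for an ordered, non-zero x, which is the value
// of Float64LessThan(#0.0, Float64Abs(x)).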
2280 
2281 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2282  FlagsContinuation cont =
2283  FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
2284  VisitFloat64Compare(this, node, &cont);
2285 }
2286 
2287 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2288  X64OperandGenerator g(this);
2289  Node* left = node->InputAt(0);
2290  Node* right = node->InputAt(1);
2291  Float64Matcher mleft(left);
2292  if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
2293  Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
2294  return;
2295  }
2296  Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
2297  g.UseRegister(left), g.Use(right));
2298 }
2299 
2300 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2301  X64OperandGenerator g(this);
2302  Node* left = node->InputAt(0);
2303  Node* right = node->InputAt(1);
2304  Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
2305  g.UseRegister(left), g.Use(right));
2306 }
2307 
2308 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
2309  X64OperandGenerator g(this);
2310  Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
2311  g.UseRegister(node->InputAt(0)));
2312 }
2313 
2314 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2315  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2316  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
2317  load_rep.representation() == MachineRepresentation::kWord16 ||
2318  load_rep.representation() == MachineRepresentation::kWord32);
2319  USE(load_rep);
2320  VisitLoad(node);
2321 }
2322 
2323 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2324  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2325  USE(load_rep);
2326  VisitLoad(node);
2327 }
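// On x64, naturally aligned loads up to 64 bits are already atomic, and the
// TSO memory model needs no fence on the load side as long as seq-cst stores
// are implemented with locked/xchg instructions (as they are below), so both
// atomic load visitors simply reuse VisitLoad.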
2328 
2329 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2330  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2331  ArchOpcode opcode = kArchNop;
2332  switch (rep) {
2333  case MachineRepresentation::kWord8:
2334  opcode = kWord32AtomicExchangeInt8;
2335  break;
2336  case MachineRepresentation::kWord16:
2337  opcode = kWord32AtomicExchangeInt16;
2338  break;
2339  case MachineRepresentation::kWord32:
2340  opcode = kWord32AtomicExchangeWord32;
2341  break;
2342  default:
2343  UNREACHABLE();
2344  return;
2345  }
2346  VisitAtomicExchange(this, node, opcode);
2347 }
2348 
2349 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2350  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2351  ArchOpcode opcode = kArchNop;
2352  switch (rep) {
2353  case MachineRepresentation::kWord8:
2354  opcode = kX64Word64AtomicExchangeUint8;
2355  break;
2356  case MachineRepresentation::kWord16:
2357  opcode = kX64Word64AtomicExchangeUint16;
2358  break;
2359  case MachineRepresentation::kWord32:
2360  opcode = kX64Word64AtomicExchangeUint32;
2361  break;
2362  case MachineRepresentation::kWord64:
2363  opcode = kX64Word64AtomicExchangeUint64;
2364  break;
2365  default:
2366  UNREACHABLE();
2367  return;
2368  }
2369  VisitAtomicExchange(this, node, opcode);
2370 }
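// Sequentially consistent atomic stores are implemented as exchanges whose
// result is ignored: an "xchg" to memory provides the required store-load
// ordering on x64 without a separate fence, unlike a plain "mov".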
2371 
2372 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2373  MachineType type = AtomicOpType(node->op());
2374  ArchOpcode opcode = kArchNop;
2375  if (type == MachineType::Int8()) {
2376  opcode = kWord32AtomicExchangeInt8;
2377  } else if (type == MachineType::Uint8()) {
2378  opcode = kWord32AtomicExchangeUint8;
2379  } else if (type == MachineType::Int16()) {
2380  opcode = kWord32AtomicExchangeInt16;
2381  } else if (type == MachineType::Uint16()) {
2382  opcode = kWord32AtomicExchangeUint16;
2383  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2384  opcode = kWord32AtomicExchangeWord32;
2385  } else {
2386  UNREACHABLE();
2387  return;
2388  }
2389  VisitAtomicExchange(this, node, opcode);
2390 }
2391 
2392 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2393  MachineType type = AtomicOpType(node->op());
2394  ArchOpcode opcode = kArchNop;
2395  if (type == MachineType::Uint8()) {
2396  opcode = kX64Word64AtomicExchangeUint8;
2397  } else if (type == MachineType::Uint16()) {
2398  opcode = kX64Word64AtomicExchangeUint16;
2399  } else if (type == MachineType::Uint32()) {
2400  opcode = kX64Word64AtomicExchangeUint32;
2401  } else if (type == MachineType::Uint64()) {
2402  opcode = kX64Word64AtomicExchangeUint64;
2403  } else {
2404  UNREACHABLE();
2405  return;
2406  }
2407  VisitAtomicExchange(this, node, opcode);
2408 }
2409 
2410 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2411  MachineType type = AtomicOpType(node->op());
2412  ArchOpcode opcode = kArchNop;
2413  if (type == MachineType::Int8()) {
2414  opcode = kWord32AtomicCompareExchangeInt8;
2415  } else if (type == MachineType::Uint8()) {
2416  opcode = kWord32AtomicCompareExchangeUint8;
2417  } else if (type == MachineType::Int16()) {
2418  opcode = kWord32AtomicCompareExchangeInt16;
2419  } else if (type == MachineType::Uint16()) {
2420  opcode = kWord32AtomicCompareExchangeUint16;
2421  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2422  opcode = kWord32AtomicCompareExchangeWord32;
2423  } else {
2424  UNREACHABLE();
2425  return;
2426  }
2427  VisitAtomicCompareExchange(this, node, opcode);
2428 }
2429 
2430 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2431  MachineType type = AtomicOpType(node->op());
2432  ArchOpcode opcode = kArchNop;
2433  if (type == MachineType::Uint8()) {
2434  opcode = kX64Word64AtomicCompareExchangeUint8;
2435  } else if (type == MachineType::Uint16()) {
2436  opcode = kX64Word64AtomicCompareExchangeUint16;
2437  } else if (type == MachineType::Uint32()) {
2438  opcode = kX64Word64AtomicCompareExchangeUint32;
2439  } else if (type == MachineType::Uint64()) {
2440  opcode = kX64Word64AtomicCompareExchangeUint64;
2441  } else {
2442  UNREACHABLE();
2443  return;
2444  }
2445  VisitAtomicCompareExchange(this, node, opcode);
2446 }
2447 
2448 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2449  Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2450  ArchOpcode uint16_op, ArchOpcode word32_op) {
2451  MachineType type = AtomicOpType(node->op());
2452  ArchOpcode opcode = kArchNop;
2453  if (type == MachineType::Int8()) {
2454  opcode = int8_op;
2455  } else if (type == MachineType::Uint8()) {
2456  opcode = uint8_op;
2457  } else if (type == MachineType::Int16()) {
2458  opcode = int16_op;
2459  } else if (type == MachineType::Uint16()) {
2460  opcode = uint16_op;
2461  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2462  opcode = word32_op;
2463  } else {
2464  UNREACHABLE();
2465  return;
2466  }
2467  VisitAtomicBinop(this, node, opcode);
2468 }
2469 
2470 #define VISIT_ATOMIC_BINOP(op) \
2471  void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
2472  VisitWord32AtomicBinaryOperation( \
2473  node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
2474  kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
2475  kWord32Atomic##op##Word32); \
2476  }
2477 VISIT_ATOMIC_BINOP(Add)
2478 VISIT_ATOMIC_BINOP(Sub)
2479 VISIT_ATOMIC_BINOP(And)
2480 VISIT_ATOMIC_BINOP(Or)
2481 VISIT_ATOMIC_BINOP(Xor)
2482 #undef VISIT_ATOMIC_BINOP
2483 
2484 void InstructionSelector::VisitWord64AtomicBinaryOperation(
2485  Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
2486  ArchOpcode word64_op) {
2487  MachineType type = AtomicOpType(node->op());
2488  ArchOpcode opcode = kArchNop;
2489  if (type == MachineType::Uint8()) {
2490  opcode = uint8_op;
2491  } else if (type == MachineType::Uint16()) {
2492  opcode = uint16_op;
2493  } else if (type == MachineType::Uint32()) {
2494  opcode = uint32_op;
2495  } else if (type == MachineType::Uint64()) {
2496  opcode = word64_op;
2497  } else {
2498  UNREACHABLE();
2499  return;
2500  }
2501  VisitAtomicBinop(this, node, opcode);
2502 }
2503 
2504 #define VISIT_ATOMIC_BINOP(op) \
2505  void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
2506  VisitWord64AtomicBinaryOperation( \
2507  node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
2508  kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
2509  }
2510 VISIT_ATOMIC_BINOP(Add)
2511 VISIT_ATOMIC_BINOP(Sub)
2512 VISIT_ATOMIC_BINOP(And)
2513 VISIT_ATOMIC_BINOP(Or)
2514 VISIT_ATOMIC_BINOP(Xor)
2515 #undef VISIT_ATOMIC_BINOP
2516 
2517 #define SIMD_TYPES(V) \
2518  V(F32x4) \
2519  V(I32x4) \
2520  V(I16x8) \
2521  V(I8x16)
2522 
2523 #define SIMD_BINOP_LIST(V) \
2524  V(F32x4Add) \
2525  V(F32x4AddHoriz) \
2526  V(F32x4Sub) \
2527  V(F32x4Mul) \
2528  V(F32x4Min) \
2529  V(F32x4Max) \
2530  V(F32x4Eq) \
2531  V(F32x4Ne) \
2532  V(F32x4Lt) \
2533  V(F32x4Le) \
2534  V(I32x4Add) \
2535  V(I32x4AddHoriz) \
2536  V(I32x4Sub) \
2537  V(I32x4Mul) \
2538  V(I32x4MinS) \
2539  V(I32x4MaxS) \
2540  V(I32x4Eq) \
2541  V(I32x4Ne) \
2542  V(I32x4GtS) \
2543  V(I32x4GeS) \
2544  V(I32x4MinU) \
2545  V(I32x4MaxU) \
2546  V(I32x4GtU) \
2547  V(I32x4GeU) \
2548  V(I16x8SConvertI32x4) \
2549  V(I16x8Add) \
2550  V(I16x8AddSaturateS) \
2551  V(I16x8AddHoriz) \
2552  V(I16x8Sub) \
2553  V(I16x8SubSaturateS) \
2554  V(I16x8Mul) \
2555  V(I16x8MinS) \
2556  V(I16x8MaxS) \
2557  V(I16x8Eq) \
2558  V(I16x8Ne) \
2559  V(I16x8GtS) \
2560  V(I16x8GeS) \
2561  V(I16x8AddSaturateU) \
2562  V(I16x8SubSaturateU) \
2563  V(I16x8MinU) \
2564  V(I16x8MaxU) \
2565  V(I16x8GtU) \
2566  V(I16x8GeU) \
2567  V(I8x16SConvertI16x8) \
2568  V(I8x16Add) \
2569  V(I8x16AddSaturateS) \
2570  V(I8x16Sub) \
2571  V(I8x16SubSaturateS) \
2572  V(I8x16MinS) \
2573  V(I8x16MaxS) \
2574  V(I8x16Eq) \
2575  V(I8x16Ne) \
2576  V(I8x16GtS) \
2577  V(I8x16GeS) \
2578  V(I8x16AddSaturateU) \
2579  V(I8x16SubSaturateU) \
2580  V(I8x16MinU) \
2581  V(I8x16MaxU) \
2582  V(I8x16GtU) \
2583  V(I8x16GeU) \
2584  V(S128And) \
2585  V(S128Or) \
2586  V(S128Xor)
2587 
2588 #define SIMD_UNOP_LIST(V) \
2589  V(F32x4SConvertI32x4) \
2590  V(F32x4Abs) \
2591  V(F32x4Neg) \
2592  V(F32x4RecipApprox) \
2593  V(F32x4RecipSqrtApprox) \
2594  V(I32x4SConvertI16x8Low) \
2595  V(I32x4SConvertI16x8High) \
2596  V(I32x4Neg) \
2597  V(I32x4UConvertI16x8Low) \
2598  V(I32x4UConvertI16x8High) \
2599  V(I16x8SConvertI8x16Low) \
2600  V(I16x8SConvertI8x16High) \
2601  V(I16x8Neg) \
2602  V(I16x8UConvertI8x16Low) \
2603  V(I16x8UConvertI8x16High) \
2604  V(I8x16Neg) \
2605  V(S128Not)
2606 
2607 #define SIMD_SHIFT_OPCODES(V) \
2608  V(I32x4Shl) \
2609  V(I32x4ShrS) \
2610  V(I32x4ShrU) \
2611  V(I16x8Shl) \
2612  V(I16x8ShrS) \
2613  V(I16x8ShrU) \
2614  V(I8x16Shl) \
2615  V(I8x16ShrS) \
2616  V(I8x16ShrU)
2617 
2618 #define SIMD_ANYTRUE_LIST(V) \
2619  V(S1x4AnyTrue) \
2620  V(S1x8AnyTrue) \
2621  V(S1x16AnyTrue)
2622 
2623 #define SIMD_ALLTRUE_LIST(V) \
2624  V(S1x4AllTrue) \
2625  V(S1x8AllTrue) \
2626  V(S1x16AllTrue)
2627 
2628 void InstructionSelector::VisitS128Zero(Node* node) {
2629  X64OperandGenerator g(this);
2630  Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
2631 }
2632 
2633 #define VISIT_SIMD_SPLAT(Type) \
2634  void InstructionSelector::Visit##Type##Splat(Node* node) { \
2635  X64OperandGenerator g(this); \
2636  Emit(kX64##Type##Splat, g.DefineAsRegister(node), \
2637  g.Use(node->InputAt(0))); \
2638  }
2639 SIMD_TYPES(VISIT_SIMD_SPLAT)
2640 #undef VISIT_SIMD_SPLAT
2641 
2642 #define VISIT_SIMD_EXTRACT_LANE(Type) \
2643  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
2644  X64OperandGenerator g(this); \
2645  int32_t lane = OpParameter<int32_t>(node->op()); \
2646  Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node), \
2647  g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
2648  }
2649 SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
2650 #undef VISIT_SIMD_EXTRACT_LANE
2651 
2652 #define VISIT_SIMD_REPLACE_LANE(Type) \
2653  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
2654  X64OperandGenerator g(this); \
2655  int32_t lane = OpParameter<int32_t>(node->op()); \
2656  Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
2657  g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
2658  g.Use(node->InputAt(1))); \
2659  }
2660 SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
2661 #undef VISIT_SIMD_REPLACE_LANE
2662 
2663 #define VISIT_SIMD_SHIFT(Opcode) \
2664  void InstructionSelector::Visit##Opcode(Node* node) { \
2665  X64OperandGenerator g(this); \
2666  int32_t value = OpParameter<int32_t>(node->op()); \
2667  Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
2668  g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
2669  }
2670 SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
2671 #undef VISIT_SIMD_SHIFT
2672 #undef SIMD_SHIFT_OPCODES
2673 
2674 #define VISIT_SIMD_UNOP(Opcode) \
2675  void InstructionSelector::Visit##Opcode(Node* node) { \
2676  X64OperandGenerator g(this); \
2677  Emit(kX64##Opcode, g.DefineAsRegister(node), \
2678  g.UseRegister(node->InputAt(0))); \
2679  }
2680 SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
2681 #undef VISIT_SIMD_UNOP
2682 #undef SIMD_UNOP_LIST
2683 
2684 #define VISIT_SIMD_BINOP(Opcode) \
2685  void InstructionSelector::Visit##Opcode(Node* node) { \
2686  X64OperandGenerator g(this); \
2687  Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
2688  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); \
2689  }
2690 SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
2691 #undef VISIT_SIMD_BINOP
2692 #undef SIMD_BINOP_LIST
2693 
2694 #define VISIT_SIMD_ANYTRUE(Opcode) \
2695  void InstructionSelector::Visit##Opcode(Node* node) { \
2696  X64OperandGenerator g(this); \
2697  InstructionOperand temps[] = {g.TempRegister()}; \
2698  Emit(kX64##Opcode, g.DefineAsRegister(node), \
2699  g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
2700  }
2701 SIMD_ANYTRUE_LIST(VISIT_SIMD_ANYTRUE)
2702 #undef VISIT_SIMD_ANYTRUE
2703 #undef SIMD_ANYTRUE_LIST
2704 
2705 #define VISIT_SIMD_ALLTRUE(Opcode) \
2706  void InstructionSelector::Visit##Opcode(Node* node) { \
2707  X64OperandGenerator g(this); \
2708  InstructionOperand temps[] = {g.TempRegister()}; \
2709  Emit(kX64##Opcode, g.DefineAsRegister(node), \
2710  g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps); \
2711  }
2712 SIMD_ALLTRUE_LIST(VISIT_SIMD_ALLTRUE)
2713 #undef VISIT_SIMD_ALLTRUE
2714 #undef SIMD_ALLTRUE_LIST
2715 #undef SIMD_TYPES
2716 
2717 void InstructionSelector::VisitS128Select(Node* node) {
2718  X64OperandGenerator g(this);
2719  Emit(kX64S128Select, g.DefineSameAsFirst(node),
2720  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
2721  g.UseRegister(node->InputAt(2)));
2722 }
2723 
2724 void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
2725  X64OperandGenerator g(this);
2726  Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
2727  g.UseRegister(node->InputAt(0)));
2728 }
2729 
2730 void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
2731  X64OperandGenerator g(this);
2732  Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
2733  g.UseRegister(node->InputAt(0)));
2734 }
2735 
2736 void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
2737  X64OperandGenerator g(this);
2738  InstructionOperand temps[] = {g.TempSimd128Register()};
2739  Emit(kX64I32x4UConvertF32x4, g.DefineSameAsFirst(node),
2740  g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
2741 }
2742 
2743 void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
2744  X64OperandGenerator g(this);
2745  Emit(kX64I16x8UConvertI32x4, g.DefineSameAsFirst(node),
2746  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
2747 }
2748 
2749 void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
2750  X64OperandGenerator g(this);
2751  Emit(kX64I8x16UConvertI16x8, g.DefineSameAsFirst(node),
2752  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
2753 }
2754 
2755 void InstructionSelector::VisitI8x16Mul(Node* node) {
2756  X64OperandGenerator g(this);
2757  InstructionOperand temps[] = {g.TempSimd128Register()};
2758  Emit(kX64I8x16Mul, g.DefineSameAsFirst(node),
2759  g.UseUniqueRegister(node->InputAt(0)),
2760  g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
2761 }
2762 
2763 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
2764  UNREACHABLE();
2765 }
2766 
2767 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
2768  UNREACHABLE();
2769 }
2770 
2771 // static
2772 MachineOperatorBuilder::Flags
2773 InstructionSelector::SupportedMachineOperatorFlags() {
2774  MachineOperatorBuilder::Flags flags =
2775  MachineOperatorBuilder::kWord32ShiftIsSafe |
2776  MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
2777  MachineOperatorBuilder::kSpeculationFence;
2778  if (CpuFeatures::IsSupported(POPCNT)) {
2779  flags |= MachineOperatorBuilder::kWord32Popcnt |
2780  MachineOperatorBuilder::kWord64Popcnt;
2781  }
2782  if (CpuFeatures::IsSupported(SSE4_1)) {
2783  flags |= MachineOperatorBuilder::kFloat32RoundDown |
2784  MachineOperatorBuilder::kFloat64RoundDown |
2785  MachineOperatorBuilder::kFloat32RoundUp |
2786  MachineOperatorBuilder::kFloat64RoundUp |
2787  MachineOperatorBuilder::kFloat32RoundTruncate |
2788  MachineOperatorBuilder::kFloat64RoundTruncate |
2789  MachineOperatorBuilder::kFloat32RoundTiesEven |
2790  MachineOperatorBuilder::kFloat64RoundTiesEven;
2791  }
2792  return flags;
2793 }
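// The float rounding flags are only advertised when SSE4.1 is available
// because they select to the roundss/roundsd family of instructions, and the
// popcount flags likewise depend on the POPCNT feature bit.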
2794 
2795 // static
2796 MachineOperatorBuilder::AlignmentRequirements
2797 InstructionSelector::AlignmentRequirements() {
2798  return MachineOperatorBuilder::AlignmentRequirements::
2799  FullUnalignedAccessSupport();
2800 }
2801 
2802 } // namespace compiler
2803 } // namespace internal
2804 } // namespace v8