V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instruction-selector-mips64.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/adapters.h"
#include "src/base/bits.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// Adds Mips-specific methods for generating InstructionOperands.
class Mips64OperandGenerator final : public OperandGenerator {
 public:
  explicit Mips64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return OpParameter<int64_t>(node->op());
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
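      // Shift instructions encode the shift amount directly: a 5-bit field
      // for 32-bit shifts and a 6-bit field for 64-bit shifts.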
      case kMips64Shl:
      case kMips64Sar:
      case kMips64Shr:
        return is_uint5(value);
      case kMips64Dshl:
      case kMips64Dsar:
      case kMips64Dshr:
        return is_uint6(value);
      case kMips64Add:
      case kMips64And32:
      case kMips64And:
      case kMips64Dadd:
      case kMips64Or32:
      case kMips64Or:
      case kMips64Tst:
      case kMips64Xor:
        return is_uint16(value);
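      // Memory operands may carry a full 32-bit offset; offsets that do not
      // fit the hardware's 16-bit field are materialized by the
      // macro-assembler with a scratch register.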
      case kMips64Lb:
      case kMips64Lbu:
      case kMips64Sb:
      case kMips64Lh:
      case kMips64Lhu:
      case kMips64Sh:
      case kMips64Lw:
      case kMips64Sw:
      case kMips64Ld:
      case kMips64Sd:
      case kMips64Lwc1:
      case kMips64Swc1:
      case kMips64Ldc1:
      case kMips64Sdc1:
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};

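// Helper emitters. The suffix names the operand shapes: R = register,
// I = immediate, O = register or immediate (via UseOperand).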
static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}

static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
  Mips64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(
      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
}

static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Mips64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}

struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
      if (g.CanBeImmediate(offset, opcode_)) {
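        // The shift extracts the upper word, which lives at offset +4 on
        // little-endian targets and at the original offset on big-endian
        // ones.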
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
                          Node* output_node) {
  ExtendingLoadMatcher m(node, selector);
  Mips64OperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}

bool TryMatchImmediate(InstructionSelector* selector,
                       InstructionCode* opcode_return, Node* node,
                       size_t* input_count_return, InstructionOperand* inputs) {
  Mips64OperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_MRI);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  return false;
}

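// Emits a binary operation, folding a constant input into the immediate
// field when possible. reverse_opcode is used when only the left operand is
// a constant, i.e. when the operands have to be swapped.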
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int alignment = rep.alignment();
  int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)),
       sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}

void InstructionSelector::VisitDebugAbort(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}

void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              Node* output = nullptr) {
  Mips64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Lwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Ldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Ld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
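  // Under the poisoning (Spectre) mitigation, flag the load so the code
  // generator masks its result before it can feed speculative accesses.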
  if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMips64Swc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMips64Sdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMips64Sb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMips64Sh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Sw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kMips64Sd;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kMips64MsaSt;
        break;
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32And(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1F;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
      Emit(kMips64Ins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And32, true, kMips64And32);
}

void InstructionSelector::VisitWord64And(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

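        // If the mask covers the whole register, the And is a no-op.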
        if (lsb == 0 && mask_width == 64) {
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMips64Dext, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(static_cast<int32_t>(mask_width)));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint64_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask. Dins cannot insert bits
      // past word size, so shifts smaller than 32 are covered.
      Emit(kMips64Dins, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMips64And, true, kMips64And);
}

void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}

void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kMips64Or, true, kMips64Or);
}

void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
}

void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int64BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      Emit(kMips64Nor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Mips64OperandGenerator g(this);
    Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
}

void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Mips64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Shl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Shl, node);
}

void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMips64Ext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Shr, node);
}

void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      Mips64OperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
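      // (x << K) >> K is a sign-extension: Seh for K == 16 (halfword),
      // Seb for K == 24 (byte).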
      if ((sar == shl) && (sar == 16)) {
        Emit(kMips64Seh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kMips64Seb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        Emit(kMips64Shl, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Sar, node);
}

void InstructionSelector::VisitWord64Shl(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kMips64Dshl, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      uint64_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMips64Dshl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMips64Dshl, node);
}

void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasValue()) {
    uint32_t lsb = m.right().Value() & 0x3F;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Mips64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kMips64Dext, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMips64Dshr, node);
}

void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kMips64Dsar, node);
}

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMips64Ror, node);
}

void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64Ctz(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Popcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64Popcnt(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}

void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}

void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (kArchVariant == kMips64r6) {
    // Select Lsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int32BinopMatcher mright(m.right().node());
      if (mright.right().HasValue() && !m.left().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mright.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Lsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Lsa for ((left_of_left << imm) + right).
    if (m.left().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue() && !m.right().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Lsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}

void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  if (kArchVariant == kMips64r6) {
    // Select Dlsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord64Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int64BinopMatcher mright(m.right().node());
      if (mright.right().HasValue() && !m.left().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mright.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Dlsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Dlsa for ((left_of_left << imm) + right).
    if (m.left().opcode() == IrOpcode::kWord64Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue() && !m.right().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMips64Dlsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}

void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}

void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}

void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
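  // Strength-reduce multiplication by a constant: 2^k becomes a shift,
  // 2^k + 1 becomes a shift-and-add (Lsa), and 2^k - 1 becomes a shift
  // followed by a subtract.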
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
        value - 1 > 0 && value - 1 <= 31) {
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64Mul, node);
}

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}

void InstructionSelector::VisitInt64Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
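  // The same power-of-two strength reductions as VisitInt32Mul, using the
  // 64-bit opcodes.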
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && kArchVariant == kMips64r6 &&
        value - 1 > 0 && value - 1 <= 31) {
      // Dlsa macro will handle the shifting value out of bound cases.
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint64Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint64Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}

void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWS, node);
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwS, node);
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMips64TruncWD, node);
}

void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64TruncLD, node);
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kMips64TruncUlD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64TruncLD, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  Mips64OperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kMips64Lw;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, node);
  } else {
    Mips64OperandGenerator g(this);
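    // On MIPS64, a 32-bit shift by zero sign-extends the low word into the
    // full 64-bit register.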
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
         g.TempImmediate(0));
  }
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    // 32-bit operations will write their result in a 64 bit register,
    // clearing the top 32 bits of the destination register.
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord8:
          case MachineRepresentation::kWord16:
          case MachineRepresentation::kWord32:
            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
            return;
          default:
            break;
        }
      }
      break;
    }
    default:
      break;
  }
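  // Otherwise zero-extend explicitly by extracting the low 32 bits.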
  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging no need for truncate. Combine sequence.
            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMips64CvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMips64CvtSD, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}

void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}

void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}

void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}

void InstructionSelector::VisitFloat32Add(Node* node) {
  // Optimization with Madd.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64AddS, node);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  // Optimization with Madd.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64AddD, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  // Optimization with Msub.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64SubS, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  // Optimization with Msub.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64SubD, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  Mips64OperandGenerator g(this);
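  // There is no single fmod instruction; emit a call into the C library,
  // passing arguments in f12/f14 and taking the result in f0.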
1491  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
1492  g.UseFixed(node->InputAt(0), f12), g.UseFixed(node->InputAt(1), f14))
1493  ->MarkAsCall();
1494 }
1495 
1496 void InstructionSelector::VisitFloat32Max(Node* node) {
1497  Mips64OperandGenerator g(this);
1498  Emit(kMips64Float32Max, g.DefineAsRegister(node),
1499  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1500 }
1501 
1502 void InstructionSelector::VisitFloat64Max(Node* node) {
1503  Mips64OperandGenerator g(this);
1504  Emit(kMips64Float64Max, g.DefineAsRegister(node),
1505  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1506 }
1507 
1508 void InstructionSelector::VisitFloat32Min(Node* node) {
1509  Mips64OperandGenerator g(this);
1510  Emit(kMips64Float32Min, g.DefineAsRegister(node),
1511  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1512 }
1513 
1514 void InstructionSelector::VisitFloat64Min(Node* node) {
1515  Mips64OperandGenerator g(this);
1516  Emit(kMips64Float64Min, g.DefineAsRegister(node),
1517  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1518 }
1519 
1520 void InstructionSelector::VisitFloat32Abs(Node* node) {
1521  VisitRR(this, kMips64AbsS, node);
1522 }
1523 
1524 void InstructionSelector::VisitFloat64Abs(Node* node) {
1525  VisitRR(this, kMips64AbsD, node);
1526 }
1527 
1528 void InstructionSelector::VisitFloat32Sqrt(Node* node) {
1529  VisitRR(this, kMips64SqrtS, node);
1530 }
1531 
1532 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
1533  VisitRR(this, kMips64SqrtD, node);
1534 }
1535 
1536 void InstructionSelector::VisitFloat32RoundDown(Node* node) {
1537  VisitRR(this, kMips64Float32RoundDown, node);
1538 }
1539 
1540 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
1541  VisitRR(this, kMips64Float64RoundDown, node);
1542 }
1543 
1544 void InstructionSelector::VisitFloat32RoundUp(Node* node) {
1545  VisitRR(this, kMips64Float32RoundUp, node);
1546 }
1547 
1548 void InstructionSelector::VisitFloat64RoundUp(Node* node) {
1549  VisitRR(this, kMips64Float64RoundUp, node);
1550 }
1551 
1552 void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
1553  VisitRR(this, kMips64Float32RoundTruncate, node);
1554 }
1555 
1556 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
1557  VisitRR(this, kMips64Float64RoundTruncate, node);
1558 }
1559 
1560 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
1561  UNREACHABLE();
1562 }
1563 
1564 void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
1565  VisitRR(this, kMips64Float32RoundTiesEven, node);
1566 }
1567 
1568 void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
1569  VisitRR(this, kMips64Float64RoundTiesEven, node);
1570 }
1571 
1572 void InstructionSelector::VisitFloat32Neg(Node* node) {
1573  VisitRR(this, kMips64NegS, node);
1574 }
1575 
1576 void InstructionSelector::VisitFloat64Neg(Node* node) {
1577  VisitRR(this, kMips64NegD, node);
1578 }
1579 
1580 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
1581  InstructionCode opcode) {
1582  Mips64OperandGenerator g(this);
1583  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
1584  g.UseFixed(node->InputAt(1), f4))
1585  ->MarkAsCall();
1586 }
1587 
1588 void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
1589  InstructionCode opcode) {
1590  Mips64OperandGenerator g(this);
1591  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
1592  ->MarkAsCall();
1593 }
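// The Ieee754 binops and unops are likewise emitted as calls into C library
// routines with fixed argument/result registers (f2/f4 -> f0 for binops,
// f12 -> f0 for unops), so they are marked as calls rather than selected to
// machine instructions.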
1594 
1595 void InstructionSelector::EmitPrepareArguments(
1596  ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
1597  Node* node) {
1598  Mips64OperandGenerator g(this);
1599 
1600  // Prepare for C function call.
1601  if (call_descriptor->IsCFunctionCall()) {
1602  Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
1603  call_descriptor->ParameterCount())),
1604  0, nullptr, 0, nullptr);
1605 
1606  // Poke any stack arguments.
1607  int slot = kCArgSlotCount;
1608  for (PushParameter input : (*arguments)) {
1609  Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1610  g.TempImmediate(slot << kPointerSizeLog2));
1611  ++slot;
1612  }
1613  } else {
1614  int push_count = static_cast<int>(call_descriptor->StackParameterCount());
1615  if (push_count > 0) {
1616  // Calculate the stack space needed for the arguments.
1617  int stack_size = 0;
1618  for (PushParameter input : (*arguments)) {
1619  if (input.node) {
1620  stack_size += input.location.GetSizeInPointers();
1621  }
1622  }
1623  Emit(kMips64StackClaim, g.NoOutput(),
1624  g.TempImmediate(stack_size << kPointerSizeLog2));
1625  }
1626  for (size_t n = 0; n < arguments->size(); ++n) {
1627  PushParameter input = (*arguments)[n];
1628  if (input.node) {
1629  Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
1630  g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
1631  }
1632  }
1633  }
1634 }
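// Stack slots are pointer-sized, so offsets are computed as
// slot << kPointerSizeLog2 (slot * 8 on a 64-bit target). For a C call,
// poking starts at kCArgSlotCount to skip any reserved argument slots
// (0 under the n64 ABI, if the usual constant values are assumed).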
1635 
1636 void InstructionSelector::EmitPrepareResults(
1637  ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
1638  Node* node) {
1639  Mips64OperandGenerator g(this);
1640 
1641  int reverse_slot = 0;
1642  for (PushParameter output : *results) {
1643  if (!output.location.IsCallerFrameSlot()) continue;
1644  // Skip any alignment holes in the results (represented by null nodes).
1645  if (output.node != nullptr) {
1646  DCHECK(!call_descriptor->IsCFunctionCall());
1647  if (output.location.GetType() == MachineType::Float32()) {
1648  MarkAsFloat32(output.node);
1649  } else if (output.location.GetType() == MachineType::Float64()) {
1650  MarkAsFloat64(output.node);
1651  }
1652  Emit(kMips64Peek, g.DefineAsRegister(output.node),
1653  g.UseImmediate(reverse_slot));
1654  }
1655  reverse_slot += output.location.GetSizeInPointers();
1656  }
1657 }
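// reverse_slot numbers the caller-frame result slots; kMips64Peek reloads
// each stack-returned value into a register, after the node has been marked
// with the proper float representation where needed.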
1658 
1659 bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1660 
1661 int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1662 
1663 void InstructionSelector::VisitUnalignedLoad(Node* node) {
1664  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1665  Mips64OperandGenerator g(this);
1666  Node* base = node->InputAt(0);
1667  Node* index = node->InputAt(1);
1668 
1669  ArchOpcode opcode = kArchNop;
1670  switch (load_rep.representation()) {
1671  case MachineRepresentation::kFloat32:
1672  opcode = kMips64Ulwc1;
1673  break;
1674  case MachineRepresentation::kFloat64:
1675  opcode = kMips64Uldc1;
1676  break;
1677  case MachineRepresentation::kBit: // Fall through.
1678  case MachineRepresentation::kWord8:
1679  UNREACHABLE();
1680  break;
1681  case MachineRepresentation::kWord16:
1682  opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
1683  break;
1684  case MachineRepresentation::kWord32:
1685  opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
1686  break;
1687  case MachineRepresentation::kTaggedSigned: // Fall through.
1688  case MachineRepresentation::kTaggedPointer: // Fall through.
1689  case MachineRepresentation::kTagged: // Fall through.
1690  case MachineRepresentation::kWord64:
1691  opcode = kMips64Uld;
1692  break;
1693  case MachineRepresentation::kSimd128:
1694  opcode = kMips64MsaLd;
1695  break;
1696  case MachineRepresentation::kNone:
1697  UNREACHABLE();
1698  return;
1699  }
1700 
1701  if (g.CanBeImmediate(index, opcode)) {
1702  Emit(opcode | AddressingModeField::encode(kMode_MRI),
1703  g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
1704  } else {
1705  InstructionOperand addr_reg = g.TempRegister();
1706  Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
1707  g.UseRegister(index), g.UseRegister(base));
1708  // Emit desired load opcode, using temp addr_reg.
1709  Emit(opcode | AddressingModeField::encode(kMode_MRI),
1710  g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1711  }
1712 }
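// The Ulw/Uld/Ulwc1-style opcodes are macro operations: on pre-R6 cores they
// presumably expand to lwl/lwr (or ldl/ldr) pairs in the code generator,
// while R6 cores support unaligned accesses natively (see
// AlignmentRequirements below).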
1713 
1714 void InstructionSelector::VisitUnalignedStore(Node* node) {
1715  Mips64OperandGenerator g(this);
1716  Node* base = node->InputAt(0);
1717  Node* index = node->InputAt(1);
1718  Node* value = node->InputAt(2);
1719 
1720  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
1721  ArchOpcode opcode = kArchNop;
1722  switch (rep) {
1723  case MachineRepresentation::kFloat32:
1724  opcode = kMips64Uswc1;
1725  break;
1726  case MachineRepresentation::kFloat64:
1727  opcode = kMips64Usdc1;
1728  break;
1729  case MachineRepresentation::kBit: // Fall through.
1730  case MachineRepresentation::kWord8:
1731  UNREACHABLE();
1732  break;
1733  case MachineRepresentation::kWord16:
1734  opcode = kMips64Ush;
1735  break;
1736  case MachineRepresentation::kWord32:
1737  opcode = kMips64Usw;
1738  break;
1739  case MachineRepresentation::kTaggedSigned: // Fall through.
1740  case MachineRepresentation::kTaggedPointer: // Fall through.
1741  case MachineRepresentation::kTagged: // Fall through.
1742  case MachineRepresentation::kWord64:
1743  opcode = kMips64Usd;
1744  break;
1745  case MachineRepresentation::kSimd128:
1746  opcode = kMips64MsaSt;
1747  break;
1748  case MachineRepresentation::kNone:
1749  UNREACHABLE();
1750  return;
1751  }
1752 
1753  if (g.CanBeImmediate(index, opcode)) {
1754  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1755  g.UseRegister(base), g.UseImmediate(index),
1756  g.UseRegisterOrImmediateZero(value));
1757  } else {
1758  InstructionOperand addr_reg = g.TempRegister();
1759  Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
1760  g.UseRegister(index), g.UseRegister(base));
1761  // Emit desired store opcode, using temp addr_reg.
1762  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1763  addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1764  }
1765 }
1766 
1767 namespace {
1768 
1769 // Shared routine for multiple compare operations.
1770 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1771  InstructionOperand left, InstructionOperand right,
1772  FlagsContinuation* cont) {
1773  selector->EmitWithContinuation(opcode, left, right, cont);
1774 }
1775 
1776 // Shared routine for multiple float32 compare operations.
1777 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1778  FlagsContinuation* cont) {
1779  Mips64OperandGenerator g(selector);
1780  Float32BinopMatcher m(node);
1781  InstructionOperand lhs, rhs;
1782 
1783  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1784  : g.UseRegister(m.left().node());
1785  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1786  : g.UseRegister(m.right().node());
1787  VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
1788 }
1789 
1790 // Shared routine for multiple float64 compare operations.
1791 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1792  FlagsContinuation* cont) {
1793  Mips64OperandGenerator g(selector);
1794  Float64BinopMatcher m(node);
1795  InstructionOperand lhs, rhs;
1796 
1797  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1798  : g.UseRegister(m.left().node());
1799  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1800  : g.UseRegister(m.right().node());
1801  VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
1802 }
1803 
1804 // Shared routine for multiple word compare operations.
1805 void VisitWordCompare(InstructionSelector* selector, Node* node,
1806  InstructionCode opcode, FlagsContinuation* cont,
1807  bool commutative) {
1808  Mips64OperandGenerator g(selector);
1809  Node* left = node->InputAt(0);
1810  Node* right = node->InputAt(1);
1811 
1812  // Match immediates on left or right side of comparison.
1813  if (g.CanBeImmediate(right, opcode)) {
1814  if (opcode == kMips64Tst) {
1815  VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1816  cont);
1817  } else {
1818  switch (cont->condition()) {
1819  case kEqual:
1820  case kNotEqual:
1821  if (cont->IsSet()) {
1822  VisitCompare(selector, opcode, g.UseRegister(left),
1823  g.UseImmediate(right), cont);
1824  } else {
1825  VisitCompare(selector, opcode, g.UseRegister(left),
1826  g.UseRegister(right), cont);
1827  }
1828  break;
1829  case kSignedLessThan:
1830  case kSignedGreaterThanOrEqual:
1831  case kUnsignedLessThan:
1832  case kUnsignedGreaterThanOrEqual:
1833  VisitCompare(selector, opcode, g.UseRegister(left),
1834  g.UseImmediate(right), cont);
1835  break;
1836  default:
1837  VisitCompare(selector, opcode, g.UseRegister(left),
1838  g.UseRegister(right), cont);
1839  }
1840  }
1841  } else if (g.CanBeImmediate(left, opcode)) {
1842  if (!commutative) cont->Commute();
1843  if (opcode == kMips64Tst) {
1844  VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1845  cont);
1846  } else {
1847  switch (cont->condition()) {
1848  case kEqual:
1849  case kNotEqual:
1850  if (cont->IsSet()) {
1851  VisitCompare(selector, opcode, g.UseRegister(right),
1852  g.UseImmediate(left), cont);
1853  } else {
1854  VisitCompare(selector, opcode, g.UseRegister(right),
1855  g.UseRegister(left), cont);
1856  }
1857  break;
1858  case kSignedLessThan:
1859  case kSignedGreaterThanOrEqual:
1860  case kUnsignedLessThan:
1861  case kUnsignedGreaterThanOrEqual:
1862  VisitCompare(selector, opcode, g.UseRegister(right),
1863  g.UseImmediate(left), cont);
1864  break;
1865  default:
1866  VisitCompare(selector, opcode, g.UseRegister(right),
1867  g.UseRegister(left), cont);
1868  }
1869  }
1870  } else {
1871  VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
1872  cont);
1873  }
1874 }
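// Note the asymmetry above: for (not-)equality the immediate form is only
// used when the continuation materializes a boolean (cont->IsSet()); the
// four less-than / greater-or-equal conditions always take the immediate;
// all other conditions fall back to registers.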
1875 
1876 bool IsNodeUnsigned(Node* n) {
1877  NodeMatcher m(n);
1878 
1879  if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
1880  m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
1881  LoadRepresentation load_rep = LoadRepresentationOf(n->op());
1882  return load_rep.IsUnsigned();
1883  } else {
1884  return m.IsUint32Div() || m.IsUint32LessThan() ||
1885  m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
1886  m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
1887  m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
1888  }
1889 }
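// A node is treated as unsigned if it is a load with an unsigned
// representation or an inherently unsigned operation; VisitWord32Compare
// below uses this to detect mixed signed/unsigned comparisons.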
1890 
1891 // Emulates a full Word32 compare by shifting both operands left by 32 bits.
1892 void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
1893  InstructionCode opcode, FlagsContinuation* cont) {
1894  Mips64OperandGenerator g(selector);
1895  InstructionOperand leftOp = g.TempRegister();
1896  InstructionOperand rightOp = g.TempRegister();
1897 
1898  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
1899  g.TempImmediate(32));
1900  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
1901  g.TempImmediate(32));
1902 
1903  VisitCompare(selector, opcode, leftOp, rightOp, cont);
1904 }
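// Example: a word32 value 0xFFFFFFFF is 0xFFFFFFFFFFFFFFFF in a register
// when sign-extended but 0x00000000FFFFFFFF when zero-extended; after
// dshl 32 both become 0xFFFFFFFF00000000, so the compare no longer depends
// on how the upper 32 bits were extended.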
1905 
1906 void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
1907  InstructionCode opcode,
1908  FlagsContinuation* cont) {
1909  if (FLAG_debug_code) {
1910  Mips64OperandGenerator g(selector);
1911  InstructionOperand leftOp = g.TempRegister();
1912  InstructionOperand rightOp = g.TempRegister();
1913  InstructionOperand optimizedResult = g.TempRegister();
1914  InstructionOperand fullResult = g.TempRegister();
1915  FlagsCondition condition = cont->condition();
1916  InstructionCode testOpcode = opcode |
1917  FlagsConditionField::encode(condition) |
1918  FlagsModeField::encode(kFlags_set);
1919 
1920  selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
1921  g.UseRegister(node->InputAt(1)));
1922 
1923  selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
1924  g.TempImmediate(32));
1925  selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
1926  g.TempImmediate(32));
1927  selector->Emit(testOpcode, fullResult, leftOp, rightOp);
1928 
1929  selector->Emit(
1930  kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
1931  g.TempImmediate(
1932  static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
1933  }
1934 
1935  VisitWordCompare(selector, node, opcode, cont, false);
1936 }
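// With FLAG_debug_code the optimized (64-bit) compare result is checked
// against the full Word32 compare result, and kMips64AssertEqual aborts
// with kUnsupportedNonPrimitiveCompare if the two ever disagree.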
1937 
1938 void VisitWord32Compare(InstructionSelector* selector, Node* node,
1939  FlagsContinuation* cont) {
1940  // MIPS64 has no Word32 compare instructions. Instead, it relies on the
1941  // values in registers being correctly sign-extended and uses a Word64
1942  // comparison instead. This behavior is correct in most cases, but it
1943  // breaks when comparing a signed operand with an unsigned one.
1944  // We could simulate a full Word32 compare in all cases, but that would
1945  // add unnecessary overhead, since unsigned integers are rarely used in
1946  // JavaScript.
1947  // The approach taken here is to detect a comparison that mixes a signed
1948  // and an unsigned operand, and to perform the full Word32 compare only
1949  // in that case. Unfortunately, this heuristic is not complete: it may
1950  // miss cases where a full Word32 compare is needed, so it is essentially
1951  // a hack.
1952  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
1953  VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
1954  } else {
1955  VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
1956  }
1957 }
1958 
1959 void VisitWord64Compare(InstructionSelector* selector, Node* node,
1960  FlagsContinuation* cont) {
1961  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
1962 }
1963 
1964 void EmitWordCompareZero(InstructionSelector* selector, Node* value,
1965  FlagsContinuation* cont) {
1966  Mips64OperandGenerator g(selector);
1967  selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
1968  g.TempImmediate(0), cont);
1969 }
1970 
1971 void VisitAtomicLoad(InstructionSelector* selector, Node* node,
1972  ArchOpcode opcode) {
1973  Mips64OperandGenerator g(selector);
1974  Node* base = node->InputAt(0);
1975  Node* index = node->InputAt(1);
1976  if (g.CanBeImmediate(index, opcode)) {
1977  selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
1978  g.DefineAsRegister(node), g.UseRegister(base),
1979  g.UseImmediate(index));
1980  } else {
1981  InstructionOperand addr_reg = g.TempRegister();
1982  selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
1983  addr_reg, g.UseRegister(index), g.UseRegister(base));
1984  // Emit desired load opcode, using temp addr_reg.
1985  selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
1986  g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1987  }
1988 }
1989 
1990 void VisitAtomicStore(InstructionSelector* selector, Node* node,
1991  ArchOpcode opcode) {
1992  Mips64OperandGenerator g(selector);
1993  Node* base = node->InputAt(0);
1994  Node* index = node->InputAt(1);
1995  Node* value = node->InputAt(2);
1996 
1997  if (g.CanBeImmediate(index, opcode)) {
1998  selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
1999  g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
2000  g.UseRegisterOrImmediateZero(value));
2001  } else {
2002  InstructionOperand addr_reg = g.TempRegister();
2003  selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
2004  addr_reg, g.UseRegister(index), g.UseRegister(base));
2005  // Emit desired store opcode, using temp addr_reg.
2006  selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
2007  g.NoOutput(), addr_reg, g.TempImmediate(0),
2008  g.UseRegisterOrImmediateZero(value));
2009  }
2010 }
2011 
2012 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
2013  ArchOpcode opcode) {
2014  Mips64OperandGenerator g(selector);
2015  Node* base = node->InputAt(0);
2016  Node* index = node->InputAt(1);
2017  Node* value = node->InputAt(2);
2018 
2019  AddressingMode addressing_mode = kMode_MRI;
2020  InstructionOperand inputs[3];
2021  size_t input_count = 0;
2022  inputs[input_count++] = g.UseUniqueRegister(base);
2023  inputs[input_count++] = g.UseUniqueRegister(index);
2024  inputs[input_count++] = g.UseUniqueRegister(value);
2025  InstructionOperand outputs[1];
2026  outputs[0] = g.UseUniqueRegister(node);
2027  InstructionOperand temp[3];
2028  temp[0] = g.TempRegister();
2029  temp[1] = g.TempRegister();
2030  temp[2] = g.TempRegister();
2031  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2032  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2033 }
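// All operands use unique registers because the exchange is presumably
// expanded to a load-linked/store-conditional retry loop in the code
// generator, which reads its inputs again after the temps have been written.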
2034 
2035 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
2036  ArchOpcode opcode) {
2037  Mips64OperandGenerator g(selector);
2038  Node* base = node->InputAt(0);
2039  Node* index = node->InputAt(1);
2040  Node* old_value = node->InputAt(2);
2041  Node* new_value = node->InputAt(3);
2042 
2043  AddressingMode addressing_mode = kMode_MRI;
2044  InstructionOperand inputs[4];
2045  size_t input_count = 0;
2046  inputs[input_count++] = g.UseUniqueRegister(base);
2047  inputs[input_count++] = g.UseUniqueRegister(index);
2048  inputs[input_count++] = g.UseUniqueRegister(old_value);
2049  inputs[input_count++] = g.UseUniqueRegister(new_value);
2050  InstructionOperand outputs[1];
2051  outputs[0] = g.UseUniqueRegister(node);
2052  InstructionOperand temp[3];
2053  temp[0] = g.TempRegister();
2054  temp[1] = g.TempRegister();
2055  temp[2] = g.TempRegister();
2056  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2057  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
2058 }
2059 
2060 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
2061  ArchOpcode opcode) {
2062  Mips64OperandGenerator g(selector);
2063  Node* base = node->InputAt(0);
2064  Node* index = node->InputAt(1);
2065  Node* value = node->InputAt(2);
2066 
2067  AddressingMode addressing_mode = kMode_MRI;
2068  InstructionOperand inputs[3];
2069  size_t input_count = 0;
2070  inputs[input_count++] = g.UseUniqueRegister(base);
2071  inputs[input_count++] = g.UseUniqueRegister(index);
2072  inputs[input_count++] = g.UseUniqueRegister(value);
2073  InstructionOperand outputs[1];
2074  outputs[0] = g.UseUniqueRegister(node);
2075  InstructionOperand temps[4];
2076  temps[0] = g.TempRegister();
2077  temps[1] = g.TempRegister();
2078  temps[2] = g.TempRegister();
2079  temps[3] = g.TempRegister();
2080  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2081  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
2082 }
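// Same shape as VisitAtomicExchange above, with one extra temp register,
// presumably used to hold the intermediate binop result inside the
// load-linked/store-conditional loop.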
2083 
2084 } // namespace
2085 
2086 // Shared routine for word comparisons against zero.
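// Equality comparisons against zero are folded into the continuation by
// negating it, so e.g. ((a < b) == 0) is compiled as a branch on a >= b.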
2087 void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
2088  FlagsContinuation* cont) {
2089  // Try to combine with comparisons against 0 by simply inverting the branch.
2090  while (CanCover(user, value)) {
2091  if (value->opcode() == IrOpcode::kWord32Equal) {
2092  Int32BinopMatcher m(value);
2093  if (!m.right().Is(0)) break;
2094  user = value;
2095  value = m.left().node();
2096  } else if (value->opcode() == IrOpcode::kWord64Equal) {
2097  Int64BinopMatcher m(value);
2098  if (!m.right().Is(0)) break;
2099  user = value;
2100  value = m.left().node();
2101  } else {
2102  break;
2103  }
2104 
2105  cont->Negate();
2106  }
2107 
2108  if (CanCover(user, value)) {
2109  switch (value->opcode()) {
2110  case IrOpcode::kWord32Equal:
2111  cont->OverwriteAndNegateIfEqual(kEqual);
2112  return VisitWord32Compare(this, value, cont);
2113  case IrOpcode::kInt32LessThan:
2114  cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2115  return VisitWord32Compare(this, value, cont);
2116  case IrOpcode::kInt32LessThanOrEqual:
2117  cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2118  return VisitWord32Compare(this, value, cont);
2119  case IrOpcode::kUint32LessThan:
2120  cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2121  return VisitWord32Compare(this, value, cont);
2122  case IrOpcode::kUint32LessThanOrEqual:
2123  cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2124  return VisitWord32Compare(this, value, cont);
2125  case IrOpcode::kWord64Equal:
2126  cont->OverwriteAndNegateIfEqual(kEqual);
2127  return VisitWord64Compare(this, value, cont);
2128  case IrOpcode::kInt64LessThan:
2129  cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2130  return VisitWord64Compare(this, value, cont);
2131  case IrOpcode::kInt64LessThanOrEqual:
2132  cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2133  return VisitWord64Compare(this, value, cont);
2134  case IrOpcode::kUint64LessThan:
2135  cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2136  return VisitWord64Compare(this, value, cont);
2137  case IrOpcode::kUint64LessThanOrEqual:
2138  cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2139  return VisitWord64Compare(this, value, cont);
2140  case IrOpcode::kFloat32Equal:
2141  cont->OverwriteAndNegateIfEqual(kEqual);
2142  return VisitFloat32Compare(this, value, cont);
2143  case IrOpcode::kFloat32LessThan:
2144  cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2145  return VisitFloat32Compare(this, value, cont);
2146  case IrOpcode::kFloat32LessThanOrEqual:
2147  cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2148  return VisitFloat32Compare(this, value, cont);
2149  case IrOpcode::kFloat64Equal:
2150  cont->OverwriteAndNegateIfEqual(kEqual);
2151  return VisitFloat64Compare(this, value, cont);
2152  case IrOpcode::kFloat64LessThan:
2153  cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2154  return VisitFloat64Compare(this, value, cont);
2155  case IrOpcode::kFloat64LessThanOrEqual:
2156  cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2157  return VisitFloat64Compare(this, value, cont);
2158  case IrOpcode::kProjection:
2159  // Check if this is the overflow output projection of an
2160  // <Operation>WithOverflow node.
2161  if (ProjectionIndexOf(value->op()) == 1u) {
2162  // We can only combine the <Operation>WithOverflow with this branch
2163  // if the 0th projection (the use of the actual value of the
2164  // <Operation>) is either nullptr, which means there's no use of the
2165  // actual value, or was already defined, which means it is scheduled
2166  // *AFTER* this branch.
2167  Node* const node = value->InputAt(0);
2168  Node* const result = NodeProperties::FindProjection(node, 0);
2169  if (result == nullptr || IsDefined(result)) {
2170  switch (node->opcode()) {
2171  case IrOpcode::kInt32AddWithOverflow:
2172  cont->OverwriteAndNegateIfEqual(kOverflow);
2173  return VisitBinop(this, node, kMips64Dadd, cont);
2174  case IrOpcode::kInt32SubWithOverflow:
2175  cont->OverwriteAndNegateIfEqual(kOverflow);
2176  return VisitBinop(this, node, kMips64Dsub, cont);
2177  case IrOpcode::kInt32MulWithOverflow:
2178  cont->OverwriteAndNegateIfEqual(kOverflow);
2179  return VisitBinop(this, node, kMips64MulOvf, cont);
2180  case IrOpcode::kInt64AddWithOverflow:
2181  cont->OverwriteAndNegateIfEqual(kOverflow);
2182  return VisitBinop(this, node, kMips64DaddOvf, cont);
2183  case IrOpcode::kInt64SubWithOverflow:
2184  cont->OverwriteAndNegateIfEqual(kOverflow);
2185  return VisitBinop(this, node, kMips64DsubOvf, cont);
2186  default:
2187  break;
2188  }
2189  }
2190  }
2191  break;
2192  case IrOpcode::kWord32And:
2193  case IrOpcode::kWord64And:
2194  return VisitWordCompare(this, value, kMips64Tst, cont, true);
2195  default:
2196  break;
2197  }
2198  }
2199 
2200  // The continuation could not be combined with a compare; emit a compare against 0.
2201  EmitWordCompareZero(this, value, cont);
2202 }
2203 
2204 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
2205  Mips64OperandGenerator g(this);
2206  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
2207 
2208  // Emit either a table switch or a binary search switch.
2209  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
2210  static const size_t kMaxTableSwitchValueRange = 2 << 16;
2211  size_t table_space_cost = 10 + 2 * sw.value_range();
2212  size_t table_time_cost = 3;
2213  size_t lookup_space_cost = 2 + 2 * sw.case_count();
2214  size_t lookup_time_cost = sw.case_count();
2215  if (sw.case_count() > 0 &&
2216  table_space_cost + 3 * table_time_cost <=
2217  lookup_space_cost + 3 * lookup_time_cost &&
2218  sw.min_value() > std::numeric_limits<int32_t>::min() &&
2219  sw.value_range() <= kMaxTableSwitchValueRange) {
2220  InstructionOperand index_operand = value_operand;
2221  if (sw.min_value()) {
2222  index_operand = g.TempRegister();
2223  Emit(kMips64Sub, index_operand, value_operand,
2224  g.TempImmediate(sw.min_value()));
2225  }
2226  // Generate a table lookup.
2227  return EmitTableSwitch(sw, index_operand);
2228  }
2229  }
2230 
2231  // Generate a tree of conditional jumps.
2232  return EmitBinarySearchSwitch(sw, value_operand);
2233 }
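// Worked example of the heuristic above, assuming case_count = 8 and
// value_range = 10 (and the range checks pass): table cost =
// (10 + 2*10) + 3*3 = 39, lookup cost = (2 + 2*8) + 3*8 = 42,
// so 39 <= 42 and the jump table wins.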
2234 
2235 void InstructionSelector::VisitWord32Equal(Node* const node) {
2236  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2237  Int32BinopMatcher m(node);
2238  if (m.right().Is(0)) {
2239  return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2240  }
2241 
2242  VisitWord32Compare(this, node, &cont);
2243 }
2244 
2245 void InstructionSelector::VisitInt32LessThan(Node* node) {
2246  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2247  VisitWord32Compare(this, node, &cont);
2248 }
2249 
2250 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2251  FlagsContinuation cont =
2252  FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2253  VisitWord32Compare(this, node, &cont);
2254 }
2255 
2256 void InstructionSelector::VisitUint32LessThan(Node* node) {
2257  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2258  VisitWord32Compare(this, node, &cont);
2259 }
2260 
2261 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2262  FlagsContinuation cont =
2263  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2264  VisitWord32Compare(this, node, &cont);
2265 }
2266 
2267 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2268  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2269  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2270  return VisitBinop(this, node, kMips64Dadd, &cont);
2271  }
2272  FlagsContinuation cont;
2273  VisitBinop(this, node, kMips64Dadd, &cont);
2274 }
2275 
2276 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2277  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2278  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2279  return VisitBinop(this, node, kMips64Dsub, &cont);
2280  }
2281  FlagsContinuation cont;
2282  VisitBinop(this, node, kMips64Dsub, &cont);
2283 }
2284 
2285 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2286  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2287  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2288  return VisitBinop(this, node, kMips64MulOvf, &cont);
2289  }
2290  FlagsContinuation cont;
2291  VisitBinop(this, node, kMips64MulOvf, &cont);
2292 }
2293 
2294 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2295  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2296  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2297  return VisitBinop(this, node, kMips64DaddOvf, &cont);
2298  }
2299  FlagsContinuation cont;
2300  VisitBinop(this, node, kMips64DaddOvf, &cont);
2301 }
2302 
2303 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2304  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2305  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2306  return VisitBinop(this, node, kMips64DsubOvf, &cont);
2307  }
2308  FlagsContinuation cont;
2309  VisitBinop(this, node, kMips64DsubOvf, &cont);
2310 }
2311 
2312 void InstructionSelector::VisitWord64Equal(Node* const node) {
2313  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2314  Int64BinopMatcher m(node);
2315  if (m.right().Is(0)) {
2316  return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2317  }
2318 
2319  VisitWord64Compare(this, node, &cont);
2320 }
2321 
2322 void InstructionSelector::VisitInt64LessThan(Node* node) {
2323  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2324  VisitWord64Compare(this, node, &cont);
2325 }
2326 
2327 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2328  FlagsContinuation cont =
2329  FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2330  VisitWord64Compare(this, node, &cont);
2331 }
2332 
2333 void InstructionSelector::VisitUint64LessThan(Node* node) {
2334  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2335  VisitWord64Compare(this, node, &cont);
2336 }
2337 
2338 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2339  FlagsContinuation cont =
2340  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2341  VisitWord64Compare(this, node, &cont);
2342 }
2343 
2344 void InstructionSelector::VisitFloat32Equal(Node* node) {
2345  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2346  VisitFloat32Compare(this, node, &cont);
2347 }
2348 
2349 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2350  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2351  VisitFloat32Compare(this, node, &cont);
2352 }
2353 
2354 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2355  FlagsContinuation cont =
2356  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2357  VisitFloat32Compare(this, node, &cont);
2358 }
2359 
2360 void InstructionSelector::VisitFloat64Equal(Node* node) {
2361  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2362  VisitFloat64Compare(this, node, &cont);
2363 }
2364 
2365 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2366  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2367  VisitFloat64Compare(this, node, &cont);
2368 }
2369 
2370 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2371  FlagsContinuation cont =
2372  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2373  VisitFloat64Compare(this, node, &cont);
2374 }
2375 
2376 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
2377  VisitRR(this, kMips64Float64ExtractLowWord32, node);
2378 }
2379 
2380 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
2381  VisitRR(this, kMips64Float64ExtractHighWord32, node);
2382 }
2383 
2384 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
2385  VisitRR(this, kMips64Float64SilenceNaN, node);
2386 }
2387 
2388 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2389  Mips64OperandGenerator g(this);
2390  Node* left = node->InputAt(0);
2391  Node* right = node->InputAt(1);
2392  Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
2393  g.UseRegister(left), g.UseRegister(right));
2394 }
2395 
2396 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2397  Mips64OperandGenerator g(this);
2398  Node* left = node->InputAt(0);
2399  Node* right = node->InputAt(1);
2400  Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
2401  g.UseRegister(left), g.UseRegister(right));
2402 }
2403 
2404 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2405  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2406  ArchOpcode opcode = kArchNop;
2407  switch (load_rep.representation()) {
2408  case MachineRepresentation::kWord8:
2409  opcode =
2410  load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
2411  break;
2412  case MachineRepresentation::kWord16:
2413  opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
2414  : kWord32AtomicLoadUint16;
2415  break;
2416  case MachineRepresentation::kWord32:
2417  opcode = kWord32AtomicLoadWord32;
2418  break;
2419  default:
2420  UNREACHABLE();
2421  return;
2422  }
2423  VisitAtomicLoad(this, node, opcode);
2424 }
2425 
2426 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2427  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2428  ArchOpcode opcode = kArchNop;
2429  switch (rep) {
2430  case MachineRepresentation::kWord8:
2431  opcode = kWord32AtomicStoreWord8;
2432  break;
2433  case MachineRepresentation::kWord16:
2434  opcode = kWord32AtomicStoreWord16;
2435  break;
2436  case MachineRepresentation::kWord32:
2437  opcode = kWord32AtomicStoreWord32;
2438  break;
2439  default:
2440  UNREACHABLE();
2441  return;
2442  }
2443 
2444  VisitAtomicStore(this, node, opcode);
2445 }
2446 
2447 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2448  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2449  ArchOpcode opcode = kArchNop;
2450  switch (load_rep.representation()) {
2451  case MachineRepresentation::kWord8:
2452  opcode = kMips64Word64AtomicLoadUint8;
2453  break;
2454  case MachineRepresentation::kWord16:
2455  opcode = kMips64Word64AtomicLoadUint16;
2456  break;
2457  case MachineRepresentation::kWord32:
2458  opcode = kMips64Word64AtomicLoadUint32;
2459  break;
2460  case MachineRepresentation::kWord64:
2461  opcode = kMips64Word64AtomicLoadUint64;
2462  break;
2463  default:
2464  UNREACHABLE();
2465  return;
2466  }
2467  VisitAtomicLoad(this, node, opcode);
2468 }
2469 
2470 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2471  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2472  ArchOpcode opcode = kArchNop;
2473  switch (rep) {
2474  case MachineRepresentation::kWord8:
2475  opcode = kMips64Word64AtomicStoreWord8;
2476  break;
2477  case MachineRepresentation::kWord16:
2478  opcode = kMips64Word64AtomicStoreWord16;
2479  break;
2480  case MachineRepresentation::kWord32:
2481  opcode = kMips64Word64AtomicStoreWord32;
2482  break;
2483  case MachineRepresentation::kWord64:
2484  opcode = kMips64Word64AtomicStoreWord64;
2485  break;
2486  default:
2487  UNREACHABLE();
2488  return;
2489  }
2490 
2491  VisitAtomicStore(this, node, opcode);
2492 }
2493 
2494 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2495  ArchOpcode opcode = kArchNop;
2496  MachineType type = AtomicOpType(node->op());
2497  if (type == MachineType::Int8()) {
2498  opcode = kWord32AtomicExchangeInt8;
2499  } else if (type == MachineType::Uint8()) {
2500  opcode = kWord32AtomicExchangeUint8;
2501  } else if (type == MachineType::Int16()) {
2502  opcode = kWord32AtomicExchangeInt16;
2503  } else if (type == MachineType::Uint16()) {
2504  opcode = kWord32AtomicExchangeUint16;
2505  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2506  opcode = kWord32AtomicExchangeWord32;
2507  } else {
2508  UNREACHABLE();
2509  return;
2510  }
2511 
2512  VisitAtomicExchange(this, node, opcode);
2513 }
2514 
2515 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2516  ArchOpcode opcode = kArchNop;
2517  MachineType type = AtomicOpType(node->op());
2518  if (type == MachineType::Uint8()) {
2519  opcode = kMips64Word64AtomicExchangeUint8;
2520  } else if (type == MachineType::Uint16()) {
2521  opcode = kMips64Word64AtomicExchangeUint16;
2522  } else if (type == MachineType::Uint32()) {
2523  opcode = kMips64Word64AtomicExchangeUint32;
2524  } else if (type == MachineType::Uint64()) {
2525  opcode = kMips64Word64AtomicExchangeUint64;
2526  } else {
2527  UNREACHABLE();
2528  return;
2529  }
2530  VisitAtomicExchange(this, node, opcode);
2531 }
2532 
2533 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2534  ArchOpcode opcode = kArchNop;
2535  MachineType type = AtomicOpType(node->op());
2536  if (type == MachineType::Int8()) {
2537  opcode = kWord32AtomicCompareExchangeInt8;
2538  } else if (type == MachineType::Uint8()) {
2539  opcode = kWord32AtomicCompareExchangeUint8;
2540  } else if (type == MachineType::Int16()) {
2541  opcode = kWord32AtomicCompareExchangeInt16;
2542  } else if (type == MachineType::Uint16()) {
2543  opcode = kWord32AtomicCompareExchangeUint16;
2544  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2545  opcode = kWord32AtomicCompareExchangeWord32;
2546  } else {
2547  UNREACHABLE();
2548  return;
2549  }
2550 
2551  VisitAtomicCompareExchange(this, node, opcode);
2552 }
2553 
2554 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2555  ArchOpcode opcode = kArchNop;
2556  MachineType type = AtomicOpType(node->op());
2557  if (type == MachineType::Uint8()) {
2558  opcode = kMips64Word64AtomicCompareExchangeUint8;
2559  } else if (type == MachineType::Uint16()) {
2560  opcode = kMips64Word64AtomicCompareExchangeUint16;
2561  } else if (type == MachineType::Uint32()) {
2562  opcode = kMips64Word64AtomicCompareExchangeUint32;
2563  } else if (type == MachineType::Uint64()) {
2564  opcode = kMips64Word64AtomicCompareExchangeUint64;
2565  } else {
2566  UNREACHABLE();
2567  return;
2568  }
2569  VisitAtomicCompareExchange(this, node, opcode);
2570 }
2571 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2572  Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2573  ArchOpcode uint16_op, ArchOpcode word32_op) {
2574  ArchOpcode opcode = kArchNop;
2575  MachineType type = AtomicOpType(node->op());
2576  if (type == MachineType::Int8()) {
2577  opcode = int8_op;
2578  } else if (type == MachineType::Uint8()) {
2579  opcode = uint8_op;
2580  } else if (type == MachineType::Int16()) {
2581  opcode = int16_op;
2582  } else if (type == MachineType::Uint16()) {
2583  opcode = uint16_op;
2584  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2585  opcode = word32_op;
2586  } else {
2587  UNREACHABLE();
2588  return;
2589  }
2590 
2591  VisitAtomicBinop(this, node, opcode);
2592 }
2593 
2594 #define VISIT_ATOMIC_BINOP(op) \
2595  void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
2596  VisitWord32AtomicBinaryOperation( \
2597  node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
2598  kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
2599  kWord32Atomic##op##Word32); \
2600  }
2601 VISIT_ATOMIC_BINOP(Add)
2602 VISIT_ATOMIC_BINOP(Sub)
2603 VISIT_ATOMIC_BINOP(And)
2604 VISIT_ATOMIC_BINOP(Or)
2605 VISIT_ATOMIC_BINOP(Xor)
2606 #undef VISIT_ATOMIC_BINOP
2607 
2608 void InstructionSelector::VisitWord64AtomicBinaryOperation(
2609  Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
2610  ArchOpcode uint64_op) {
2611  ArchOpcode opcode = kArchNop;
2612  MachineType type = AtomicOpType(node->op());
2613  if (type == MachineType::Uint8()) {
2614  opcode = uint8_op;
2615  } else if (type == MachineType::Uint16()) {
2616  opcode = uint16_op;
2617  } else if (type == MachineType::Uint32()) {
2618  opcode = uint32_op;
2619  } else if (type == MachineType::Uint64()) {
2620  opcode = uint64_op;
2621  } else {
2622  UNREACHABLE();
2623  return;
2624  }
2625  VisitAtomicBinop(this, node, opcode);
2626 }
2627 
2628 #define VISIT_ATOMIC_BINOP(op) \
2629  void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
2630  VisitWord64AtomicBinaryOperation( \
2631  node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
2632  kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
2633  }
2634 VISIT_ATOMIC_BINOP(Add)
2635 VISIT_ATOMIC_BINOP(Sub)
2636 VISIT_ATOMIC_BINOP(And)
2637 VISIT_ATOMIC_BINOP(Or)
2638 VISIT_ATOMIC_BINOP(Xor)
2639 #undef VISIT_ATOMIC_BINOP
2640 
2641 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
2642  UNREACHABLE();
2643 }
2644 
2645 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
2646  UNREACHABLE();
2647 }
2648 
2649 void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
2650 
2651 #define SIMD_TYPE_LIST(V) \
2652  V(F32x4) \
2653  V(I32x4) \
2654  V(I16x8) \
2655  V(I8x16)
2656 
2657 #define SIMD_UNOP_LIST(V) \
2658  V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
2659  V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
2660  V(F32x4Abs, kMips64F32x4Abs) \
2661  V(F32x4Neg, kMips64F32x4Neg) \
2662  V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
2663  V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
2664  V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
2665  V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
2666  V(I32x4Neg, kMips64I32x4Neg) \
2667  V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
2668  V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
2669  V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
2670  V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
2671  V(I16x8Neg, kMips64I16x8Neg) \
2672  V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
2673  V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
2674  V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
2675  V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
2676  V(I8x16Neg, kMips64I8x16Neg) \
2677  V(S128Not, kMips64S128Not) \
2678  V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
2679  V(S1x4AllTrue, kMips64S1x4AllTrue) \
2680  V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
2681  V(S1x8AllTrue, kMips64S1x8AllTrue) \
2682  V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
2683  V(S1x16AllTrue, kMips64S1x16AllTrue)
2684 
2685 #define SIMD_SHIFT_OP_LIST(V) \
2686  V(I32x4Shl) \
2687  V(I32x4ShrS) \
2688  V(I32x4ShrU) \
2689  V(I16x8Shl) \
2690  V(I16x8ShrS) \
2691  V(I16x8ShrU) \
2692  V(I8x16Shl) \
2693  V(I8x16ShrS) \
2694  V(I8x16ShrU)
2695 
2696 #define SIMD_BINOP_LIST(V) \
2697  V(F32x4Add, kMips64F32x4Add) \
2698  V(F32x4AddHoriz, kMips64F32x4AddHoriz) \
2699  V(F32x4Sub, kMips64F32x4Sub) \
2700  V(F32x4Mul, kMips64F32x4Mul) \
2701  V(F32x4Max, kMips64F32x4Max) \
2702  V(F32x4Min, kMips64F32x4Min) \
2703  V(F32x4Eq, kMips64F32x4Eq) \
2704  V(F32x4Ne, kMips64F32x4Ne) \
2705  V(F32x4Lt, kMips64F32x4Lt) \
2706  V(F32x4Le, kMips64F32x4Le) \
2707  V(I32x4Add, kMips64I32x4Add) \
2708  V(I32x4AddHoriz, kMips64I32x4AddHoriz) \
2709  V(I32x4Sub, kMips64I32x4Sub) \
2710  V(I32x4Mul, kMips64I32x4Mul) \
2711  V(I32x4MaxS, kMips64I32x4MaxS) \
2712  V(I32x4MinS, kMips64I32x4MinS) \
2713  V(I32x4MaxU, kMips64I32x4MaxU) \
2714  V(I32x4MinU, kMips64I32x4MinU) \
2715  V(I32x4Eq, kMips64I32x4Eq) \
2716  V(I32x4Ne, kMips64I32x4Ne) \
2717  V(I32x4GtS, kMips64I32x4GtS) \
2718  V(I32x4GeS, kMips64I32x4GeS) \
2719  V(I32x4GtU, kMips64I32x4GtU) \
2720  V(I32x4GeU, kMips64I32x4GeU) \
2721  V(I16x8Add, kMips64I16x8Add) \
2722  V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
2723  V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
2724  V(I16x8AddHoriz, kMips64I16x8AddHoriz) \
2725  V(I16x8Sub, kMips64I16x8Sub) \
2726  V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
2727  V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
2728  V(I16x8Mul, kMips64I16x8Mul) \
2729  V(I16x8MaxS, kMips64I16x8MaxS) \
2730  V(I16x8MinS, kMips64I16x8MinS) \
2731  V(I16x8MaxU, kMips64I16x8MaxU) \
2732  V(I16x8MinU, kMips64I16x8MinU) \
2733  V(I16x8Eq, kMips64I16x8Eq) \
2734  V(I16x8Ne, kMips64I16x8Ne) \
2735  V(I16x8GtS, kMips64I16x8GtS) \
2736  V(I16x8GeS, kMips64I16x8GeS) \
2737  V(I16x8GtU, kMips64I16x8GtU) \
2738  V(I16x8GeU, kMips64I16x8GeU) \
2739  V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
2740  V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
2741  V(I8x16Add, kMips64I8x16Add) \
2742  V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
2743  V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
2744  V(I8x16Sub, kMips64I8x16Sub) \
2745  V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
2746  V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
2747  V(I8x16Mul, kMips64I8x16Mul) \
2748  V(I8x16MaxS, kMips64I8x16MaxS) \
2749  V(I8x16MinS, kMips64I8x16MinS) \
2750  V(I8x16MaxU, kMips64I8x16MaxU) \
2751  V(I8x16MinU, kMips64I8x16MinU) \
2752  V(I8x16Eq, kMips64I8x16Eq) \
2753  V(I8x16Ne, kMips64I8x16Ne) \
2754  V(I8x16GtS, kMips64I8x16GtS) \
2755  V(I8x16GeS, kMips64I8x16GeS) \
2756  V(I8x16GtU, kMips64I8x16GtU) \
2757  V(I8x16GeU, kMips64I8x16GeU) \
2758  V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
2759  V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
2760  V(S128And, kMips64S128And) \
2761  V(S128Or, kMips64S128Or) \
2762  V(S128Xor, kMips64S128Xor)
2763 
2764 void InstructionSelector::VisitS128Zero(Node* node) {
2765  Mips64OperandGenerator g(this);
2766  Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
2767 }
2768 
2769 #define SIMD_VISIT_SPLAT(Type) \
2770  void InstructionSelector::Visit##Type##Splat(Node* node) { \
2771  VisitRR(this, kMips64##Type##Splat, node); \
2772  }
2773 SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
2774 #undef SIMD_VISIT_SPLAT
2775 
2776 #define SIMD_VISIT_EXTRACT_LANE(Type) \
2777  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
2778  VisitRRI(this, kMips64##Type##ExtractLane, node); \
2779  }
2780 SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
2781 #undef SIMD_VISIT_EXTRACT_LANE
2782 
2783 #define SIMD_VISIT_REPLACE_LANE(Type) \
2784  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
2785  VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
2786  }
2787 SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
2788 #undef SIMD_VISIT_REPLACE_LANE
2789 
2790 #define SIMD_VISIT_UNOP(Name, instruction) \
2791  void InstructionSelector::Visit##Name(Node* node) { \
2792  VisitRR(this, instruction, node); \
2793  }
2794 SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
2795 #undef SIMD_VISIT_UNOP
2796 
2797 #define SIMD_VISIT_SHIFT_OP(Name) \
2798  void InstructionSelector::Visit##Name(Node* node) { \
2799  VisitRRI(this, kMips64##Name, node); \
2800  }
2801 SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
2802 #undef SIMD_VISIT_SHIFT_OP
2803 
2804 #define SIMD_VISIT_BINOP(Name, instruction) \
2805  void InstructionSelector::Visit##Name(Node* node) { \
2806  VisitRRR(this, instruction, node); \
2807  }
2808 SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
2809 #undef SIMD_VISIT_BINOP
2810 
2811 void InstructionSelector::VisitS128Select(Node* node) {
2812  VisitRRRR(this, kMips64S128Select, node);
2813 }
2814 
2815 namespace {
2816 
2817 struct ShuffleEntry {
2818  uint8_t shuffle[kSimd128Size];
2819  ArchOpcode opcode;
2820 };
2821 
2822 static const ShuffleEntry arch_shuffles[] = {
2823  {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
2824  kMips64S32x4InterleaveRight},
2825  {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
2826  kMips64S32x4InterleaveLeft},
2827  {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
2828  kMips64S32x4PackEven},
2829  {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
2830  kMips64S32x4PackOdd},
2831  {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
2832  kMips64S32x4InterleaveEven},
2833  {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
2834  kMips64S32x4InterleaveOdd},
2835 
2836  {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
2837  kMips64S16x8InterleaveRight},
2838  {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
2839  kMips64S16x8InterleaveLeft},
2840  {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
2841  kMips64S16x8PackEven},
2842  {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
2843  kMips64S16x8PackOdd},
2844  {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
2845  kMips64S16x8InterleaveEven},
2846  {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
2847  kMips64S16x8InterleaveOdd},
2848  {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
2849  kMips64S16x4Reverse},
2850  {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
2851  kMips64S16x2Reverse},
2852 
2853  {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
2854  kMips64S8x16InterleaveRight},
2855  {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
2856  kMips64S8x16InterleaveLeft},
2857  {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
2858  kMips64S8x16PackEven},
2859  {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
2860  kMips64S8x16PackOdd},
2861  {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
2862  kMips64S8x16InterleaveEven},
2863  {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
2864  kMips64S8x16InterleaveOdd},
2865  {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
2866  kMips64S8x8Reverse},
2867  {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
2868  kMips64S8x4Reverse},
2869  {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
2870  kMips64S8x2Reverse}};
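// Shuffle indices follow the wasm convention: bytes 0-15 select from the
// first input and 16-31 from the second. E.g. the first entry interleaves
// the low 32-bit lanes of both inputs (bytes 0-3 with 16-19, then 4-7 with
// 20-23).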
2871 
2872 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
2873  size_t num_entries, bool is_swizzle,
2874  ArchOpcode* opcode) {
2875  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
2876  for (size_t i = 0; i < num_entries; ++i) {
2877  const ShuffleEntry& entry = table[i];
2878  int j = 0;
2879  for (; j < kSimd128Size; ++j) {
2880  if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
2881  break;
2882  }
2883  }
2884  if (j == kSimd128Size) {
2885  *opcode = entry.opcode;
2886  return true;
2887  }
2888  }
2889  return false;
2890 }
2891 
2892 } // namespace
2893 
2894 void InstructionSelector::VisitS8x16Shuffle(Node* node) {
2895  uint8_t shuffle[kSimd128Size];
2896  bool is_swizzle;
2897  CanonicalizeShuffle(node, shuffle, &is_swizzle);
2898  uint8_t shuffle32x4[4];
2899  ArchOpcode opcode;
2900  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
2901  is_swizzle, &opcode)) {
2902  VisitRRR(this, opcode, node);
2903  return;
2904  }
2905  Node* input0 = node->InputAt(0);
2906  Node* input1 = node->InputAt(1);
2907  uint8_t offset;
2908  Mips64OperandGenerator g(this);
2909  if (TryMatchConcat(shuffle, &offset)) {
2910  Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
2911  g.UseRegister(input0), g.UseImmediate(offset));
2912  return;
2913  }
2914  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
2915  Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
2916  g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
2917  return;
2918  }
2919  Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
2920  g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
2921  g.UseImmediate(Pack4Lanes(shuffle + 4)),
2922  g.UseImmediate(Pack4Lanes(shuffle + 8)),
2923  g.UseImmediate(Pack4Lanes(shuffle + 12)));
2924 }
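// Matching order above: a dedicated shuffle pattern from the table first,
// then a byte-wise concatenation with offset, then a 32x4 lane shuffle, and
// finally the generic byte shuffle, with Pack4Lanes packing four lane
// indices into each 32-bit immediate.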
2925 
2926 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
2927  Mips64OperandGenerator g(this);
2928  Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2929 }
2930 
2931 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
2932  Mips64OperandGenerator g(this);
2933  Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2934 }
2935 
2936 void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
2937  Mips64OperandGenerator g(this);
2938  Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2939 }
2940 
2941 void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
2942  Mips64OperandGenerator g(this);
2943  Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2944 }
2945 
2946 void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
2947  Mips64OperandGenerator g(this);
2948  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
2949  g.TempImmediate(0));
2950 }
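// On MIPS64 the 32-bit sll instruction sign-extends its 32-bit result into
// the 64-bit destination register, so kMips64Shl with a zero shift amount
// is the standard idiom for sign-extending a word to 64 bits.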
2951 
2952 // static
2953 MachineOperatorBuilder::Flags
2954 InstructionSelector::SupportedMachineOperatorFlags() {
2955  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2956  return flags | MachineOperatorBuilder::kWord32Ctz |
2957  MachineOperatorBuilder::kWord64Ctz |
2958  MachineOperatorBuilder::kWord32Popcnt |
2959  MachineOperatorBuilder::kWord64Popcnt |
2960  MachineOperatorBuilder::kWord32ShiftIsSafe |
2961  MachineOperatorBuilder::kInt32DivIsSafe |
2962  MachineOperatorBuilder::kUint32DivIsSafe |
2963  MachineOperatorBuilder::kFloat64RoundDown |
2964  MachineOperatorBuilder::kFloat32RoundDown |
2965  MachineOperatorBuilder::kFloat64RoundUp |
2966  MachineOperatorBuilder::kFloat32RoundUp |
2967  MachineOperatorBuilder::kFloat64RoundTruncate |
2968  MachineOperatorBuilder::kFloat32RoundTruncate |
2969  MachineOperatorBuilder::kFloat64RoundTiesEven |
2970  MachineOperatorBuilder::kFloat32RoundTiesEven;
2971 }
2972 
2973 // static
2974 MachineOperatorBuilder::AlignmentRequirements
2975 InstructionSelector::AlignmentRequirements() {
2976  if (kArchVariant == kMips64r6) {
2977  return MachineOperatorBuilder::AlignmentRequirements::
2978  FullUnalignedAccessSupport();
2979  } else {
2980  DCHECK_EQ(kMips64r2, kArchVariant);
2981  return MachineOperatorBuilder::AlignmentRequirements::
2982  NoUnalignedAccessSupport();
2983  }
2984 }
2985 
2986 #undef SIMD_BINOP_LIST
2987 #undef SIMD_SHIFT_OP_LIST
2988 #undef SIMD_UNOP_LIST
2989 #undef SIMD_TYPE_LIST
2990 #undef TRACE_UNIMPL
2991 #undef TRACE
2992 
2993 } // namespace compiler
2994 } // namespace internal
2995 } // namespace v8