V8 API Reference, version 7.2.502.16 (as bundled with Deno 0.2.4)
Source file: src/compiler/backend/instruction-selector-mips.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/base/bits.h"
7 #include "src/compiler/backend/instruction-selector-impl.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/compiler/node-properties.h"
10 
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14 
// Debug helpers: print the current function name and line number when an
// unimplemented (TRACE_UNIMPL) or traced (TRACE) selection path is hit.
#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
19 
// Adds Mips-specific methods for generating InstructionOperands.
class MipsOperandGenerator final : public OperandGenerator {
 public:
  explicit MipsOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand if |node| is a constant that fits the
  // immediate field of |opcode| (see CanBeImmediate); otherwise a register.
  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register. Note: floats are compared by bit pattern, so only a
  // constant whose bits are all zero (+0.0) qualifies; -0.0 does not.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // True iff |node| is a 32-bit integer constant.
  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant);
  }

  // Returns the value of an Int32Constant node, widened to int64_t.
  int64_t GetIntegerConstantValue(Node* node) {
    DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
    return OpParameter<int32_t>(node->op());
  }

  // True iff |node| is a float32 or float64 constant.
  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  // Returns the value of a float constant node, widened to double.
  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  // Returns true if |node| is an Int32Constant whose value fits the
  // immediate field of the instruction selected by |opcode|.
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    Int32Matcher m(node);
    if (!m.HasValue()) return false;
    int32_t value = m.Value();
    switch (ArchOpcodeField::decode(opcode)) {
      case kMipsShl:
      case kMipsSar:
      case kMipsShr:
        // Shift amounts occupy a 5-bit field.
        return is_uint5(value);
      case kMipsAdd:
      case kMipsAnd:
      case kMipsOr:
      case kMipsTst:
      case kMipsSub:
      case kMipsXor:
        return is_uint16(value);
      case kMipsLb:
      case kMipsLbu:
      case kMipsSb:
      case kMipsLh:
      case kMipsLhu:
      case kMipsSh:
      case kMipsLw:
      case kMipsSw:
      case kMipsLwc1:
      case kMipsSwc1:
      case kMipsLdc1:
      case kMipsSdc1:
        // true even for 32b values, offsets > 16b
        // are handled in assembler-mips.cc
        return is_int32(value);
      default:
        return is_int16(value);
    }
  }

 private:
  // Unused on MIPS; kept to satisfy the shared interface shape.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};
108 
109 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
110  Node* node) {
111  MipsOperandGenerator g(selector);
112  selector->Emit(opcode, g.DefineAsRegister(node),
113  g.UseRegister(node->InputAt(0)),
114  g.UseRegister(node->InputAt(1)));
115 }
116 
117 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
118  MipsOperandGenerator g(selector);
119  selector->Emit(
120  opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
121  g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
122 }
123 
124 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
125  Node* node) {
126  MipsOperandGenerator g(selector);
127  selector->Emit(opcode, g.DefineAsRegister(node),
128  g.UseRegister(node->InputAt(0)));
129 }
130 
131 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
132  Node* node) {
133  MipsOperandGenerator g(selector);
134  int32_t imm = OpParameter<int32_t>(node->op());
135  selector->Emit(opcode, g.DefineAsRegister(node),
136  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
137 }
138 
139 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
140  Node* node) {
141  MipsOperandGenerator g(selector);
142  int32_t imm = OpParameter<int32_t>(node->op());
143  selector->Emit(opcode, g.DefineAsRegister(node),
144  g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
145  g.UseRegister(node->InputAt(1)));
146 }
147 
148 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
149  Node* node) {
150  MipsOperandGenerator g(selector);
151  selector->Emit(opcode, g.DefineAsRegister(node),
152  g.UseRegister(node->InputAt(0)),
153  g.UseOperand(node->InputAt(1), opcode));
154 }
155 
156 bool TryMatchImmediate(InstructionSelector* selector,
157  InstructionCode* opcode_return, Node* node,
158  size_t* input_count_return, InstructionOperand* inputs) {
159  MipsOperandGenerator g(selector);
160  if (g.CanBeImmediate(node, *opcode_return)) {
161  *opcode_return |= AddressingModeField::encode(kMode_MRI);
162  inputs[0] = g.UseImmediate(node);
163  *input_count_return = 1;
164  return true;
165  }
166  return false;
167 }
168 
// Shared routine for binary operations. Tries to encode the right operand
// (or, when |has_reverse_opcode| allows swapping, the left operand) as an
// immediate, then emits the instruction together with the flags
// continuation |cont|.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  // TryMatchImmediate fills inputs[1] and sets the MRI addressing mode on
  // the (possibly reversed) opcode when the constant fits.
  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    // The left operand was the immediate: swap the operands and switch to
    // the reversed opcode.
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    // Neither side fits as an immediate; use register (or operand) forms.
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
212 
// Convenience overload: binop with reverse opcode but no flags continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}
219 
// Convenience overload: binop with a flags continuation but no reverse form.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}
224 
// Convenience overload: plain binop, no reverse form, no continuation.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}
229 
// Shared lowering for 64-bit (word-pair) atomic read-modify-write ops.
// Inputs: base, index, value low word (pinned to a1), value high word
// (pinned to a2). Result words that are observed via projections are pinned
// to v0/v1; any unobserved word is instead reserved as a temp so the code
// generator may still clobber it. a0 is always reserved as a temp.
static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node,
                                 ArchOpcode opcode) {
  MipsOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  Node* value_high = node->InputAt(3);

  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
                                 g.UseFixed(value, a1),
                                 g.UseFixed(value_high, a2)};
  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // Both result words are used.
    InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0),
                                    g.DefineAsFixed(projection1, v1)};
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
                                  g.TempRegister()};
    selector->Emit(opcode | AddressingModeField::encode(kMode_None),
                   arraysize(outputs), outputs, arraysize(inputs), inputs,
                   arraysize(temps), temps);
  } else if (projection0) {
    // Only the low result word is used; v1 becomes a scratch register.
    InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0)};
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v1),
                                  g.TempRegister()};
    selector->Emit(opcode | AddressingModeField::encode(kMode_None),
                   arraysize(outputs), outputs, arraysize(inputs), inputs,
                   arraysize(temps), temps);
  } else {
    // The result is unused; both v0 and v1 become scratch registers.
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v0),
                                  g.TempRegister(v1)};
    selector->Emit(opcode | AddressingModeField::encode(kMode_None), 0, nullptr,
                   arraysize(inputs), inputs, arraysize(temps), temps);
  }
}
265 
266 void InstructionSelector::VisitStackSlot(Node* node) {
267  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
268  int alignment = rep.alignment();
269  int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
270  OperandGenerator g(this);
271 
272  Emit(kArchStackSlot, g.DefineAsRegister(node),
273  sequence()->AddImmediate(Constant(slot)),
274  sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
275 }
276 
277 void InstructionSelector::VisitDebugAbort(Node* node) {
278  MipsOperandGenerator g(this);
279  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
280 }
281 
// Lowers a Load (or PoisonedLoad) node: picks the MIPS load opcode from the
// machine representation, then uses base + immediate-index addressing when
// the index constant fits, or computes the address into a temp otherwise.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsLwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsLdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsLw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaLd;
      break;
    case MachineRepresentation::kWord64:  // Fall through.
    case MachineRepresentation::kNone:
      // 64-bit word loads are not supported on 32-bit MIPS.
      UNREACHABLE();
      return;
  }
  // Poisoned loads carry a flag so the code generator can apply the
  // speculation-poisoning mitigation to the loaded value.
  if (node->opcode() == IrOpcode::kPoisonedLoad) {
    CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
    opcode |= MiscField::encode(kMemoryAccessPoisoned);
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index does not fit as an immediate: add base + index into a temp
    // register, then load from [temp + 0].
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
334 
// Poisoned loads are selected like regular loads; VisitLoad inspects the
// node's opcode and sets the poisoning flag itself.
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
336 
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk): protected (trap-handler) loads are not implemented on MIPS.
  UNIMPLEMENTED();
}
341 
// Lowers a Store node. Stores that need a write barrier are emitted as
// kArchStoreWithWriteBarrier; plain stores pick a MIPS store opcode from the
// representation and use immediate-index addressing when the index fits.
void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    // Unique registers: the write-barrier stub must not alias its inputs.
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMipsSwc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMipsSdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMipsSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMipsSh;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kMipsSw;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kMipsMsaSt;
        break;
      case MachineRepresentation::kWord64:  // Fall through.
      case MachineRepresentation::kNone:
        // 64-bit word stores are not supported on 32-bit MIPS.
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      // Index does not fit as an immediate: add base + index into a temp
      // register, then store to [temp + 0].
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}
425 
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk): protected (trap-handler) stores are not implemented on MIPS.
  UNIMPLEMENTED();
}
430 
// Lowers Word32And. Recognizes two bit-field patterns before falling back
// to a plain 'and':
//   And(Shr(x, imm), contiguous-low-mask)  -> Ext (bit-field extract)
//   And(x, contiguous-high-mask)           -> Ins of zeros (clear low bits)
void InstructionSelector::VisitWord32And(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1F;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        if (lsb == 0 && mask_width == 32) {
          // Extracting all 32 bits from position 0 is the identity.
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kMipsExt, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(mask_width));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of invereted mask.
      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(shift));
      return;
    }
  }
  // Commutative, so the opcode doubles as its own reverse form.
  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
}
482 
// Word32Or is commutative, so the same opcode serves as its own reverse.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMipsOr, true, kMipsOr);
}
486 
// Lowers Word32Xor. Xor with -1 is bitwise NOT, which maps to Nor:
//   Xor(Or(a, b), -1) -> Nor(a, b)     (fused not-or)
//   Xor(x, -1)        -> Nor(x, 0)     (plain negation)
void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      Emit(kMipsNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    MipsOperandGenerator g(this);
    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  // Commutative, so the opcode doubles as its own reverse form.
  VisitBinop(this, node, kMipsXor, true, kMipsXor);
}
509 
// Lowers Word32Shl. Recognizes Shl(And(x, contiguous-low-mask), imm) where
// the shift pushes the whole mask to (or past) the top bit, in which case
// the And is redundant and only the shift is emitted.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    MipsOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMipsShl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMipsShl, node);
}
539 
// Lowers Word32Shr. Recognizes Shr(And(x, mask), imm) where the shifted
// mask is contiguous, which maps to a single Ext (bit-field extract).
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    // Int32 shifts use `value % 32`.
    uint32_t lsb = m.right().Value() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && mleft.right().Value() != 0) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        MipsOperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMipsShr, node);
}
563 
// Lowers Word32Sar. On r2/r6, Sar(Shl(x, K), K) with K == 16 or 24 is a
// sign extension, which maps to Seh (halfword) or Seb (byte) respectively.
void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
      m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasValue() && mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      uint32_t sar = m.right().Value();
      uint32_t shl = mleft.right().Value();
      if ((sar == shl) && (sar == 16)) {
        // (x << 16) >> 16: sign-extend halfword.
        Emit(kMipsSeh, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        // (x << 24) >> 24: sign-extend byte.
        Emit(kMipsSeb, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      }
    }
  }
  VisitRRO(this, kMipsSar, node);
}
586 
587 static void VisitInt32PairBinop(InstructionSelector* selector,
588  InstructionCode pair_opcode,
589  InstructionCode single_opcode, Node* node) {
590  MipsOperandGenerator g(selector);
591 
592  Node* projection1 = NodeProperties::FindProjection(node, 1);
593 
594  if (projection1) {
595  // We use UseUniqueRegister here to avoid register sharing with the output
596  // register.
597  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
598  g.UseUniqueRegister(node->InputAt(1)),
599  g.UseUniqueRegister(node->InputAt(2)),
600  g.UseUniqueRegister(node->InputAt(3))};
601 
602  InstructionOperand outputs[] = {
603  g.DefineAsRegister(node),
604  g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
605  selector->Emit(pair_opcode, 2, outputs, 4, inputs);
606  } else {
607  // The high word of the result is not used, so we emit the standard 32 bit
608  // instruction.
609  selector->Emit(single_opcode, g.DefineSameAsFirst(node),
610  g.UseRegister(node->InputAt(0)),
611  g.UseRegister(node->InputAt(2)));
612  }
613 }
614 
// 64-bit add as a register pair; falls back to 32-bit add if the high word
// is unused (see VisitInt32PairBinop).
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
}
618 
// 64-bit subtract as a register pair; falls back to 32-bit subtract if the
// high word is unused.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
}
622 
// 64-bit multiply as a register pair; falls back to 32-bit multiply if the
// high word is unused.
void InstructionSelector::VisitInt32PairMul(Node* node) {
  VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
}
626 
// Shared routine for multiple shift operations.
// Lowers a 64-bit (word-pair) shift. The shift amount (input 2) becomes an
// immediate when constant; when the high result word is unused a temp
// register takes its place.
static void VisitWord32PairShift(InstructionSelector* selector,
                                 InstructionCode opcode, Node* node) {
  MipsOperandGenerator g(selector);
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // High word unused: give the code generator a scratch register instead.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
661 
// 64-bit left shift over a register pair.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kMipsShlPair, node);
}
665 
// 64-bit logical right shift over a register pair.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kMipsShrPair, node);
}
669 
// 64-bit arithmetic right shift over a register pair.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kMipsSarPair, node);
}
673 
// 32-bit rotate right; the rotate amount may be immediate or register.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kMipsRor, node);
}
677 
// Count leading zeros.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMipsClz, node);
}
681 
// Lowers a 64-bit atomic load into a register pair. Observed result words
// are pinned to v0/v1 via their projections; unobserved words are reserved
// as temps instead. a0 is always reserved as a temp.
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kMipsWord32AtomicPairLoad;

  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};

  if (projection1) {
    // Both result words are used.
    InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0),
                                    g.DefineAsFixed(projection1, v1)};
    InstructionOperand temps[] = {g.TempRegister(a0)};
    Emit(opcode | AddressingModeField::encode(kMode_MRI), arraysize(outputs),
         outputs, arraysize(inputs), inputs, arraysize(temps), temps);
  } else if (projection0) {
    // Only the low word is used; v1 becomes a scratch register.
    InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0)};
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v1)};
    Emit(opcode | AddressingModeField::encode(kMode_MRI), arraysize(outputs),
         outputs, arraysize(inputs), inputs, arraysize(temps), temps);
  } else {
    // The result is unused; both v0 and v1 become scratch registers.
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v0),
                                  g.TempRegister(v1)};
    Emit(opcode | AddressingModeField::encode(kMode_MRI), 0, nullptr,
         arraysize(inputs), inputs, arraysize(temps), temps);
  }
}
711 
712 void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
713  MipsOperandGenerator g(this);
714  Node* base = node->InputAt(0);
715  Node* index = node->InputAt(1);
716  Node* value_low = node->InputAt(2);
717  Node* value_high = node->InputAt(3);
718 
719  InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
720  g.UseFixed(value_low, a1),
721  g.UseFixed(value_high, a2)};
722  InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(),
723  g.TempRegister()};
724  Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0,
725  nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
726 }
727 
// 64-bit atomic add over a register pair.
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAdd);
}
731 
// 64-bit atomic subtract over a register pair.
void InstructionSelector::VisitWord32AtomicPairSub(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairSub);
}
735 
// 64-bit atomic bitwise-and over a register pair.
void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAnd);
}
739 
// 64-bit atomic bitwise-or over a register pair.
void InstructionSelector::VisitWord32AtomicPairOr(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairOr);
}
743 
// 64-bit atomic bitwise-xor over a register pair.
void InstructionSelector::VisitWord32AtomicPairXor(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairXor);
}
747 
// 64-bit atomic exchange over a register pair.
void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
  VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairExchange);
}
751 
// Lowers a 64-bit atomic compare-exchange. Inputs 2-4 are pinned to a1-a3
// and input 5 gets a unique register (presumably expected/new value words —
// the exact meaning is fixed by the code generator's register contract).
// Observed result words are pinned to v0/v1; unobserved ones become temps.
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
  MipsOperandGenerator g(this);
  InstructionOperand inputs[] = {
      g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
      g.UseFixed(node->InputAt(2), a1), g.UseFixed(node->InputAt(3), a2),
      g.UseFixed(node->InputAt(4), a3), g.UseUniqueRegister(node->InputAt(5))};

  InstructionCode code = kMipsWord32AtomicPairCompareExchange |
                         AddressingModeField::encode(kMode_MRI);
  Node* projection0 = NodeProperties::FindProjection(node, 0);
  Node* projection1 = NodeProperties::FindProjection(node, 1);
  if (projection1) {
    // Both result words are used.
    InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0),
                                    g.DefineAsFixed(projection1, v1)};
    InstructionOperand temps[] = {g.TempRegister(a0)};
    Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
         arraysize(temps), temps);
  } else if (projection0) {
    // Only the low word is used; v1 becomes a scratch register.
    InstructionOperand outputs[] = {g.DefineAsFixed(projection0, v0)};
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v1)};
    Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
         arraysize(temps), temps);
  } else {
    // The result is unused; both v0 and v1 become scratch registers.
    InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(v0),
                                  g.TempRegister(v1)};
    Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
  }
}
780 
// Bit reversal is not selected on MIPS; the node should never reach here.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
782 
// 64-bit byte swap does not exist on 32-bit MIPS; should never reach here.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
784 
785 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
786  MipsOperandGenerator g(this);
787  Emit(kMipsByteSwap32, g.DefineAsRegister(node),
788  g.UseRegister(node->InputAt(0)));
789 }
790 
791 void InstructionSelector::VisitWord32Ctz(Node* node) {
792  MipsOperandGenerator g(this);
793  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
794 }
795 
796 void InstructionSelector::VisitWord32Popcnt(Node* node) {
797  MipsOperandGenerator g(this);
798  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
799 }
800 
// Lowers Int32Add. On r6, Add(a, Shl(b, imm)) in either operand order is
// matched to Lsa (left-shift-add) when the shift amount is 1..31.
void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);

  if (IsMipsArchVariant(kMips32r6)) {
    // Select Lsa for (left + (left_of_right << imm)).
    if (m.right().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
      Int32BinopMatcher mright(m.right().node());
      if (mright.right().HasValue() && !m.left().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mright.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMipsLsa, g.DefineAsRegister(node),
               g.UseRegister(m.left().node()),
               g.UseRegister(mright.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }

    // Select Lsa for ((left_of_left << imm) + right).
    if (m.left().opcode() == IrOpcode::kWord32Shl &&
        CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue() && !m.right().HasValue()) {
        int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
        if (shift_value > 0 && shift_value <= 31) {
          Emit(kMipsLsa, g.DefineAsRegister(node),
               g.UseRegister(m.right().node()),
               g.UseRegister(mleft.left().node()),
               g.TempImmediate(shift_value));
          return;
        }
      }
    }
  }

  // Commutative, so the opcode doubles as its own reverse form.
  VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
841 
// Subtraction is not commutative, so no reverse opcode is registered.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}
845 
// Lowers Int32Mul with strength reduction for positive constant multipliers:
//   x * 2^k       -> x << k
//   x * (2^k + 1) -> Lsa(x, x, k)            (r6 only)
//   x * (2^k - 1) -> (x << k) - x
void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && IsMipsArchVariant(kMips32r6) &&
        value - 1 > 0 && value - 1 <= 31) {
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}
876 
// Signed 32x32 multiply keeping only the high 32 bits of the product.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMipsMulHigh, node);
}
880 
881 void InstructionSelector::VisitUint32MulHigh(Node* node) {
882  MipsOperandGenerator g(this);
883  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
884  g.UseRegister(node->InputAt(1)));
885 }
886 
887 void InstructionSelector::VisitInt32Div(Node* node) {
888  MipsOperandGenerator g(this);
889  Int32BinopMatcher m(node);
890  Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
891  g.UseRegister(m.right().node()));
892 }
893 
894 void InstructionSelector::VisitUint32Div(Node* node) {
895  MipsOperandGenerator g(this);
896  Int32BinopMatcher m(node);
897  Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
898  g.UseRegister(m.right().node()));
899 }
900 
901 void InstructionSelector::VisitInt32Mod(Node* node) {
902  MipsOperandGenerator g(this);
903  Int32BinopMatcher m(node);
904  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
905  g.UseRegister(m.right().node()));
906 }
907 
908 void InstructionSelector::VisitUint32Mod(Node* node) {
909  MipsOperandGenerator g(this);
910  Int32BinopMatcher m(node);
911  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
912  g.UseRegister(m.right().node()));
913 }
914 
// float32 -> float64 widening conversion.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDS, node);
}

// signed int32 -> float32 conversion.
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSW, node);
}

// unsigned int32 -> float32 conversion.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMipsCvtSUw, node);
}

// signed int32 -> float64 conversion.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDW, node);
}

// unsigned int32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMipsCvtDUw, node);
}

// float32 -> signed int32, truncating.
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWS, node);
}

// float32 -> unsigned int32, truncating.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwS, node);
}
942 
// float64 -> int32 conversion.  When the input is a covered rounding node
// (or a float32 widened through ChangeFloat32ToFloat64), the rounding and the
// conversion are fused into a single round-to-integer instruction.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No rounding op to fold: truncate the float32 (value's input)
            // directly, skipping the float64 intermediate.
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  // Generic case: truncate the float64 input.
  VisitRR(this, kMipsTruncWD, node);
}
1005 
// float64 -> unsigned int32, truncating.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}

// Same lowering as ChangeFloat64ToUint32: a truncating conversion.
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMipsTruncUwD, node);
}
1013 
1014 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
1015  MipsOperandGenerator g(this);
1016  Node* value = node->InputAt(0);
1017  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
1018  // instruction.
1019  if (CanCover(node, value) &&
1020  value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
1021  Emit(kMipsCvtSW, g.DefineAsRegister(node),
1022  g.UseRegister(value->InputAt(0)));
1023  return;
1024  }
1025  VisitRR(this, kMipsCvtSD, node);
1026 }
1027 
// float64 -> int32 via the architecture-independent truncation opcode.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

// float64 -> int32, truncating.
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMipsTruncWD, node);
}

// Reinterpret float32 bits as int32 by extracting the low word of the FPU
// register.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
}

// Reinterpret int32 bits as float32 by inserting the GP register into the
// low word.  NOTE(review): the inline 0 immediate occupies the insert's
// first input slot (no live float64 to merge into) -- confirm against the
// code generator's operand expectations.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
1046 
// float32 addition; on r2 a multiply feeding either operand is fused into a
// single Madd.S when the multiply has no other uses (CanCover).
void InstructionSelector::VisitFloat32Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Add.S(Mul.S(x, y), z):
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
      // For Add.S(x, Mul.S(y, z)):
      Float32BinopMatcher mright(m.right().node());
      Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddS, node);
}
1070 
// float64 addition; on r2 a multiply feeding either operand is fused into a
// single Madd.D when the multiply has no other uses (CanCover).
void InstructionSelector::VisitFloat64Add(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Add.D(Mul.D(x, y), z):
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
      // For Add.D(x, Mul.D(y, z)):
      Float64BinopMatcher mright(m.right().node());
      Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()),
           g.UseRegister(mright.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsAddD, node);
}
1094 
// float32 subtraction; on r2 a multiply on the left is fused into Msub.S.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
    Float32BinopMatcher m(node);
    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
      // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
      Float32BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubS, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubS, node);
}
1110 
// float64 subtraction; on r2 a multiply on the left is fused into Msub.D.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
    Float64BinopMatcher m(node);
    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
      Float64BinopMatcher mleft(m.left().node());
      Emit(kMipsMsubD, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  VisitRRR(this, kMipsSubD, node);
}
1126 
// float32 multiplication.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMipsMulS, node);
}

// float64 multiplication.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMipsMulD, node);
}

// float32 division.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMipsDivS, node);
}

// float64 division.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMipsDivD, node);
}
1142 
// float64 modulus is lowered to a runtime call (MarkAsCall) with the inputs
// pinned to FP argument registers f12/f14 and the result in f0.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))
      ->MarkAsCall();
}
1149 
1150 void InstructionSelector::VisitFloat32Max(Node* node) {
1151  MipsOperandGenerator g(this);
1152  Emit(kMipsFloat32Max, g.DefineAsRegister(node),
1153  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1154 }
1155 
1156 void InstructionSelector::VisitFloat64Max(Node* node) {
1157  MipsOperandGenerator g(this);
1158  Emit(kMipsFloat64Max, g.DefineAsRegister(node),
1159  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1160 }
1161 
1162 void InstructionSelector::VisitFloat32Min(Node* node) {
1163  MipsOperandGenerator g(this);
1164  Emit(kMipsFloat32Min, g.DefineAsRegister(node),
1165  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1166 }
1167 
1168 void InstructionSelector::VisitFloat64Min(Node* node) {
1169  MipsOperandGenerator g(this);
1170  Emit(kMipsFloat64Min, g.DefineAsRegister(node),
1171  g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1172 }
1173 
// float32 absolute value.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}

// float64 absolute value.
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}

// float32 square root.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}

// float64 square root.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}
1189 
// float32 round toward negative infinity.
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}

// float64 round toward negative infinity.
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}

// float32 round toward positive infinity.
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}

// float64 round toward positive infinity.
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}

// float32 round toward zero.
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}

// float64 round toward zero.
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}

// Round-ties-away is not supported by this backend; presumably the
// machine-operator flags never request it here.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

// float32 round to nearest, ties to even.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}

// float64 round to nearest, ties to even.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}

// float32 negation.
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMipsNegS, node);
}

// float64 negation.
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMipsNegD, node);
}
1233 
// Binary IEEE-754 math operation lowered to a runtime call (MarkAsCall):
// inputs pinned to f2/f4, result in f0.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  MipsOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}
1241 
// Unary IEEE-754 math operation lowered to a runtime call (MarkAsCall):
// input pinned to f12, result in f0.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  MipsOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}
1248 
// Emits the instructions that place outgoing call arguments into their
// stack slots, for both C function calls and regular (JS/stub) calls.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    // Frame setup for the C call; the parameter count rides in MiscField.
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    // Slots start after the reserved argument slots (kCArgSlotCount,
    // presumably the o32 ABI shadow area -- confirm).
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      // Skip holes (null nodes) without consuming a slot.
      if (input.node) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(slot << kPointerSizeLog2));
        ++slot;
      }
    }
  } else {
    // Possibly align stack here for functions.
    int push_count = static_cast<int>(call_descriptor->StackParameterCount());
    if (push_count > 0) {
      // Calculate needed space
      int stack_size = 0;
      for (size_t n = 0; n < arguments->size(); ++n) {
        PushParameter input = (*arguments)[n];
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      // Claim the whole area at once ...
      Emit(kMipsStackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kPointerSizeLog2));
    }
    // ... then store each argument at its index.  Note the slot index is the
    // argument position n, not an accumulated byte offset; multi-slot values
    // presumably occupy the following hole entries -- confirm with callers.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(n << kPointerSizeLog2));
      }
    }
  }
}
1293 
// Emits kMipsPeek instructions that load multi-value call results returned in
// caller frame slots back into registers.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  MipsOperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    // Register-returned results need no stack peek.
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      // Record the FP representation so the allocator picks an FPU register.
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kMipsPeek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    // Advance even for skipped holes so slot accounting stays correct.
    reverse_slot += output.location.GetSizeInPointers();
  }
}
1316 
// Tail-call targets are never encoded as immediates on MIPS.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

// Number of temporary registers needed when tail-calling from a JSFunction.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1320 
// Selects an unaligned load instruction (ul* / MSA ld) based on the loaded
// representation.  Byte-sized and 64-bit loads are unreachable here.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Single-byte accesses cannot be misaligned.
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUlw;
      break;
    case MachineRepresentation::kFloat32:
      opcode = kMipsUlwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUldc1;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaLd;
      break;
    case MachineRepresentation::kWord64:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    // base + immediate-index addressing.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Index does not fit the immediate field: materialize base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1369 
// Selects an unaligned store instruction (us* / MSA st) based on the stored
// representation.  A zero value is passed as an immediate so the zero
// register can be used (UseRegisterOrImmediateZero).
void InstructionSelector::VisitUnalignedStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());

  // TODO(mips): I guess this could be done in a better way.
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsUswc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsUsdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Single-byte accesses cannot be misaligned.
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = kMipsUsh;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsUsw;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMipsMsaSt;
      break;
    case MachineRepresentation::kWord64:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    // base + immediate-index addressing.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    // Index does not fit the immediate field: materialize base + index first.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}
1422 
1423 namespace {
1424 // Shared routine for multiple compare operations.
1425 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1426  InstructionOperand left, InstructionOperand right,
1427  FlagsContinuation* cont) {
1428  selector->EmitWithContinuation(opcode, left, right, cont);
1429 }
1430 
1431 // Shared routine for multiple float32 compare operations.
1432 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1433  FlagsContinuation* cont) {
1434  MipsOperandGenerator g(selector);
1435  Float32BinopMatcher m(node);
1436  InstructionOperand lhs, rhs;
1437 
1438  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1439  : g.UseRegister(m.left().node());
1440  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1441  : g.UseRegister(m.right().node());
1442  VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
1443 }
1444 
1445 // Shared routine for multiple float64 compare operations.
1446 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1447  FlagsContinuation* cont) {
1448  MipsOperandGenerator g(selector);
1449  Float64BinopMatcher m(node);
1450  InstructionOperand lhs, rhs;
1451 
1452  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1453  : g.UseRegister(m.left().node());
1454  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1455  : g.UseRegister(m.right().node());
1456  VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
1457 }
1458 
// Shared routine for multiple word compare operations.  Tries to fold an
// immediate operand from either side; for a non-commutative comparison with
// the immediate on the left, the continuation's condition is commuted.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  MipsOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kMipsTst) {
      // Tst accepts an immediate for any condition.
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            // Materialized (boolean) equality can use the immediate form.
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            // NOTE(review): branch-form equality deliberately keeps both
            // operands in registers -- presumably the branch encoding has no
            // immediate variant here; confirm in the code generator.
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          // These conditions accept the immediate operand form.
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          // All other conditions need both operands in registers.
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    // Mirror of the branch above with operands swapped.
    if (!commutative) cont->Commute();
    if (opcode == kMipsTst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    // No immediate on either side: plain register-register compare.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
1530 
// Convenience overload: a (non-commutative) kMipsCmp word comparison.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMipsCmp, cont, false);
}
1535 
1536 } // namespace
1537 
// Shared routine for word comparisons against zero.  Strips chained
// `x == 0` wrappers (negating the continuation each time), then tries to
// fuse the continuation with the producing comparison, FP comparison, or
// overflow projection; otherwise emits an explicit compare against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(this, value, cont);
      // FP comparisons reuse the unsigned condition codes.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsSubOvf, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMipsMulOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        // (x & mask) != 0 maps to a Tst-style compare; And is commutative.
        return VisitWordCompare(this, value, kMipsTst, cont, true);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(this);
  InstructionOperand const value_operand = g.UseRegister(value);
  EmitWithContinuation(kMipsCmp, value_operand, g.TempImmediate(0), cont);
}
1626 
// Lowers a switch either to a jump table (ArchTableSwitch) or to a binary
// search tree of conditional jumps, picking whichever the size/time cost
// heuristic below favors.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  MipsOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 9 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    // Prefer the table when its weighted space+time cost beats the lookup's
    // and the value range is dense/small enough.
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        // Rebase the index so the table starts at zero.
        index_operand = g.TempRegister();
        Emit(kMipsSub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(std::move(sw), value_operand);
}
1657 
1658 void InstructionSelector::VisitWord32Equal(Node* const node) {
1659  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1660  Int32BinopMatcher m(node);
1661  if (m.right().Is(0)) {
1662  return VisitWordCompareZero(m.node(), m.left().node(), &cont);
1663  }
1664  VisitWordCompare(this, node, &cont);
1665 }
1666 
// Signed int32 less-than producing a boolean.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}

// Signed int32 less-than-or-equal producing a boolean.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}

// Unsigned int32 less-than producing a boolean.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}

// Unsigned int32 less-than-or-equal producing a boolean.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
1688 
1689 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1690  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1691  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1692  return VisitBinop(this, node, kMipsAddOvf, &cont);
1693  }
1694  FlagsContinuation cont;
1695  VisitBinop(this, node, kMipsAddOvf, &cont);
1696 }
1697 
1698 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1699  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1700  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1701  return VisitBinop(this, node, kMipsSubOvf, &cont);
1702  }
1703  FlagsContinuation cont;
1704  VisitBinop(this, node, kMipsSubOvf, &cont);
1705 }
1706 
1707 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
1708  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1709  FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1710  return VisitBinop(this, node, kMipsMulOvf, &cont);
1711  }
1712  FlagsContinuation cont;
1713  VisitBinop(this, node, kMipsMulOvf, &cont);
1714 }
1715 
// float32 equality producing a boolean.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

// float32 less-than; FP comparisons reuse the unsigned condition codes.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

// float32 less-than-or-equal.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

// float64 equality producing a boolean.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

// float64 less-than.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

// float64 less-than-or-equal.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
1747 
1748 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
1749  MipsOperandGenerator g(this);
1750  Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
1751  g.UseRegister(node->InputAt(0)));
1752 }
1753 
1754 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
1755  MipsOperandGenerator g(this);
1756  Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
1757  g.UseRegister(node->InputAt(0)));
1758 }
1759 
1760 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
1761  MipsOperandGenerator g(this);
1762  Node* left = node->InputAt(0);
1763  Node* right = node->InputAt(1);
1764  Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
1765  g.UseRegister(left), g.UseRegister(right));
1766 }
1767 
1768 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
1769  MipsOperandGenerator g(this);
1770  Node* left = node->InputAt(0);
1771  Node* right = node->InputAt(1);
1772  Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
1773  g.UseRegister(left), g.UseRegister(right));
1774 }
1775 
1776 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1777  MipsOperandGenerator g(this);
1778  Node* left = node->InputAt(0);
1779  InstructionOperand temps[] = {g.TempRegister()};
1780  Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
1781  arraysize(temps), temps);
1782 }
1783 
1784 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
1785  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1786  MipsOperandGenerator g(this);
1787  Node* base = node->InputAt(0);
1788  Node* index = node->InputAt(1);
1789  ArchOpcode opcode = kArchNop;
1790  switch (load_rep.representation()) {
1791  case MachineRepresentation::kWord8:
1792  opcode =
1793  load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
1794  break;
1795  case MachineRepresentation::kWord16:
1796  opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
1797  : kWord32AtomicLoadUint16;
1798  break;
1799  case MachineRepresentation::kWord32:
1800  opcode = kWord32AtomicLoadWord32;
1801  break;
1802  default:
1803  UNREACHABLE();
1804  return;
1805  }
1806 
1807  if (g.CanBeImmediate(index, opcode)) {
1808  Emit(opcode | AddressingModeField::encode(kMode_MRI),
1809  g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
1810  } else {
1811  InstructionOperand addr_reg = g.TempRegister();
1812  Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
1813  g.UseRegister(index), g.UseRegister(base));
1814  // Emit desired load opcode, using temp addr_reg.
1815  Emit(opcode | AddressingModeField::encode(kMode_MRI),
1816  g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1817  }
1818 }
1819 
1820 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
1821  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
1822  MipsOperandGenerator g(this);
1823  Node* base = node->InputAt(0);
1824  Node* index = node->InputAt(1);
1825  Node* value = node->InputAt(2);
1826  ArchOpcode opcode = kArchNop;
1827  switch (rep) {
1828  case MachineRepresentation::kWord8:
1829  opcode = kWord32AtomicStoreWord8;
1830  break;
1831  case MachineRepresentation::kWord16:
1832  opcode = kWord32AtomicStoreWord16;
1833  break;
1834  case MachineRepresentation::kWord32:
1835  opcode = kWord32AtomicStoreWord32;
1836  break;
1837  default:
1838  UNREACHABLE();
1839  return;
1840  }
1841 
1842  if (g.CanBeImmediate(index, opcode)) {
1843  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1844  g.UseRegister(base), g.UseImmediate(index),
1845  g.UseRegisterOrImmediateZero(value));
1846  } else {
1847  InstructionOperand addr_reg = g.TempRegister();
1848  Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
1849  g.UseRegister(index), g.UseRegister(base));
1850  // Emit desired store opcode, using temp addr_reg.
1851  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1852  addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1853  }
1854 }
1855 
1856 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
1857  MipsOperandGenerator g(this);
1858  Node* base = node->InputAt(0);
1859  Node* index = node->InputAt(1);
1860  Node* value = node->InputAt(2);
1861  ArchOpcode opcode = kArchNop;
1862  MachineType type = AtomicOpType(node->op());
1863  if (type == MachineType::Int8()) {
1864  opcode = kWord32AtomicExchangeInt8;
1865  } else if (type == MachineType::Uint8()) {
1866  opcode = kWord32AtomicExchangeUint8;
1867  } else if (type == MachineType::Int16()) {
1868  opcode = kWord32AtomicExchangeInt16;
1869  } else if (type == MachineType::Uint16()) {
1870  opcode = kWord32AtomicExchangeUint16;
1871  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1872  opcode = kWord32AtomicExchangeWord32;
1873  } else {
1874  UNREACHABLE();
1875  return;
1876  }
1877 
1878  AddressingMode addressing_mode = kMode_MRI;
1879  InstructionOperand inputs[3];
1880  size_t input_count = 0;
1881  inputs[input_count++] = g.UseUniqueRegister(base);
1882  inputs[input_count++] = g.UseUniqueRegister(index);
1883  inputs[input_count++] = g.UseUniqueRegister(value);
1884  InstructionOperand outputs[1];
1885  outputs[0] = g.UseUniqueRegister(node);
1886  InstructionOperand temp[3];
1887  temp[0] = g.TempRegister();
1888  temp[1] = g.TempRegister();
1889  temp[2] = g.TempRegister();
1890  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1891  Emit(code, 1, outputs, input_count, inputs, 3, temp);
1892 }
1893 
1894 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
1895  MipsOperandGenerator g(this);
1896  Node* base = node->InputAt(0);
1897  Node* index = node->InputAt(1);
1898  Node* old_value = node->InputAt(2);
1899  Node* new_value = node->InputAt(3);
1900  ArchOpcode opcode = kArchNop;
1901  MachineType type = AtomicOpType(node->op());
1902  if (type == MachineType::Int8()) {
1903  opcode = kWord32AtomicCompareExchangeInt8;
1904  } else if (type == MachineType::Uint8()) {
1905  opcode = kWord32AtomicCompareExchangeUint8;
1906  } else if (type == MachineType::Int16()) {
1907  opcode = kWord32AtomicCompareExchangeInt16;
1908  } else if (type == MachineType::Uint16()) {
1909  opcode = kWord32AtomicCompareExchangeUint16;
1910  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1911  opcode = kWord32AtomicCompareExchangeWord32;
1912  } else {
1913  UNREACHABLE();
1914  return;
1915  }
1916 
1917  AddressingMode addressing_mode = kMode_MRI;
1918  InstructionOperand inputs[4];
1919  size_t input_count = 0;
1920  inputs[input_count++] = g.UseUniqueRegister(base);
1921  inputs[input_count++] = g.UseUniqueRegister(index);
1922  inputs[input_count++] = g.UseUniqueRegister(old_value);
1923  inputs[input_count++] = g.UseUniqueRegister(new_value);
1924  InstructionOperand outputs[1];
1925  outputs[0] = g.UseUniqueRegister(node);
1926  InstructionOperand temp[3];
1927  temp[0] = g.TempRegister();
1928  temp[1] = g.TempRegister();
1929  temp[2] = g.TempRegister();
1930  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1931  Emit(code, 1, outputs, input_count, inputs, 3, temp);
1932 }
1933 
1934 void InstructionSelector::VisitWord32AtomicBinaryOperation(
1935  Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
1936  ArchOpcode uint16_op, ArchOpcode word32_op) {
1937  MipsOperandGenerator g(this);
1938  Node* base = node->InputAt(0);
1939  Node* index = node->InputAt(1);
1940  Node* value = node->InputAt(2);
1941  ArchOpcode opcode = kArchNop;
1942  MachineType type = AtomicOpType(node->op());
1943  if (type == MachineType::Int8()) {
1944  opcode = int8_op;
1945  } else if (type == MachineType::Uint8()) {
1946  opcode = uint8_op;
1947  } else if (type == MachineType::Int16()) {
1948  opcode = int16_op;
1949  } else if (type == MachineType::Uint16()) {
1950  opcode = uint16_op;
1951  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
1952  opcode = word32_op;
1953  } else {
1954  UNREACHABLE();
1955  return;
1956  }
1957 
1958  AddressingMode addressing_mode = kMode_MRI;
1959  InstructionOperand inputs[3];
1960  size_t input_count = 0;
1961  inputs[input_count++] = g.UseUniqueRegister(base);
1962  inputs[input_count++] = g.UseUniqueRegister(index);
1963  inputs[input_count++] = g.UseUniqueRegister(value);
1964  InstructionOperand outputs[1];
1965  outputs[0] = g.UseUniqueRegister(node);
1966  InstructionOperand temps[4];
1967  temps[0] = g.TempRegister();
1968  temps[1] = g.TempRegister();
1969  temps[2] = g.TempRegister();
1970  temps[3] = g.TempRegister();
1971  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
1972  Emit(code, 1, outputs, input_count, inputs, 4, temps);
1973 }
1974 
// Stamps out VisitWord32Atomic{Add,Sub,And,Or,Xor}; each forwards to
// VisitWord32AtomicBinaryOperation with the per-width opcodes for that op.
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
1988 
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  // Never reached: kInt32AbsWithOverflow is not advertised in
  // SupportedMachineOperatorFlags() for this backend.
  UNREACHABLE();
}
1992 
void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  // Never reached: 64-bit operations are not selected on MIPS32.
  UNREACHABLE();
}
1996 
1997 void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
1998 
// SIMD shapes used to stamp out the splat / extract-lane / replace-lane
// visitors below.
#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

// Unary SIMD operations: (IR operator, backend opcode).
#define SIMD_UNOP_LIST(V)                                \
  V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4)         \
  V(F32x4Abs, kMipsF32x4Abs)                             \
  V(F32x4Neg, kMipsF32x4Neg)                             \
  V(F32x4RecipApprox, kMipsF32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox)     \
  V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4)         \
  V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4)         \
  V(I32x4Neg, kMipsI32x4Neg)                             \
  V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
  V(I16x8Neg, kMipsI16x8Neg)                             \
  V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
  V(I8x16Neg, kMipsI8x16Neg)                             \
  V(S128Not, kMipsS128Not)                               \
  V(S1x4AnyTrue, kMipsS1x4AnyTrue)                       \
  V(S1x4AllTrue, kMipsS1x4AllTrue)                       \
  V(S1x8AnyTrue, kMipsS1x8AnyTrue)                       \
  V(S1x8AllTrue, kMipsS1x8AllTrue)                       \
  V(S1x16AnyTrue, kMipsS1x16AnyTrue)                     \
  V(S1x16AllTrue, kMipsS1x16AllTrue)

// SIMD shifts; the backend opcode name matches the IR operator name.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)
2043 
// Binary SIMD operations: (IR operator, backend opcode).
#define SIMD_BINOP_LIST(V)                       \
  V(F32x4Add, kMipsF32x4Add)                     \
  V(F32x4AddHoriz, kMipsF32x4AddHoriz)           \
  V(F32x4Sub, kMipsF32x4Sub)                     \
  V(F32x4Mul, kMipsF32x4Mul)                     \
  V(F32x4Max, kMipsF32x4Max)                     \
  V(F32x4Min, kMipsF32x4Min)                     \
  V(F32x4Eq, kMipsF32x4Eq)                       \
  V(F32x4Ne, kMipsF32x4Ne)                       \
  V(F32x4Lt, kMipsF32x4Lt)                       \
  V(F32x4Le, kMipsF32x4Le)                       \
  V(I32x4Add, kMipsI32x4Add)                     \
  V(I32x4AddHoriz, kMipsI32x4AddHoriz)           \
  V(I32x4Sub, kMipsI32x4Sub)                     \
  V(I32x4Mul, kMipsI32x4Mul)                     \
  V(I32x4MaxS, kMipsI32x4MaxS)                   \
  V(I32x4MinS, kMipsI32x4MinS)                   \
  V(I32x4MaxU, kMipsI32x4MaxU)                   \
  V(I32x4MinU, kMipsI32x4MinU)                   \
  V(I32x4Eq, kMipsI32x4Eq)                       \
  V(I32x4Ne, kMipsI32x4Ne)                       \
  V(I32x4GtS, kMipsI32x4GtS)                     \
  V(I32x4GeS, kMipsI32x4GeS)                     \
  V(I32x4GtU, kMipsI32x4GtU)                     \
  V(I32x4GeU, kMipsI32x4GeU)                     \
  V(I16x8Add, kMipsI16x8Add)                     \
  V(I16x8AddSaturateS, kMipsI16x8AddSaturateS)   \
  V(I16x8AddSaturateU, kMipsI16x8AddSaturateU)   \
  V(I16x8AddHoriz, kMipsI16x8AddHoriz)           \
  V(I16x8Sub, kMipsI16x8Sub)                     \
  V(I16x8SubSaturateS, kMipsI16x8SubSaturateS)   \
  V(I16x8SubSaturateU, kMipsI16x8SubSaturateU)   \
  V(I16x8Mul, kMipsI16x8Mul)                     \
  V(I16x8MaxS, kMipsI16x8MaxS)                   \
  V(I16x8MinS, kMipsI16x8MinS)                   \
  V(I16x8MaxU, kMipsI16x8MaxU)                   \
  V(I16x8MinU, kMipsI16x8MinU)                   \
  V(I16x8Eq, kMipsI16x8Eq)                       \
  V(I16x8Ne, kMipsI16x8Ne)                       \
  V(I16x8GtS, kMipsI16x8GtS)                     \
  V(I16x8GeS, kMipsI16x8GeS)                     \
  V(I16x8GtU, kMipsI16x8GtU)                     \
  V(I16x8GeU, kMipsI16x8GeU)                     \
  V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \
  V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
  V(I8x16Add, kMipsI8x16Add)                     \
  V(I8x16AddSaturateS, kMipsI8x16AddSaturateS)   \
  V(I8x16AddSaturateU, kMipsI8x16AddSaturateU)   \
  V(I8x16Sub, kMipsI8x16Sub)                     \
  V(I8x16SubSaturateS, kMipsI8x16SubSaturateS)   \
  V(I8x16SubSaturateU, kMipsI8x16SubSaturateU)   \
  V(I8x16Mul, kMipsI8x16Mul)                     \
  V(I8x16MaxS, kMipsI8x16MaxS)                   \
  V(I8x16MinS, kMipsI8x16MinS)                   \
  V(I8x16MaxU, kMipsI8x16MaxU)                   \
  V(I8x16MinU, kMipsI8x16MinU)                   \
  V(I8x16Eq, kMipsI8x16Eq)                       \
  V(I8x16Ne, kMipsI8x16Ne)                       \
  V(I8x16GtS, kMipsI8x16GtS)                     \
  V(I8x16GeS, kMipsI8x16GeS)                     \
  V(I8x16GtU, kMipsI8x16GtU)                     \
  V(I8x16GeU, kMipsI8x16GeU)                     \
  V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
  V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
  V(S128And, kMipsS128And)                       \
  V(S128Or, kMipsS128Or)                         \
  V(S128Xor, kMipsS128Xor)
2111 
2112 void InstructionSelector::VisitS128Zero(Node* node) {
2113  MipsOperandGenerator g(this);
2114  Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
2115 }
2116 
// One splat visitor per SIMD shape: emits kMips<Type>Splat as a
// register-to-register operation.
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips##Type##Splat, node);                 \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT
2123 
// One extract-lane visitor per SIMD shape: register input plus an immediate
// lane index.
#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    VisitRRI(this, kMips##Type##ExtractLane, node);                \
  }
SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
#undef SIMD_VISIT_EXTRACT_LANE
2130 
// One replace-lane visitor per SIMD shape: vector input, immediate lane
// index, and a register holding the replacement value.
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips##Type##ReplaceLane, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
2137 
// Unary SIMD visitors: each lowers to a single register-to-register
// instruction from SIMD_UNOP_LIST.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
2144 
// SIMD shift visitors: register input with an immediate shift amount.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kMips##Name, node);                \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP
2151 
// Binary SIMD visitors: each lowers to a single three-register instruction
// from SIMD_BINOP_LIST.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
2158 
void InstructionSelector::VisitS128Select(Node* node) {
  // Three vector register inputs, one vector register output (RRRR form).
  VisitRRRR(this, kMipsS128Select, node);
}
2162 
2163 namespace {
2164 
// Maps a canonical 16-lane shuffle pattern to the single backend opcode that
// implements it. Lane indices 0-15 select from the first input, 16-31 from
// the second.
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

static const ShuffleEntry arch_shuffles[] = {
    // 32-bit-lane patterns.
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMipsS32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMipsS32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMipsS32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMipsS32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMipsS32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMipsS32x4InterleaveOdd},

    // 16-bit-lane patterns.
    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMipsS16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMipsS16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMipsS16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMipsS16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMipsS16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMipsS16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse},

    // 8-bit-lane patterns.
    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMipsS8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMipsS8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMipsS8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMipsS8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMipsS8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMipsS8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}};
2214 
2215 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
2216  size_t num_entries, bool is_swizzle,
2217  ArchOpcode* opcode) {
2218  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
2219  for (size_t i = 0; i < num_entries; ++i) {
2220  const ShuffleEntry& entry = table[i];
2221  int j = 0;
2222  for (; j < kSimd128Size; ++j) {
2223  if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
2224  break;
2225  }
2226  }
2227  if (j == kSimd128Size) {
2228  *opcode = entry.opcode;
2229  return true;
2230  }
2231  }
2232  return false;
2233 }
2234 
2235 } // namespace
2236 
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
  // Lower a generic 8x16 shuffle by trying progressively more generic
  // encodings:
  //   1. a single arch-specific shuffle instruction from arch_shuffles,
  //   2. a byte-wise concatenate-and-slide (kMipsS8x16Concat),
  //   3. a 32x4-lane shuffle with the four lane indices packed into one
  //      immediate,
  //   4. the fully generic kMipsS8x16Shuffle with the 16-byte pattern packed
  //      into four immediates.
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  uint8_t shuffle32x4[4];
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t offset;
  MipsOperandGenerator g(this);
  if (TryMatchConcat(shuffle, &offset)) {
    // Note: inputs are deliberately passed in swapped order for Concat.
    Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
         g.UseRegister(input0), g.UseImmediate(offset));
    return;
  }
  if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
    return;
  }
  Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
       g.UseImmediate(Pack4Lanes(shuffle + 4)),
       g.UseImmediate(Pack4Lanes(shuffle + 8)),
       g.UseImmediate(Pack4Lanes(shuffle + 12)));
}
2268 
2269 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
2270  MipsOperandGenerator g(this);
2271  Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2272 }
2273 
2274 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
2275  MipsOperandGenerator g(this);
2276  Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2277 }
2278 
2279 // static
2280 MachineOperatorBuilder::Flags
2281 InstructionSelector::SupportedMachineOperatorFlags() {
2282  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2283  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
2284  IsFp64Mode()) {
2285  flags |= MachineOperatorBuilder::kFloat64RoundDown |
2286  MachineOperatorBuilder::kFloat64RoundUp |
2287  MachineOperatorBuilder::kFloat64RoundTruncate |
2288  MachineOperatorBuilder::kFloat64RoundTiesEven;
2289  }
2290 
2291  return flags | MachineOperatorBuilder::kWord32Ctz |
2292  MachineOperatorBuilder::kWord32Popcnt |
2293  MachineOperatorBuilder::kInt32DivIsSafe |
2294  MachineOperatorBuilder::kUint32DivIsSafe |
2295  MachineOperatorBuilder::kWord32ShiftIsSafe |
2296  MachineOperatorBuilder::kFloat32RoundDown |
2297  MachineOperatorBuilder::kFloat32RoundUp |
2298  MachineOperatorBuilder::kFloat32RoundTruncate |
2299  MachineOperatorBuilder::kFloat32RoundTiesEven;
2300 }
2301 
2302 // static
2303 MachineOperatorBuilder::AlignmentRequirements
2304 InstructionSelector::AlignmentRequirements() {
2305  if (IsMipsArchVariant(kMips32r6)) {
2306  return MachineOperatorBuilder::AlignmentRequirements::
2307  FullUnalignedAccessSupport();
2308  } else {
2309  DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
2310  IsMipsArchVariant(kMips32r2));
2311  return MachineOperatorBuilder::AlignmentRequirements::
2312  NoUnalignedAccessSupport();
2313  }
2314 }
2315 
2316 #undef SIMD_BINOP_LIST
2317 #undef SIMD_SHIFT_OP_LIST
2318 #undef SIMD_UNOP_LIST
2319 #undef SIMD_TYPE_LIST
2320 #undef TRACE_UNIMPL
2321 #undef TRACE
2322 
2323 } // namespace compiler
2324 } // namespace internal
2325 } // namespace v8
// Definition: libplatform.h:13 (documentation-extraction residue; not part of
// the original source file)