// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// instruction-selector-s390.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/adapters.h"
6 #include "src/compiler/backend/instruction-selector-impl.h"
7 #include "src/compiler/node-matchers.h"
8 #include "src/compiler/node-properties.h"
9 #include "src/frame-constants.h"
10 
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14 
15 enum class OperandMode : uint32_t {
16  kNone = 0u,
17  // Immediate mode
18  kShift32Imm = 1u << 0,
19  kShift64Imm = 1u << 1,
20  kInt32Imm = 1u << 2,
21  kInt32Imm_Negate = 1u << 3,
22  kUint32Imm = 1u << 4,
23  kInt20Imm = 1u << 5,
24  kUint12Imm = 1u << 6,
25  // Instr format
26  kAllowRRR = 1u << 7,
27  kAllowRM = 1u << 8,
28  kAllowRI = 1u << 9,
29  kAllowRRI = 1u << 10,
30  kAllowRRM = 1u << 11,
31  // Useful combination
32  kAllowImmediate = kAllowRI | kAllowRRI,
33  kAllowMemoryOperand = kAllowRM | kAllowRRM,
34  kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
35  kBitWiseCommonMode = kAllowRI,
36  kArithmeticCommonMode = kAllowRM | kAllowRI
37 };
38 
39 typedef base::Flags<OperandMode, uint32_t> OperandModes;
40 DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
41 OperandModes immediateModeMask =
42  OperandMode::kShift32Imm | OperandMode::kShift64Imm |
43  OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
44  OperandMode::kUint32Imm | OperandMode::kInt20Imm;
45 
46 #define AndCommonMode \
47  ((OperandMode::kAllowRM | \
48  (CpuFeatures::IsSupported(DISTINCT_OPS) ? OperandMode::kAllowRRR \
49  : OperandMode::kNone)))
50 #define And64OperandMode AndCommonMode
51 #define Or64OperandMode And64OperandMode
52 #define Xor64OperandMode And64OperandMode
53 
54 #define And32OperandMode \
55  (AndCommonMode | OperandMode::kAllowRI | OperandMode::kUint32Imm)
56 #define Or32OperandMode And32OperandMode
57 #define Xor32OperandMode And32OperandMode
58 
59 #define Shift32OperandMode \
60  ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
61  (CpuFeatures::IsSupported(DISTINCT_OPS) \
62  ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
63  : OperandMode::kNone)))
64 
65 #define Shift64OperandMode \
66  ((OperandMode::kAllowRI | OperandMode::kShift64Imm | \
67  OperandMode::kAllowRRR | OperandMode::kAllowRRI))
68 
69 #define AddOperandMode \
70  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
71  (CpuFeatures::IsSupported(DISTINCT_OPS) \
72  ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
73  : OperandMode::kArithmeticCommonMode)))
74 #define SubOperandMode \
75  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
76  (CpuFeatures::IsSupported(DISTINCT_OPS) \
77  ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI) \
78  : OperandMode::kArithmeticCommonMode)))
79 #define MulOperandMode \
80  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
81 
82 // Adds S390-specific methods for generating operands.
83 class S390OperandGenerator final : public OperandGenerator {
84  public:
85  explicit S390OperandGenerator(InstructionSelector* selector)
86  : OperandGenerator(selector) {}
87 
88  InstructionOperand UseOperand(Node* node, OperandModes mode) {
89  if (CanBeImmediate(node, mode)) {
90  return UseImmediate(node);
91  }
92  return UseRegister(node);
93  }
94 
95  InstructionOperand UseAnyExceptImmediate(Node* node) {
96  if (NodeProperties::IsConstant(node))
97  return UseRegister(node);
98  else
99  return Use(node);
100  }
101 
102  int64_t GetImmediate(Node* node) {
103  if (node->opcode() == IrOpcode::kInt32Constant)
104  return OpParameter<int32_t>(node->op());
105  else if (node->opcode() == IrOpcode::kInt64Constant)
106  return OpParameter<int64_t>(node->op());
107  else
108  UNIMPLEMENTED();
109  return 0L;
110  }
111 
112  bool CanBeImmediate(Node* node, OperandModes mode) {
113  int64_t value;
114  if (node->opcode() == IrOpcode::kInt32Constant)
115  value = OpParameter<int32_t>(node->op());
116  else if (node->opcode() == IrOpcode::kInt64Constant)
117  value = OpParameter<int64_t>(node->op());
118  else
119  return false;
120  return CanBeImmediate(value, mode);
121  }
122 
123  bool CanBeImmediate(int64_t value, OperandModes mode) {
124  if (mode & OperandMode::kShift32Imm)
125  return 0 <= value && value < 32;
126  else if (mode & OperandMode::kShift64Imm)
127  return 0 <= value && value < 64;
128  else if (mode & OperandMode::kInt32Imm)
129  return is_int32(value);
130  else if (mode & OperandMode::kInt32Imm_Negate)
131  return is_int32(-value);
132  else if (mode & OperandMode::kUint32Imm)
133  return is_uint32(value);
134  else if (mode & OperandMode::kInt20Imm)
135  return is_int20(value);
136  else if (mode & OperandMode::kUint12Imm)
137  return is_uint12(value);
138  else
139  return false;
140  }
141 
142  bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
143  int effect_level) {
144  if (input->opcode() != IrOpcode::kLoad ||
145  !selector()->CanCover(user, input)) {
146  return false;
147  }
148 
149  if (effect_level != selector()->GetEffectLevel(input)) {
150  return false;
151  }
152 
153  MachineRepresentation rep =
154  LoadRepresentationOf(input->op()).representation();
155  switch (opcode) {
156  case kS390_Cmp64:
157  case kS390_LoadAndTestWord64:
158  return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
159  case kS390_LoadAndTestWord32:
160  case kS390_Cmp32:
161  return rep == MachineRepresentation::kWord32;
162  default:
163  break;
164  }
165  return false;
166  }
167 
168  AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
169  Node* displacement,
170  DisplacementMode displacement_mode,
171  InstructionOperand inputs[],
172  size_t* input_count) {
173  AddressingMode mode = kMode_MRI;
174  if (base != nullptr) {
175  inputs[(*input_count)++] = UseRegister(base);
176  if (index != nullptr) {
177  inputs[(*input_count)++] = UseRegister(index);
178  if (displacement != nullptr) {
179  inputs[(*input_count)++] = displacement_mode
180  ? UseNegatedImmediate(displacement)
181  : UseImmediate(displacement);
182  mode = kMode_MRRI;
183  } else {
184  mode = kMode_MRR;
185  }
186  } else {
187  if (displacement == nullptr) {
188  mode = kMode_MR;
189  } else {
190  inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
191  ? UseNegatedImmediate(displacement)
192  : UseImmediate(displacement);
193  mode = kMode_MRI;
194  }
195  }
196  } else {
197  DCHECK_NOT_NULL(index);
198  inputs[(*input_count)++] = UseRegister(index);
199  if (displacement != nullptr) {
200  inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
201  ? UseNegatedImmediate(displacement)
202  : UseImmediate(displacement);
203  mode = kMode_MRI;
204  } else {
205  mode = kMode_MR;
206  }
207  }
208  return mode;
209  }
210 
211  AddressingMode GetEffectiveAddressMemoryOperand(
212  Node* operand, InstructionOperand inputs[], size_t* input_count,
213  OperandModes immediate_mode = OperandMode::kInt20Imm) {
214 #if V8_TARGET_ARCH_S390X
216  AddressOption::kAllowInputSwap);
217 #else
219  AddressOption::kAllowInputSwap);
220 #endif
221  DCHECK(m.matches());
222  if ((m.displacement() == nullptr ||
223  CanBeImmediate(m.displacement(), immediate_mode))) {
224  DCHECK_EQ(0, m.scale());
225  return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
226  m.displacement_mode(), inputs,
227  input_count);
228  } else {
229  inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
230  inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
231  return kMode_MRR;
232  }
233  }
234 
235  bool CanBeBetterLeftOperand(Node* node) const {
236  return !selector()->IsLive(node);
237  }
238 
239  MachineRepresentation GetRepresentation(Node* node) {
240  return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
241  }
242 
243  bool Is64BitOperand(Node* node) {
244  return MachineRepresentation::kWord64 == GetRepresentation(node);
245  }
246 
247  // Use the stack pointer if the node is LoadStackPointer, otherwise assign a
248  // register.
249  InstructionOperand UseRegisterOrStackPointer(Node* node) {
250  if (node->opcode() == IrOpcode::kLoadStackPointer) {
251  return LocationOperand(LocationOperand::EXPLICIT,
252  LocationOperand::REGISTER,
253  MachineRepresentation::kWord32, sp.code());
254  }
255  return UseRegister(node);
256  }
257 };
258 
259 namespace {
260 
261 bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
262  switch (opcode) {
263  case kS390_AddFloat:
264  case kS390_AddDouble:
265  case kS390_CmpFloat:
266  case kS390_CmpDouble:
267  case kS390_Float32ToDouble:
268  return true;
269  default:
270  return false;
271  }
272 }
273 
274 bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
275  ArchOpcode opcode = ArchOpcodeField::decode(op);
276  return S390OpcodeOnlySupport12BitDisp(opcode);
277 }
278 
279 #define OpcodeImmMode(op) \
280  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
281  : OperandMode::kInt20Imm)
282 
283 ArchOpcode SelectLoadOpcode(Node* node) {
284  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
285  ArchOpcode opcode = kArchNop;
286  switch (load_rep.representation()) {
287  case MachineRepresentation::kFloat32:
288  opcode = kS390_LoadFloat32;
289  break;
290  case MachineRepresentation::kFloat64:
291  opcode = kS390_LoadDouble;
292  break;
293  case MachineRepresentation::kBit: // Fall through.
294  case MachineRepresentation::kWord8:
295  opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
296  break;
297  case MachineRepresentation::kWord16:
298  opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
299  break;
300 #if !V8_TARGET_ARCH_S390X
301  case MachineRepresentation::kTaggedSigned: // Fall through.
302  case MachineRepresentation::kTaggedPointer: // Fall through.
303  case MachineRepresentation::kTagged: // Fall through.
304 #endif
305  case MachineRepresentation::kWord32:
306  opcode = kS390_LoadWordU32;
307  break;
308 #if V8_TARGET_ARCH_S390X
309  case MachineRepresentation::kTaggedSigned: // Fall through.
310  case MachineRepresentation::kTaggedPointer: // Fall through.
311  case MachineRepresentation::kTagged: // Fall through.
312  case MachineRepresentation::kWord64:
313  opcode = kS390_LoadWord64;
314  break;
315 #else
316  case MachineRepresentation::kWord64: // Fall through.
317 #endif
318  case MachineRepresentation::kSimd128: // Fall through.
319  case MachineRepresentation::kNone:
320  default:
321  UNREACHABLE();
322  }
323  return opcode;
324 }
325 
326 #define RESULT_IS_WORD32_LIST(V) \
327  /* Float unary op*/ \
328  V(BitcastFloat32ToInt32) \
329  /* V(TruncateFloat64ToWord32) */ \
330  V(RoundFloat64ToInt32) \
331  V(TruncateFloat32ToInt32) \
332  V(TruncateFloat32ToUint32) \
333  V(TruncateFloat64ToUint32) \
334  V(ChangeFloat64ToInt32) \
335  V(ChangeFloat64ToUint32) \
336  /* Word32 unary op */ \
337  V(Word32Clz) \
338  V(Word32Popcnt) \
339  V(Float64ExtractLowWord32) \
340  V(Float64ExtractHighWord32) \
341  V(SignExtendWord8ToInt32) \
342  V(SignExtendWord16ToInt32) \
343  /* Word32 bin op */ \
344  V(Int32Add) \
345  V(Int32Sub) \
346  V(Int32Mul) \
347  V(Int32AddWithOverflow) \
348  V(Int32SubWithOverflow) \
349  V(Int32MulWithOverflow) \
350  V(Int32MulHigh) \
351  V(Uint32MulHigh) \
352  V(Int32Div) \
353  V(Uint32Div) \
354  V(Int32Mod) \
355  V(Uint32Mod) \
356  V(Word32Ror) \
357  V(Word32And) \
358  V(Word32Or) \
359  V(Word32Xor) \
360  V(Word32Shl) \
361  V(Word32Shr) \
362  V(Word32Sar)
363 
364 bool ProduceWord32Result(Node* node) {
365 #if !V8_TARGET_ARCH_S390X
366  return true;
367 #else
368  switch (node->opcode()) {
369 #define VISITOR(name) case IrOpcode::k##name:
370  RESULT_IS_WORD32_LIST(VISITOR)
371 #undef VISITOR
372  return true;
373  // TODO(john.yan): consider the following case to be valid
374  // case IrOpcode::kWord32Equal:
375  // case IrOpcode::kInt32LessThan:
376  // case IrOpcode::kInt32LessThanOrEqual:
377  // case IrOpcode::kUint32LessThan:
378  // case IrOpcode::kUint32LessThanOrEqual:
379  // case IrOpcode::kUint32MulHigh:
380  // // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
381  // the
382  // // zero-extension is a no-op.
383  // return true;
384  // case IrOpcode::kProjection: {
385  // Node* const value = node->InputAt(0);
386  // switch (value->opcode()) {
387  // case IrOpcode::kInt32AddWithOverflow:
388  // case IrOpcode::kInt32SubWithOverflow:
389  // case IrOpcode::kInt32MulWithOverflow:
390  // return true;
391  // default:
392  // return false;
393  // }
394  // }
395  case IrOpcode::kLoad: {
396  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
397  switch (load_rep.representation()) {
398  case MachineRepresentation::kWord32:
399  return true;
400  case MachineRepresentation::kWord8:
401  if (load_rep.IsSigned())
402  return false;
403  else
404  return true;
405  default:
406  return false;
407  }
408  }
409  default:
410  return false;
411  }
412 #endif
413 }
414 
415 static inline bool DoZeroExtForResult(Node* node) {
416 #if V8_TARGET_ARCH_S390X
417  return ProduceWord32Result(node);
418 #else
419  return false;
420 #endif
421 }
422 
423 // TODO(john.yan): Create VisiteShift to match dst = src shift (R+I)
424 #if 0
425 void VisitShift() { }
426 #endif
427 
#if V8_TARGET_ARCH_S390X
// Emits a truncating double-to-integer instruction. If a Projection(1) user
// exists, it receives the "success" flag as a second register output.
void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  S390OperandGenerator gen(selector);
  InstructionOperand inputs[] = {gen.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = gen.DefineAsRegister(node);

  Node* const success_output = NodeProperties::FindProjection(node, 1);
  if (success_output != nullptr) {
    outputs[output_count++] = gen.DefineAsRegister(success_output);
  }

  selector->Emit(opcode, output_count, outputs, 1, inputs);
}
#endif
445 
446 template <class CanCombineWithLoad>
447 void GenerateRightOperands(InstructionSelector* selector, Node* node,
448  Node* right, InstructionCode& opcode,
449  OperandModes& operand_mode,
450  InstructionOperand* inputs, size_t& input_count,
451  CanCombineWithLoad canCombineWithLoad) {
452  S390OperandGenerator g(selector);
453 
454  if ((operand_mode & OperandMode::kAllowImmediate) &&
455  g.CanBeImmediate(right, operand_mode)) {
456  inputs[input_count++] = g.UseImmediate(right);
457  // Can only be RI or RRI
458  operand_mode &= OperandMode::kAllowImmediate;
459  } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
460  NodeMatcher mright(right);
461  if (mright.IsLoad() && selector->CanCover(node, right) &&
462  canCombineWithLoad(SelectLoadOpcode(right))) {
463  AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
464  right, inputs, &input_count, OpcodeImmMode(opcode));
465  opcode |= AddressingModeField::encode(mode);
466  operand_mode &= ~OperandMode::kAllowImmediate;
467  if (operand_mode & OperandMode::kAllowRM)
468  operand_mode &= ~OperandMode::kAllowDistinctOps;
469  } else if (operand_mode & OperandMode::kAllowRM) {
470  DCHECK(!(operand_mode & OperandMode::kAllowRRM));
471  inputs[input_count++] = g.UseAnyExceptImmediate(right);
472  // Can not be Immediate
473  operand_mode &=
474  ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
475  } else if (operand_mode & OperandMode::kAllowRRM) {
476  DCHECK(!(operand_mode & OperandMode::kAllowRM));
477  inputs[input_count++] = g.UseAnyExceptImmediate(right);
478  // Can not be Immediate
479  operand_mode &= ~OperandMode::kAllowImmediate;
480  } else {
481  UNREACHABLE();
482  }
483  } else {
484  inputs[input_count++] = g.UseRegister(right);
485  // Can only be RR or RRR
486  operand_mode &= OperandMode::kAllowRRR;
487  }
488 }
489 
490 template <class CanCombineWithLoad>
491 void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
492  Node* left, Node* right, InstructionCode& opcode,
493  OperandModes& operand_mode,
494  InstructionOperand* inputs, size_t& input_count,
495  CanCombineWithLoad canCombineWithLoad) {
496  S390OperandGenerator g(selector);
497  // left is always register
498  InstructionOperand const left_input = g.UseRegister(left);
499  inputs[input_count++] = left_input;
500 
501  if (left == right) {
502  inputs[input_count++] = left_input;
503  // Can only be RR or RRR
504  operand_mode &= OperandMode::kAllowRRR;
505  } else {
506  GenerateRightOperands(selector, node, right, opcode, operand_mode, inputs,
507  input_count, canCombineWithLoad);
508  }
509 }
510 
511 template <class CanCombineWithLoad>
512 void VisitUnaryOp(InstructionSelector* selector, Node* node,
513  InstructionCode opcode, OperandModes operand_mode,
514  FlagsContinuation* cont,
515  CanCombineWithLoad canCombineWithLoad);
516 
517 template <class CanCombineWithLoad>
518 void VisitBinOp(InstructionSelector* selector, Node* node,
519  InstructionCode opcode, OperandModes operand_mode,
520  FlagsContinuation* cont, CanCombineWithLoad canCombineWithLoad);
521 
522 // Generate The following variations:
523 // VisitWord32UnaryOp, VisitWord32BinOp,
524 // VisitWord64UnaryOp, VisitWord64BinOp,
525 // VisitFloat32UnaryOp, VisitFloat32BinOp,
526 // VisitFloat64UnaryOp, VisitFloat64BinOp
527 #define VISIT_OP_LIST_32(V) \
528  V(Word32, Unary, [](ArchOpcode opcode) { \
529  return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
530  }) \
531  V(Word64, Unary, \
532  [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; }) \
533  V(Float32, Unary, \
534  [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
535  V(Float64, Unary, \
536  [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; }) \
537  V(Word32, Bin, [](ArchOpcode opcode) { \
538  return opcode == kS390_LoadWordS32 || opcode == kS390_LoadWordU32; \
539  }) \
540  V(Float32, Bin, \
541  [](ArchOpcode opcode) { return opcode == kS390_LoadFloat32; }) \
542  V(Float64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadDouble; })
543 
544 #if V8_TARGET_ARCH_S390X
545 #define VISIT_OP_LIST(V) \
546  VISIT_OP_LIST_32(V) \
547  V(Word64, Bin, [](ArchOpcode opcode) { return opcode == kS390_LoadWord64; })
548 #else
549 #define VISIT_OP_LIST VISIT_OP_LIST_32
550 #endif
551 
552 #define DECLARE_VISIT_HELPER_FUNCTIONS(type1, type2, canCombineWithLoad) \
553  static inline void Visit##type1##type2##Op( \
554  InstructionSelector* selector, Node* node, InstructionCode opcode, \
555  OperandModes operand_mode, FlagsContinuation* cont) { \
556  Visit##type2##Op(selector, node, opcode, operand_mode, cont, \
557  canCombineWithLoad); \
558  } \
559  static inline void Visit##type1##type2##Op( \
560  InstructionSelector* selector, Node* node, InstructionCode opcode, \
561  OperandModes operand_mode) { \
562  FlagsContinuation cont; \
563  Visit##type1##type2##Op(selector, node, opcode, operand_mode, &cont); \
564  }
565 VISIT_OP_LIST(DECLARE_VISIT_HELPER_FUNCTIONS);
566 #undef DECLARE_VISIT_HELPER_FUNCTIONS
567 #undef VISIT_OP_LIST_32
568 #undef VISIT_OP_LIST
569 
570 template <class CanCombineWithLoad>
571 void VisitUnaryOp(InstructionSelector* selector, Node* node,
572  InstructionCode opcode, OperandModes operand_mode,
573  FlagsContinuation* cont,
574  CanCombineWithLoad canCombineWithLoad) {
575  S390OperandGenerator g(selector);
576  InstructionOperand inputs[8];
577  size_t input_count = 0;
578  InstructionOperand outputs[2];
579  size_t output_count = 0;
580  Node* input = node->InputAt(0);
581 
582  GenerateRightOperands(selector, node, input, opcode, operand_mode, inputs,
583  input_count, canCombineWithLoad);
584 
585  bool input_is_word32 = ProduceWord32Result(input);
586 
587  bool doZeroExt = DoZeroExtForResult(node);
588  bool canEliminateZeroExt = input_is_word32;
589 
590  if (doZeroExt) {
591  // Add zero-ext indication
592  inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
593  }
594 
595  if (!cont->IsDeoptimize()) {
596  // If we can deoptimize as a result of the binop, we need to make sure
597  // that the deopt inputs are not overwritten by the binop result. One way
598  // to achieve that is to declare the output register as same-as-first.
599  if (doZeroExt && canEliminateZeroExt) {
600  // we have to make sure result and left use the same register
601  outputs[output_count++] = g.DefineSameAsFirst(node);
602  } else {
603  outputs[output_count++] = g.DefineAsRegister(node);
604  }
605  } else {
606  outputs[output_count++] = g.DefineSameAsFirst(node);
607  }
608 
609  DCHECK_NE(0u, input_count);
610  DCHECK_NE(0u, output_count);
611  DCHECK_GE(arraysize(inputs), input_count);
612  DCHECK_GE(arraysize(outputs), output_count);
613 
614  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
615  inputs, cont);
616 }
617 
618 template <class CanCombineWithLoad>
619 void VisitBinOp(InstructionSelector* selector, Node* node,
620  InstructionCode opcode, OperandModes operand_mode,
621  FlagsContinuation* cont,
622  CanCombineWithLoad canCombineWithLoad) {
623  S390OperandGenerator g(selector);
624  Int32BinopMatcher m(node);
625  Node* left = m.left().node();
626  Node* right = m.right().node();
627  InstructionOperand inputs[8];
628  size_t input_count = 0;
629  InstructionOperand outputs[2];
630  size_t output_count = 0;
631 
632  if (node->op()->HasProperty(Operator::kCommutative) &&
633  !g.CanBeImmediate(right, operand_mode) &&
634  (g.CanBeBetterLeftOperand(right))) {
635  std::swap(left, right);
636  }
637 
638  GenerateBinOpOperands(selector, node, left, right, opcode, operand_mode,
639  inputs, input_count, canCombineWithLoad);
640 
641  bool left_is_word32 = ProduceWord32Result(left);
642 
643  bool doZeroExt = DoZeroExtForResult(node);
644  bool canEliminateZeroExt = left_is_word32;
645 
646  if (doZeroExt) {
647  // Add zero-ext indication
648  inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
649  }
650 
651  if ((operand_mode & OperandMode::kAllowDistinctOps) &&
652  // If we can deoptimize as a result of the binop, we need to make sure
653  // that the deopt inputs are not overwritten by the binop result. One way
654  // to achieve that is to declare the output register as same-as-first.
655  !cont->IsDeoptimize()) {
656  if (doZeroExt && canEliminateZeroExt) {
657  // we have to make sure result and left use the same register
658  outputs[output_count++] = g.DefineSameAsFirst(node);
659  } else {
660  outputs[output_count++] = g.DefineAsRegister(node);
661  }
662  } else {
663  outputs[output_count++] = g.DefineSameAsFirst(node);
664  }
665 
666  DCHECK_NE(0u, input_count);
667  DCHECK_NE(0u, output_count);
668  DCHECK_GE(arraysize(inputs), input_count);
669  DCHECK_GE(arraysize(outputs), output_count);
670 
671  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
672  inputs, cont);
673 }
674 
675 } // namespace
676 
677 void InstructionSelector::VisitStackSlot(Node* node) {
678  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
679  int slot = frame_->AllocateSpillSlot(rep.size());
680  OperandGenerator g(this);
681 
682  Emit(kArchStackSlot, g.DefineAsRegister(node),
683  sequence()->AddImmediate(Constant(slot)), 0, nullptr);
684 }
685 
686 void InstructionSelector::VisitDebugAbort(Node* node) {
687  S390OperandGenerator g(this);
688  Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
689 }
690 
691 void InstructionSelector::VisitLoad(Node* node) {
692  S390OperandGenerator g(this);
693  InstructionCode opcode = SelectLoadOpcode(node);
694  InstructionOperand outputs[1];
695  outputs[0] = g.DefineAsRegister(node);
696  InstructionOperand inputs[3];
697  size_t input_count = 0;
698  AddressingMode mode =
699  g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
700  opcode |= AddressingModeField::encode(mode);
701  if (node->opcode() == IrOpcode::kPoisonedLoad) {
702  CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
703  opcode |= MiscField::encode(kMemoryAccessPoisoned);
704  }
705 
706  Emit(opcode, 1, outputs, input_count, inputs);
707 }
708 
709 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
710 
711 void InstructionSelector::VisitProtectedLoad(Node* node) {
712  // TODO(eholk)
713  UNIMPLEMENTED();
714 }
715 
716 static void VisitGeneralStore(
717  InstructionSelector* selector, Node* node, MachineRepresentation rep,
718  WriteBarrierKind write_barrier_kind = kNoWriteBarrier) {
719  S390OperandGenerator g(selector);
720  Node* base = node->InputAt(0);
721  Node* offset = node->InputAt(1);
722  Node* value = node->InputAt(2);
723  if (write_barrier_kind != kNoWriteBarrier) {
724  DCHECK(CanBeTaggedPointer(rep));
725  AddressingMode addressing_mode;
726  InstructionOperand inputs[3];
727  size_t input_count = 0;
728  inputs[input_count++] = g.UseUniqueRegister(base);
729  // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
730  // for the store itself, so we must check compatibility with both.
731  if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
732  inputs[input_count++] = g.UseImmediate(offset);
733  addressing_mode = kMode_MRI;
734  } else {
735  inputs[input_count++] = g.UseUniqueRegister(offset);
736  addressing_mode = kMode_MRR;
737  }
738  inputs[input_count++] = g.UseUniqueRegister(value);
739  RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
740  switch (write_barrier_kind) {
741  case kNoWriteBarrier:
742  UNREACHABLE();
743  break;
744  case kMapWriteBarrier:
745  record_write_mode = RecordWriteMode::kValueIsMap;
746  break;
747  case kPointerWriteBarrier:
748  record_write_mode = RecordWriteMode::kValueIsPointer;
749  break;
750  case kFullWriteBarrier:
751  record_write_mode = RecordWriteMode::kValueIsAny;
752  break;
753  }
754  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
755  size_t const temp_count = arraysize(temps);
756  InstructionCode code = kArchStoreWithWriteBarrier;
757  code |= AddressingModeField::encode(addressing_mode);
758  code |= MiscField::encode(static_cast<int>(record_write_mode));
759  selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
760  } else {
761  ArchOpcode opcode = kArchNop;
762  NodeMatcher m(value);
763  switch (rep) {
764  case MachineRepresentation::kFloat32:
765  opcode = kS390_StoreFloat32;
766  break;
767  case MachineRepresentation::kFloat64:
768  opcode = kS390_StoreDouble;
769  break;
770  case MachineRepresentation::kBit: // Fall through.
771  case MachineRepresentation::kWord8:
772  opcode = kS390_StoreWord8;
773  break;
774  case MachineRepresentation::kWord16:
775  opcode = kS390_StoreWord16;
776  break;
777 #if !V8_TARGET_ARCH_S390X
778  case MachineRepresentation::kTaggedSigned: // Fall through.
779  case MachineRepresentation::kTaggedPointer: // Fall through.
780  case MachineRepresentation::kTagged: // Fall through.
781 #endif
782  case MachineRepresentation::kWord32:
783  opcode = kS390_StoreWord32;
784  if (m.IsWord32ReverseBytes()) {
785  opcode = kS390_StoreReverse32;
786  value = value->InputAt(0);
787  }
788  break;
789 #if V8_TARGET_ARCH_S390X
790  case MachineRepresentation::kTaggedSigned: // Fall through.
791  case MachineRepresentation::kTaggedPointer: // Fall through.
792  case MachineRepresentation::kTagged: // Fall through.
793  case MachineRepresentation::kWord64:
794  opcode = kS390_StoreWord64;
795  if (m.IsWord64ReverseBytes()) {
796  opcode = kS390_StoreReverse64;
797  value = value->InputAt(0);
798  }
799  break;
800 #else
801  case MachineRepresentation::kWord64: // Fall through.
802 #endif
803  case MachineRepresentation::kSimd128: // Fall through.
804  case MachineRepresentation::kNone:
805  UNREACHABLE();
806  return;
807  }
808  InstructionOperand inputs[4];
809  size_t input_count = 0;
810  AddressingMode addressing_mode =
811  g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
812  InstructionCode code =
813  opcode | AddressingModeField::encode(addressing_mode);
814  InstructionOperand value_operand = g.UseRegister(value);
815  inputs[input_count++] = value_operand;
816  selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
817  input_count, inputs);
818  }
819 }
820 
821 void InstructionSelector::VisitStore(Node* node) {
822  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
823  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
824  MachineRepresentation rep = store_rep.representation();
825 
826  VisitGeneralStore(this, node, rep, write_barrier_kind);
827 }
828 
829 void InstructionSelector::VisitProtectedStore(Node* node) {
830  // TODO(eholk)
831  UNIMPLEMENTED();
832 }
833 
834 // Architecture supports unaligned access, therefore VisitLoad is used instead
835 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
836 
837 // Architecture supports unaligned access, therefore VisitStore is used instead
838 void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
839 
840 #if 0
841 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
842  int mask_width = base::bits::CountPopulation(value);
843  int mask_msb = base::bits::CountLeadingZeros32(value);
844  int mask_lsb = base::bits::CountTrailingZeros32(value);
845  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
846  return false;
847  *mb = mask_lsb + mask_width - 1;
848  *me = mask_lsb;
849  return true;
850 }
851 #endif
852 
#if V8_TARGET_ARCH_S390X
// Returns true when |value| is a single contiguous run of 1-bits. On
// success, *mb receives the bit index of the run's highest set bit and *me
// the index of its lowest set bit (bit 0 = least significant).
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int const popcnt = base::bits::CountPopulation(value);
  int const lead = base::bits::CountLeadingZeros64(value);
  int const trail = base::bits::CountTrailingZeros64(value);
  if (popcnt == 0 || lead + popcnt + trail != 64) return false;
  *mb = trail + popcnt - 1;
  *me = trail;
  return true;
}
#endif
865 
866 #if V8_TARGET_ARCH_S390X
867 void InstructionSelector::VisitWord64And(Node* node) {
868  S390OperandGenerator g(this);
869  Int64BinopMatcher m(node);
870  int mb = 0;
871  int me = 0;
872  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
873  int sh = 0;
874  Node* left = m.left().node();
875  if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
876  CanCover(node, left)) {
877  Int64BinopMatcher mleft(m.left().node());
878  if (mleft.right().IsInRange(0, 63)) {
879  left = mleft.left().node();
880  sh = mleft.right().Value();
881  if (m.left().IsWord64Shr()) {
882  // Adjust the mask such that it doesn't include any rotated bits.
883  if (mb > 63 - sh) mb = 63 - sh;
884  sh = (64 - sh) & 0x3F;
885  } else {
886  // Adjust the mask such that it doesn't include any rotated bits.
887  if (me < sh) me = sh;
888  }
889  }
890  }
891  if (mb >= me) {
892  bool match = false;
893  ArchOpcode opcode;
894  int mask;
895  if (me == 0) {
896  match = true;
897  opcode = kS390_RotLeftAndClearLeft64;
898  mask = mb;
899  } else if (mb == 63) {
900  match = true;
901  opcode = kS390_RotLeftAndClearRight64;
902  mask = me;
903  } else if (sh && me <= sh && m.left().IsWord64Shl()) {
904  match = true;
905  opcode = kS390_RotLeftAndClear64;
906  mask = mb;
907  }
908  if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
909  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
910  g.TempImmediate(sh), g.TempImmediate(mask));
911  return;
912  }
913  }
914  }
915  VisitWord64BinOp(this, node, kS390_And64, And64OperandMode);
916 }
917 
// Tries to combine Word64Shl with an inner Word64And whose constant, shifted
// by the shift amount, forms a contiguous mask, emitting one rotate-and-clear
// instruction; otherwise emits a plain 64-bit left shift.
void InstructionSelector::VisitWord64Shl(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    // The effective mask is the And constant shifted left by sh.
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          // Mask reaches down to bit 0: clear high bits only.
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          // Mask reaches up to bit 63: clear low bits only.
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          // Interior mask combined with the left shift.
          match = true;
          opcode = kS390_RotLeftAndClear64;
          mask = mb;
        }
        if (match && CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_ShiftLeft64, Shift64OperandMode);
}
959 
// Tries to combine Word64Shr with an inner Word64And whose constant, shifted
// right by the shift amount, forms a contiguous mask, emitting one
// rotate-and-clear instruction; otherwise emits a plain 64-bit right shift.
void InstructionSelector::VisitWord64Shr(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    // The effective mask is the And constant logically shifted right by sh.
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      // Express the right shift as a left rotate by (64 - sh).
      sh = (64 - sh) & 0x3F;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          // Mask reaches down to bit 0: clear high bits only.
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          // Mask reaches up to bit 63: clear low bits only.
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitWord64BinOp(this, node, kS390_ShiftRight64, Shift64OperandMode);
}
997 #endif
998 
999 static inline bool TryMatchSignExtInt16OrInt8FromWord32Sar(
1000  InstructionSelector* selector, Node* node) {
1001  S390OperandGenerator g(selector);
1002  Int32BinopMatcher m(node);
1003  if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
1004  Int32BinopMatcher mleft(m.left().node());
1005  if (mleft.right().Is(16) && m.right().Is(16)) {
1006  bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
1007  selector->Emit(kS390_SignExtendWord16ToInt32,
1008  canEliminateZeroExt ? g.DefineSameAsFirst(node)
1009  : g.DefineAsRegister(node),
1010  g.UseRegister(mleft.left().node()),
1011  g.TempImmediate(!canEliminateZeroExt));
1012  return true;
1013  } else if (mleft.right().Is(24) && m.right().Is(24)) {
1014  bool canEliminateZeroExt = ProduceWord32Result(mleft.left().node());
1015  selector->Emit(kS390_SignExtendWord8ToInt32,
1016  canEliminateZeroExt ? g.DefineSameAsFirst(node)
1017  : g.DefineAsRegister(node),
1018  g.UseRegister(mleft.left().node()),
1019  g.TempImmediate(!canEliminateZeroExt));
1020  return true;
1021  }
1022  }
1023  return false;
1024 }
1025 
1026 #if !V8_TARGET_ARCH_S390X
1027 void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
1028  InstructionCode opcode2, Node* node) {
1029  S390OperandGenerator g(selector);
1030 
1031  Node* projection1 = NodeProperties::FindProjection(node, 1);
1032  if (projection1) {
1033  // We use UseUniqueRegister here to avoid register sharing with the output
1034  // registers.
1035  InstructionOperand inputs[] = {
1036  g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
1037  g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
1038 
1039  InstructionOperand outputs[] = {
1040  g.DefineAsRegister(node),
1041  g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1042 
1043  selector->Emit(opcode, 2, outputs, 4, inputs);
1044  } else {
1045  // The high word of the result is not used, so we emit the standard 32 bit
1046  // instruction.
1047  selector->Emit(opcode2, g.DefineSameAsFirst(node),
1048  g.UseRegister(node->InputAt(0)),
1049  g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
1050  }
1051 }
1052 
// Pair (64-bit-wide) addition on 32-bit targets; see VisitPairBinop.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitPairBinop(this, kS390_AddPair, kS390_Add32, node);
}

// Pair (64-bit-wide) subtraction on 32-bit targets; see VisitPairBinop.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitPairBinop(this, kS390_SubPair, kS390_Sub32, node);
}
1060 
1061 void InstructionSelector::VisitInt32PairMul(Node* node) {
1062  S390OperandGenerator g(this);
1063  Node* projection1 = NodeProperties::FindProjection(node, 1);
1064  if (projection1) {
1065  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
1066  g.UseUniqueRegister(node->InputAt(1)),
1067  g.UseUniqueRegister(node->InputAt(2)),
1068  g.UseUniqueRegister(node->InputAt(3))};
1069 
1070  InstructionOperand outputs[] = {
1071  g.DefineAsRegister(node),
1072  g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1073 
1074  Emit(kS390_MulPair, 2, outputs, 4, inputs);
1075  } else {
1076  // The high word of the result is not used, so we emit the standard 32 bit
1077  // instruction.
1078  Emit(kS390_Mul32, g.DefineSameAsFirst(node),
1079  g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
1080  g.TempImmediate(0));
1081  }
1082 }
1083 
namespace {
// Shared routine for multiple shift operations.
// Lowers a pair (64-bit-wide) shift on 32-bit targets: low/high input words
// plus a shift amount (immediate when constant). When the high-word
// projection is unused, a temp register absorbs the second output.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                    Node* node) {
  S390OperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // High word unused: still needs a register, but only as scratch.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace
1120 
// Pair shift left; see VisitPairShift.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitPairShift(this, kS390_ShiftLeftPair, node);
}

// Pair logical shift right; see VisitPairShift.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitPairShift(this, kS390_ShiftRightPair, node);
}

// Pair arithmetic shift right; see VisitPairShift.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitPairShift(this, kS390_ShiftRightArithPair, node);
}
1132 #endif
1133 
// The following operations are not marked as supported by this backend, so
// the generic selector should never route them here.
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif

void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }

// Absolute value with overflow check (overflows for the minimum value).
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  VisitWord64UnaryOp(this, node, kS390_Abs64, OperandMode::kNone);
}
1155 
// 64-bit byte reversal via the register-to-register load-reverse form.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1161 
// 32-bit byte reversal. If the input is a covered 32-bit load, fold it into
// a byte-reversing load; otherwise use the register-to-register form.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
           // TODO(john.yan): one of the base and offset can be imm.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1180 
// Matches (0 - x) and lowers it to a negate instruction. Instantiated for
// 32- and 64-bit subtraction; the static_assert restricts the opcode
// parameter accordingly. Returns true if a Neg was emitted.
template <class Matcher, ArchOpcode neg_opcode>
static inline bool TryMatchNegFromSub(InstructionSelector* selector,
                                      Node* node) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  static_assert(neg_opcode == kS390_Neg32 || neg_opcode == kS390_Neg64,
                "Provided opcode is not a Neg opcode.");
  if (m.left().Is(0)) {
    Node* value = m.right().node();
    // For results needing zero-extension, the trailing immediate tells the
    // code generator whether the extension can be skipped because the input
    // already produces a clean 32-bit value.
    bool doZeroExt = DoZeroExtForResult(node);
    bool canEliminateZeroExt = ProduceWord32Result(value);
    if (doZeroExt) {
      selector->Emit(neg_opcode,
                     canEliminateZeroExt ? g.DefineSameAsFirst(node)
                                         : g.DefineAsRegister(node),
                     g.UseRegister(value),
                     g.TempImmediate(!canEliminateZeroExt));
    } else {
      selector->Emit(neg_opcode, g.DefineAsRegister(node),
                     g.UseRegister(value));
    }
    return true;
  }
  return false;
}
1206 
// Strength-reduces a multiplication by a power-of-two immediate into a left
// shift. Returns true if the shift was emitted.
template <class Matcher, ArchOpcode shift_op>
bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
      base::bits::IsPowerOfTwo(g.GetImmediate(right))) {
    // log2 of the immediate is the shift amount.
    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
    bool doZeroExt = DoZeroExtForResult(node);
    bool canEliminateZeroExt = ProduceWord32Result(left);
    // With DISTINCT_OPS a separate destination register is usable when a real
    // zero-extension is still required; otherwise the result must reuse the
    // input register.
    InstructionOperand dst = (doZeroExt && !canEliminateZeroExt &&
                              CpuFeatures::IsSupported(DISTINCT_OPS))
                                 ? g.DefineAsRegister(node)
                                 : g.DefineSameAsFirst(node);

    if (doZeroExt) {
      selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power),
                     g.TempImmediate(!canEliminateZeroExt));
    } else {
      selector->Emit(shift_op, dst, g.UseRegister(left), g.UseImmediate(power));
    }
    return true;
  }
  return false;
}
1233 
// Emits a 32-bit binop with an overflow-flag continuation when the node's
// overflow projection is used. Returns false (emitting nothing) otherwise.
template <ArchOpcode opcode>
static inline bool TryMatchInt32OpWithOverflow(InstructionSelector* selector,
                                               Node* node, OperandModes mode) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    VisitWord32BinOp(selector, node, opcode, mode, &cont);
    return true;
  }
  return false;
}
1244 
// Int32AddWithOverflow: overflow-checked add; see TryMatchInt32OpWithOverflow.
static inline bool TryMatchInt32AddWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt32OpWithOverflow<kS390_Add32>(selector, node,
                                                  AddOperandMode);
}

// Int32SubWithOverflow: overflow-checked sub; see TryMatchInt32OpWithOverflow.
static inline bool TryMatchInt32SubWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt32OpWithOverflow<kS390_Sub32>(selector, node,
                                                  SubOperandMode);
}
1256 
1257 static inline bool TryMatchInt32MulWithOverflow(InstructionSelector* selector,
1258  Node* node) {
1259  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1260  if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1261  TryMatchInt32OpWithOverflow<kS390_Mul32>(
1262  selector, node, OperandMode::kAllowRRR | OperandMode::kAllowRM);
1263  } else {
1264  FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
1265  VisitWord32BinOp(selector, node, kS390_Mul32WithOverflow,
1266  OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
1267  &cont);
1268  }
1269  return true;
1270  }
1271  return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(selector,
1272  node);
1273 }
1274 
1275 #if V8_TARGET_ARCH_S390X
// 64-bit analogue of TryMatchInt32OpWithOverflow: emits the binop with an
// overflow-flag continuation when the overflow projection is used.
template <ArchOpcode opcode>
static inline bool TryMatchInt64OpWithOverflow(InstructionSelector* selector,
                                               Node* node, OperandModes mode) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    VisitWord64BinOp(selector, node, opcode, mode, &cont);
    return true;
  }
  return false;
}

// Int64AddWithOverflow: overflow-checked add; see TryMatchInt64OpWithOverflow.
static inline bool TryMatchInt64AddWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt64OpWithOverflow<kS390_Add64>(selector, node,
                                                  AddOperandMode);
}

// Int64SubWithOverflow: overflow-checked sub; see TryMatchInt64OpWithOverflow.
static inline bool TryMatchInt64SubWithOverflow(InstructionSelector* selector,
                                                Node* node) {
  return TryMatchInt64OpWithOverflow<kS390_Sub64>(selector, node,
                                                  SubOperandMode);
}
1298 #endif
1299 
1300 static inline bool TryMatchDoubleConstructFromInsert(
1301  InstructionSelector* selector, Node* node) {
1302  S390OperandGenerator g(selector);
1303  Node* left = node->InputAt(0);
1304  Node* right = node->InputAt(1);
1305  Node* lo32 = nullptr;
1306  Node* hi32 = nullptr;
1307 
1308  if (node->opcode() == IrOpcode::kFloat64InsertLowWord32) {
1309  lo32 = right;
1310  } else if (node->opcode() == IrOpcode::kFloat64InsertHighWord32) {
1311  hi32 = right;
1312  } else {
1313  return false; // doesn't match
1314  }
1315 
1316  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32) {
1317  lo32 = left->InputAt(1);
1318  } else if (left->opcode() == IrOpcode::kFloat64InsertHighWord32) {
1319  hi32 = left->InputAt(1);
1320  } else {
1321  return false; // doesn't match
1322  }
1323 
1324  if (!lo32 || !hi32) return false; // doesn't match
1325 
1326  selector->Emit(kS390_DoubleConstruct, g.DefineAsRegister(node),
1327  g.UseRegister(hi32), g.UseRegister(lo32));
1328  return true;
1329 }
1330 
// `null` is the "no custom matcher" placeholder passed as the try_extra
// argument in the op lists below; it always reports no match.
#define null ([]() { return false; })
// TODO(john.yan): place kAllowRM where available
// Float unary ops available on both 31- and 64-bit targets. Each entry is
// V(visitor-width tag, IR op name, arch opcode, operand modes, extra matcher).
#define FLOAT_UNARY_OP_LIST_32(V)                                              \
  V(Float32, ChangeFloat32ToFloat64, kS390_Float32ToDouble,                    \
    OperandMode::kAllowRM, null)                                               \
  V(Float32, BitcastFloat32ToInt32, kS390_BitcastFloat32ToInt32,               \
    OperandMode::kAllowRM, null)                                               \
  V(Float64, TruncateFloat64ToFloat32, kS390_DoubleToFloat32,                  \
    OperandMode::kNone, null)                                                  \
  V(Float64, TruncateFloat64ToWord32, kArchTruncateDoubleToI,                  \
    OperandMode::kNone, null)                                                  \
  V(Float64, RoundFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone,     \
    null)                                                                      \
  V(Float32, TruncateFloat32ToInt32, kS390_Float32ToInt32, OperandMode::kNone, \
    null)                                                                      \
  V(Float32, TruncateFloat32ToUint32, kS390_Float32ToUint32,                   \
    OperandMode::kNone, null)                                                  \
  V(Float64, TruncateFloat64ToUint32, kS390_DoubleToUint32,                    \
    OperandMode::kNone, null)                                                  \
  V(Float64, ChangeFloat64ToInt32, kS390_DoubleToInt32, OperandMode::kNone,    \
    null)                                                                      \
  V(Float64, ChangeFloat64ToUint32, kS390_DoubleToUint32, OperandMode::kNone,  \
    null)                                                                      \
  V(Float64, Float64SilenceNaN, kS390_Float64SilenceNaN, OperandMode::kNone,   \
    null)                                                                      \
  V(Float32, Float32Abs, kS390_AbsFloat, OperandMode::kNone, null)             \
  V(Float64, Float64Abs, kS390_AbsDouble, OperandMode::kNone, null)            \
  V(Float32, Float32Sqrt, kS390_SqrtFloat, OperandMode::kNone, null)           \
  V(Float64, Float64Sqrt, kS390_SqrtDouble, OperandMode::kNone, null)          \
  V(Float32, Float32RoundDown, kS390_FloorFloat, OperandMode::kNone, null)     \
  V(Float64, Float64RoundDown, kS390_FloorDouble, OperandMode::kNone, null)    \
  V(Float32, Float32RoundUp, kS390_CeilFloat, OperandMode::kNone, null)        \
  V(Float64, Float64RoundUp, kS390_CeilDouble, OperandMode::kNone, null)       \
  V(Float32, Float32RoundTruncate, kS390_TruncateFloat, OperandMode::kNone,    \
    null)                                                                      \
  V(Float64, Float64RoundTruncate, kS390_TruncateDouble, OperandMode::kNone,   \
    null)                                                                      \
  V(Float64, Float64RoundTiesAway, kS390_RoundDouble, OperandMode::kNone,      \
    null)                                                                      \
  V(Float32, Float32Neg, kS390_NegFloat, OperandMode::kNone, null)             \
  V(Float64, Float64Neg, kS390_NegDouble, OperandMode::kNone, null)            \
  /* TODO(john.yan): can use kAllowRM */                                       \
  V(Word32, Float64ExtractLowWord32, kS390_DoubleExtractLowWord32,             \
    OperandMode::kNone, null)                                                  \
  V(Word32, Float64ExtractHighWord32, kS390_DoubleExtractHighWord32,           \
    OperandMode::kNone, null)
1377 
// Float binary ops; same V(...) entry shape as FLOAT_UNARY_OP_LIST_32.
#define FLOAT_BIN_OP_LIST(V)                                           \
  V(Float32, Float32Add, kS390_AddFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Add, kS390_AddDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Sub, kS390_SubFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Sub, kS390_SubDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Mul, kS390_MulFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Mul, kS390_MulDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Div, kS390_DivFloat, OperandMode::kAllowRM, null)  \
  V(Float64, Float64Div, kS390_DivDouble, OperandMode::kAllowRM, null) \
  V(Float32, Float32Max, kS390_MaxFloat, OperandMode::kNone, null)     \
  V(Float64, Float64Max, kS390_MaxDouble, OperandMode::kNone, null)    \
  V(Float32, Float32Min, kS390_MinFloat, OperandMode::kNone, null)     \
  V(Float64, Float64Min, kS390_MinDouble, OperandMode::kNone, null)
1391 
// Word32 unary ops available on both 31- and 64-bit targets.
#define WORD32_UNARY_OP_LIST_32(V)                                           \
  V(Word32, Word32Clz, kS390_Cntlz32, OperandMode::kNone, null)              \
  V(Word32, Word32Popcnt, kS390_Popcnt32, OperandMode::kNone, null)          \
  V(Word32, RoundInt32ToFloat32, kS390_Int32ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word32, RoundUint32ToFloat32, kS390_Uint32ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word32, ChangeInt32ToFloat64, kS390_Int32ToDouble, OperandMode::kNone,   \
    null)                                                                    \
  V(Word32, ChangeUint32ToFloat64, kS390_Uint32ToDouble, OperandMode::kNone, \
    null)                                                                    \
  V(Word32, SignExtendWord8ToInt32, kS390_SignExtendWord8ToInt32,            \
    OperandMode::kNone, null)                                                \
  V(Word32, SignExtendWord16ToInt32, kS390_SignExtendWord16ToInt32,          \
    OperandMode::kNone, null)                                                \
  V(Word32, BitcastInt32ToFloat32, kS390_BitcastInt32ToFloat32,              \
    OperandMode::kNone, null)
1409 
#ifdef V8_TARGET_ARCH_S390X
// 64-bit targets extend the shared lists with 64-bit-only conversions.
#define FLOAT_UNARY_OP_LIST(V)                                                \
  FLOAT_UNARY_OP_LIST_32(V)                                                   \
  V(Float64, ChangeFloat64ToUint64, kS390_DoubleToUint64, OperandMode::kNone, \
    null)                                                                     \
  V(Float64, ChangeFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone,   \
    null)                                                                     \
  V(Float64, TruncateFloat64ToInt64, kS390_DoubleToInt64, OperandMode::kNone, \
    null)                                                                     \
  V(Float64, BitcastFloat64ToInt64, kS390_BitcastDoubleToInt64,               \
    OperandMode::kNone, null)

// ChangeUint32ToUint64 carries a custom matcher: when the input already
// produces a zero-extended 32-bit result, the conversion is a no-op and is
// folded away via EmitIdentity.
#define WORD32_UNARY_OP_LIST(V)                                             \
  WORD32_UNARY_OP_LIST_32(V)                                                \
  V(Word32, ChangeInt32ToInt64, kS390_SignExtendWord32ToInt64,              \
    OperandMode::kNone, null)                                               \
  V(Word32, SignExtendWord8ToInt64, kS390_SignExtendWord8ToInt64,           \
    OperandMode::kNone, null)                                               \
  V(Word32, SignExtendWord16ToInt64, kS390_SignExtendWord16ToInt64,         \
    OperandMode::kNone, null)                                               \
  V(Word32, SignExtendWord32ToInt64, kS390_SignExtendWord32ToInt64,         \
    OperandMode::kNone, null)                                               \
  V(Word32, ChangeUint32ToUint64, kS390_Uint32ToUint64, OperandMode::kNone, \
    [&]() -> bool {                                                         \
      if (ProduceWord32Result(node->InputAt(0))) {                          \
        EmitIdentity(node);                                                 \
        return true;                                                        \
      }                                                                     \
      return false;                                                         \
    })

#else
// 31-bit targets use the shared lists unchanged.
#define FLOAT_UNARY_OP_LIST(V) FLOAT_UNARY_OP_LIST_32(V)
#define WORD32_UNARY_OP_LIST(V) WORD32_UNARY_OP_LIST_32(V)
#endif
1445 
// Word32 binary ops. Entries with a lambda run a peephole matcher first
// (negation, shift-from-mul, overflow forms, double construction) and only
// fall back to the generic visitor if it reports no match.
#define WORD32_BIN_OP_LIST(V)                                                  \
  V(Word32, Int32Add, kS390_Add32, AddOperandMode, null)                       \
  V(Word32, Int32Sub, kS390_Sub32, SubOperandMode, ([&]() {                    \
      return TryMatchNegFromSub<Int32BinopMatcher, kS390_Neg32>(this, node);   \
    }))                                                                        \
  V(Word32, Int32Mul, kS390_Mul32, MulOperandMode, ([&]() {                    \
      return TryMatchShiftFromMul<Int32BinopMatcher, kS390_ShiftLeft32>(this,  \
                                                                        node); \
    }))                                                                        \
  V(Word32, Int32AddWithOverflow, kS390_Add32, AddOperandMode,                 \
    ([&]() { return TryMatchInt32AddWithOverflow(this, node); }))              \
  V(Word32, Int32SubWithOverflow, kS390_Sub32, SubOperandMode,                 \
    ([&]() { return TryMatchInt32SubWithOverflow(this, node); }))              \
  V(Word32, Int32MulWithOverflow, kS390_Mul32, MulOperandMode,                 \
    ([&]() { return TryMatchInt32MulWithOverflow(this, node); }))              \
  V(Word32, Int32MulHigh, kS390_MulHigh32,                                     \
    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps, null)             \
  V(Word32, Uint32MulHigh, kS390_MulHighU32,                                   \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Div, kS390_Div32,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Uint32Div, kS390_DivU32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Int32Mod, kS390_Mod32,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Uint32Mod, kS390_ModU32,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word32, Word32Ror, kS390_RotRight32,                                       \
    OperandMode::kAllowRI | OperandMode::kAllowRRR | OperandMode::kAllowRRI |  \
        OperandMode::kShift32Imm,                                              \
    null)                                                                      \
  V(Word32, Word32And, kS390_And32, And32OperandMode, null)                    \
  V(Word32, Word32Or, kS390_Or32, Or32OperandMode, null)                       \
  V(Word32, Word32Xor, kS390_Xor32, Xor32OperandMode, null)                    \
  V(Word32, Word32Shl, kS390_ShiftLeft32, Shift32OperandMode, null)            \
  V(Word32, Word32Shr, kS390_ShiftRight32, Shift32OperandMode, null)           \
  V(Word32, Word32Sar, kS390_ShiftRightArith32, Shift32OperandMode,            \
    [&]() { return TryMatchSignExtInt16OrInt8FromWord32Sar(this, node); })     \
  V(Word32, Float64InsertLowWord32, kS390_DoubleInsertLowWord32,               \
    OperandMode::kAllowRRR,                                                    \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })   \
  V(Word32, Float64InsertHighWord32, kS390_DoubleInsertHighWord32,             \
    OperandMode::kAllowRRR,                                                    \
    [&]() -> bool { return TryMatchDoubleConstructFromInsert(this, node); })
1490 
// Word64 unary ops (64-bit targets only; instantiated under the
// V8_TARGET_ARCH_S390X guard below).
#define WORD64_UNARY_OP_LIST(V)                                              \
  V(Word64, Word64Popcnt, kS390_Popcnt64, OperandMode::kNone, null)          \
  V(Word64, Word64Clz, kS390_Cntlz64, OperandMode::kNone, null)              \
  V(Word64, TruncateInt64ToInt32, kS390_Int64ToInt32, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat32, kS390_Int64ToFloat32, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone,    \
    null)                                                                    \
  V(Word64, ChangeInt64ToFloat64, kS390_Int64ToDouble, OperandMode::kNone,   \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat32, kS390_Uint64ToFloat32, OperandMode::kNone, \
    null)                                                                    \
  V(Word64, RoundUint64ToFloat64, kS390_Uint64ToDouble, OperandMode::kNone,  \
    null)                                                                    \
  V(Word64, BitcastInt64ToFloat64, kS390_BitcastInt64ToDouble,               \
    OperandMode::kNone, null)
1508 
// Word64 binary ops (64-bit targets only). Word64And/Shl/Shr are absent here
// because they have dedicated visitors above that fold contiguous masks.
#define WORD64_BIN_OP_LIST(V)                                                  \
  V(Word64, Int64Add, kS390_Add64, AddOperandMode, null)                       \
  V(Word64, Int64Sub, kS390_Sub64, SubOperandMode, ([&]() {                    \
      return TryMatchNegFromSub<Int64BinopMatcher, kS390_Neg64>(this, node);   \
    }))                                                                        \
  V(Word64, Int64AddWithOverflow, kS390_Add64, AddOperandMode,                 \
    ([&]() { return TryMatchInt64AddWithOverflow(this, node); }))              \
  V(Word64, Int64SubWithOverflow, kS390_Sub64, SubOperandMode,                 \
    ([&]() { return TryMatchInt64SubWithOverflow(this, node); }))              \
  V(Word64, Int64Mul, kS390_Mul64, MulOperandMode, ([&]() {                    \
      return TryMatchShiftFromMul<Int64BinopMatcher, kS390_ShiftLeft64>(this,  \
                                                                        node); \
    }))                                                                        \
  V(Word64, Int64Div, kS390_Div64,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Uint64Div, kS390_DivU64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Int64Mod, kS390_Mod64,                                             \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Uint64Mod, kS390_ModU64,                                           \
    OperandMode::kAllowRRM | OperandMode::kAllowRRR, null)                     \
  V(Word64, Word64Sar, kS390_ShiftRightArith64, Shift64OperandMode, null)      \
  V(Word64, Word64Ror, kS390_RotRight64, Shift64OperandMode, null)             \
  V(Word64, Word64Or, kS390_Or64, Or64OperandMode, null)                       \
  V(Word64, Word64Xor, kS390_Xor64, Xor64OperandMode, null)
1534 
1535 #define DECLARE_UNARY_OP(type, name, op, mode, try_extra) \
1536  void InstructionSelector::Visit##name(Node* node) { \
1537  if (std::function<bool()>(try_extra)()) return; \
1538  Visit##type##UnaryOp(this, node, op, mode); \
1539  }
1540 
1541 #define DECLARE_BIN_OP(type, name, op, mode, try_extra) \
1542  void InstructionSelector::Visit##name(Node* node) { \
1543  if (std::function<bool()>(try_extra)()) return; \
1544  Visit##type##BinOp(this, node, op, mode); \
1545  }
1546 
1547 WORD32_BIN_OP_LIST(DECLARE_BIN_OP);
1548 WORD32_UNARY_OP_LIST(DECLARE_UNARY_OP);
1549 FLOAT_UNARY_OP_LIST(DECLARE_UNARY_OP);
1550 FLOAT_BIN_OP_LIST(DECLARE_BIN_OP);
1551 
1552 #if V8_TARGET_ARCH_S390X
1553 WORD64_UNARY_OP_LIST(DECLARE_UNARY_OP)
1554 WORD64_BIN_OP_LIST(DECLARE_BIN_OP)
1555 #endif
1556 
1557 #undef DECLARE_BIN_OP
1558 #undef DECLARE_UNARY_OP
1559 #undef WORD64_BIN_OP_LIST
1560 #undef WORD64_UNARY_OP_LIST
1561 #undef WORD32_BIN_OP_LIST
1562 #undef WORD32_UNARY_OP_LIST
1563 #undef FLOAT_UNARY_OP_LIST
1564 #undef WORD32_UNARY_OP_LIST_32
1565 #undef FLOAT_BIN_OP_LIST
1566 #undef FLOAT_BIN_OP_LIST_32
1567 #undef null
1568 
1569 #if V8_TARGET_ARCH_S390X
// Checked float -> 64-bit integer truncations; the shared helper also wires
// up the optional "success" projection.
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}
1585 
1586 #endif
1587 
// Float64 modulus is lowered to a C library call: operands and result are
// pinned to the fixed argument/return FP registers d1/d2.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}
1594 
// IEEE-754 unary math functions (sin, log, ...) are lowered to C library
// calls with fixed-register argument passing.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
      ->MarkAsCall();
}

// Binary counterpart (pow, atan2, ...); second argument goes in d2.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
       g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}
1609 
// Round-ties-even is not advertised as supported by this backend, so these
// should never be reached.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  UNREACHABLE();
}
1617 
1618 static bool CompareLogical(FlagsContinuation* cont) {
1619  switch (cont->condition()) {
1620  case kUnsignedLessThan:
1621  case kUnsignedGreaterThanOrEqual:
1622  case kUnsignedLessThanOrEqual:
1623  case kUnsignedGreaterThan:
1624  return true;
1625  default:
1626  return false;
1627  }
1628  UNREACHABLE();
1629 }
1630 
1631 namespace {
1632 
// Shared routine for multiple compare operations.
// Emits a two-operand compare whose flags feed the given continuation
// (branch, set, deopt, ...).
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}
1639 
// Forward declaration: lowers a compare-against-zero to a load-and-test
// instruction (defined further below).
void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
                      Node* node, Node* value, FlagsContinuation* cont,
                      bool discard_output = false);
1643 
// Shared routine for multiple word compare operations.
// Canonicalizes operand order (immediates on the right, memory operands on
// the left), recognizes compare-with-zero as load-and-test, and otherwise
// emits a register/immediate/memory compare feeding the continuation.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      OperandModes immediate_mode) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Int32Sub/Int64Sub reach here when their flags output is consumed.
  DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
         node->opcode() == IrOpcode::kInt32Sub ||
         node->opcode() == IrOpcode::kInt64Sub);

  InstructionOperand inputs[8];
  InstructionOperand outputs[1];
  size_t input_count = 0;
  size_t output_count = 0;

  // If one of the two inputs is an immediate, make sure it's on the right, or
  // if one of the two inputs is a memory operand, make sure it's on the left.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    // For branches, memory-operand legality is judged at the branch site.
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  if ((!g.CanBeImmediate(right, immediate_mode) &&
       g.CanBeImmediate(left, immediate_mode)) ||
      (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    // Swapping operands of a non-commutative compare flips the condition.
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // check if compare with 0
  if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
                                   ? kS390_LoadAndTestWord32
                                   : kS390_LoadAndTestWord64;
    return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
  }

  inputs[input_count++] = g.UseRegisterOrStackPointer(left);
  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
    // generate memory operand
    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
        right, inputs, &input_count, OpcodeImmMode(opcode));
    opcode |= AddressingModeField::encode(addressing_mode);
  } else if (g.CanBeImmediate(right, immediate_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    inputs[input_count++] = g.UseAnyExceptImmediate(right);
  }

  DCHECK(input_count <= 8 && output_count <= 1);
  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}
1702 
// 32-bit word compare: unsigned conditions take unsigned immediates,
// signed conditions take signed immediates.
void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  OperandModes mode =
      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
}
1709 
#if V8_TARGET_ARCH_S390X
// 64-bit word compare; immediates are still 32-bit encodings (sign- or
// zero-extended by the compare instruction).
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  OperandModes mode =
      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
  VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
}
#endif
1718 
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
}
1730 
1731 void VisitTestUnderMask(InstructionSelector* selector, Node* node,
1732  FlagsContinuation* cont) {
1733  DCHECK(node->opcode() == IrOpcode::kWord32And ||
1734  node->opcode() == IrOpcode::kWord64And);
1735  ArchOpcode opcode =
1736  (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
1737  S390OperandGenerator g(selector);
1738  Node* left = node->InputAt(0);
1739  Node* right = node->InputAt(1);
1740  if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
1741  g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
1742  std::swap(left, right);
1743  }
1744  VisitCompare(selector, opcode, g.UseRegister(left),
1745  g.UseOperand(right, OperandMode::kUint32Imm), cont);
1746 }
1747 
1748 void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
1749  Node* node, Node* value, FlagsContinuation* cont,
1750  bool discard_output) {
1751  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
1752  "LoadAndTest Opcode shouldn't contain other opcodes.");
1753 
1754  // TODO(john.yan): Add support for Float32/Float64.
1755  DCHECK(opcode >= kS390_LoadAndTestWord32 ||
1756  opcode <= kS390_LoadAndTestWord64);
1757 
1758  S390OperandGenerator g(selector);
1759  InstructionOperand inputs[8];
1760  InstructionOperand outputs[2];
1761  size_t input_count = 0;
1762  size_t output_count = 0;
1763  bool use_value = false;
1764 
1765  int effect_level = selector->GetEffectLevel(node);
1766  if (cont->IsBranch()) {
1767  effect_level = selector->GetEffectLevel(
1768  cont->true_block()->PredecessorAt(0)->control_input());
1769  }
1770 
1771  if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
1772  // generate memory operand
1773  AddressingMode addressing_mode =
1774  g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
1775  opcode |= AddressingModeField::encode(addressing_mode);
1776  } else {
1777  inputs[input_count++] = g.UseAnyExceptImmediate(value);
1778  use_value = true;
1779  }
1780 
1781  if (!discard_output && !use_value) {
1782  outputs[output_count++] = g.DefineAsRegister(value);
1783  }
1784 
1785  DCHECK(input_count <= 8 && output_count <= 2);
1786  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
1787  inputs, cont);
1788 }
1789 
1790 } // namespace
1791 
// Shared routine for word comparisons against zero.
// Tries hard to fuse the comparison producing |value| (or the operation that
// feeds it) with the continuation |cont| so no separate compare instruction
// is needed; falls back to an explicit load-and-test at the end.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) {
    Int32BinopMatcher m(value);
    if (!m.right().Is(0)) break;

    user = value;
    value = m.left().node();
    cont->Negate();
  }

  // Several combinings below are only legal for plain (in)equality.
  FlagsCondition fc = cont->condition();
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt32Sub:
                return VisitWord32Compare(this, value, cont);
              case IrOpcode::kWord32And:
                return VisitTestUnderMask(this, value, cont);
              default:
                break;
            }
          }
        }
        return VisitWord32Compare(this, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kWord64Equal: {
        cont->OverwriteAndNegateIfEqual(kEqual);
        Int64BinopMatcher m(value);
        if (m.right().Is(0)) {
          // Try to combine the branch with a comparison.
          Node* const user = m.node();
          Node* const value = m.left().node();
          if (CanCover(user, value)) {
            switch (value->opcode()) {
              case IrOpcode::kInt64Sub:
                return VisitWord64Compare(this, value, cont);
              case IrOpcode::kWord64And:
                return VisitTestUnderMask(this, value, cont);
              default:
                break;
            }
          }
        }
        return VisitWord64Compare(this, value, cont);
      }
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
#endif
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32BinOp(this, node, kS390_Add32, AddOperandMode,
                                        cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32BinOp(this, node, kS390_Sub32, SubOperandMode,
                                        cont);
              case IrOpcode::kInt32MulWithOverflow:
                // With MISC_INSTR_EXT2 the mul sets the overflow flag
                // directly; otherwise a dedicated overflow-checking mul is
                // emitted whose result is tested for non-equality.
                if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
                  cont->OverwriteAndNegateIfEqual(kOverflow);
                  return VisitWord32BinOp(
                      this, node, kS390_Mul32,
                      OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
                } else {
                  cont->OverwriteAndNegateIfEqual(kNotEqual);
                  return VisitWord32BinOp(
                      this, node, kS390_Mul32WithOverflow,
                      OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                      cont);
                }
              case IrOpcode::kInt32AbsWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord32UnaryOp(this, node, kS390_Abs32,
                                          OperandMode::kNone, cont);
#if V8_TARGET_ARCH_S390X
              case IrOpcode::kInt64AbsWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64UnaryOp(this, node, kS390_Abs64,
                                          OperandMode::kNone, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64BinOp(this, node, kS390_Add64, AddOperandMode,
                                        cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitWord64BinOp(this, node, kS390_Sub64, SubOperandMode,
                                        cont);
#endif
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kInt32Sub:
        // Only (in)equality can reuse the sub's condition code here.
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32Compare(this, value, cont);
        break;
      case IrOpcode::kWord32And:
        return VisitTestUnderMask(this, value, cont);
      case IrOpcode::kLoad: {
        LoadRepresentation load_rep = LoadRepresentationOf(value->op());
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord32:
            return VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value,
                                    cont);
          default:
            break;
        }
        break;
      }
      case IrOpcode::kInt32Add:
        // can't handle overflow case.
        break;
      case IrOpcode::kWord32Or:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32BinOp(this, value, kS390_Or32, Or32OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord32Xor:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord32BinOp(this, value, kS390_Xor32, Xor32OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord32Sar:
      case IrOpcode::kWord32Shl:
      case IrOpcode::kWord32Shr:
      case IrOpcode::kWord32Ror:
        // doesn't generate cc, so ignore.
        break;
#if V8_TARGET_ARCH_S390X
      case IrOpcode::kInt64Sub:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64Compare(this, value, cont);
        break;
      case IrOpcode::kWord64And:
        return VisitTestUnderMask(this, value, cont);
      case IrOpcode::kInt64Add:
        // can't handle overflow case.
        break;
      case IrOpcode::kWord64Or:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64BinOp(this, value, kS390_Or64, Or64OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord64Xor:
        if (fc == kNotEqual || fc == kEqual)
          return VisitWord64BinOp(this, value, kS390_Xor64, Xor64OperandMode,
                                  cont);
        break;
      case IrOpcode::kWord64Sar:
      case IrOpcode::kWord64Shl:
      case IrOpcode::kWord64Shr:
      case IrOpcode::kWord64Ror:
        // doesn't generate cc, so ignore
        break;
#endif
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit LoadAndTest
  VisitLoadAndTest(this, kS390_LoadAndTestWord32, user, value, cont, true);
}
2021 
// Lowers a switch to either a jump table (ArchTableSwitch) or a binary
// search over the cases (ArchLookupSwitch), based on a size/speed heuristic.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  S390OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 4 + sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 3 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    // Prefer the table when its estimated space+time cost does not exceed
    // the lookup's, the range fits, and min_value won't overflow on negation.
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        // Bias the switch value so the table is indexed from zero.
        index_operand = g.TempRegister();
        Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
             value_operand, g.TempImmediate(-sw.min_value()));
      }
#if V8_TARGET_ARCH_S390X
      // Zero-extend the 32-bit index before it is used in 64-bit address
      // arithmetic for the table dispatch.
      InstructionOperand index_operand_zero_ext = g.TempRegister();
      Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
      index_operand = index_operand_zero_ext;
#endif
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
2057 
2058 void InstructionSelector::VisitWord32Equal(Node* const node) {
2059  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2060  Int32BinopMatcher m(node);
2061  if (m.right().Is(0)) {
2062  return VisitLoadAndTest(this, kS390_LoadAndTestWord32, m.node(),
2063  m.left().node(), &cont, true);
2064  }
2065  VisitWord32Compare(this, node, &cont);
2066 }
2067 
2068 void InstructionSelector::VisitInt32LessThan(Node* node) {
2069  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2070  VisitWord32Compare(this, node, &cont);
2071 }
2072 
2073 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2074  FlagsContinuation cont =
2075  FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2076  VisitWord32Compare(this, node, &cont);
2077 }
2078 
2079 void InstructionSelector::VisitUint32LessThan(Node* node) {
2080  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2081  VisitWord32Compare(this, node, &cont);
2082 }
2083 
2084 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2085  FlagsContinuation cont =
2086  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2087  VisitWord32Compare(this, node, &cont);
2088 }
2089 
2090 #if V8_TARGET_ARCH_S390X
2091 void InstructionSelector::VisitWord64Equal(Node* const node) {
2092  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2093  Int64BinopMatcher m(node);
2094  if (m.right().Is(0)) {
2095  return VisitLoadAndTest(this, kS390_LoadAndTestWord64, m.node(),
2096  m.left().node(), &cont, true);
2097  }
2098  VisitWord64Compare(this, node, &cont);
2099 }
2100 
2101 void InstructionSelector::VisitInt64LessThan(Node* node) {
2102  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2103  VisitWord64Compare(this, node, &cont);
2104 }
2105 
2106 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2107  FlagsContinuation cont =
2108  FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2109  VisitWord64Compare(this, node, &cont);
2110 }
2111 
2112 void InstructionSelector::VisitUint64LessThan(Node* node) {
2113  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2114  VisitWord64Compare(this, node, &cont);
2115 }
2116 
2117 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2118  FlagsContinuation cont =
2119  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2120  VisitWord64Compare(this, node, &cont);
2121 }
2122 #endif
2123 
2124 void InstructionSelector::VisitFloat32Equal(Node* node) {
2125  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2126  VisitFloat32Compare(this, node, &cont);
2127 }
2128 
2129 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2130  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2131  VisitFloat32Compare(this, node, &cont);
2132 }
2133 
2134 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2135  FlagsContinuation cont =
2136  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2137  VisitFloat32Compare(this, node, &cont);
2138 }
2139 
2140 void InstructionSelector::VisitFloat64Equal(Node* node) {
2141  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2142  VisitFloat64Compare(this, node, &cont);
2143 }
2144 
2145 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2146  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2147  VisitFloat64Compare(this, node, &cont);
2148 }
2149 
2150 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2151  FlagsContinuation cont =
2152  FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2153  VisitFloat64Compare(this, node, &cont);
2154 }
2155 
// Moves call arguments into their stack slots before a call. C calls poke
// arguments into pre-reserved parameter slots; JS/stub calls first claim the
// needed stack space and then store each argument.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  S390OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      if (input.node == nullptr) continue;
      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    int num_slots = 0;
    int slot = 0;

    // First pass: count slots (a Float64 argument may occupy more than one
    // pointer-sized slot, per kDoubleSize / kPointerSize).
    for (PushParameter input : *arguments) {
      if (input.node == nullptr) continue;
      num_slots += input.location.GetType().representation() ==
                           MachineRepresentation::kFloat64
                       ? kDoubleSize / kPointerSize
                       : 1;
    }
    // Claim all the stack space at once, then store each argument into its
    // slot.
    Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
    for (PushParameter input : *arguments) {
      // Skip any alignment holes in pushed nodes.
      if (input.node) {
        Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(slot));
        slot += input.location.GetType().representation() ==
                        MachineRepresentation::kFloat64
                    ? (kDoubleSize / kPointerSize)
                    : 1;
      }
    }
    // Both passes must agree on the total slot count.
    DCHECK(num_slots == slot);
  }
}
2202 
// Tail-call target addresses are never encoded as immediates on S390.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
2204 
// Number of temporary registers reserved for a tail call from a JSFunction.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
2206 
2207 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2208  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2209  DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
2210  load_rep.representation() == MachineRepresentation::kWord16 ||
2211  load_rep.representation() == MachineRepresentation::kWord32);
2212  USE(load_rep);
2213  VisitLoad(node);
2214 }
2215 
2216 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2217  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2218  VisitGeneralStore(this, node, rep);
2219 }
2220 
2221 void VisitAtomicExchange(InstructionSelector* selector, Node* node,
2222  ArchOpcode opcode) {
2223  S390OperandGenerator g(selector);
2224  Node* base = node->InputAt(0);
2225  Node* index = node->InputAt(1);
2226  Node* value = node->InputAt(2);
2227 
2228  AddressingMode addressing_mode = kMode_MRR;
2229  InstructionOperand inputs[3];
2230  size_t input_count = 0;
2231  inputs[input_count++] = g.UseUniqueRegister(base);
2232  inputs[input_count++] = g.UseUniqueRegister(index);
2233  inputs[input_count++] = g.UseUniqueRegister(value);
2234  InstructionOperand outputs[1];
2235  outputs[0] = g.DefineAsRegister(node);
2236  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2237  selector->Emit(code, 1, outputs, input_count, inputs);
2238 }
2239 
2240 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2241  ArchOpcode opcode = kArchNop;
2242  MachineType type = AtomicOpType(node->op());
2243  if (type == MachineType::Int8()) {
2244  opcode = kWord32AtomicExchangeInt8;
2245  } else if (type == MachineType::Uint8()) {
2246  opcode = kWord32AtomicExchangeUint8;
2247  } else if (type == MachineType::Int16()) {
2248  opcode = kWord32AtomicExchangeInt16;
2249  } else if (type == MachineType::Uint16()) {
2250  opcode = kWord32AtomicExchangeUint16;
2251  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2252  opcode = kWord32AtomicExchangeWord32;
2253  } else {
2254  UNREACHABLE();
2255  return;
2256  }
2257  VisitAtomicExchange(this, node, opcode);
2258 }
2259 
2260 void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
2261  ArchOpcode opcode = kArchNop;
2262  MachineType type = AtomicOpType(node->op());
2263  if (type == MachineType::Uint8()) {
2264  opcode = kS390_Word64AtomicExchangeUint8;
2265  } else if (type == MachineType::Uint16()) {
2266  opcode = kS390_Word64AtomicExchangeUint16;
2267  } else if (type == MachineType::Uint32()) {
2268  opcode = kS390_Word64AtomicExchangeUint32;
2269  } else if (type == MachineType::Uint64()) {
2270  opcode = kS390_Word64AtomicExchangeUint64;
2271  } else {
2272  UNREACHABLE();
2273  return;
2274  }
2275  VisitAtomicExchange(this, node, opcode);
2276 }
2277 
2278 void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
2279  ArchOpcode opcode) {
2280  S390OperandGenerator g(selector);
2281  Node* base = node->InputAt(0);
2282  Node* index = node->InputAt(1);
2283  Node* old_value = node->InputAt(2);
2284  Node* new_value = node->InputAt(3);
2285 
2286  InstructionOperand inputs[4];
2287  size_t input_count = 0;
2288  inputs[input_count++] = g.UseUniqueRegister(old_value);
2289  inputs[input_count++] = g.UseUniqueRegister(new_value);
2290  inputs[input_count++] = g.UseUniqueRegister(base);
2291 
2292  AddressingMode addressing_mode;
2293  if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
2294  inputs[input_count++] = g.UseImmediate(index);
2295  addressing_mode = kMode_MRI;
2296  } else {
2297  inputs[input_count++] = g.UseUniqueRegister(index);
2298  addressing_mode = kMode_MRR;
2299  }
2300 
2301  InstructionOperand outputs[1];
2302  size_t output_count = 0;
2303  outputs[output_count++] = g.DefineSameAsFirst(node);
2304 
2305  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2306  selector->Emit(code, output_count, outputs, input_count, inputs);
2307 }
2308 
2309 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2310  MachineType type = AtomicOpType(node->op());
2311  ArchOpcode opcode = kArchNop;
2312  if (type == MachineType::Int8()) {
2313  opcode = kWord32AtomicCompareExchangeInt8;
2314  } else if (type == MachineType::Uint8()) {
2315  opcode = kWord32AtomicCompareExchangeUint8;
2316  } else if (type == MachineType::Int16()) {
2317  opcode = kWord32AtomicCompareExchangeInt16;
2318  } else if (type == MachineType::Uint16()) {
2319  opcode = kWord32AtomicCompareExchangeUint16;
2320  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2321  opcode = kWord32AtomicCompareExchangeWord32;
2322  } else {
2323  UNREACHABLE();
2324  return;
2325  }
2326  VisitAtomicCompareExchange(this, node, opcode);
2327 }
2328 
2329 void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
2330  MachineType type = AtomicOpType(node->op());
2331  ArchOpcode opcode = kArchNop;
2332  if (type == MachineType::Uint8()) {
2333  opcode = kS390_Word64AtomicCompareExchangeUint8;
2334  } else if (type == MachineType::Uint16()) {
2335  opcode = kS390_Word64AtomicCompareExchangeUint16;
2336  } else if (type == MachineType::Uint32()) {
2337  opcode = kS390_Word64AtomicCompareExchangeUint32;
2338  } else if (type == MachineType::Uint64()) {
2339  opcode = kS390_Word64AtomicCompareExchangeUint64;
2340  } else {
2341  UNREACHABLE();
2342  return;
2343  }
2344  VisitAtomicCompareExchange(this, node, opcode);
2345 }
2346 
2347 void VisitAtomicBinop(InstructionSelector* selector, Node* node,
2348  ArchOpcode opcode) {
2349  S390OperandGenerator g(selector);
2350  Node* base = node->InputAt(0);
2351  Node* index = node->InputAt(1);
2352  Node* value = node->InputAt(2);
2353 
2354  InstructionOperand inputs[3];
2355  size_t input_count = 0;
2356  inputs[input_count++] = g.UseUniqueRegister(base);
2357 
2358  AddressingMode addressing_mode;
2359  if (g.CanBeImmediate(index, OperandMode::kInt20Imm)) {
2360  inputs[input_count++] = g.UseImmediate(index);
2361  addressing_mode = kMode_MRI;
2362  } else {
2363  inputs[input_count++] = g.UseUniqueRegister(index);
2364  addressing_mode = kMode_MRR;
2365  }
2366 
2367  inputs[input_count++] = g.UseUniqueRegister(value);
2368 
2369  InstructionOperand outputs[1];
2370  size_t output_count = 0;
2371  outputs[output_count++] = g.DefineAsRegister(node);
2372 
2373  InstructionOperand temps[1];
2374  size_t temp_count = 0;
2375  temps[temp_count++] = g.TempRegister();
2376 
2377  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2378  selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
2379  temps);
2380 }
2381 
2382 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2383  Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2384  ArchOpcode uint16_op, ArchOpcode word32_op) {
2385  MachineType type = AtomicOpType(node->op());
2386  ArchOpcode opcode = kArchNop;
2387 
2388  if (type == MachineType::Int8()) {
2389  opcode = int8_op;
2390  } else if (type == MachineType::Uint8()) {
2391  opcode = uint8_op;
2392  } else if (type == MachineType::Int16()) {
2393  opcode = int16_op;
2394  } else if (type == MachineType::Uint16()) {
2395  opcode = uint16_op;
2396  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2397  opcode = word32_op;
2398  } else {
2399  UNREACHABLE();
2400  return;
2401  }
2402  VisitAtomicBinop(this, node, opcode);
2403 }
2404 
// Instantiates VisitWord32Atomic{Add,Sub,And,Or,Xor} by forwarding to
// VisitWord32AtomicBinaryOperation with the matching width-specific opcodes.
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2418 
2419 void InstructionSelector::VisitWord64AtomicBinaryOperation(
2420  Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode word32_op,
2421  ArchOpcode word64_op) {
2422  MachineType type = AtomicOpType(node->op());
2423  ArchOpcode opcode = kArchNop;
2424 
2425  if (type == MachineType::Uint8()) {
2426  opcode = uint8_op;
2427  } else if (type == MachineType::Uint16()) {
2428  opcode = uint16_op;
2429  } else if (type == MachineType::Uint32()) {
2430  opcode = word32_op;
2431  } else if (type == MachineType::Uint64()) {
2432  opcode = word64_op;
2433  } else {
2434  UNREACHABLE();
2435  return;
2436  }
2437  VisitAtomicBinop(this, node, opcode);
2438 }
2439 
// Instantiates VisitWord64Atomic{Add,Sub,And,Or,Xor} by forwarding to
// VisitWord64AtomicBinaryOperation with the matching width-specific opcodes.
#define VISIT_ATOMIC64_BINOP(op)                                              \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {               \
    VisitWord64AtomicBinaryOperation(                                         \
        node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16,  \
        kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64);      \
  }
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
VISIT_ATOMIC64_BINOP(And)
VISIT_ATOMIC64_BINOP(Or)
VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP
2452 
2453 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2454  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2455  USE(load_rep);
2456  VisitLoad(node);
2457 }
2458 
2459 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2460  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2461  VisitGeneralStore(this, node, rep);
2462 }
2463 
2464 void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
2465 
2466 void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
2467 
2468 void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
2469 
2470 void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
2471 
2472 void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
2473 
2474 void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
2475 
2476 void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
2477 
2478 void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
2479 
2480 void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
2481 
2482 void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
2483 
2484 void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
2485 
2486 void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
2487 
2488 void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
2489 
2490 void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
2491 
2492 void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
2493 
2494 void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
2495 
2496 void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
2497 
2498 void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
2499 
2500 void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
2501 
2502 void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
2503 
2504 void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
2505 
2506 void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
2507 
2508 void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
2509 
2510 void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
2511 
2512 void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
2513 
2514 void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
2515 
2516 void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
2517 
2518 void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
2519  UNIMPLEMENTED();
2520 }
2521 
2522 void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
2523 
2524 void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
2525  UNIMPLEMENTED();
2526 }
2527 
2528 void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
2529 
2530 void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
2531 
2532 void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
2533 
2534 void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
2535 
2536 void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
2537 
2538 void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
2539  UNIMPLEMENTED();
2540 }
2541 
2542 void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
2543  UNIMPLEMENTED();
2544 }
2545 
2546 void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
2547 
2548 void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
2549 
2550 void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
2551 
2552 void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
2553 
2554 void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
2555 
2556 void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
2557 
2558 void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
2559 
// --- I8x16 SIMD visitors (16 lanes x 8-bit integers) ---
// Not yet implemented on s390; aborts if reached.
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
2607 
// --- S128 bitwise visitors (128-bit vector logical ops) ---
// Not yet implemented on s390; aborts if reached.
void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
2617 
// --- F32x4 comparison and lane-access visitors ---
// Not yet implemented on s390; aborts if reached.
void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
2631 
// Prepares the locations that will receive the results of a C function
// call. Deliberately a no-op on s390 for now: |results| is left untouched,
// which presumably means callers fall back to default result handling —
// TODO confirm once ported.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  // TODO(John): Port.
}
2637 
// --- F32x4 arithmetic, vector select, and horizontal-add visitors ---
// Not yet implemented on s390; aborts if reached.
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
2663 
// --- SIMD conversion visitors: int<->float and lane widening/narrowing ---
// Not yet implemented on s390; aborts if reached.
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
  UNIMPLEMENTED();
}

void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
  UNIMPLEMENTED();
}
2726 
// --- Boolean-vector reductions (any/all-true) and remaining I8x16 ops ---
// Not yet implemented on s390; aborts if reached.
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }

void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
2746 
2747 // static
2748 MachineOperatorBuilder::Flags
2749 InstructionSelector::SupportedMachineOperatorFlags() {
2750  return MachineOperatorBuilder::kFloat32RoundDown |
2751  MachineOperatorBuilder::kFloat64RoundDown |
2752  MachineOperatorBuilder::kFloat32RoundUp |
2753  MachineOperatorBuilder::kFloat64RoundUp |
2754  MachineOperatorBuilder::kFloat32RoundTruncate |
2755  MachineOperatorBuilder::kFloat64RoundTruncate |
2756  MachineOperatorBuilder::kFloat64RoundTiesAway |
2757  MachineOperatorBuilder::kWord32Popcnt |
2758  MachineOperatorBuilder::kInt32AbsWithOverflow |
2759  MachineOperatorBuilder::kInt64AbsWithOverflow |
2760  MachineOperatorBuilder::kWord64Popcnt;
2761 }
2762 
2763 // static
2764 MachineOperatorBuilder::AlignmentRequirements
2765 InstructionSelector::AlignmentRequirements() {
2766  return MachineOperatorBuilder::AlignmentRequirements::
2767  FullUnalignedAccessSupport();
2768 }
2769 
2770 } // namespace compiler
2771 } // namespace internal
2772 } // namespace v8
// NOTE(review): extraction artifact from a documentation scrape, commented
// out so it is not parsed as code: Definition: libplatform.h:13