V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
code-generator-s390.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/code-generator.h"
6 
7 #include "src/assembler-inl.h"
8 #include "src/callable.h"
9 #include "src/compiler/backend/code-generator-impl.h"
10 #include "src/compiler/backend/gap-resolver.h"
11 #include "src/compiler/node-matchers.h"
12 #include "src/compiler/osr.h"
13 #include "src/macro-assembler.h"
14 #include "src/optimized-compilation-info.h"
15 #include "src/wasm/wasm-code-manager.h"
16 #include "src/wasm/wasm-objects.h"
17 
18 namespace v8 {
19 namespace internal {
20 namespace compiler {
21 
22 #define __ tasm()->
23 
24 #define kScratchReg ip
25 
26 // Adds S390-specific methods to convert InstructionOperands.
27 class S390OperandConverter final : public InstructionOperandConverter {
28  public:
29  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
30  : InstructionOperandConverter(gen, instr) {}
31 
32  size_t OutputCount() { return instr_->OutputCount(); }
33 
34  bool Is64BitOperand(int index) {
35  return LocationOperand::cast(instr_->InputAt(index))->representation() ==
36  MachineRepresentation::kWord64;
37  }
38 
39  bool Is32BitOperand(int index) {
40  return LocationOperand::cast(instr_->InputAt(index))->representation() ==
41  MachineRepresentation::kWord32;
42  }
43 
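 // Unsigned conditions must be materialized with the s390 "compare logical"
 // (CL-family) instructions rather than the signed C-family compares; the
 // ASSEMBLE_COMPARE* macros further below use this predicate to pick
 // between the two.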
44  bool CompareLogical() const {
45  switch (instr_->flags_condition()) {
46  case kUnsignedLessThan:
47  case kUnsignedGreaterThanOrEqual:
48  case kUnsignedLessThanOrEqual:
49  case kUnsignedGreaterThan:
50  return true;
51  default:
52  return false;
53  }
54  UNREACHABLE();
55  }
56 
57  Operand InputImmediate(size_t index) {
58  Constant constant = ToConstant(instr_->InputAt(index));
59  switch (constant.type()) {
60  case Constant::kInt32:
61  return Operand(constant.ToInt32());
62  case Constant::kFloat32:
63  return Operand::EmbeddedNumber(constant.ToFloat32());
64  case Constant::kFloat64:
65  return Operand::EmbeddedNumber(constant.ToFloat64().value());
66  case Constant::kInt64:
67 #if V8_TARGET_ARCH_S390X
68  return Operand(constant.ToInt64());
69 #endif
70  case Constant::kExternalReference:
71  return Operand(constant.ToExternalReference());
72  case Constant::kDelayedStringConstant:
73  return Operand::EmbeddedStringConstant(
74  constant.ToDelayedStringConstant());
75  case Constant::kHeapObject:
76  case Constant::kRpoNumber:
77  break;
78  }
79  UNREACHABLE();
80  }
81 
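 // The addressing modes handled below decode as follows:
 //   kMode_MR   -> MemOperand(base)                      (1 input consumed)
 //   kMode_MRI  -> MemOperand(base, displacement)        (2 inputs)
 //   kMode_MRR  -> MemOperand(base, index)               (2 inputs)
 //   kMode_MRRI -> MemOperand(base, index, displacement) (3 inputs)
 // *first_index is advanced past the inputs the mode consumed.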
82  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
83  const size_t index = *first_index;
84  if (mode) *mode = AddressingModeField::decode(instr_->opcode());
85  switch (AddressingModeField::decode(instr_->opcode())) {
86  case kMode_None:
87  break;
88  case kMode_MR:
89  *first_index += 1;
90  return MemOperand(InputRegister(index + 0), 0);
91  case kMode_MRI:
92  *first_index += 2;
93  return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
94  case kMode_MRR:
95  *first_index += 2;
96  return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
97  case kMode_MRRI:
98  *first_index += 3;
99  return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
100  InputInt32(index + 2));
101  }
102  UNREACHABLE();
103  }
104 
105  MemOperand MemoryOperand(AddressingMode* mode = nullptr,
106  size_t first_index = 0) {
107  return MemoryOperand(mode, &first_index);
108  }
109 
110  MemOperand ToMemOperand(InstructionOperand* op) const {
111  DCHECK_NOT_NULL(op);
112  DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
113  return SlotToMemOperand(AllocatedOperand::cast(op)->index());
114  }
115 
116  MemOperand SlotToMemOperand(int slot) const {
117  FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
118  return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
119  }
120 
121  MemOperand InputStackSlot(size_t index) {
122  InstructionOperand* op = instr_->InputAt(index);
123  return SlotToMemOperand(AllocatedOperand::cast(op)->index());
124  }
125 
126  MemOperand InputStackSlot32(size_t index) {
127 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
128  // On 64-bit big-endian targets the low 32 bits of the slot are at +4.
129  MemOperand mem = InputStackSlot(index);
130  return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
131 #else
132  return InputStackSlot(index);
133 #endif
134  }
135 };
136 
137 static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
138  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
139 }
140 
141 static inline bool HasFPRegisterInput(Instruction* instr, int index) {
142  return instr->InputAt(index)->IsFPRegister();
143 }
144 
145 static inline bool HasRegisterInput(Instruction* instr, int index) {
146  return instr->InputAt(index)->IsRegister() ||
147  HasFPRegisterInput(instr, index);
148 }
149 
150 static inline bool HasImmediateInput(Instruction* instr, size_t index) {
151  return instr->InputAt(index)->IsImmediate();
152 }
153 
154 static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
155  return instr->InputAt(index)->IsFPStackSlot();
156 }
157 
158 static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
159  return instr->InputAt(index)->IsStackSlot() ||
160  HasFPStackSlotInput(instr, index);
161 }
162 
163 namespace {
164 
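// Out-of-line slow path for the write barrier. The inline code emitted for
// kArchStoreWithWriteBarrier branches here only when the object's page is
// flagged as interesting; this path then filters out Smi values and values
// on uninteresting pages before calling the RecordWrite stub, saving r14
// around the call when the frame was elided.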
165 class OutOfLineRecordWrite final : public OutOfLineCode {
166  public:
167  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
168  Register value, Register scratch0, Register scratch1,
169  RecordWriteMode mode, StubCallMode stub_mode)
170  : OutOfLineCode(gen),
171  object_(object),
172  offset_(offset),
173  offset_immediate_(0),
174  value_(value),
175  scratch0_(scratch0),
176  scratch1_(scratch1),
177  mode_(mode),
178  stub_mode_(stub_mode),
179  must_save_lr_(!gen->frame_access_state()->has_frame()),
180  zone_(gen->zone()) {}
181 
182  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
183  Register value, Register scratch0, Register scratch1,
184  RecordWriteMode mode, StubCallMode stub_mode)
185  : OutOfLineCode(gen),
186  object_(object),
187  offset_(no_reg),
188  offset_immediate_(offset),
189  value_(value),
190  scratch0_(scratch0),
191  scratch1_(scratch1),
192  mode_(mode),
193  stub_mode_(stub_mode),
194  must_save_lr_(!gen->frame_access_state()->has_frame()),
195  zone_(gen->zone()) {}
196 
197  void Generate() final {
198  if (mode_ > RecordWriteMode::kValueIsPointer) {
199  __ JumpIfSmi(value_, exit());
200  }
201  __ CheckPageFlag(value_, scratch0_,
202  MemoryChunk::kPointersToHereAreInterestingMask, eq,
203  exit());
204  if (offset_ == no_reg) {
205  __ AddP(scratch1_, object_, Operand(offset_immediate_));
206  } else {
207  DCHECK_EQ(0, offset_immediate_);
208  __ AddP(scratch1_, object_, offset_);
209  }
210  RememberedSetAction const remembered_set_action =
211  mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
212  : OMIT_REMEMBERED_SET;
213  SaveFPRegsMode const save_fp_mode =
214  frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
215  if (must_save_lr_) {
216  // We need to save and restore r14 if the frame was elided.
217  __ Push(r14);
218  }
219  if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
220  __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
221  save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
222  } else {
223  __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
224  save_fp_mode);
225  }
226  if (must_save_lr_) {
227  // We need to save and restore r14 if the frame was elided.
228  __ Pop(r14);
229  }
230  }
231 
232  private:
233  Register const object_;
234  Register const offset_;
235  int32_t const offset_immediate_; // Valid if offset_ == no_reg.
236  Register const value_;
237  Register const scratch0_;
238  Register const scratch1_;
239  RecordWriteMode const mode_;
240  StubCallMode stub_mode_;
241  bool must_save_lr_;
242  Zone* zone_;
243 };
244 
245 Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
246  switch (condition) {
247  case kEqual:
248  return eq;
249  case kNotEqual:
250  return ne;
251  case kUnsignedLessThan:
252  // An unsigned number is never less than 0.
253  if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
254  return CC_NOP;
255  V8_FALLTHROUGH;
256  case kSignedLessThan:
257  return lt;
258  case kUnsignedGreaterThanOrEqual:
259  // An unsigned number is always greater than or equal to 0.
260  if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
261  return CC_ALWAYS;
262  V8_FALLTHROUGH;
263  case kSignedGreaterThanOrEqual:
264  return ge;
265  case kUnsignedLessThanOrEqual:
266  // Unsigned x <= 0 can only hold when x == 0.
267  if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
268  return CC_EQ;
269  V8_FALLTHROUGH;
270  case kSignedLessThanOrEqual:
271  return le;
272  case kUnsignedGreaterThan:
273  // Unsigned x > 0 holds exactly when x != 0.
274  if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
275  return ne;
276  V8_FALLTHROUGH;
277  case kSignedGreaterThan:
278  return gt;
279  case kOverflow:
280  // Overflow is only checked for the arithmetic ops listed below.
281  switch (op) {
282  case kS390_Add32:
283  case kS390_Add64:
284  case kS390_Sub32:
285  case kS390_Sub64:
286  case kS390_Abs64:
287  case kS390_Abs32:
288  case kS390_Mul32:
289  return overflow;
290  default:
291  break;
292  }
293  break;
294  case kNotOverflow:
295  switch (op) {
296  case kS390_Add32:
297  case kS390_Add64:
298  case kS390_Sub32:
299  case kS390_Sub64:
300  case kS390_Abs64:
301  case kS390_Abs32:
302  case kS390_Mul32:
303  return nooverflow;
304  default:
305  break;
306  }
307  break;
308  default:
309  break;
310  }
311  UNREACHABLE();
312 }
313 
314 #define GET_MEMOPERAND32(ret, fi) \
315  ([&](int& ret) { \
316  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
317  MemOperand mem(r0); \
318  if (mode != kMode_None) { \
319  size_t first_index = (fi); \
320  mem = i.MemoryOperand(&mode, &first_index); \
321  ret = first_index; \
322  } else { \
323  mem = i.InputStackSlot32(fi); \
324  } \
325  return mem; \
326  })(ret)
327 
328 #define GET_MEMOPERAND(ret, fi) \
329  ([&](int& ret) { \
330  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
331  MemOperand mem(r0); \
332  if (mode != kMode_None) { \
333  size_t first_index = (fi); \
334  mem = i.MemoryOperand(&mode, &first_index); \
335  ret = first_index; \
336  } else { \
337  mem = i.InputStackSlot(fi); \
338  } \
339  return mem; \
340  })(ret)
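// The GET_MEMOPERAND* macros expand to an immediately-invoked lambda so a
// single expression can both yield the MemOperand and, via the by-reference
// `ret` parameter, report the index of the next unconsumed input back to the
// RM*/RRM* helpers below.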
341 
342 #define RRInstr(instr) \
343  [&]() { \
344  DCHECK(i.OutputRegister() == i.InputRegister(0)); \
345  __ instr(i.OutputRegister(), i.InputRegister(1)); \
346  return 2; \
347  }
348 #define RIInstr(instr) \
349  [&]() { \
350  DCHECK(i.OutputRegister() == i.InputRegister(0)); \
351  __ instr(i.OutputRegister(), i.InputImmediate(1)); \
352  return 2; \
353  }
354 #define RMInstr(instr, GETMEM) \
355  [&]() { \
356  DCHECK(i.OutputRegister() == i.InputRegister(0)); \
357  int ret = 2; \
358  __ instr(i.OutputRegister(), GETMEM(ret, 1)); \
359  return ret; \
360  }
361 #define RM32Instr(instr) RMInstr(instr, GET_MEMOPERAND32)
362 #define RM64Instr(instr) RMInstr(instr, GET_MEMOPERAND)
363 
364 #define RRRInstr(instr) \
365  [&]() { \
366  __ instr(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); \
367  return 2; \
368  }
369 #define RRIInstr(instr) \
370  [&]() { \
371  __ instr(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1)); \
372  return 2; \
373  }
374 #define RRMInstr(instr, GETMEM) \
375  [&]() { \
376  int ret = 2; \
377  __ instr(i.OutputRegister(), i.InputRegister(0), GETMEM(ret, 1)); \
378  return ret; \
379  }
380 #define RRM32Instr(instr) RRMInstr(instr, GET_MEMOPERAND32)
381 #define RRM64Instr(instr) RRMInstr(instr, GET_MEMOPERAND)
382 
383 #define DDInstr(instr) \
384  [&]() { \
385  DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
386  __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
387  return 2; \
388  }
389 
390 #define DMInstr(instr) \
391  [&]() { \
392  DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
393  int ret = 2; \
394  __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1)); \
395  return ret; \
396  }
397 
398 #define DMTInstr(instr) \
399  [&]() { \
400  DCHECK(i.OutputDoubleRegister() == i.InputDoubleRegister(0)); \
401  int ret = 2; \
402  __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 1), \
403  kScratchDoubleReg); \
404  return ret; \
405  }
406 
407 #define R_MInstr(instr) \
408  [&]() { \
409  int ret = 2; \
410  __ instr(i.OutputRegister(), GET_MEMOPERAND(ret, 0)); \
411  return ret; \
412  }
413 
414 #define R_DInstr(instr) \
415  [&]() { \
416  __ instr(i.OutputRegister(), i.InputDoubleRegister(0)); \
417  return 2; \
418  }
419 
420 #define D_DInstr(instr) \
421  [&]() { \
422  __ instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
423  return 2; \
424  }
425 
426 #define D_MInstr(instr) \
427  [&]() { \
428  int ret = 2; \
429  __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0)); \
430  return ret; \
431  }
432 
433 #define D_MTInstr(instr) \
434  [&]() { \
435  int ret = 2; \
436  __ instr(i.OutputDoubleRegister(), GET_MEMOPERAND(ret, 0), \
437  kScratchDoubleReg); \
438  return ret; \
439  }
440 
441 static int nullInstr() { UNREACHABLE(); }
442 
443 template <int numOfOperand, class RType, class MType, class IType>
444 static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
445  AddressingMode mode = AddressingModeField::decode(instr->opcode());
446  if (mode != kMode_None || HasStackSlotInput(instr, numOfOperand - 1)) {
447  return m();
448  } else if (HasRegisterInput(instr, numOfOperand - 1)) {
449  return r();
450  } else if (HasImmediateInput(instr, numOfOperand - 1)) {
451  return i();
452  } else {
453  UNREACHABLE();
454  }
455 }
456 
457 template <class _RR, class _RM, class _RI>
458 static inline int AssembleBinOp(Instruction* instr, _RR _rr, _RM _rm, _RI _ri) {
459  return AssembleOp<2>(instr, _rr, _rm, _ri);
460 }
461 
462 template <class _R, class _M, class _I>
463 static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
464  return AssembleOp<1>(instr, _r, _m, _i);
465 }
466 
467 #define ASSEMBLE_BIN_OP(_rr, _rm, _ri) AssembleBinOp(instr, _rr, _rm, _ri)
468 #define ASSEMBLE_UNARY_OP(_r, _m, _i) AssembleUnaryOp(instr, _r, _m, _i)
469 
470 #ifdef V8_TARGET_ARCH_S390X
471 #define CHECK_AND_ZERO_EXT_OUTPUT(num) \
472  ([&](int index) { \
473  DCHECK(HasImmediateInput(instr, (index))); \
474  int doZeroExt = i.InputInt32(index); \
475  if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
476  })(num)
477 
478 #define ASSEMBLE_BIN32_OP(_rr, _rm, _ri) \
479  { CHECK_AND_ZERO_EXT_OUTPUT(AssembleBinOp(instr, _rr, _rm, _ri)); }
480 #else
481 #define ASSEMBLE_BIN32_OP ASSEMBLE_BIN_OP
482 #define CHECK_AND_ZERO_EXT_OUTPUT(num)
483 #endif
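// Each instruction-format lambda above returns the index of the first input
// it did not consume. For the 32-bit variants on 64-bit targets that input
// is an immediate flag saying whether the result still needs zero-extension,
// which CHECK_AND_ZERO_EXT_OUTPUT tests and, if set, clears the high word
// via LoadlW.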
484 
485 } // namespace
486 
487 #define ASSEMBLE_FLOAT_UNOP(asm_instr) \
488  do { \
489  __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
490  } while (0)
491 
492 #define ASSEMBLE_FLOAT_BINOP(asm_instr) \
493  do { \
494  __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
495  i.InputDoubleRegister(1)); \
496  } while (0)
497 
498 #define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
499  do { \
500  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
501  if (mode != kMode_None) { \
502  size_t first_index = 1; \
503  MemOperand operand = i.MemoryOperand(&mode, &first_index); \
504  if (i.CompareLogical()) { \
505  __ cmpl_instr(i.InputRegister(0), operand); \
506  } else { \
507  __ cmp_instr(i.InputRegister(0), operand); \
508  } \
509  } else if (HasRegisterInput(instr, 1)) { \
510  if (i.CompareLogical()) { \
511  __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
512  } else { \
513  __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
514  } \
515  } else if (HasImmediateInput(instr, 1)) { \
516  if (i.CompareLogical()) { \
517  __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
518  } else { \
519  __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
520  } \
521  } else { \
522  DCHECK(HasStackSlotInput(instr, 1)); \
523  if (i.CompareLogical()) { \
524  __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1)); \
525  } else { \
526  __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1)); \
527  } \
528  } \
529  } while (0)
530 
531 #define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr) \
532  do { \
533  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
534  if (mode != kMode_None) { \
535  size_t first_index = 1; \
536  MemOperand operand = i.MemoryOperand(&mode, &first_index); \
537  if (i.CompareLogical()) { \
538  __ cmpl_instr(i.InputRegister(0), operand); \
539  } else { \
540  __ cmp_instr(i.InputRegister(0), operand); \
541  } \
542  } else if (HasRegisterInput(instr, 1)) { \
543  if (i.CompareLogical()) { \
544  __ cmpl_instr(i.InputRegister(0), i.InputRegister(1)); \
545  } else { \
546  __ cmp_instr(i.InputRegister(0), i.InputRegister(1)); \
547  } \
548  } else if (HasImmediateInput(instr, 1)) { \
549  if (i.CompareLogical()) { \
550  __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
551  } else { \
552  __ cmp_instr(i.InputRegister(0), i.InputImmediate(1)); \
553  } \
554  } else { \
555  DCHECK(HasStackSlotInput(instr, 1)); \
556  if (i.CompareLogical()) { \
557  __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
558  } else { \
559  __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1)); \
560  } \
561  } \
562  } while (0)
563 
564 #define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr) \
565  do { \
566  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
567  if (mode != kMode_None) { \
568  size_t first_index = 1; \
569  MemOperand operand = i.MemoryOperand(&mode, &first_index); \
570  __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
571  } else if (HasFPRegisterInput(instr, 1)) { \
572  __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
573  } else { \
574  USE(HasFPStackSlotInput); \
575  DCHECK(HasFPStackSlotInput(instr, 1)); \
576  MemOperand operand = i.InputStackSlot(1); \
577  if (operand.offset() >= 0) { \
578  __ cmp_rm_instr(i.InputDoubleRegister(0), operand); \
579  } else { \
580  __ load_instr(kScratchDoubleReg, operand); \
581  __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg); \
582  } \
583  } \
584  } while (0)
585 
586 // The divide instruction below implicitly uses the register pair r0:r1:
587 // it divides the 64-bit value in r0:r1 by the divisor, leaving the
588 // quotient in r1 and the remainder in r0.
589 // Copy the remainder to the output register.
590 #define ASSEMBLE_MODULO(div_instr, shift_instr) \
591  do { \
592  __ LoadRR(r0, i.InputRegister(0)); \
593  __ shift_instr(r0, Operand(32)); \
594  __ div_instr(r0, i.InputRegister(1)); \
595  __ LoadlW(i.OutputRegister(), r0); \
596  } while (0)
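// Rough expansion sketch, assuming the unsigned instantiation
// ASSEMBLE_MODULO(dlr, srdl) (the instantiating opcode cases fall outside
// this excerpt):
//   lr    r0, <dividend>   // dividend into the even half of r0:r1
//   srdl  r0, 32           // shift pair right: r1 = dividend, r0 = 0
//   dlr   r0, <divisor>    // r0:r1 / divisor -> r0 = remainder, r1 = quotient
//   llgfr <out>, r0        // LoadlW: remainder, zero-extended, to output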
597 
598 #define ASSEMBLE_FLOAT_MODULO() \
599  do { \
600  FrameScope scope(tasm(), StackFrame::MANUAL); \
601  __ PrepareCallCFunction(0, 2, kScratchReg); \
602  __ MovToFloatParameters(i.InputDoubleRegister(0), \
603  i.InputDoubleRegister(1)); \
604  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
605  __ MovFromFloatResult(i.OutputDoubleRegister()); \
606  } while (0)
607 
608 #define ASSEMBLE_IEEE754_UNOP(name) \
609  do { \
610  /* TODO(bmeurer): We should really get rid of this special instruction, */ \
611  /* and generate a CallAddress instruction instead. */ \
612  FrameScope scope(tasm(), StackFrame::MANUAL); \
613  __ PrepareCallCFunction(0, 1, kScratchReg); \
614  __ MovToFloatParameter(i.InputDoubleRegister(0)); \
615  __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
616  /* Move the result in the double result register. */ \
617  __ MovFromFloatResult(i.OutputDoubleRegister()); \
618  } while (0)
619 
620 #define ASSEMBLE_IEEE754_BINOP(name) \
621  do { \
622  /* TODO(bmeurer): We should really get rid of this special instruction, */ \
623  /* and generate a CallAddress instruction instead. */ \
624  FrameScope scope(tasm(), StackFrame::MANUAL); \
625  __ PrepareCallCFunction(0, 2, kScratchReg); \
626  __ MovToFloatParameters(i.InputDoubleRegister(0), \
627  i.InputDoubleRegister(1)); \
628  __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
629  /* Move the result in the double result register. */ \
630  __ MovFromFloatResult(i.OutputDoubleRegister()); \
631  } while (0)
632 
633 #define ASSEMBLE_DOUBLE_MAX() \
634  do { \
635  DoubleRegister left_reg = i.InputDoubleRegister(0); \
636  DoubleRegister right_reg = i.InputDoubleRegister(1); \
637  DoubleRegister result_reg = i.OutputDoubleRegister(); \
638  Label check_nan_left, check_zero, return_left, return_right, done; \
639  __ cdbr(left_reg, right_reg); \
640  __ bunordered(&check_nan_left, Label::kNear); \
641  __ beq(&check_zero); \
642  __ bge(&return_left, Label::kNear); \
643  __ b(&return_right, Label::kNear); \
644  \
645  __ bind(&check_zero); \
646  __ lzdr(kDoubleRegZero); \
647  __ cdbr(left_reg, kDoubleRegZero); \
648  /* left == right != 0. */ \
649  __ bne(&return_left, Label::kNear); \
650  /* At this point, both left and right are either 0 or -0. */ \
651  /* N.B. The following works because +0 + -0 == +0 */ \
652  /* For max we want logical-and of sign bit: (L + R) */ \
653  __ ldr(result_reg, left_reg); \
654  __ adbr(result_reg, right_reg); \
655  __ b(&done, Label::kNear); \
656  \
657  __ bind(&check_nan_left); \
658  __ cdbr(left_reg, left_reg); \
659  /* left == NaN. */ \
660  __ bunordered(&return_left, Label::kNear); \
661  \
662  __ bind(&return_right); \
663  if (right_reg != result_reg) { \
664  __ ldr(result_reg, right_reg); \
665  } \
666  __ b(&done, Label::kNear); \
667  \
668  __ bind(&return_left); \
669  if (left_reg != result_reg) { \
670  __ ldr(result_reg, left_reg); \
671  } \
672  __ bind(&done); \
673  } while (0)
674 
675 #define ASSEMBLE_DOUBLE_MIN() \
676  do { \
677  DoubleRegister left_reg = i.InputDoubleRegister(0); \
678  DoubleRegister right_reg = i.InputDoubleRegister(1); \
679  DoubleRegister result_reg = i.OutputDoubleRegister(); \
680  Label check_nan_left, check_zero, return_left, return_right, done; \
681  __ cdbr(left_reg, right_reg); \
682  __ bunordered(&check_nan_left, Label::kNear); \
683  __ beq(&check_zero); \
684  __ ble(&return_left, Label::kNear); \
685  __ b(&return_right, Label::kNear); \
686  \
687  __ bind(&check_zero); \
688  __ lzdr(kDoubleRegZero); \
689  __ cdbr(left_reg, kDoubleRegZero); \
690  /* left == right != 0. */ \
691  __ bne(&return_left, Label::kNear); \
692  /* At this point, both left and right are either 0 or -0. */ \
693  /* N.B. The following works because +0 + -0 == +0 */ \
694  /* For min we want logical-or of sign bit: -(-L + -R) */ \
695  __ lcdbr(left_reg, left_reg); \
696  __ ldr(result_reg, left_reg); \
697  if (left_reg == right_reg) { \
698  __ adbr(result_reg, right_reg); \
699  } else { \
700  __ sdbr(result_reg, right_reg); \
701  } \
702  __ lcdbr(result_reg, result_reg); \
703  __ b(&done, Label::kNear); \
704  \
705  __ bind(&check_nan_left); \
706  __ cdbr(left_reg, left_reg); \
707  /* left == NaN. */ \
708  __ bunordered(&return_left, Label::kNear); \
709  \
710  __ bind(&return_right); \
711  if (right_reg != result_reg) { \
712  __ ldr(result_reg, right_reg); \
713  } \
714  __ b(&done, Label::kNear); \
715  \
716  __ bind(&return_left); \
717  if (left_reg != result_reg) { \
718  __ ldr(result_reg, left_reg); \
719  } \
720  __ bind(&done); \
721  } while (0)
722 
723 #define ASSEMBLE_FLOAT_MAX() \
724  do { \
725  DoubleRegister left_reg = i.InputDoubleRegister(0); \
726  DoubleRegister right_reg = i.InputDoubleRegister(1); \
727  DoubleRegister result_reg = i.OutputDoubleRegister(); \
728  Label check_nan_left, check_zero, return_left, return_right, done; \
729  __ cebr(left_reg, right_reg); \
730  __ bunordered(&check_nan_left, Label::kNear); \
731  __ beq(&check_zero); \
732  __ bge(&return_left, Label::kNear); \
733  __ b(&return_right, Label::kNear); \
734  \
735  __ bind(&check_zero); \
736  __ lzdr(kDoubleRegZero); \
737  __ cebr(left_reg, kDoubleRegZero); \
738  /* left == right != 0. */ \
739  __ bne(&return_left, Label::kNear); \
740  /* At this point, both left and right are either 0 or -0. */ \
741  /* N.B. The following works because +0 + -0 == +0 */ \
742  /* For max we want logical-and of sign bit: (L + R) */ \
743  __ ldr(result_reg, left_reg); \
744  __ aebr(result_reg, right_reg); \
745  __ b(&done, Label::kNear); \
746  \
747  __ bind(&check_nan_left); \
748  __ cebr(left_reg, left_reg); \
749  /* left == NaN. */ \
750  __ bunordered(&return_left, Label::kNear); \
751  \
752  __ bind(&return_right); \
753  if (right_reg != result_reg) { \
754  __ ldr(result_reg, right_reg); \
755  } \
756  __ b(&done, Label::kNear); \
757  \
758  __ bind(&return_left); \
759  if (left_reg != result_reg) { \
760  __ ldr(result_reg, left_reg); \
761  } \
762  __ bind(&done); \
763  } while (0)
764 
765 #define ASSEMBLE_FLOAT_MIN() \
766  do { \
767  DoubleRegister left_reg = i.InputDoubleRegister(0); \
768  DoubleRegister right_reg = i.InputDoubleRegister(1); \
769  DoubleRegister result_reg = i.OutputDoubleRegister(); \
770  Label check_nan_left, check_zero, return_left, return_right, done; \
771  __ cebr(left_reg, right_reg); \
772  __ bunordered(&check_nan_left, Label::kNear); \
773  __ beq(&check_zero); \
774  __ ble(&return_left, Label::kNear); \
775  __ b(&return_right, Label::kNear); \
776  \
777  __ bind(&check_zero); \
778  __ lzdr(kDoubleRegZero); \
779  __ cebr(left_reg, kDoubleRegZero); \
780  /* left == right != 0. */ \
781  __ bne(&return_left, Label::kNear); \
782  /* At this point, both left and right are either 0 or -0. */ \
783  /* N.B. The following works because +0 + -0 == +0 */ \
784  /* For min we want logical-or of sign bit: -(-L + -R) */ \
785  __ lcebr(left_reg, left_reg); \
786  __ ldr(result_reg, left_reg); \
787  if (left_reg == right_reg) { \
788  __ aebr(result_reg, right_reg); \
789  } else { \
790  __ sebr(result_reg, right_reg); \
791  } \
792  __ lcebr(result_reg, result_reg); \
793  __ b(&done, Label::kNear); \
794  \
795  __ bind(&check_nan_left); \
796  __ cebr(left_reg, left_reg); \
797  /* left == NaN. */ \
798  __ bunordered(&return_left, Label::kNear); \
799  \
800  __ bind(&return_right); \
801  if (right_reg != result_reg) { \
802  __ ldr(result_reg, right_reg); \
803  } \
804  __ b(&done, Label::kNear); \
805  \
806  __ bind(&return_left); \
807  if (left_reg != result_reg) { \
808  __ ldr(result_reg, left_reg); \
809  } \
810  __ bind(&done); \
811  } while (0)
812 
813 // Only the MRI addressing mode is available for these instructions.
814 #define ASSEMBLE_LOAD_FLOAT(asm_instr) \
815  do { \
816  DoubleRegister result = i.OutputDoubleRegister(); \
817  AddressingMode mode = kMode_None; \
818  MemOperand operand = i.MemoryOperand(&mode); \
819  __ asm_instr(result, operand); \
820  } while (0)
821 
822 #define ASSEMBLE_LOAD_INTEGER(asm_instr) \
823  do { \
824  Register result = i.OutputRegister(); \
825  AddressingMode mode = kMode_None; \
826  MemOperand operand = i.MemoryOperand(&mode); \
827  __ asm_instr(result, operand); \
828  } while (0)
829 
830 #define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm) \
831  { \
832  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
833  Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
834  if (mode != kMode_None) { \
835  size_t first_index = 0; \
836  MemOperand operand = i.MemoryOperand(&mode, &first_index); \
837  __ asm_instr_rm(dst, operand); \
838  } else if (HasRegisterInput(instr, 0)) { \
839  __ asm_instr_rr(dst, i.InputRegister(0)); \
840  } else { \
841  DCHECK(HasStackSlotInput(instr, 0)); \
842  __ asm_instr_rm(dst, i.InputStackSlot(0)); \
843  } \
844  }
845 
846 #define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm) \
847  { \
848  AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
849  Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0; \
850  if (mode != kMode_None) { \
851  size_t first_index = 0; \
852  MemOperand operand = i.MemoryOperand(&mode, &first_index); \
853  __ asm_instr_rm(dst, operand); \
854  } else if (HasRegisterInput(instr, 0)) { \
855  __ asm_instr_rr(dst, i.InputRegister(0)); \
856  } else { \
857  DCHECK(HasStackSlotInput(instr, 0)); \
858  __ asm_instr_rm(dst, i.InputStackSlot32(0)); \
859  } \
860  }
861 
862 #define ASSEMBLE_STORE_FLOAT32() \
863  do { \
864  size_t index = 0; \
865  AddressingMode mode = kMode_None; \
866  MemOperand operand = i.MemoryOperand(&mode, &index); \
867  DoubleRegister value = i.InputDoubleRegister(index); \
868  __ StoreFloat32(value, operand); \
869  } while (0)
870 
871 #define ASSEMBLE_STORE_DOUBLE() \
872  do { \
873  size_t index = 0; \
874  AddressingMode mode = kMode_None; \
875  MemOperand operand = i.MemoryOperand(&mode, &index); \
876  DoubleRegister value = i.InputDoubleRegister(index); \
877  __ StoreDouble(value, operand); \
878  } while (0)
879 
880 #define ASSEMBLE_STORE_INTEGER(asm_instr) \
881  do { \
882  size_t index = 0; \
883  AddressingMode mode = kMode_None; \
884  MemOperand operand = i.MemoryOperand(&mode, &index); \
885  Register value = i.InputRegister(index); \
886  __ asm_instr(value, operand); \
887  } while (0)
888 
889 #define ATOMIC_COMP_EXCHANGE(start, end, shift_amount, offset) \
890  { \
891  __ LoadlW(temp0, MemOperand(addr, offset)); \
892  __ llgfr(temp1, temp0); \
893  __ RotateInsertSelectBits(temp0, old_val, Operand(start), Operand(end), \
894  Operand(shift_amount), false); \
895  __ RotateInsertSelectBits(temp1, new_val, Operand(start), Operand(end), \
896  Operand(shift_amount), false); \
897  __ CmpAndSwap(temp0, temp1, MemOperand(addr, offset)); \
898  __ RotateInsertSelectBits(output, temp0, Operand(start + shift_amount), \
899  Operand(end + shift_amount), \
900  Operand(64 - shift_amount), true); \
901  }
902 
903 #ifdef V8_TARGET_BIG_ENDIAN
904 #define ATOMIC_COMP_EXCHANGE_BYTE(i) \
905  { \
906  constexpr int idx = (i); \
907  static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
908  constexpr int start = 32 + 8 * idx; \
909  constexpr int end = start + 7; \
910  constexpr int shift_amount = (3 - idx) * 8; \
911  ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
912  }
913 #define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
914  { \
915  constexpr int idx = (i); \
916  static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
917  constexpr int start = 32 + 16 * idx; \
918  constexpr int end = start + 15; \
919  constexpr int shift_amount = (1 - idx) * 16; \
920  ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
921  }
922 #else
923 #define ATOMIC_COMP_EXCHANGE_BYTE(i) \
924  { \
925  constexpr int idx = (i); \
926  static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
927  constexpr int start = 32 + 8 * (3 - idx); \
928  constexpr int end = start + 7; \
929  constexpr int shift_amount = idx * 8; \
930  ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx); \
931  }
932 #define ATOMIC_COMP_EXCHANGE_HALFWORD(i) \
933  { \
934  constexpr int idx = (i); \
935  static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
936  constexpr int start = 32 + 16 * (1 - idx); \
937  constexpr int end = start + 15; \
938  constexpr int shift_amount = idx * 16; \
939  ATOMIC_COMP_EXCHANGE(start, end, shift_amount, -idx * 2); \
940  }
941 #endif
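// Worked example of the field arithmetic (big-endian, byte index 1):
// start = 32 + 8*1 = 40, end = 47, shift_amount = (3-1)*8 = 16, offset = -1.
// The old and new values are rotated into bits 40..47 of the aligned word,
// CmpAndSwap operates on the whole word, and the final
// RotateInsertSelectBits rotates the field's previous contents down into
// the low byte of the output register.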
942 
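// In the macros below, tmll tests the low two bits of the address
// (TEST UNDER MASK: CC0/CC1/CC2/CC3 for 0b00/0b01/0b10/0b11) and the
// Condition(n) branch masks dispatch to the matching lane, so the word-wide
// compare-and-swap always operates on a 4-byte-aligned address.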
943 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(load_and_ext) \
944  do { \
945  Register old_val = i.InputRegister(0); \
946  Register new_val = i.InputRegister(1); \
947  Register output = i.OutputRegister(); \
948  Register addr = kScratchReg; \
949  Register temp0 = r0; \
950  Register temp1 = r1; \
951  size_t index = 2; \
952  AddressingMode mode = kMode_None; \
953  MemOperand op = i.MemoryOperand(&mode, &index); \
954  Label three, two, one, done; \
955  __ lay(addr, op); \
956  __ tmll(addr, Operand(3)); \
957  __ b(Condition(1), &three); \
958  __ b(Condition(2), &two); \
959  __ b(Condition(4), &one); \
960  /* ending with 0b00 */ \
961  ATOMIC_COMP_EXCHANGE_BYTE(0); \
962  __ b(&done); \
963  /* ending with 0b01 */ \
964  __ bind(&one); \
965  ATOMIC_COMP_EXCHANGE_BYTE(1); \
966  __ b(&done); \
967  /* ending with 0b10 */ \
968  __ bind(&two); \
969  ATOMIC_COMP_EXCHANGE_BYTE(2); \
970  __ b(&done); \
971  /* ending with 0b11 */ \
972  __ bind(&three); \
973  ATOMIC_COMP_EXCHANGE_BYTE(3); \
974  __ bind(&done); \
975  __ load_and_ext(output, output); \
976  } while (false)
977 
978 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(load_and_ext) \
979  do { \
980  Register old_val = i.InputRegister(0); \
981  Register new_val = i.InputRegister(1); \
982  Register output = i.OutputRegister(); \
983  Register addr = kScratchReg; \
984  Register temp0 = r0; \
985  Register temp1 = r1; \
986  size_t index = 2; \
987  AddressingMode mode = kMode_None; \
988  MemOperand op = i.MemoryOperand(&mode, &index); \
989  Label two, done; \
990  __ lay(addr, op); \
991  __ tmll(addr, Operand(3)); \
992  __ b(Condition(2), &two); \
993  ATOMIC_COMP_EXCHANGE_HALFWORD(0); \
994  __ b(&done); \
995  __ bind(&two); \
996  ATOMIC_COMP_EXCHANGE_HALFWORD(1); \
997  __ bind(&done); \
998  __ load_and_ext(output, output); \
999  } while (false)
1000 
1001 #define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD() \
1002  do { \
1003  Register new_val = i.InputRegister(1); \
1004  Register output = i.OutputRegister(); \
1005  Register addr = kScratchReg; \
1006  size_t index = 2; \
1007  AddressingMode mode = kMode_None; \
1008  MemOperand op = i.MemoryOperand(&mode, &index); \
1009  __ lay(addr, op); \
1010  __ CmpAndSwap(output, new_val, MemOperand(addr)); \
1011  __ LoadlW(output, output); \
1012  } while (false)
1013 
1014 #define ASSEMBLE_ATOMIC_BINOP_WORD(load_and_op) \
1015  do { \
1016  Register value = i.InputRegister(2); \
1017  Register result = i.OutputRegister(0); \
1018  Register addr = r1; \
1019  AddressingMode mode = kMode_None; \
1020  MemOperand op = i.MemoryOperand(&mode); \
1021  __ lay(addr, op); \
1022  __ load_and_op(result, value, MemOperand(addr)); \
1023  __ LoadlW(result, result); \
1024  } while (false)
1025 
1026 #define ASSEMBLE_ATOMIC_BINOP_WORD64(load_and_op) \
1027  do { \
1028  Register value = i.InputRegister(2); \
1029  Register result = i.OutputRegister(0); \
1030  Register addr = r1; \
1031  AddressingMode mode = kMode_None; \
1032  MemOperand op = i.MemoryOperand(&mode); \
1033  __ lay(addr, op); \
1034  __ load_and_op(result, value, MemOperand(addr)); \
1035  } while (false)
1036 
1037 #define ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end) \
1038  do { \
1039  Label do_cs; \
1040  __ LoadlW(prev, MemOperand(addr, offset)); \
1041  __ bind(&do_cs); \
1042  __ RotateInsertSelectBits(temp, value, Operand(start), Operand(end), \
1043  Operand(static_cast<intptr_t>(shift_amount)), \
1044  true); \
1045  __ bin_inst(new_val, prev, temp); \
1046  __ lr(temp, prev); \
1047  __ RotateInsertSelectBits(temp, new_val, Operand(start), Operand(end), \
1048  Operand::Zero(), false); \
1049  __ CmpAndSwap(prev, temp, MemOperand(addr, offset)); \
1050  __ bne(&do_cs, Label::kNear); \
1051  } while (false)
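// Classic compare-and-swap loop: load the containing word, splice the
// operand into the selected bit field, apply bin_inst, merge the updated
// field back into a copy of the original word, and retry from do_cs if
// CmpAndSwap observes that another processor changed the word meanwhile.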
1052 
1053 #ifdef V8_TARGET_BIG_ENDIAN
1054 #define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
1055  { \
1056  constexpr int offset = -(2 * index); \
1057  constexpr int shift_amount = 16 - (index * 16); \
1058  constexpr int start = 48 - shift_amount; \
1059  constexpr int end = start + 15; \
1060  ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
1061  extract_result(); \
1062  }
1063 #define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
1064  { \
1065  constexpr int offset = -(index); \
1066  constexpr int shift_amount = 24 - (index * 8); \
1067  constexpr int start = 56 - shift_amount; \
1068  constexpr int end = start + 7; \
1069  ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
1070  extract_result(); \
1071  }
1072 #else
1073 #define ATOMIC_BIN_OP_HALFWORD(bin_inst, index, extract_result) \
1074  { \
1075  constexpr int offset = -(2 * index); \
1076  constexpr int shift_amount = index * 16; \
1077  constexpr int start = 48 - shift_amount; \
1078  constexpr int end = start + 15; \
1079  ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
1080  extract_result(); \
1081  }
1082 #define ATOMIC_BIN_OP_BYTE(bin_inst, index, extract_result) \
1083  { \
1084  constexpr int offset = -(index); \
1085  constexpr int shift_amount = index * 8; \
1086  constexpr int start = 56 - shift_amount; \
1087  constexpr int end = start + 7; \
1088  ATOMIC_BIN_OP(bin_inst, offset, shift_amount, start, end); \
1089  extract_result(); \
1090  }
1091 #endif // V8_TARGET_BIG_ENDIAN
1092 
1093 #define ASSEMBLE_ATOMIC_BINOP_HALFWORD(bin_inst, extract_result) \
1094  do { \
1095  Register value = i.InputRegister(2); \
1096  Register result = i.OutputRegister(0); \
1097  Register prev = i.TempRegister(0); \
1098  Register new_val = r0; \
1099  Register addr = r1; \
1100  Register temp = kScratchReg; \
1101  AddressingMode mode = kMode_None; \
1102  MemOperand op = i.MemoryOperand(&mode); \
1103  Label two, done; \
1104  __ lay(addr, op); \
1105  __ tmll(addr, Operand(3)); \
1106  __ b(Condition(2), &two); \
1107  /* word boundary */ \
1108  ATOMIC_BIN_OP_HALFWORD(bin_inst, 0, extract_result); \
1109  __ b(&done); \
1110  __ bind(&two); \
1111  /* halfword boundary */ \
1112  ATOMIC_BIN_OP_HALFWORD(bin_inst, 1, extract_result); \
1113  __ bind(&done); \
1114  } while (false)
1115 
1116 #define ASSEMBLE_ATOMIC_BINOP_BYTE(bin_inst, extract_result) \
1117  do { \
1118  Register value = i.InputRegister(2); \
1119  Register result = i.OutputRegister(0); \
1120  Register addr = i.TempRegister(0); \
1121  Register prev = r0; \
1122  Register new_val = r1; \
1123  Register temp = kScratchReg; \
1124  AddressingMode mode = kMode_None; \
1125  MemOperand op = i.MemoryOperand(&mode); \
1126  Label done, one, two, three; \
1127  __ lay(addr, op); \
1128  __ tmll(addr, Operand(3)); \
1129  __ b(Condition(1), &three); \
1130  __ b(Condition(2), &two); \
1131  __ b(Condition(4), &one); \
1132  /* ending with 0b00 (word boundary) */ \
1133  ATOMIC_BIN_OP_BYTE(bin_inst, 0, extract_result); \
1134  __ b(&done); \
1135  /* ending with 0b01 */ \
1136  __ bind(&one); \
1137  ATOMIC_BIN_OP_BYTE(bin_inst, 1, extract_result); \
1138  __ b(&done); \
1139  /* ending with 0b10 (hw boundary) */ \
1140  __ bind(&two); \
1141  ATOMIC_BIN_OP_BYTE(bin_inst, 2, extract_result); \
1142  __ b(&done); \
1143  /* ending with 0b11 */ \
1144  __ bind(&three); \
1145  ATOMIC_BIN_OP_BYTE(bin_inst, 3, extract_result); \
1146  __ bind(&done); \
1147  } while (false)
1148 
1149 #define ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64() \
1150  do { \
1151  Register new_val = i.InputRegister(1); \
1152  Register output = i.OutputRegister(); \
1153  Register addr = kScratchReg; \
1154  size_t index = 2; \
1155  AddressingMode mode = kMode_None; \
1156  MemOperand op = i.MemoryOperand(&mode, &index); \
1157  __ lay(addr, op); \
1158  __ CmpAndSwap64(output, new_val, MemOperand(addr)); \
1159  } while (false)
1160 
1161 void CodeGenerator::AssembleDeconstructFrame() {
1162  __ LeaveFrame(StackFrame::MANUAL);
1163 }
1164 
1165 void CodeGenerator::AssemblePrepareTailCall() {
1166  if (frame_access_state()->has_frame()) {
1167  __ RestoreFrameStateForTailCall();
1168  }
1169  frame_access_state()->SetFrameAccessToSP();
1170 }
1171 
1172 void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
1173  Register scratch1,
1174  Register scratch2,
1175  Register scratch3) {
1176  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
1177  Label done;
1178 
1179  // Check if current frame is an arguments adaptor frame.
1180  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
1181  __ CmpP(scratch1,
1182  Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1183  __ bne(&done);
1184 
1185  // Load the arguments count from the current arguments adaptor frame
1186  // (note that it does not include the receiver).
1187  Register caller_args_count_reg = scratch1;
1188  __ LoadP(caller_args_count_reg,
1189  MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1190  __ SmiUntag(caller_args_count_reg);
1191 
1192  ParameterCount callee_args_count(args_reg);
1193  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
1194  scratch3);
1195  __ bind(&done);
1196 }
1197 
1198 namespace {
1199 
1200 void FlushPendingPushRegisters(TurboAssembler* tasm,
1201  FrameAccessState* frame_access_state,
1202  ZoneVector<Register>* pending_pushes) {
1203  switch (pending_pushes->size()) {
1204  case 0:
1205  break;
1206  case 1:
1207  tasm->Push((*pending_pushes)[0]);
1208  break;
1209  case 2:
1210  tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
1211  break;
1212  case 3:
1213  tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
1214  (*pending_pushes)[2]);
1215  break;
1216  default:
1217  UNREACHABLE();
1218  break;
1219  }
1220  frame_access_state->IncreaseSPDelta(pending_pushes->size());
1221  pending_pushes->clear();
1222 }
1223 
1224 void AdjustStackPointerForTailCall(
1225  TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
1226  ZoneVector<Register>* pending_pushes = nullptr,
1227  bool allow_shrinkage = true) {
1228  int current_sp_offset = state->GetSPToFPSlotCount() +
1229  StandardFrameConstants::kFixedSlotCountAboveFp;
1230  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
1231  if (stack_slot_delta > 0) {
1232  if (pending_pushes != nullptr) {
1233  FlushPendingPushRegisters(tasm, state, pending_pushes);
1234  }
1235  tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
1236  state->IncreaseSPDelta(stack_slot_delta);
1237  } else if (allow_shrinkage && stack_slot_delta < 0) {
1238  if (pending_pushes != nullptr) {
1239  FlushPendingPushRegisters(tasm, state, pending_pushes);
1240  }
1241  tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
1242  state->IncreaseSPDelta(stack_slot_delta);
1243  }
1244 }
1245 
1246 void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
1247  S390OperandConverter& i) {
1248  const MemoryAccessMode access_mode =
1249  static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
1250  if (access_mode == kMemoryAccessPoisoned) {
1251  Register value = i.OutputRegister();
1252  codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
1253  }
1254 }
1255 
1256 } // namespace
1257 
1258 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
1259  int first_unused_stack_slot) {
1260  ZoneVector<MoveOperands*> pushes(zone());
1261  GetPushCompatibleMoves(instr, kRegisterPush, &pushes);
1262 
1263  if (!pushes.empty() &&
1264  (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
1265  first_unused_stack_slot)) {
1266  S390OperandConverter g(this, instr);
1267  ZoneVector<Register> pending_pushes(zone());
1268  for (auto move : pushes) {
1269  LocationOperand destination_location(
1270  LocationOperand::cast(move->destination()));
1271  InstructionOperand source(move->source());
1272  AdjustStackPointerForTailCall(
1273  tasm(), frame_access_state(),
1274  destination_location.index() - pending_pushes.size(),
1275  &pending_pushes);
1276  // Pushes of non-register data types are not supported.
1277  DCHECK(source.IsRegister());
1278  LocationOperand source_location(LocationOperand::cast(source));
1279  pending_pushes.push_back(source_location.GetRegister());
1280  // TODO(arm): We can push more than 3 registers at once. Add support in
1281  // the macro-assembler for pushing a list of registers.
1282  if (pending_pushes.size() == 3) {
1283  FlushPendingPushRegisters(tasm(), frame_access_state(),
1284  &pending_pushes);
1285  }
1286  move->Eliminate();
1287  }
1288  FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
1289  }
1290  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1291  first_unused_stack_slot, nullptr, false);
1292 }
1293 
1294 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
1295  int first_unused_stack_slot) {
1296  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
1297  first_unused_stack_slot);
1298 }
1299 
1300 // Check that {kJavaScriptCallCodeStartRegister} is correct.
1301 void CodeGenerator::AssembleCodeStartRegisterCheck() {
1302  Register scratch = r1;
1303  __ ComputeCodeStartAddress(scratch);
1304  __ CmpP(scratch, kJavaScriptCallCodeStartRegister);
1305  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1306 }
1307 
1308 // Check if the code object is marked for deoptimization. If it is, then it
1309 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
1310 // to:
1311 // 1. read from memory the word that contains that bit, which can be found in
1312 // the flags in the referenced {CodeDataContainer} object;
1313 // 2. test kMarkedForDeoptimizationBit in those flags; and
1314 // 3. if it is not zero then it jumps to the builtin.
1315 void CodeGenerator::BailoutIfDeoptimized() {
1316  if (FLAG_debug_code) {
1317  // Check that {kJavaScriptCallCodeStartRegister} is correct.
1318  __ ComputeCodeStartAddress(ip);
1319  __ CmpP(ip, kJavaScriptCallCodeStartRegister);
1320  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
1321  }
1322 
1323  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
1324  __ LoadP(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset));
1325  __ LoadW(ip,
1326  FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
1327  __ TestBit(ip, Code::kMarkedForDeoptimizationBit);
1328  // Ensure we're not serializing (otherwise we'd need to use an indirection to
1329  // access the builtin below).
1330  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
1331  Handle<Code> code = isolate()->builtins()->builtin_handle(
1332  Builtins::kCompileLazyDeoptimizedCode);
1333  __ Jump(code, RelocInfo::CODE_TARGET, ne);
1334 }
1335 
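// larl materializes the current PC, and subtracting pc_offset() rewinds it
// to the start of the code object, so the poison mask below becomes -1 only
// when control really did arrive via kJavaScriptCallCodeStartRegister.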
1336 void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
1337  Register scratch = r1;
1338 
1339  Label current_pc;
1340  __ larl(scratch, &current_pc);
1341 
1342  __ bind(&current_pc);
1343  __ SubP(scratch, Operand(__ pc_offset()));
1344 
1345  // Calculate a mask which has all bits set in the normal case, but has all
1346  // bits cleared if we are speculatively executing the wrong PC.
1347  __ LoadImmP(kSpeculationPoisonRegister, Operand::Zero());
1348  __ LoadImmP(r0, Operand(-1));
1349  __ CmpP(kJavaScriptCallCodeStartRegister, scratch);
1350  __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
1351 }
1352 
1353 void CodeGenerator::AssembleRegisterArgumentPoisoning() {
1354  __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
1355  __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
1356  __ AndP(sp, sp, kSpeculationPoisonRegister);
1357 }
1358 
1359 // Assembles an instruction after register allocation, producing machine code.
1360 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
1361  Instruction* instr) {
1362  S390OperandConverter i(this, instr);
1363  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
1364 
1365  switch (opcode) {
1366  case kArchComment:
1367 #ifdef V8_TARGET_ARCH_S390X
1368  __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
1369 #else
1370  __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
1371 #endif
1372  break;
1373  case kArchCallCodeObject: {
1374  if (HasRegisterInput(instr, 0)) {
1375  Register reg = i.InputRegister(0);
1376  DCHECK_IMPLIES(
1377  HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
1378  reg == kJavaScriptCallCodeStartRegister);
1379  __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
1380  __ Call(reg);
1381  } else {
1382  __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
1383  }
1384  RecordCallPosition(instr);
1385  frame_access_state()->ClearSPDelta();
1386  break;
1387  }
1388  case kArchCallWasmFunction: {
1389  // We must not share code targets for calls to builtins for wasm code, as
1390  // they might need to be patched individually.
1391  if (instr->InputAt(0)->IsImmediate()) {
1392  Constant constant = i.ToConstant(instr->InputAt(0));
1393 #ifdef V8_TARGET_ARCH_S390X
1394  Address wasm_code = static_cast<Address>(constant.ToInt64());
1395 #else
1396  Address wasm_code = static_cast<Address>(constant.ToInt32());
1397 #endif
1398  __ Call(wasm_code, constant.rmode());
1399  } else {
1400  __ Call(i.InputRegister(0));
1401  }
1402  RecordCallPosition(instr);
1403  frame_access_state()->ClearSPDelta();
1404  break;
1405  }
1406  case kArchTailCallCodeObjectFromJSFunction:
1407  case kArchTailCallCodeObject: {
1408  if (opcode == kArchTailCallCodeObjectFromJSFunction) {
1409  AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
1410  i.TempRegister(0), i.TempRegister(1),
1411  i.TempRegister(2));
1412  }
1413  if (HasRegisterInput(instr, 0)) {
1414  Register reg = i.InputRegister(0);
1415  DCHECK_IMPLIES(
1416  HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
1417  reg == kJavaScriptCallCodeStartRegister);
1418  __ AddP(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
1419  __ Jump(reg);
1420  } else {
1421  // We cannot use the constant pool to load the target since
1422  // we've already restored the caller's frame.
1423  ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
1424  __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
1425  }
1426  frame_access_state()->ClearSPDelta();
1427  frame_access_state()->SetFrameAccessToDefault();
1428  break;
1429  }
1430  case kArchTailCallWasm: {
1431  // We must not share code targets for calls to builtins for wasm code, as
1432  // they might need to be patched individually.
1433  if (instr->InputAt(0)->IsImmediate()) {
1434  Constant constant = i.ToConstant(instr->InputAt(0));
1435 #ifdef V8_TARGET_ARCH_S390X
1436  Address wasm_code = static_cast<Address>(constant.ToInt64());
1437 #else
1438  Address wasm_code = static_cast<Address>(constant.ToInt32());
1439 #endif
1440  __ Jump(wasm_code, constant.rmode());
1441  } else {
1442  __ Jump(i.InputRegister(0));
1443  }
1444  frame_access_state()->ClearSPDelta();
1445  frame_access_state()->SetFrameAccessToDefault();
1446  break;
1447  }
1448  case kArchTailCallAddress: {
1449  CHECK(!instr->InputAt(0)->IsImmediate());
1450  Register reg = i.InputRegister(0);
1451  DCHECK_IMPLIES(
1452  HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
1453  reg == kJavaScriptCallCodeStartRegister);
1454  __ Jump(reg);
1455  frame_access_state()->ClearSPDelta();
1456  frame_access_state()->SetFrameAccessToDefault();
1457  break;
1458  }
1459  case kArchCallJSFunction: {
1460  Register func = i.InputRegister(0);
1461  if (FLAG_debug_code) {
1462  // Check the function's context matches the context argument.
1463  __ LoadP(kScratchReg,
1464  FieldMemOperand(func, JSFunction::kContextOffset));
1465  __ CmpP(cp, kScratchReg);
1466  __ Assert(eq, AbortReason::kWrongFunctionContext);
1467  }
1468  static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1469  __ LoadP(r4, FieldMemOperand(func, JSFunction::kCodeOffset));
1470  __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
1471  __ Call(r4);
1472  RecordCallPosition(instr);
1473  frame_access_state()->ClearSPDelta();
1474  break;
1475  }
1476  case kArchPrepareCallCFunction: {
1477  int const num_parameters = MiscField::decode(instr->opcode());
1478  __ PrepareCallCFunction(num_parameters, kScratchReg);
1479  // Frame alignment requires using FP-relative frame addressing.
1480  frame_access_state()->SetFrameAccessToFP();
1481  break;
1482  }
1483  case kArchSaveCallerRegisters: {
1484  fp_mode_ =
1485  static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
1486  DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1487  // kReturnRegister0 should have been saved before entering the stub.
1488  int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
1489  DCHECK_EQ(0, bytes % kPointerSize);
1490  DCHECK_EQ(0, frame_access_state()->sp_delta());
1491  frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
1492  DCHECK(!caller_registers_saved_);
1493  caller_registers_saved_ = true;
1494  break;
1495  }
1496  case kArchRestoreCallerRegisters: {
1497  DCHECK(fp_mode_ ==
1498  static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
1499  DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
1500  // Don't overwrite the returned value.
1501  int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
1502  frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
1503  DCHECK_EQ(0, frame_access_state()->sp_delta());
1504  DCHECK(caller_registers_saved_);
1505  caller_registers_saved_ = false;
1506  break;
1507  }
1508  case kArchPrepareTailCall:
1509  AssemblePrepareTailCall();
1510  break;
1511  case kArchCallCFunction: {
1512  int const num_parameters = MiscField::decode(instr->opcode());
1513  if (instr->InputAt(0)->IsImmediate()) {
1514  ExternalReference ref = i.InputExternalReference(0);
1515  __ CallCFunction(ref, num_parameters);
1516  } else {
1517  Register func = i.InputRegister(0);
1518  __ CallCFunction(func, num_parameters);
1519  }
1520  frame_access_state()->SetFrameAccessToDefault();
1521  // Ideally, we should decrement SP delta to match the change of stack
1522  // pointer in CallCFunction. However, for certain architectures (e.g.
1523  // ARM), there may be more strict alignment requirement, causing old SP
1524  // to be saved on the stack. In those cases, we can not calculate the SP
1525  // delta statically.
1526  frame_access_state()->ClearSPDelta();
1527  if (caller_registers_saved_) {
1528  // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
1529  // Here, we assume the sequence to be:
1530  // kArchSaveCallerRegisters;
1531  // kArchCallCFunction;
1532  // kArchRestoreCallerRegisters;
1533  int bytes =
1534  __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
1535  frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
1536  }
1537  break;
1538  }
1539  case kArchJmp:
1540  AssembleArchJump(i.InputRpo(0));
1541  break;
1542  case kArchBinarySearchSwitch:
1543  AssembleArchBinarySearchSwitch(instr);
1544  break;
1545  case kArchLookupSwitch:
1546  AssembleArchLookupSwitch(instr);
1547  break;
1548  case kArchTableSwitch:
1549  AssembleArchTableSwitch(instr);
1550  break;
1551  case kArchDebugAbort:
1552  DCHECK(i.InputRegister(0) == r3);
1553  if (!frame_access_state()->has_frame()) {
1554  // We don't actually want to generate a pile of code for this, so just
1555  // claim there is a stack frame, without generating one.
1556  FrameScope scope(tasm(), StackFrame::NONE);
1557  __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
1558  RelocInfo::CODE_TARGET);
1559  } else {
1560  __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
1561  RelocInfo::CODE_TARGET);
1562  }
1563  __ stop("kArchDebugAbort");
1564  break;
1565  case kArchDebugBreak:
1566  __ stop("kArchDebugBreak");
1567  break;
1568  case kArchNop:
1569  case kArchThrowTerminator:
1570  // don't emit code for nops.
1571  break;
1572  case kArchDeoptimize: {
1573  int deopt_state_id =
1574  BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
1575  CodeGenResult result =
1576  AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
1577  if (result != kSuccess) return result;
1578  break;
1579  }
1580  case kArchRet:
1581  AssembleReturn(instr->InputAt(0));
1582  break;
1583  case kArchStackPointer:
1584  __ LoadRR(i.OutputRegister(), sp);
1585  break;
1586  case kArchFramePointer:
1587  __ LoadRR(i.OutputRegister(), fp);
1588  break;
1589  case kArchParentFramePointer:
1590  if (frame_access_state()->has_frame()) {
1591  __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
1592  } else {
1593  __ LoadRR(i.OutputRegister(), fp);
1594  }
1595  break;
1596  case kArchTruncateDoubleToI:
1597  __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
1598  i.InputDoubleRegister(0), DetermineStubCallMode());
1599  break;
1600  case kArchStoreWithWriteBarrier: {
1601  RecordWriteMode mode =
1602  static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
1603  Register object = i.InputRegister(0);
1604  Register value = i.InputRegister(2);
1605  Register scratch0 = i.TempRegister(0);
1606  Register scratch1 = i.TempRegister(1);
1607  OutOfLineRecordWrite* ool;
1608 
1609  AddressingMode addressing_mode =
1610  AddressingModeField::decode(instr->opcode());
1611  if (addressing_mode == kMode_MRI) {
1612  int32_t offset = i.InputInt32(1);
1613  ool = new (zone())
1614  OutOfLineRecordWrite(this, object, offset, value, scratch0,
1615  scratch1, mode, DetermineStubCallMode());
1616  __ StoreP(value, MemOperand(object, offset));
1617  } else {
1618  DCHECK_EQ(kMode_MRR, addressing_mode);
1619  Register offset(i.InputRegister(1));
1620  ool = new (zone())
1621  OutOfLineRecordWrite(this, object, offset, value, scratch0,
1622  scratch1, mode, DetermineStubCallMode());
1623  __ StoreP(value, MemOperand(object, offset));
1624  }
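// Branch to the out-of-line record write code only if the page of the
// stored-to object has the "pointers from here are interesting" flag
// set; otherwise the store above needs no write barrier.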
1625  __ CheckPageFlag(object, scratch0,
1626  MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1627  ool->entry());
1628  __ bind(ool->exit());
1629  break;
1630  }
1631  case kArchStackSlot: {
1632  FrameOffset offset =
1633  frame_access_state()->GetFrameOffset(i.InputInt32(0));
1634  __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1635  Operand(offset.offset()));
1636  break;
1637  }
1638  case kArchWordPoisonOnSpeculation:
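// Mask the value with the speculation poison register, which is all ones
// on a correctly predicted path and all zeros after a misprediction.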
1639  DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
1640  __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
1641  break;
1642  case kS390_Abs32:
1643  // TODO(john.yan): zero-ext
1644  __ lpr(i.OutputRegister(0), i.InputRegister(0));
1645  break;
1646  case kS390_Abs64:
1647  __ lpgr(i.OutputRegister(0), i.InputRegister(0));
1648  break;
1649  case kS390_And32:
1650  // zero-ext
1651  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1652  ASSEMBLE_BIN32_OP(RRRInstr(nrk), RM32Instr(And), RIInstr(nilf));
1653  } else {
1654  ASSEMBLE_BIN32_OP(RRInstr(nr), RM32Instr(And), RIInstr(nilf));
1655  }
1656  break;
1657  case kS390_And64:
1658  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1659  ASSEMBLE_BIN_OP(RRRInstr(ngrk), RM64Instr(ng), nullInstr);
1660  } else {
1661  ASSEMBLE_BIN_OP(RRInstr(ngr), RM64Instr(ng), nullInstr);
1662  }
1663  break;
1664  case kS390_Or32:
1665  // zero-ext
1666  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1667  ASSEMBLE_BIN32_OP(RRRInstr(ork), RM32Instr(Or), RIInstr(oilf));
1668  } else {
1669  ASSEMBLE_BIN32_OP(RRInstr(or_z), RM32Instr(Or), RIInstr(oilf));
1670  }
1671  break;
1672  case kS390_Or64:
1673  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1674  ASSEMBLE_BIN_OP(RRRInstr(ogrk), RM64Instr(og), nullInstr);
1675  } else {
1676  ASSEMBLE_BIN_OP(RRInstr(ogr), RM64Instr(og), nullInstr);
1677  }
1678  break;
1679  case kS390_Xor32:
1680  // zero-ext
1681  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1682  ASSEMBLE_BIN32_OP(RRRInstr(xrk), RM32Instr(Xor), RIInstr(xilf));
1683  } else {
1684  ASSEMBLE_BIN32_OP(RRInstr(xr), RM32Instr(Xor), RIInstr(xilf));
1685  }
1686  break;
1687  case kS390_Xor64:
1688  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1689  ASSEMBLE_BIN_OP(RRRInstr(xgrk), RM64Instr(xg), nullInstr);
1690  } else {
1691  ASSEMBLE_BIN_OP(RRInstr(xgr), RM64Instr(xg), nullInstr);
1692  }
1693  break;
1694  case kS390_ShiftLeft32:
1695  // zero-ext
1696  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1697  ASSEMBLE_BIN32_OP(RRRInstr(ShiftLeft), nullInstr, RRIInstr(ShiftLeft));
1698  } else {
1699  ASSEMBLE_BIN32_OP(RRInstr(sll), nullInstr, RIInstr(sll));
1700  }
1701  break;
1702  case kS390_ShiftLeft64:
1703  ASSEMBLE_BIN_OP(RRRInstr(sllg), nullInstr, RRIInstr(sllg));
1704  break;
1705  case kS390_ShiftRight32:
1706  // zero-ext
1707  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1708  ASSEMBLE_BIN32_OP(RRRInstr(srlk), nullInstr, RRIInstr(srlk));
1709  } else {
1710  ASSEMBLE_BIN32_OP(RRInstr(srl), nullInstr, RIInstr(srl));
1711  }
1712  break;
1713  case kS390_ShiftRight64:
1714  ASSEMBLE_BIN_OP(RRRInstr(srlg), nullInstr, RRIInstr(srlg));
1715  break;
1716  case kS390_ShiftRightArith32:
1717  // zero-ext
1718  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1719  ASSEMBLE_BIN32_OP(RRRInstr(srak), nullInstr, RRIInstr(srak));
1720  } else {
1721  ASSEMBLE_BIN32_OP(RRInstr(sra), nullInstr, RIInstr(sra));
1722  }
1723  break;
1724  case kS390_ShiftRightArith64:
1725  ASSEMBLE_BIN_OP(RRRInstr(srag), nullInstr, RRIInstr(srag));
1726  break;
1727 #if !V8_TARGET_ARCH_S390X
1728  case kS390_AddPair:
1729  // i.InputRegister(0) ... left low word.
1730  // i.InputRegister(1) ... left high word.
1731  // i.InputRegister(2) ... right low word.
1732  // i.InputRegister(3) ... right high word.
1733  __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
1734  i.InputRegister(2));
1735  __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
1736  i.InputRegister(3));
1737  break;
1738  case kS390_SubPair:
1739  // i.InputRegister(0) ... left low word.
1740  // i.InputRegister(1) ... left high word.
1741  // i.InputRegister(2) ... right low word.
1742  // i.InputRegister(3) ... right high word.
1743  __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
1744  i.InputRegister(2));
1745  __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
1746  i.InputRegister(3));
1747  break;
1748  case kS390_MulPair:
1749  // i.InputRegister(0) ... left low word.
1750  // i.InputRegister(1) ... left high word.
1751  // i.InputRegister(2) ... right low word.
1752  // i.InputRegister(3) ... right high word.
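// Assemble each 64-bit operand as (high << 32) | low in r0/r1, multiply
// once in 64 bits, then split the product back into low and high words.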
1753  __ sllg(r0, i.InputRegister(1), Operand(32));
1754  __ sllg(r1, i.InputRegister(3), Operand(32));
1755  __ lr(r0, i.InputRegister(0));
1756  __ lr(r1, i.InputRegister(2));
1757  __ msgr(r1, r0);
1758  __ lr(i.OutputRegister(0), r1);
1759  __ srag(i.OutputRegister(1), r1, Operand(32));
1760  break;
1761  case kS390_ShiftLeftPair: {
1762  Register second_output =
1763  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1764  if (instr->InputAt(2)->IsImmediate()) {
1765  __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1766  i.InputRegister(1), i.InputInt32(2));
1767  } else {
1768  __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1769  i.InputRegister(1), kScratchReg, i.InputRegister(2));
1770  }
1771  break;
1772  }
1773  case kS390_ShiftRightPair: {
1774  Register second_output =
1775  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1776  if (instr->InputAt(2)->IsImmediate()) {
1777  __ ShiftRightPair(i.OutputRegister(0), second_output,
1778  i.InputRegister(0), i.InputRegister(1),
1779  i.InputInt32(2));
1780  } else {
1781  __ ShiftRightPair(i.OutputRegister(0), second_output,
1782  i.InputRegister(0), i.InputRegister(1), kScratchReg,
1783  i.InputRegister(2));
1784  }
1785  break;
1786  }
1787  case kS390_ShiftRightArithPair: {
1788  Register second_output =
1789  instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1790  if (instr->InputAt(2)->IsImmediate()) {
1791  __ ShiftRightArithPair(i.OutputRegister(0), second_output,
1792  i.InputRegister(0), i.InputRegister(1),
1793  i.InputInt32(2));
1794  } else {
1795  __ ShiftRightArithPair(i.OutputRegister(0), second_output,
1796  i.InputRegister(0), i.InputRegister(1),
1797  kScratchReg, i.InputRegister(2));
1798  }
1799  break;
1800  }
1801 #endif
1802  case kS390_RotRight32: {
1803  // zero-ext
1804  if (HasRegisterInput(instr, 1)) {
1805  __ LoadComplementRR(kScratchReg, i.InputRegister(1));
1806  __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1807  } else {
1808  __ rll(i.OutputRegister(), i.InputRegister(0),
1809  Operand(32 - i.InputInt32(1)));
1810  }
1811  CHECK_AND_ZERO_EXT_OUTPUT(2);
1812  break;
1813  }
1814  case kS390_RotRight64:
1815  if (HasRegisterInput(instr, 1)) {
1816  __ lcgr(kScratchReg, i.InputRegister(1));
1817  __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
1818  } else {
1819  DCHECK(HasImmediateInput(instr, 1));
1820  __ rllg(i.OutputRegister(), i.InputRegister(0),
1821  Operand(64 - i.InputInt32(1)));
1822  }
1823  break;
1824  // TODO(john.yan): clean up kS390_RotLeftAnd...
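// The RotLeftAndClear* cases rotate left and mask the result. With
// GENERAL_INSTR_EXT this is a single rotate-then-insert-selected-bits
// instruction; otherwise the cleared bits are shifted out and back in.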
1825  case kS390_RotLeftAndClear64:
1826  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1827  int shiftAmount = i.InputInt32(1);
1828  int endBit = 63 - shiftAmount;
1829  int startBit = 63 - i.InputInt32(2);
1830  __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1831  Operand(startBit), Operand(endBit),
1832  Operand(shiftAmount), true);
1833  } else {
1834  int shiftAmount = i.InputInt32(1);
1835  int clearBit = 63 - i.InputInt32(2);
1836  __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1837  __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1838  __ srlg(i.OutputRegister(), i.OutputRegister(),
1839  Operand(clearBit + shiftAmount));
1840  __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
1841  }
1842  break;
1843  case kS390_RotLeftAndClearLeft64:
1844  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1845  int shiftAmount = i.InputInt32(1);
1846  int endBit = 63;
1847  int startBit = 63 - i.InputInt32(2);
1848  __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1849  Operand(startBit), Operand(endBit),
1850  Operand(shiftAmount), true);
1851  } else {
1852  int shiftAmount = i.InputInt32(1);
1853  int clearBit = 63 - i.InputInt32(2);
1854  __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1855  __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1856  __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1857  }
1858  break;
1859  case kS390_RotLeftAndClearRight64:
1860  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
1861  int shiftAmount = i.InputInt32(1);
1862  int endBit = 63 - i.InputInt32(2);
1863  int startBit = 0;
1864  __ RotateInsertSelectBits(i.OutputRegister(), i.InputRegister(0),
1865  Operand(startBit), Operand(endBit),
1866  Operand(shiftAmount), true);
1867  } else {
1868  int shiftAmount = i.InputInt32(1);
1869  int clearBit = i.InputInt32(2);
1870  __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
1871  __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1872  __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
1873  }
1874  break;
1875  case kS390_Add32: {
1876  // zero-ext
1877  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1878  ASSEMBLE_BIN32_OP(RRRInstr(ark), RM32Instr(Add32), RRIInstr(Add32));
1879  } else {
1880  ASSEMBLE_BIN32_OP(RRInstr(ar), RM32Instr(Add32), RIInstr(Add32));
1881  }
1882  break;
1883  }
1884  case kS390_Add64:
1885  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1886  ASSEMBLE_BIN_OP(RRRInstr(agrk), RM64Instr(ag), RRIInstr(AddP));
1887  } else {
1888  ASSEMBLE_BIN_OP(RRInstr(agr), RM64Instr(ag), RIInstr(agfi));
1889  }
1890  break;
1891  case kS390_AddFloat:
1892  ASSEMBLE_BIN_OP(DDInstr(aebr), DMTInstr(AddFloat32), nullInstr);
1893  break;
1894  case kS390_AddDouble:
1895  ASSEMBLE_BIN_OP(DDInstr(adbr), DMTInstr(AddFloat64), nullInstr);
1896  break;
1897  case kS390_Sub32:
1898  // zero-ext
1899  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1900  ASSEMBLE_BIN32_OP(RRRInstr(srk), RM32Instr(Sub32), RRIInstr(Sub32));
1901  } else {
1902  ASSEMBLE_BIN32_OP(RRInstr(sr), RM32Instr(Sub32), RIInstr(Sub32));
1903  }
1904  break;
1905  case kS390_Sub64:
1906  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
1907  ASSEMBLE_BIN_OP(RRRInstr(sgrk), RM64Instr(sg), RRIInstr(SubP));
1908  } else {
1909  ASSEMBLE_BIN_OP(RRInstr(sgr), RM64Instr(sg), RIInstr(SubP));
1910  }
1911  break;
1912  case kS390_SubFloat:
1913  ASSEMBLE_BIN_OP(DDInstr(sebr), DMTInstr(SubFloat32), nullInstr);
1914  break;
1915  case kS390_SubDouble:
1916  ASSEMBLE_BIN_OP(DDInstr(sdbr), DMTInstr(SubFloat64), nullInstr);
1917  break;
1918  case kS390_Mul32:
1919  // zero-ext
1920  if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
1921  ASSEMBLE_BIN32_OP(RRRInstr(msrkc), RM32Instr(msc), RIInstr(Mul32));
1922  } else {
1923  ASSEMBLE_BIN32_OP(RRInstr(Mul32), RM32Instr(Mul32), RIInstr(Mul32));
1924  }
1925  break;
1926  case kS390_Mul32WithOverflow:
1927  // zero-ext
1928  ASSEMBLE_BIN32_OP(RRRInstr(Mul32WithOverflowIfCCUnequal),
1929  RRM32Instr(Mul32WithOverflowIfCCUnequal),
1930  RRIInstr(Mul32WithOverflowIfCCUnequal));
1931  break;
1932  case kS390_Mul64:
1933  ASSEMBLE_BIN_OP(RRInstr(Mul64), RM64Instr(Mul64), RIInstr(Mul64));
1934  break;
1935  case kS390_MulHigh32:
1936  // zero-ext
1937  ASSEMBLE_BIN_OP(RRRInstr(MulHigh32), RRM32Instr(MulHigh32),
1938  RRIInstr(MulHigh32));
1939  break;
1940  case kS390_MulHighU32:
1941  // zero-ext
1942  ASSEMBLE_BIN_OP(RRRInstr(MulHighU32), RRM32Instr(MulHighU32),
1943  RRIInstr(MulHighU32));
1944  break;
1945  case kS390_MulFloat:
1946  ASSEMBLE_BIN_OP(DDInstr(meebr), DMTInstr(MulFloat32), nullInstr);
1947  break;
1948  case kS390_MulDouble:
1949  ASSEMBLE_BIN_OP(DDInstr(mdbr), DMTInstr(MulFloat64), nullInstr);
1950  break;
1951  case kS390_Div64:
1952  ASSEMBLE_BIN_OP(RRRInstr(Div64), RRM64Instr(Div64), nullInstr);
1953  break;
1954  case kS390_Div32: {
1955  // zero-ext
1956  ASSEMBLE_BIN_OP(RRRInstr(Div32), RRM32Instr(Div32), nullInstr);
1957  break;
1958  }
1959  case kS390_DivU64:
1960  ASSEMBLE_BIN_OP(RRRInstr(DivU64), RRM64Instr(DivU64), nullInstr);
1961  break;
1962  case kS390_DivU32: {
1963  // zero-ext
1964  ASSEMBLE_BIN_OP(RRRInstr(DivU32), RRM32Instr(DivU32), nullInstr);
1965  break;
1966  }
1967  case kS390_DivFloat:
1968  ASSEMBLE_BIN_OP(DDInstr(debr), DMTInstr(DivFloat32), nullInstr);
1969  break;
1970  case kS390_DivDouble:
1971  ASSEMBLE_BIN_OP(DDInstr(ddbr), DMTInstr(DivFloat64), nullInstr);
1972  break;
1973  case kS390_Mod32:
1974  // zero-ext
1975  ASSEMBLE_BIN_OP(RRRInstr(Mod32), RRM32Instr(Mod32), nullInstr);
1976  break;
1977  case kS390_ModU32:
1978  // zero-ext
1979  ASSEMBLE_BIN_OP(RRRInstr(ModU32), RRM32Instr(ModU32), nullInstr);
1980  break;
1981  case kS390_Mod64:
1982  ASSEMBLE_BIN_OP(RRRInstr(Mod64), RRM64Instr(Mod64), nullInstr);
1983  break;
1984  case kS390_ModU64:
1985  ASSEMBLE_BIN_OP(RRRInstr(ModU64), RRM64Instr(ModU64), nullInstr);
1986  break;
1987  case kS390_AbsFloat:
1988  __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1989  break;
1990  case kS390_SqrtFloat:
1991  ASSEMBLE_UNARY_OP(D_DInstr(sqebr), nullInstr, nullInstr);
1992  break;
1993  case kS390_SqrtDouble:
1994  ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
1995  break;
1996  case kS390_FloorFloat:
1997  __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
1998  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1999  break;
2000  case kS390_CeilFloat:
2001  __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
2002  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2003  break;
2004  case kS390_TruncateFloat:
2005  __ fiebra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
2006  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2007  break;
2008  // Double operations
2009  case kS390_ModDouble:
2010  ASSEMBLE_FLOAT_MODULO();
2011  break;
2012  case kIeee754Float64Acos:
2013  ASSEMBLE_IEEE754_UNOP(acos);
2014  break;
2015  case kIeee754Float64Acosh:
2016  ASSEMBLE_IEEE754_UNOP(acosh);
2017  break;
2018  case kIeee754Float64Asin:
2019  ASSEMBLE_IEEE754_UNOP(asin);
2020  break;
2021  case kIeee754Float64Asinh:
2022  ASSEMBLE_IEEE754_UNOP(asinh);
2023  break;
2024  case kIeee754Float64Atanh:
2025  ASSEMBLE_IEEE754_UNOP(atanh);
2026  break;
2027  case kIeee754Float64Atan:
2028  ASSEMBLE_IEEE754_UNOP(atan);
2029  break;
2030  case kIeee754Float64Atan2:
2031  ASSEMBLE_IEEE754_BINOP(atan2);
2032  break;
2033  case kIeee754Float64Tan:
2034  ASSEMBLE_IEEE754_UNOP(tan);
2035  break;
2036  case kIeee754Float64Tanh:
2037  ASSEMBLE_IEEE754_UNOP(tanh);
2038  break;
2039  case kIeee754Float64Cbrt:
2040  ASSEMBLE_IEEE754_UNOP(cbrt);
2041  break;
2042  case kIeee754Float64Sin:
2043  ASSEMBLE_IEEE754_UNOP(sin);
2044  break;
2045  case kIeee754Float64Sinh:
2046  ASSEMBLE_IEEE754_UNOP(sinh);
2047  break;
2048  case kIeee754Float64Cos:
2049  ASSEMBLE_IEEE754_UNOP(cos);
2050  break;
2051  case kIeee754Float64Cosh:
2052  ASSEMBLE_IEEE754_UNOP(cosh);
2053  break;
2054  case kIeee754Float64Exp:
2055  ASSEMBLE_IEEE754_UNOP(exp);
2056  break;
2057  case kIeee754Float64Expm1:
2058  ASSEMBLE_IEEE754_UNOP(expm1);
2059  break;
2060  case kIeee754Float64Log:
2061  ASSEMBLE_IEEE754_UNOP(log);
2062  break;
2063  case kIeee754Float64Log1p:
2064  ASSEMBLE_IEEE754_UNOP(log1p);
2065  break;
2066  case kIeee754Float64Log2:
2067  ASSEMBLE_IEEE754_UNOP(log2);
2068  break;
2069  case kIeee754Float64Log10:
2070  ASSEMBLE_IEEE754_UNOP(log10);
2071  break;
2072  case kIeee754Float64Pow: {
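// The MathPowInternal builtin returns its result in d3; move it to d1.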
2073  __ Call(BUILTIN_CODE(isolate(), MathPowInternal), RelocInfo::CODE_TARGET);
2074  __ Move(d1, d3);
2075  break;
2076  }
2077  case kS390_Neg32:
2078  __ lcr(i.OutputRegister(), i.InputRegister(0));
2079  CHECK_AND_ZERO_EXT_OUTPUT(1);
2080  break;
2081  case kS390_Neg64:
2082  __ lcgr(i.OutputRegister(), i.InputRegister(0));
2083  break;
2084  case kS390_MaxFloat:
2085  ASSEMBLE_FLOAT_MAX();
2086  break;
2087  case kS390_MaxDouble:
2088  ASSEMBLE_DOUBLE_MAX();
2089  break;
2090  case kS390_MinFloat:
2091  ASSEMBLE_FLOAT_MIN();
2092  break;
2093  case kS390_MinDouble:
2094  ASSEMBLE_DOUBLE_MIN();
2095  break;
2096  case kS390_AbsDouble:
2097  __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2098  break;
2099  case kS390_FloorDouble:
2100  __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF,
2101  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2102  break;
2103  case kS390_CeilDouble:
2104  __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF,
2105  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2106  break;
2107  case kS390_TruncateDouble:
2108  __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0,
2109  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2110  break;
2111  case kS390_RoundDouble:
2112  __ fidbra(v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0,
2113  i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2114  break;
2115  case kS390_NegFloat:
2116  ASSEMBLE_UNARY_OP(D_DInstr(lcebr), nullInstr, nullInstr);
2117  break;
2118  case kS390_NegDouble:
2119  ASSEMBLE_UNARY_OP(D_DInstr(lcdbr), nullInstr, nullInstr);
2120  break;
2121  case kS390_Cntlz32: {
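// flogr counts leading zeros of a 64-bit value, so zero-extend the input
// first and subtract the 32 extra leading zeros from the result.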
2122  __ llgfr(i.OutputRegister(), i.InputRegister(0));
2123  __ flogr(r0, i.OutputRegister());
2124  __ Add32(i.OutputRegister(), r0, Operand(-32));
2125  // No need to zero-extend: the llgfr above has already done it.
2126  break;
2127  }
2128 #if V8_TARGET_ARCH_S390X
2129  case kS390_Cntlz64: {
2130  __ flogr(r0, i.InputRegister(0));
2131  __ LoadRR(i.OutputRegister(), r0);
2132  break;
2133  }
2134 #endif
2135  case kS390_Popcnt32:
2136  __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
2137  break;
2138 #if V8_TARGET_ARCH_S390X
2139  case kS390_Popcnt64:
2140  __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
2141  break;
2142 #endif
2143  case kS390_Cmp32:
2144  ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
2145  break;
2146 #if V8_TARGET_ARCH_S390X
2147  case kS390_Cmp64:
2148  ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
2149  break;
2150 #endif
2151  case kS390_CmpFloat:
2152  ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
2154  break;
2155  case kS390_CmpDouble:
2156  ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
2158  break;
2159  case kS390_Tst32:
2160  if (HasRegisterInput(instr, 1)) {
2161  __ And(r0, i.InputRegister(0), i.InputRegister(1));
2162  } else {
2163  // detect tmlh/tmhl/tmhh case
2164  Operand opnd = i.InputImmediate(1);
2165  if (is_uint16(opnd.immediate())) {
2166  __ tmll(i.InputRegister(0), opnd);
2167  } else {
2168  __ lr(r0, i.InputRegister(0));
2169  __ nilf(r0, opnd);
2170  }
2171  }
2172  break;
2173  case kS390_Tst64:
2174  if (HasRegisterInput(instr, 1)) {
2175  __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
2176  } else {
2177  Operand opnd = i.InputImmediate(1);
2178  if (is_uint16(opnd.immediate())) {
2179  __ tmll(i.InputRegister(0), opnd);
2180  } else {
2181  __ AndP(r0, i.InputRegister(0), opnd);
2182  }
2183  }
2184  break;
2185  case kS390_Float64SilenceNaN: {
2186  DoubleRegister value = i.InputDoubleRegister(0);
2187  DoubleRegister result = i.OutputDoubleRegister();
2188  __ CanonicalizeNaN(result, value);
2189  break;
2190  }
2191  case kS390_StackClaim: {
2192  int num_slots = i.InputInt32(0);
2193  __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
2194  frame_access_state()->IncreaseSPDelta(num_slots);
2195  break;
2196  }
2197  case kS390_Push:
2198  if (instr->InputAt(0)->IsFPRegister()) {
2199  LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2200  if (op->representation() == MachineRepresentation::kFloat64) {
2201  __ lay(sp, MemOperand(sp, -kDoubleSize));
2202  __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
2203  frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
2204  } else {
2205  DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2206  __ lay(sp, MemOperand(sp, -kPointerSize));
2207  __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
2208  frame_access_state()->IncreaseSPDelta(1);
2209  }
2210  } else {
2211  __ Push(i.InputRegister(0));
2212  frame_access_state()->IncreaseSPDelta(1);
2213  }
2214  break;
2215  case kS390_PushFrame: {
2216  int num_slots = i.InputInt32(1);
2217  __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
2218  if (instr->InputAt(0)->IsFPRegister()) {
2219  LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2220  if (op->representation() == MachineRepresentation::kFloat64) {
2221  __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
2222  } else {
2223  DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2224  __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
2225  }
2226  } else {
2227  __ StoreP(i.InputRegister(0), MemOperand(sp));
2228  }
2229  break;
2230  }
2231  case kS390_StoreToStackSlot: {
2232  int slot = i.InputInt32(1);
2233  if (instr->InputAt(0)->IsFPRegister()) {
2234  LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
2235  if (op->representation() == MachineRepresentation::kFloat64) {
2236  __ StoreDouble(i.InputDoubleRegister(0),
2237  MemOperand(sp, slot * kPointerSize));
2238  } else {
2239  DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
2240  __ StoreFloat32(i.InputDoubleRegister(0),
2241  MemOperand(sp, slot * kPointerSize));
2242  }
2243  } else {
2244  __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
2245  }
2246  break;
2247  }
2248  case kS390_SignExtendWord8ToInt32:
2249  __ lbr(i.OutputRegister(), i.InputRegister(0));
2250  CHECK_AND_ZERO_EXT_OUTPUT(1);
2251  break;
2252  case kS390_SignExtendWord16ToInt32:
2253  __ lhr(i.OutputRegister(), i.InputRegister(0));
2254  CHECK_AND_ZERO_EXT_OUTPUT(1);
2255  break;
2256  case kS390_SignExtendWord8ToInt64:
2257  __ lgbr(i.OutputRegister(), i.InputRegister(0));
2258  break;
2259  case kS390_SignExtendWord16ToInt64:
2260  __ lghr(i.OutputRegister(), i.InputRegister(0));
2261  break;
2262  case kS390_SignExtendWord32ToInt64:
2263  __ lgfr(i.OutputRegister(), i.InputRegister(0));
2264  break;
2265  case kS390_Uint32ToUint64:
2266  // Zero extend
2267  __ llgfr(i.OutputRegister(), i.InputRegister(0));
2268  break;
2269  case kS390_Int64ToInt32:
2270  // Sign extend
2271  __ lgfr(i.OutputRegister(), i.InputRegister(0));
2272  break;
2273  // Convert Fixed to Floating Point
2274  case kS390_Int64ToFloat32:
2275  __ ConvertInt64ToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2276  break;
2277  case kS390_Int64ToDouble:
2278  __ ConvertInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2279  break;
2280  case kS390_Uint64ToFloat32:
2281  __ ConvertUnsignedInt64ToFloat(i.OutputDoubleRegister(),
2282  i.InputRegister(0));
2283  break;
2284  case kS390_Uint64ToDouble:
2285  __ ConvertUnsignedInt64ToDouble(i.OutputDoubleRegister(),
2286  i.InputRegister(0));
2287  break;
2288  case kS390_Int32ToFloat32:
2289  __ ConvertIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2290  break;
2291  case kS390_Int32ToDouble:
2292  __ ConvertIntToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2293  break;
2294  case kS390_Uint32ToFloat32:
2295  __ ConvertUnsignedIntToFloat(i.OutputDoubleRegister(),
2296  i.InputRegister(0));
2297  break;
2298  case kS390_Uint32ToDouble:
2299  __ ConvertUnsignedIntToDouble(i.OutputDoubleRegister(),
2300  i.InputRegister(0));
2301  break;
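// The float-to-integer conversions below share one pattern: convert, then
// branch on condition mask 0xE, which covers all normal outcomes. On an
// abnormal result (NaN or out-of-range input), the output, and the success
// flag in OutputRegister(1) when present, is forced to zero instead.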
2302  case kS390_DoubleToInt32: {
2303  Label done;
2304  __ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2305  kRoundToNearest);
2306  __ b(Condition(0xE), &done, Label::kNear); // normal case
2307  __ lghi(i.OutputRegister(0), Operand::Zero());
2308  __ bind(&done);
2309  break;
2310  }
2311  case kS390_DoubleToUint32: {
2312  Label done;
2313  __ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
2314  i.InputDoubleRegister(0));
2315  __ b(Condition(0xE), &done, Label::kNear); // normal case
2316  __ lghi(i.OutputRegister(0), Operand::Zero());
2317  __ bind(&done);
2318  break;
2319  }
2320  case kS390_DoubleToInt64: {
2321  Label done;
2322  if (i.OutputCount() > 1) {
2323  __ lghi(i.OutputRegister(1), Operand(1));
2324  }
2325  __ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2326  __ b(Condition(0xE), &done, Label::kNear); // normal case
2327  if (i.OutputCount() > 1) {
2328  __ lghi(i.OutputRegister(1), Operand::Zero());
2329  } else {
2330  __ lghi(i.OutputRegister(0), Operand::Zero());
2331  }
2332  __ bind(&done);
2333  break;
2334  }
2335  case kS390_DoubleToUint64: {
2336  Label done;
2337  if (i.OutputCount() > 1) {
2338  __ lghi(i.OutputRegister(1), Operand(1));
2339  }
2340  __ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
2341  i.InputDoubleRegister(0));
2342  __ b(Condition(0xE), &done, Label::kNear); // normal case
2343  if (i.OutputCount() > 1) {
2344  __ lghi(i.OutputRegister(1), Operand::Zero());
2345  } else {
2346  __ lghi(i.OutputRegister(0), Operand::Zero());
2347  }
2348  __ bind(&done);
2349  break;
2350  }
2351  case kS390_Float32ToInt32: {
2352  Label done;
2353  __ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
2354  kRoundToZero);
2355  __ b(Condition(0xE), &done, Label::kNear); // normal case
2356  __ lghi(i.OutputRegister(0), Operand::Zero());
2357  __ bind(&done);
2358  break;
2359  }
2360  case kS390_Float32ToUint32: {
2361  Label done;
2362  __ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
2363  i.InputDoubleRegister(0));
2364  __ b(Condition(0xE), &done, Label::kNear); // normal case
2365  __ lghi(i.OutputRegister(0), Operand::Zero());
2366  __ bind(&done);
2367  break;
2368  }
2369  case kS390_Float32ToUint64: {
2370  Label done;
2371  if (i.OutputCount() > 1) {
2372  __ lghi(i.OutputRegister(1), Operand(1));
2373  }
2374  __ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
2375  i.InputDoubleRegister(0));
2376  __ b(Condition(0xE), &done, Label::kNear); // normal case
2377  if (i.OutputCount() > 1) {
2378  __ lghi(i.OutputRegister(1), Operand::Zero());
2379  } else {
2380  __ lghi(i.OutputRegister(0), Operand::Zero());
2381  }
2382  __ bind(&done);
2383  break;
2384  }
2385  case kS390_Float32ToInt64: {
2386  Label done;
2387  if (i.OutputCount() > 1) {
2388  __ lghi(i.OutputRegister(1), Operand(1));
2389  }
2390  __ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
2391  __ b(Condition(0xE), &done, Label::kNear); // normal case
2392  if (i.OutputCount() > 1) {
2393  __ lghi(i.OutputRegister(1), Operand::Zero());
2394  } else {
2395  __ lghi(i.OutputRegister(0), Operand::Zero());
2396  }
2397  __ bind(&done);
2398  break;
2399  }
2400  case kS390_DoubleToFloat32:
2401  ASSEMBLE_UNARY_OP(D_DInstr(ledbr), nullInstr, nullInstr);
2402  break;
2403  case kS390_Float32ToDouble:
2404  ASSEMBLE_UNARY_OP(D_DInstr(ldebr), D_MTInstr(LoadFloat32ToDouble),
2405  nullInstr);
2406  break;
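// The DoubleExtract*/DoubleInsert*/DoubleConstruct cases below move raw
// bits between FPRs and GPRs via lgdr/ldgr and use shifts to select or
// splice the requested 32-bit half.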
2407  case kS390_DoubleExtractLowWord32:
2408  __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2409  __ llgfr(i.OutputRegister(), i.OutputRegister());
2410  break;
2411  case kS390_DoubleExtractHighWord32:
2412  __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2413  __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
2414  break;
2415  case kS390_DoubleInsertLowWord32:
2416  __ lgdr(kScratchReg, i.InputDoubleRegister(0));
2417  __ lr(kScratchReg, i.InputRegister(1));
2418  __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2419  break;
2420  case kS390_DoubleInsertHighWord32:
2421  __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
2422  __ lgdr(r0, i.InputDoubleRegister(0));
2423  __ lr(kScratchReg, r0);
2424  __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2425  break;
2426  case kS390_DoubleConstruct:
2427  __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
2428  __ lr(kScratchReg, i.InputRegister(1));
2429 
2430  // Bitwise convert from GPR to FPR
2431  __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2432  break;
2433  case kS390_LoadWordS8:
2434  ASSEMBLE_LOAD_INTEGER(LoadB);
2435  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2436  break;
2437  case kS390_BitcastFloat32ToInt32:
2438  ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadlW), nullInstr);
2439  break;
2440  case kS390_BitcastInt32ToFloat32:
2441  __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2442  break;
2443 #if V8_TARGET_ARCH_S390X
2444  case kS390_BitcastDoubleToInt64:
2445  __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2446  break;
2447  case kS390_BitcastInt64ToDouble:
2448  __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2449  break;
2450 #endif
2451  case kS390_LoadWordU8:
2452  ASSEMBLE_LOAD_INTEGER(LoadlB);
2453  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2454  break;
2455  case kS390_LoadWordU16:
2456  ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
2457  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2458  break;
2459  case kS390_LoadWordS16:
2460  ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
2461  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2462  break;
2463  case kS390_LoadWordU32:
2464  ASSEMBLE_LOAD_INTEGER(LoadlW);
2465  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2466  break;
2467  case kS390_LoadWordS32:
2468  ASSEMBLE_LOAD_INTEGER(LoadW);
2469  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2470  break;
2471  case kS390_LoadReverse16:
2472  ASSEMBLE_LOAD_INTEGER(lrvh);
2473  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2474  break;
2475  case kS390_LoadReverse32:
2476  ASSEMBLE_LOAD_INTEGER(lrv);
2477  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2478  break;
2479  case kS390_LoadReverse64:
2480  ASSEMBLE_LOAD_INTEGER(lrvg);
2481  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2482  break;
2483  case kS390_LoadReverse16RR:
2484  __ lrvr(i.OutputRegister(), i.InputRegister(0));
2485  __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
2486  break;
2487  case kS390_LoadReverse32RR:
2488  __ lrvr(i.OutputRegister(), i.InputRegister(0));
2489  break;
2490  case kS390_LoadReverse64RR:
2491  __ lrvgr(i.OutputRegister(), i.InputRegister(0));
2492  break;
2493  case kS390_LoadWord64:
2494  ASSEMBLE_LOAD_INTEGER(lg);
2495  EmitWordLoadPoisoningIfNeeded(this, instr, i);
2496  break;
2497  case kS390_LoadAndTestWord32: {
2498  ASSEMBLE_LOADANDTEST32(ltr, lt_z);
2499  break;
2500  }
2501  case kS390_LoadAndTestWord64: {
2502  ASSEMBLE_LOADANDTEST64(ltgr, ltg);
2503  break;
2504  }
2505  case kS390_LoadFloat32:
2506  ASSEMBLE_LOAD_FLOAT(LoadFloat32);
2507  break;
2508  case kS390_LoadDouble:
2509  ASSEMBLE_LOAD_FLOAT(LoadDouble);
2510  break;
2511  case kS390_StoreWord8:
2512  ASSEMBLE_STORE_INTEGER(StoreByte);
2513  break;
2514  case kS390_StoreWord16:
2515  ASSEMBLE_STORE_INTEGER(StoreHalfWord);
2516  break;
2517  case kS390_StoreWord32:
2518  ASSEMBLE_STORE_INTEGER(StoreW);
2519  break;
2520 #if V8_TARGET_ARCH_S390X
2521  case kS390_StoreWord64:
2522  ASSEMBLE_STORE_INTEGER(StoreP);
2523  break;
2524 #endif
2525  case kS390_StoreReverse16:
2526  ASSEMBLE_STORE_INTEGER(strvh);
2527  break;
2528  case kS390_StoreReverse32:
2529  ASSEMBLE_STORE_INTEGER(strv);
2530  break;
2531  case kS390_StoreReverse64:
2532  ASSEMBLE_STORE_INTEGER(strvg);
2533  break;
2534  case kS390_StoreFloat32:
2535  ASSEMBLE_STORE_FLOAT32();
2536  break;
2537  case kS390_StoreDouble:
2538  ASSEMBLE_STORE_DOUBLE();
2539  break;
2540  case kS390_Lay:
2541  __ lay(i.OutputRegister(), i.MemoryOperand());
2542  break;
2543 // 0x aa bb cc dd
2544 // index = 3..2..1..0
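// ATOMIC_EXCHANGE emits a compare-and-swap loop on the aligned word at
// MemOperand(r1, offset): the new value is rotated into bits [start, end]
// of the loaded word, csy is retried until it succeeds, and the previous
// lane value is then shifted down into 'output'.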
2545 #define ATOMIC_EXCHANGE(start, end, shift_amount, offset) \
2546  { \
2547  Label do_cs; \
2548  __ LoadlW(output, MemOperand(r1, offset)); \
2549  __ bind(&do_cs); \
2550  __ llgfr(r0, output); \
2551  __ RotateInsertSelectBits(r0, value, Operand(start), Operand(end), \
2552  Operand(shift_amount), false); \
2553  __ csy(output, r0, MemOperand(r1, offset)); \
2554  __ bne(&do_cs, Label::kNear); \
2555  __ srl(output, Operand(shift_amount)); \
2556  }
2557 #ifdef V8_TARGET_BIG_ENDIAN
2558 #define ATOMIC_EXCHANGE_BYTE(i) \
2559  { \
2560  constexpr int idx = (i); \
2561  static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
2562  constexpr int start = 32 + 8 * idx; \
2563  constexpr int end = start + 7; \
2564  constexpr int shift_amount = (3 - idx) * 8; \
2565  ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
2566  }
2567 #define ATOMIC_EXCHANGE_HALFWORD(i) \
2568  { \
2569  constexpr int idx = (i); \
2570  static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
2571  constexpr int start = 32 + 16 * idx; \
2572  constexpr int end = start + 15; \
2573  constexpr int shift_amount = (1 - idx) * 16; \
2574  ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
2575  }
2576 #else
2577 #define ATOMIC_EXCHANGE_BYTE(i) \
2578  { \
2579  constexpr int idx = (i); \
2580  static_assert(idx <= 3 && idx >= 0, "idx is out of range!"); \
2581  constexpr int start = 32 + 8 * (3 - idx); \
2582  constexpr int end = start + 7; \
2583  constexpr int shift_amount = idx * 8; \
2584  ATOMIC_EXCHANGE(start, end, shift_amount, -idx); \
2585  }
2586 #define ATOMIC_EXCHANGE_HALFWORD(i) \
2587  { \
2588  constexpr int idx = (i); \
2589  static_assert(idx <= 1 && idx >= 0, "idx is out of range!"); \
2590  constexpr int start = 32 + 16 * (1 - idx); \
2591  constexpr int end = start + 15; \
2592  constexpr int shift_amount = idx * 16; \
2593  ATOMIC_EXCHANGE(start, end, shift_amount, -idx * 2); \
2594  }
2595 #endif
2596  case kS390_Word64AtomicExchangeUint8:
2597  case kWord32AtomicExchangeInt8:
2598  case kWord32AtomicExchangeUint8: {
2599  Register base = i.InputRegister(0);
2600  Register index = i.InputRegister(1);
2601  Register value = i.InputRegister(2);
2602  Register output = i.OutputRegister();
2603  Label three, two, one, done;
2604  __ la(r1, MemOperand(base, index));
2605  __ tmll(r1, Operand(3));
2606  __ b(Condition(1), &three);
2607  __ b(Condition(2), &two);
2608  __ b(Condition(4), &one);
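// tmll has set the condition code from the low two address bits; the
// branches above dispatch to the 0b11, 0b10 and 0b01 cases, while an
// aligned address (0b00) falls through.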
2609 
2610  // ending with 0b00
2611  ATOMIC_EXCHANGE_BYTE(0);
2612  __ b(&done);
2613 
2614  // ending with 0b01
2615  __ bind(&one);
2616  ATOMIC_EXCHANGE_BYTE(1);
2617  __ b(&done);
2618 
2619  // ending with 0b10
2620  __ bind(&two);
2621  ATOMIC_EXCHANGE_BYTE(2);
2622  __ b(&done);
2623 
2624  // ending with 0b11
2625  __ bind(&three);
2626  ATOMIC_EXCHANGE_BYTE(3);
2627 
2628  __ bind(&done);
2629  if (opcode == kWord32AtomicExchangeInt8) {
2630  __ lgbr(output, output);
2631  } else {
2632  __ llgcr(output, output);
2633  }
2634  break;
2635  }
2636  case kS390_Word64AtomicExchangeUint16:
2637  case kWord32AtomicExchangeInt16:
2638  case kWord32AtomicExchangeUint16: {
2639  Register base = i.InputRegister(0);
2640  Register index = i.InputRegister(1);
2641  Register value = i.InputRegister(2);
2642  Register output = i.OutputRegister();
2643  Label two, unaligned, done;
2644  __ la(r1, MemOperand(base, index));
2645  __ tmll(r1, Operand(3));
2646  __ b(Condition(2), &two);
2647 
2648  // ending with 0b00
2649  ATOMIC_EXCHANGE_HALFWORD(0);
2650  __ b(&done);
2651 
2652  // ending with 0b10
2653  __ bind(&two);
2654  ATOMIC_EXCHANGE_HALFWORD(1);
2655 
2656  __ bind(&done);
2657  if (opcode == kWord32AtomicExchangeInt16) {
2658  __ lghr(output, output);
2659  } else {
2660  __ llghr(output, output);
2661  }
2662  break;
2663  }
2664  case kS390_Word64AtomicExchangeUint32:
2665  case kWord32AtomicExchangeWord32: {
2666  Register base = i.InputRegister(0);
2667  Register index = i.InputRegister(1);
2668  Register value = i.InputRegister(2);
2669  Register output = i.OutputRegister();
2670  Label do_cs;
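// The word-sized exchange is a plain compare-and-swap loop; on success,
// 'output' holds the previous memory value.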
2671  __ lay(r1, MemOperand(base, index));
2672  __ LoadlW(output, MemOperand(r1));
2673  __ bind(&do_cs);
2674  __ cs(output, value, MemOperand(r1));
2675  __ bne(&do_cs, Label::kNear);
2676  break;
2677  }
2678  case kWord32AtomicCompareExchangeInt8:
2679  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadB);
2680  break;
2681  case kS390_Word64AtomicCompareExchangeUint8:
2682  case kWord32AtomicCompareExchangeUint8:
2683  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadlB);
2684  break;
2685  case kWord32AtomicCompareExchangeInt16:
2686  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadHalfWordP);
2687  break;
2688  case kS390_Word64AtomicCompareExchangeUint16:
2689  case kWord32AtomicCompareExchangeUint16:
2690  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadLogicalHalfWordP);
2691  break;
2692  case kS390_Word64AtomicCompareExchangeUint32:
2693  case kWord32AtomicCompareExchangeWord32:
2694  ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
2695  break;
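// For sub-word atomic binops, the ASSEMBLE_ATOMIC_BINOP_* helpers run a
// compare-and-swap loop and then invoke the given lambda to extract the
// old lane value from 'prev': signed cases shift right and sign-extend,
// unsigned cases rotate the lane into the low bits and zero the rest.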
2696 #define ATOMIC_BINOP_CASE(op, inst) \
2697  case kWord32Atomic##op##Int8: \
2698  ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2699  intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2700  __ srlk(result, prev, Operand(shift_right)); \
2701  __ LoadB(result, result); \
2702  }); \
2703  break; \
2704  case kS390_Word64Atomic##op##Uint8: \
2705  case kWord32Atomic##op##Uint8: \
2706  ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
2707  int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2708  __ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
2709  Operand(static_cast<intptr_t>(rotate_left)), \
2710  true); \
2711  }); \
2712  break; \
2713  case kWord32Atomic##op##Int16: \
2714  ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2715  intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
2716  __ srlk(result, prev, Operand(shift_right)); \
2717  __ LoadHalfWordP(result, result); \
2718  }); \
2719  break; \
2720  case kS390_Word64Atomic##op##Uint16: \
2721  case kWord32Atomic##op##Uint16: \
2722  ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
2723  int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
2724  __ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
2725  Operand(static_cast<intptr_t>(rotate_left)), \
2726  true); \
2727  }); \
2728  break;
2729  ATOMIC_BINOP_CASE(Add, Add32)
2730  ATOMIC_BINOP_CASE(Sub, Sub32)
2731  ATOMIC_BINOP_CASE(And, And)
2732  ATOMIC_BINOP_CASE(Or, Or)
2733  ATOMIC_BINOP_CASE(Xor, Xor)
2734 #undef ATOMIC_BINOP_CASE
2735  case kS390_Word64AtomicAddUint32:
2736  case kWord32AtomicAddWord32:
2737  ASSEMBLE_ATOMIC_BINOP_WORD(laa);
2738  break;
2739  case kS390_Word64AtomicSubUint32:
2740  case kWord32AtomicSubWord32:
2741  ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
2742  break;
2743  case kS390_Word64AtomicAndUint32:
2744  case kWord32AtomicAndWord32:
2745  ASSEMBLE_ATOMIC_BINOP_WORD(lan);
2746  break;
2747  case kS390_Word64AtomicOrUint32:
2748  case kWord32AtomicOrWord32:
2749  ASSEMBLE_ATOMIC_BINOP_WORD(lao);
2750  break;
2751  case kS390_Word64AtomicXorUint32:
2752  case kWord32AtomicXorWord32:
2753  ASSEMBLE_ATOMIC_BINOP_WORD(lax);
2754  break;
2755  case kS390_Word64AtomicAddUint64:
2756  ASSEMBLE_ATOMIC_BINOP_WORD64(laag);
2757  break;
2758  case kS390_Word64AtomicSubUint64:
2759  ASSEMBLE_ATOMIC_BINOP_WORD64(LoadAndSub64);
2760  break;
2761  case kS390_Word64AtomicAndUint64:
2762  ASSEMBLE_ATOMIC_BINOP_WORD64(lang);
2763  break;
2764  case kS390_Word64AtomicOrUint64:
2765  ASSEMBLE_ATOMIC_BINOP_WORD64(laog);
2766  break;
2767  case kS390_Word64AtomicXorUint64:
2768  ASSEMBLE_ATOMIC_BINOP_WORD64(laxg);
2769  break;
2770  case kS390_Word64AtomicExchangeUint64: {
2771  Register base = i.InputRegister(0);
2772  Register index = i.InputRegister(1);
2773  Register value = i.InputRegister(2);
2774  Register output = i.OutputRegister();
2775  Label do_cs;
2776  __ la(r1, MemOperand(base, index));
2777  __ lg(output, MemOperand(r1));
2778  __ bind(&do_cs);
2779  __ csg(output, value, MemOperand(r1));
2780  __ bne(&do_cs, Label::kNear);
2781  break;
2782  }
2783  case kS390_Word64AtomicCompareExchangeUint64:
2784  ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
2785  break;
2786  default:
2787  UNREACHABLE();
2788  break;
2789  }
2790  return kSuccess;
2791 } // NOLINT(readability/fn_size)
2792 
2793 // Assembles branches after an instruction.
2794 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
2795  S390OperandConverter i(this, instr);
2796  Label* tlabel = branch->true_label;
2797  Label* flabel = branch->false_label;
2798  ArchOpcode op = instr->arch_opcode();
2799  FlagsCondition condition = branch->condition;
2800 
2801  Condition cond = FlagsConditionToCondition(condition, op);
2802  if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
2803  // Check for unordered if necessary: an unordered comparison must take
2804  // the false branch for lt/le/eq and the true branch for gt/ne/ge.
2805  if (cond == le || cond == eq || cond == lt) {
2806  __ bunordered(flabel);
2807  } else if (cond == gt || cond == ne || cond == ge) {
2808  __ bunordered(tlabel);
2809  }
2810  }
2811  __ b(cond, tlabel);
2812  if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
2813 }
2814 
2815 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
2816  Instruction* instr) {
2817  // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
2818  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
2819  condition == kOverflow || condition == kNotOverflow) {
2820  return;
2821  }
2822 
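// Negate the condition and conditionally load zero into the speculation
// poison register, so that a mis-speculated path ends up with an all-zero
// mask.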
2823  condition = NegateFlagsCondition(condition);
2824  __ LoadImmP(r0, Operand::Zero());
2825  __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
2826  kSpeculationPoisonRegister, r0);
2827 }
2828 
2829 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
2830  BranchInfo* branch) {
2831  AssembleArchBranch(instr, branch);
2832 }
2833 
2834 void CodeGenerator::AssembleArchJump(RpoNumber target) {
2835  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
2836 }
2837 
2838 void CodeGenerator::AssembleArchTrap(Instruction* instr,
2839  FlagsCondition condition) {
2840  class OutOfLineTrap final : public OutOfLineCode {
2841  public:
2842  OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
2843  : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
2844 
2845  void Generate() final {
2846  S390OperandConverter i(gen_, instr_);
2847  TrapId trap_id =
2848  static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
2849  GenerateCallToTrap(trap_id);
2850  }
2851 
2852  private:
2853  void GenerateCallToTrap(TrapId trap_id) {
2854  if (trap_id == TrapId::kInvalid) {
2855  // We cannot test calls to the runtime in cctest/test-run-wasm.
2856  // Therefore we emit a call to C here instead of a call to the runtime.
2857  // We use the context register as the scratch register, because we do
2858  // not have a context here.
2859  __ PrepareCallCFunction(0, 0, cp);
2860  __ CallCFunction(
2861  ExternalReference::wasm_call_trap_callback_for_testing(), 0);
2862  __ LeaveFrame(StackFrame::WASM_COMPILED);
2863  auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
2864  int pop_count =
2865  static_cast<int>(call_descriptor->StackParameterCount());
2866  __ Drop(pop_count);
2867  __ Ret();
2868  } else {
2869  gen_->AssembleSourcePosition(instr_);
2870  // A direct call to a wasm runtime stub defined in this module.
2871  // Just encode the stub index. This will be patched when the code
2872  // is added to the native module and copied into wasm code space.
2873  __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
2874  ReferenceMap* reference_map =
2875  new (gen_->zone()) ReferenceMap(gen_->zone());
2876  gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
2877  Safepoint::kNoLazyDeopt);
2878  if (FLAG_debug_code) {
2879  __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
2880  }
2881  }
2882  }
2883 
2884  Instruction* instr_;
2885  CodeGenerator* gen_;
2886  };
2887  auto ool = new (zone()) OutOfLineTrap(this, instr);
2888  Label* tlabel = ool->entry();
2889  Label end;
2890 
2891  ArchOpcode op = instr->arch_opcode();
2892  Condition cond = FlagsConditionToCondition(condition, op);
2893  if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
2894  // Check for unordered if necessary, as in AssembleArchBranch.
2895  if (cond == le || cond == eq || cond == lt) {
2896  __ bunordered(&end);
2897  } else if (cond == gt || cond == ne || cond == ge) {
2898  __ bunordered(tlabel);
2899  }
2900  }
2901  __ b(cond, tlabel);
2902  __ bind(&end);
2903 }
2904 
2905 // Assembles boolean materializations after an instruction.
2906 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
2907  FlagsCondition condition) {
2908  S390OperandConverter i(this, instr);
2909  ArchOpcode op = instr->arch_opcode();
2910  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
2911 
2912  // Overflow checked for add/sub only.
2913  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
2914  (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
2915  op == kS390_Sub64 || op == kS390_Mul32));
2916 
2917  // Materialize a full 32-bit 1 or 0 value. The result register is always the
2918  // last output of the instruction.
2919  DCHECK_NE(0u, instr->OutputCount());
2920  Register reg = i.OutputRegister(instr->OutputCount() - 1);
2921  Condition cond = FlagsConditionToCondition(condition, op);
2922  Label done;
2923  if (check_unordered) {
2924  __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
2925  : Operand(1));
2926  __ bunordered(&done);
2927  }
2928 
2929  // TODO(john.yan): use load imm high on condition here
2930  __ LoadImmP(reg, Operand::Zero());
2931  __ LoadImmP(kScratchReg, Operand(1));
2932  // locr is sufficient since reg's upper 32 bits are guaranteed to be 0.
2933  __ locr(cond, reg, kScratchReg);
2934  __ bind(&done);
2935 }
2936 
2937 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
2938  S390OperandConverter i(this, instr);
2939  Register input = i.InputRegister(0);
2940  std::vector<std::pair<int32_t, Label*>> cases;
2941  for (size_t index = 2; index < instr->InputCount(); index += 2) {
2942  cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
2943  }
2944  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
2945  cases.data() + cases.size());
2946 }
2947 
2948 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
2949  S390OperandConverter i(this, instr);
2950  Register input = i.InputRegister(0);
2951  for (size_t index = 2; index < instr->InputCount(); index += 2) {
2952  __ Cmp32(input, Operand(i.InputInt32(index + 0)));
2953  __ beq(GetLabel(i.InputRpo(index + 1)));
2954  }
2955  AssembleArchJump(i.InputRpo(1));
2956 }
2957 
2958 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
2959  S390OperandConverter i(this, instr);
2960  Register input = i.InputRegister(0);
2961  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
2962  Label** cases = zone()->NewArray<Label*>(case_count);
2963  for (int32_t index = 0; index < case_count; ++index) {
2964  cases[index] = GetLabel(i.InputRpo(index + 2));
2965  }
2966  Label* const table = AddJumpTable(cases, case_count);
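// Bounds-check the input against the case count, then index the jump
// table by pointer size and jump through the loaded label address.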
2967  __ CmpLogicalP(input, Operand(case_count));
2968  __ bge(GetLabel(i.InputRpo(1)));
2969  __ larl(kScratchReg, table);
2970  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
2971  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
2972  __ Jump(kScratchReg);
2973 }
2974 
2975 void CodeGenerator::FinishFrame(Frame* frame) {
2976  auto call_descriptor = linkage()->GetIncomingDescriptor();
2977  const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
2978 
2979  // Save callee-saved Double registers.
2980  if (double_saves != 0) {
2981  frame->AlignSavedCalleeRegisterSlots();
2982  DCHECK_EQ(kNumCalleeSavedDoubles,
2983  base::bits::CountPopulation(double_saves));
2984  frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
2985  (kDoubleSize / kPointerSize));
2986  }
2987  // Save callee-saved registers.
2988  const RegList saves = call_descriptor->CalleeSavedRegisters();
2989  if (saves != 0) {
2990  // Register save area does not include the fp or constant pool pointer.
2991  const int num_saves = kNumCalleeSaved - 1;
2992  DCHECK(num_saves == base::bits::CountPopulation(saves));
2993  frame->AllocateSavedCalleeRegisterSlots(num_saves);
2994  }
2995 }
2996 
2997 void CodeGenerator::AssembleConstructFrame() {
2998  auto call_descriptor = linkage()->GetIncomingDescriptor();
2999 
3000  if (frame_access_state()->has_frame()) {
3001  if (call_descriptor->IsCFunctionCall()) {
3002  __ Push(r14, fp);
3003  __ LoadRR(fp, sp);
3004  } else if (call_descriptor->IsJSFunctionCall()) {
3005  __ Prologue(ip);
3006  if (call_descriptor->PushArgumentCount()) {
3007  __ Push(kJavaScriptCallArgCountRegister);
3008  }
3009  } else {
3010  StackFrame::Type type = info()->GetOutputStackFrameType();
3011  // TODO(mbrandy): Detect cases where ip is the entrypoint (for
3012  // efficient initialization of the constant pool pointer register).
3013  __ StubPrologue(type);
3014  if (call_descriptor->IsWasmFunctionCall()) {
3015  __ Push(kWasmInstanceRegister);
3016  } else if (call_descriptor->IsWasmImportWrapper()) {
3017  // WASM import wrappers are passed a tuple in the place of the instance.
3018  // Unpack the tuple into the instance and the target callable.
3019  // This must be done here in the codegen because it cannot be expressed
3020  // properly in the graph.
3021  __ LoadP(kJSFunctionRegister,
3022  FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
3023  __ LoadP(kWasmInstanceRegister,
3024  FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
3025  __ Push(kWasmInstanceRegister);
3026  }
3027  }
3028  }
3029 
3030  int shrink_slots = frame()->GetTotalFrameSlotCount() -
3031  call_descriptor->CalculateFixedFrameSize();
3032  if (info()->is_osr()) {
3033  // TurboFan OSR-compiled functions cannot be entered directly.
3034  __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
3035 
3036  // Unoptimized code jumps directly to this entrypoint while the unoptimized
3037  // frame is still on the stack. Optimized code uses OSR values directly from
3038  // the unoptimized frame. Thus, all that needs to be done is to allocate the
3039  // remaining stack slots.
3040  if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
3041  osr_pc_offset_ = __ pc_offset();
3042  shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
3043  ResetSpeculationPoison();
3044  }
3045 
3046  const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
3047  const RegList saves = call_descriptor->CalleeSavedRegisters();
3048 
3049  if (shrink_slots > 0) {
3050  if (info()->IsWasm() && shrink_slots > 128) {
3051  // For WebAssembly functions with big frames we have to do the stack
3052  // overflow check before we construct the frame. Otherwise we may not
3053  // have enough space on the stack to call the runtime for the stack
3054  // overflow.
3055  Label done;
3056 
3057  // If the frame is bigger than the stack, we throw the stack overflow
3058  // exception unconditionally. This also avoids having to guard against
3059  // integer overflow in the stack limit computation below.
3060  if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
3061  Register scratch = r1;
3062  __ LoadP(
3063  scratch,
3064  FieldMemOperand(kWasmInstanceRegister,
3065  WasmInstanceObject::kRealStackLimitAddressOffset));
3066  __ LoadP(scratch, MemOperand(scratch));
3067  __ AddP(scratch, scratch, Operand(shrink_slots * kPointerSize));
3068  __ CmpLogicalP(sp, scratch);
3069  __ bge(&done);
3070  }
3071 
3072  __ LoadP(r4, FieldMemOperand(kWasmInstanceRegister,
3073  WasmInstanceObject::kCEntryStubOffset));
3074  __ Move(cp, Smi::zero());
3075  __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r4);
3076  // We come from WebAssembly, there are no references for the GC.
3077  ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
3078  RecordSafepoint(reference_map, Safepoint::kSimple, 0,
3079  Safepoint::kNoLazyDeopt);
3080  if (FLAG_debug_code) {
3081  __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
3082  }
3083 
3084  __ bind(&done);
3085  }
3086 
3087  // Skip callee-saved and return slots, which are pushed below.
3088  shrink_slots -= base::bits::CountPopulation(saves);
3089  shrink_slots -= frame()->GetReturnSlotCount();
3090  shrink_slots -=
3091  (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
3092  __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
3093  }
3094 
3095  // Save callee-saved Double registers.
3096  if (saves_fp != 0) {
3097  __ MultiPushDoubles(saves_fp);
3098  DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
3099  }
3100 
3101  // Save callee-saved registers.
3102  if (saves != 0) {
3103  __ MultiPush(saves);
3104  // Register save area does not include the fp or constant pool pointer.
3105  }
3106 
3107  const int returns = frame()->GetReturnSlotCount();
3108  if (returns != 0) {
3109  // Create space for returns.
3110  __ lay(sp, MemOperand(sp, -returns * kPointerSize));
3111  }
3112 }
3113 
3114 void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
3115  auto call_descriptor = linkage()->GetIncomingDescriptor();
3116  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());
3117 
3118  const int returns = frame()->GetReturnSlotCount();
3119  if (returns != 0) {
3120  // Create space for returns.
3121  __ lay(sp, MemOperand(sp, returns * kPointerSize));
3122  }
3123 
3124  // Restore registers.
3125  const RegList saves = call_descriptor->CalleeSavedRegisters();
3126  if (saves != 0) {
3127  __ MultiPop(saves);
3128  }
3129 
3130  // Restore double registers.
3131  const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
3132  if (double_saves != 0) {
3133  __ MultiPopDoubles(double_saves);
3134  }
3135 
3136  S390OperandConverter g(this, nullptr);
3137  if (call_descriptor->IsCFunctionCall()) {
3138  AssembleDeconstructFrame();
3139  } else if (frame_access_state()->has_frame()) {
3140  // Canonicalize JSFunction return sites for now, unless they have a
3141  // variable number of stack slot pops.
3142  if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
3143  if (return_label_.is_bound()) {
3144  __ b(&return_label_);
3145  return;
3146  } else {
3147  __ bind(&return_label_);
3148  AssembleDeconstructFrame();
3149  }
3150  } else {
3151  AssembleDeconstructFrame();
3152  }
3153  }
3154  if (pop->IsImmediate()) {
3155  pop_count += g.ToConstant(pop).ToInt32();
3156  } else {
3157  __ Drop(g.ToRegister(pop));
3158  }
3159  __ Drop(pop_count);
3160  __ Ret();
3161 }
3162 
3163 void CodeGenerator::FinishCode() {}
3164 
3165 void CodeGenerator::AssembleMove(InstructionOperand* source,
3166  InstructionOperand* destination) {
3167  S390OperandConverter g(this, nullptr);
3168  // Dispatch on the source and destination operand kinds. Not all
3169  // combinations are possible.
3170  if (source->IsRegister()) {
3171  DCHECK(destination->IsRegister() || destination->IsStackSlot());
3172  Register src = g.ToRegister(source);
3173  if (destination->IsRegister()) {
3174  __ Move(g.ToRegister(destination), src);
3175  } else {
3176  __ StoreP(src, g.ToMemOperand(destination));
3177  }
3178  } else if (source->IsStackSlot()) {
3179  DCHECK(destination->IsRegister() || destination->IsStackSlot());
3180  MemOperand src = g.ToMemOperand(source);
3181  if (destination->IsRegister()) {
3182  __ LoadP(g.ToRegister(destination), src);
3183  } else {
3184  Register temp = kScratchReg;
3185  __ LoadP(temp, src, r0);
3186  __ StoreP(temp, g.ToMemOperand(destination));
3187  }
3188  } else if (source->IsConstant()) {
3189  Constant src = g.ToConstant(source);
3190  if (destination->IsRegister() || destination->IsStackSlot()) {
3191  Register dst =
3192  destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
3193  switch (src.type()) {
3194  case Constant::kInt32:
3195 #if V8_TARGET_ARCH_S390X
3196  if (false) {
3197 #else
3198  if (RelocInfo::IsWasmReference(src.rmode())) {
3199 #endif
3200  __ mov(dst, Operand(src.ToInt32(), src.rmode()));
3201  } else {
3202  __ Load(dst, Operand(src.ToInt32()));
3203  }
3204  break;
3205  case Constant::kInt64:
3206 #if V8_TARGET_ARCH_S390X
3207  if (RelocInfo::IsWasmReference(src.rmode())) {
3208  __ mov(dst, Operand(src.ToInt64(), src.rmode()));
3209  } else {
3210  __ Load(dst, Operand(src.ToInt64()));
3211  }
3212 #else
3213  __ mov(dst, Operand(src.ToInt64()));
3214 #endif // V8_TARGET_ARCH_S390X
3215  break;
3216  case Constant::kFloat32:
3217  __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
3218  break;
3219  case Constant::kFloat64:
3220  __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
3221  break;
3222  case Constant::kExternalReference:
3223  __ Move(dst, src.ToExternalReference());
3224  break;
3225  case Constant::kDelayedStringConstant:
3226  __ mov(dst, Operand::EmbeddedStringConstant(
3227  src.ToDelayedStringConstant()));
3228  break;
3229  case Constant::kHeapObject: {
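 // Prefer materializing the object from the root list; fall back to
 // embedding the handle.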
3230  Handle<HeapObject> src_object = src.ToHeapObject();
3231  RootIndex index;
3232  if (IsMaterializableFromRoot(src_object, &index)) {
3233  __ LoadRoot(dst, index);
3234  } else {
3235  __ Move(dst, src_object);
3236  }
3237  break;
3238  }
3239  case Constant::kRpoNumber:
3240  UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
3241  break;
3242  }
3243  if (destination->IsStackSlot()) {
3244  __ StoreP(dst, g.ToMemOperand(destination), r0);
3245  }
3246  } else {
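 // Floating-point constant: materialize the literal in an FP register,
 // going through kScratchDoubleReg when spilling to a stack slot.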
3247  DoubleRegister dst = destination->IsFPRegister()
3248  ? g.ToDoubleRegister(destination)
3249  : kScratchDoubleReg;
3250  double value = (src.type() == Constant::kFloat32)
3251  ? src.ToFloat32()
3252  : src.ToFloat64().value();
3253  if (src.type() == Constant::kFloat32) {
3254  __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
3255  } else {
3256  __ LoadDoubleLiteral(dst, value, kScratchReg);
3257  }
3258 
3259  if (destination->IsFloatStackSlot()) {
3260  __ StoreFloat32(dst, g.ToMemOperand(destination));
3261  } else if (destination->IsDoubleStackSlot()) {
3262  __ StoreDouble(dst, g.ToMemOperand(destination));
3263  }
3264  }
3265  } else if (source->IsFPRegister()) {
3266  DoubleRegister src = g.ToDoubleRegister(source);
3267  if (destination->IsFPRegister()) {
3268  DoubleRegister dst = g.ToDoubleRegister(destination);
3269  __ Move(dst, src);
3270  } else {
3271  DCHECK(destination->IsFPStackSlot());
3272  LocationOperand* op = LocationOperand::cast(source);
3273  if (op->representation() == MachineRepresentation::kFloat64) {
3274  __ StoreDouble(src, g.ToMemOperand(destination));
3275  } else {
3276  __ StoreFloat32(src, g.ToMemOperand(destination));
3277  }
3278  }
3279  } else if (source->IsFPStackSlot()) {
3280  DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
3281  MemOperand src = g.ToMemOperand(source);
3282  if (destination->IsFPRegister()) {
3283  LocationOperand* op = LocationOperand::cast(source);
3284  if (op->representation() == MachineRepresentation::kFloat64) {
3285  __ LoadDouble(g.ToDoubleRegister(destination), src);
3286  } else {
3287  __ LoadFloat32(g.ToDoubleRegister(destination), src);
3288  }
3289  } else {
3290  LocationOperand* op = LocationOperand::cast(source);
3291  DoubleRegister temp = kScratchDoubleReg;
3292  if (op->representation() == MachineRepresentation::kFloat64) {
3293  __ LoadDouble(temp, src);
3294  __ StoreDouble(temp, g.ToMemOperand(destination));
3295  } else {
3296  __ LoadFloat32(temp, src);
3297  __ StoreFloat32(temp, g.ToMemOperand(destination));
3298  }
3299  }
3300  } else {
3301  UNREACHABLE();
3302  }
3303 }
3304 
3305 // Swapping the contents of source and destination.
3306 // Each operand may be a:
3307 // Register,
3308 // FloatRegister,
3309 // DoubleRegister,
3310 // StackSlot,
3311 // FloatStackSlot,
3312 // or DoubleStackSlot
3313 void CodeGenerator::AssembleSwap(InstructionOperand* source,
3314  InstructionOperand* destination) {
3315  S390OperandConverter g(this, nullptr);
3316  if (source->IsRegister()) {
3317  Register src = g.ToRegister(source);
3318  if (destination->IsRegister()) {
3319  __ SwapP(src, g.ToRegister(destination), kScratchReg);
3320  } else {
3321  DCHECK(destination->IsStackSlot());
3322  __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
3323  }
3324  } else if (source->IsStackSlot()) {
3325  DCHECK(destination->IsStackSlot());
3326  __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
3327  r0);
3328  } else if (source->IsFloatRegister()) {
3329  DoubleRegister src = g.ToDoubleRegister(source);
3330  if (destination->IsFloatRegister()) {
3331  __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
3332  } else {
3333  DCHECK(destination->IsFloatStackSlot());
3334  __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
3335  }
3336  } else if (source->IsDoubleRegister()) {
3337  DoubleRegister src = g.ToDoubleRegister(source);
3338  if (destination->IsDoubleRegister()) {
3339  __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
3340  } else {
3341  DCHECK(destination->IsDoubleStackSlot());
3342  __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
3343  }
3344  } else if (source->IsFloatStackSlot()) {
3345  DCHECK(destination->IsFloatStackSlot());
3346  __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
3347  kScratchDoubleReg, d0);
3348  } else if (source->IsDoubleStackSlot()) {
3349  DCHECK(destination->IsDoubleStackSlot());
3350  __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
3351  kScratchDoubleReg, d0);
3352  } else if (source->IsSimd128Register()) {
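 // Simd128 register swaps are not implemented on S390 in this version.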
3353  UNREACHABLE();
3354  } else {
3355  UNREACHABLE();
3356  }
3357 }
3358 
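// Emits the jump table body: one absolute label address per target.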
3359 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
3360  for (size_t index = 0; index < target_count; ++index) {
3361  __ emit_label_addr(targets[index]);
3362  }
3363 }
3364 
3365 #undef __
3366 
3367 } // namespace compiler
3368 } // namespace internal
3369 } // namespace v8