V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
macro-assembler-arm64.h
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"

// Simulator specific helpers.
#if USE_SIMULATOR
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
  __ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
  __ Debug(message, __LINE__, \
           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
  Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif


namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = x0;
constexpr Register kReturnRegister1 = x1;
constexpr Register kReturnRegister2 = x2;
constexpr Register kJSFunctionRegister = x1;
constexpr Register kContextRegister = cp;
constexpr Register kAllocateSizeRegister = x1;

#if defined(V8_OS_WIN)
// x18 is reserved as platform register on Windows ARM64.
constexpr Register kSpeculationPoisonRegister = x23;
#else
constexpr Register kSpeculationPoisonRegister = x18;
#endif

constexpr Register kInterpreterAccumulatorRegister = x0;
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
constexpr Register kInterpreterBytecodeArrayRegister = x20;
constexpr Register kInterpreterDispatchTableRegister = x21;

constexpr Register kJavaScriptCallArgCountRegister = x0;
constexpr Register kJavaScriptCallCodeStartRegister = x2;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = x3;
constexpr Register kJavaScriptCallExtraArg1Register = x2;

constexpr Register kOffHeapTrampolineRegister = ip0;
constexpr Register kRuntimeCallFunctionRegister = x1;
constexpr Register kRuntimeCallArgCountRegister = x0;
constexpr Register kRuntimeCallArgvRegister = x11;
constexpr Register kWasmInstanceRegister = x7;
constexpr Register kWasmCompileLazyFuncIndexRegister = x8;

#define LS_MACRO_LIST(V) \
  V(Ldrb, Register&, rt, LDRB_w) \
  V(Strb, Register&, rt, STRB_w) \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w) \
  V(Strh, Register&, rt, STRH_w) \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
  V(Str, CPURegister&, rt, StoreOpFor(rt)) \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V) \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb) \
  V(Ldarh, ldarh) \
  V(Ldar, ldar) \
  V(Ldaxrb, ldaxrb) \
  V(Ldaxrh, ldaxrh) \
  V(Ldaxr, ldaxr) \
  V(Stlrb, stlrb) \
  V(Stlrh, stlrh) \
  V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb) \
  V(Stlxrh, stlxrh) \
  V(Stlxr, stlxr)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
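
// For example, loading an object's map field (a minimal sketch, assuming a
// tagged object pointer in x1):
//
//   __ Ldr(x0, FieldMemOperand(x1, HeapObject::kMapOffset));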

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
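
// For example, InvertBranchType(integer_eq) goes through NegateCondition and
// yields integer_ne, while InvertBranchType(reg_zero) takes the xor-with-1
// path and yields reg_not_zero (i.e. cbz becomes cbnz). The xor trick relies
// on the STATIC_ASSERT in MacroAssembler below, which pairs each register
// branch type with its inverse.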

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
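
// To spell out the example above: 0x1f7de has its lowest set bit at position
// 1, so it can be encoded as 0xfbef << 1; a single movz materializes 0xfbef
// and the add undoes the pre-shift via 'lsl #1'. kLimitShiftForSP restricts
// the allowed shift so the operand form stays valid when sp is involved.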

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
      : TurboAssemblerBase(options, buffer, buffer_size) {}

  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}

#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
  bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }

  static bool IsNearCallOffset(int64_t offset);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on arm64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  inline void InitializeRootRegister();

  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(rd, vn, vn_index);
  }

  // This is required for compatibility with architecture independent code.
  // Remove if not needed.
  void Move(Register dst, Smi src);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register lhs, Register rhs);
  void Swap(VRegister lhs, VRegister rhs);

// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmla, Fmla) \
  V(fmls, Fmls) \
  V(fmul, Fmul) \
  V(fmulx, Fmulx) \
  V(mul, Mul) \
  V(mla, Mla) \
  V(mls, Mls) \
  V(sqdmulh, Sqdmulh) \
  V(sqrdmulh, Sqrdmulh) \
  V(sqdmull, Sqdmull) \
  V(sqdmull2, Sqdmull2) \
  V(sqdmlal, Sqdmlal) \
  V(sqdmlal2, Sqdmlal2) \
  V(sqdmlsl, Sqdmlsl) \
  V(sqdmlsl2, Sqdmlsl2) \
  V(smull, Smull) \
  V(smull2, Smull2) \
  V(smlal, Smlal) \
  V(smlal2, Smlal2) \
  V(smlsl, Smlsl) \
  V(smlsl2, Smlsl2) \
  V(umull, Umull) \
  V(umull2, Umull2) \
  V(umlal, Umlal) \
  V(umlal2, Umlal2) \
  V(umlsl, Umlsl) \
  V(umlsl2, Umlsl2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
            int vm_index) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, vm, vm_index); \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
  V(abs, Abs) \
  V(addp, Addp) \
  V(addv, Addv) \
  V(cls, Cls) \
  V(clz, Clz) \
  V(cnt, Cnt) \
  V(faddp, Faddp) \
  V(fcvtas, Fcvtas) \
  V(fcvtau, Fcvtau) \
  V(fcvtms, Fcvtms) \
  V(fcvtmu, Fcvtmu) \
  V(fcvtns, Fcvtns) \
  V(fcvtnu, Fcvtnu) \
  V(fcvtps, Fcvtps) \
  V(fcvtpu, Fcvtpu) \
  V(fmaxnmp, Fmaxnmp) \
  V(fmaxnmv, Fmaxnmv) \
  V(fmaxp, Fmaxp) \
  V(fmaxv, Fmaxv) \
  V(fminnmp, Fminnmp) \
  V(fminnmv, Fminnmv) \
  V(fminp, Fminp) \
  V(fminv, Fminv) \
  V(fneg, Fneg) \
  V(frecpe, Frecpe) \
  V(frecpx, Frecpx) \
  V(frinta, Frinta) \
  V(frinti, Frinti) \
  V(frintm, Frintm) \
  V(frintn, Frintn) \
  V(frintp, Frintp) \
  V(frintx, Frintx) \
  V(frintz, Frintz) \
  V(frsqrte, Frsqrte) \
  V(fsqrt, Fsqrt) \
  V(mov, Mov) \
  V(mvn, Mvn) \
  V(neg, Neg) \
  V(not_, Not) \
  V(rbit, Rbit) \
  V(rev16, Rev16) \
  V(rev32, Rev32) \
  V(rev64, Rev64) \
  V(sadalp, Sadalp) \
  V(saddlp, Saddlp) \
  V(saddlv, Saddlv) \
  V(smaxv, Smaxv) \
  V(sminv, Sminv) \
  V(sqabs, Sqabs) \
  V(sqneg, Sqneg) \
  V(sqxtn2, Sqxtn2) \
  V(sqxtn, Sqxtn) \
  V(sqxtun2, Sqxtun2) \
  V(sqxtun, Sqxtun) \
  V(suqadd, Suqadd) \
  V(sxtl2, Sxtl2) \
  V(sxtl, Sxtl) \
  V(uadalp, Uadalp) \
  V(uaddlp, Uaddlp) \
  V(uaddlv, Uaddlv) \
  V(umaxv, Umaxv) \
  V(uminv, Uminv) \
  V(uqxtn2, Uqxtn2) \
  V(uqxtn, Uqxtn) \
  V(urecpe, Urecpe) \
  V(ursqrte, Ursqrte) \
  V(usqadd, Usqadd) \
  V(uxtl2, Uxtl2) \
  V(uxtl, Uxtl) \
  V(xtn2, Xtn2) \
  V(xtn, Xtn)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn); \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST

// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq) \
  V(fcmge, Fcmge) \
  V(fcmgt, Fcmgt) \
  V(fcmle, Fcmle) \
  V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, imm); \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add) \
  V(addhn2, Addhn2) \
  V(addhn, Addhn) \
  V(addp, Addp) \
  V(and_, And) \
  V(bic, Bic) \
  V(bif, Bif) \
  V(bit, Bit) \
  V(bsl, Bsl) \
  V(cmeq, Cmeq) \
  V(cmge, Cmge) \
  V(cmgt, Cmgt) \
  V(cmhi, Cmhi) \
  V(cmhs, Cmhs) \
  V(cmtst, Cmtst) \
  V(eor, Eor) \
  V(fabd, Fabd) \
  V(facge, Facge) \
  V(facgt, Facgt) \
  V(faddp, Faddp) \
  V(fcmeq, Fcmeq) \
  V(fcmge, Fcmge) \
  V(fcmgt, Fcmgt) \
  V(fmaxnmp, Fmaxnmp) \
  V(fmaxp, Fmaxp) \
  V(fminnmp, Fminnmp) \
  V(fminp, Fminp) \
  V(fmla, Fmla) \
  V(fmls, Fmls) \
  V(fmulx, Fmulx) \
  V(frecps, Frecps) \
  V(frsqrts, Frsqrts) \
  V(mla, Mla) \
  V(mls, Mls) \
  V(mul, Mul) \
  V(orn, Orn) \
  V(orr, Orr) \
  V(pmull2, Pmull2) \
  V(pmull, Pmull) \
  V(pmul, Pmul) \
  V(raddhn2, Raddhn2) \
  V(raddhn, Raddhn) \
  V(rsubhn2, Rsubhn2) \
  V(rsubhn, Rsubhn) \
  V(sabal2, Sabal2) \
  V(sabal, Sabal) \
  V(saba, Saba) \
  V(sabdl2, Sabdl2) \
  V(sabdl, Sabdl) \
  V(sabd, Sabd) \
  V(saddl2, Saddl2) \
  V(saddl, Saddl) \
  V(saddw2, Saddw2) \
  V(saddw, Saddw) \
  V(shadd, Shadd) \
  V(shsub, Shsub) \
  V(smaxp, Smaxp) \
  V(smax, Smax) \
  V(sminp, Sminp) \
  V(smin, Smin) \
  V(smlal2, Smlal2) \
  V(smlal, Smlal) \
  V(smlsl2, Smlsl2) \
  V(smlsl, Smlsl) \
  V(smull2, Smull2) \
  V(smull, Smull) \
  V(sqadd, Sqadd) \
  V(sqdmlal2, Sqdmlal2) \
  V(sqdmlal, Sqdmlal) \
  V(sqdmlsl2, Sqdmlsl2) \
  V(sqdmlsl, Sqdmlsl) \
  V(sqdmulh, Sqdmulh) \
  V(sqdmull2, Sqdmull2) \
  V(sqdmull, Sqdmull) \
  V(sqrdmulh, Sqrdmulh) \
  V(sqrshl, Sqrshl) \
  V(sqshl, Sqshl) \
  V(sqsub, Sqsub) \
  V(srhadd, Srhadd) \
  V(srshl, Srshl) \
  V(sshl, Sshl) \
  V(ssubl2, Ssubl2) \
  V(ssubl, Ssubl) \
  V(ssubw2, Ssubw2) \
  V(ssubw, Ssubw) \
  V(subhn2, Subhn2) \
  V(subhn, Subhn) \
  V(sub, Sub) \
  V(trn1, Trn1) \
  V(trn2, Trn2) \
  V(uabal2, Uabal2) \
  V(uabal, Uabal) \
  V(uaba, Uaba) \
  V(uabdl2, Uabdl2) \
  V(uabdl, Uabdl) \
  V(uabd, Uabd) \
  V(uaddl2, Uaddl2) \
  V(uaddl, Uaddl) \
  V(uaddw2, Uaddw2) \
  V(uaddw, Uaddw) \
  V(uhadd, Uhadd) \
  V(uhsub, Uhsub) \
  V(umaxp, Umaxp) \
  V(umax, Umax) \
  V(uminp, Uminp) \
  V(umin, Umin) \
  V(umlal2, Umlal2) \
  V(umlal, Umlal) \
  V(umlsl2, Umlsl2) \
  V(umlsl, Umlsl) \
  V(umull2, Umull2) \
  V(umull, Umull) \
  V(uqadd, Uqadd) \
  V(uqrshl, Uqrshl) \
  V(uqshl, Uqshl) \
  V(uqsub, Uqsub) \
  V(urhadd, Urhadd) \
  V(urshl, Urshl) \
  V(ushl, Ushl) \
  V(usubl2, Usubl2) \
  V(usubl, Usubl) \
  V(usubw2, Usubw2) \
  V(usubw, Usubw) \
  V(uzp1, Uzp1) \
  V(uzp2, Uzp2) \
  V(zip1, Zip1) \
  V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, vm); \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    bic(vd, imm8, left_shift);
  }

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L);

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);

  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);

  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);

  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Isb();
  inline void Csdb();

  bool AllowThisStubCall(CodeStub* stub);

  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register dst, const MemOperand& src);
  inline void SmiUntag(Register smi);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug_code to enable.
  void AssertUnreachable(AbortReason reason);

  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsNotASmi);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

  // Print a message to stderr and abort execution.
  void Abort(AbortReason reason);

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true on success, updating the contents of dst;
  // returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  inline void Bind(Label* label);

  static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Move macros.
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);

  void LogicalMacro(const Register& rd, const Register& rn,
                    const Operand& operand, LogicalOp op);
  void AddSubMacro(const Register& rd, const Register& rn,
                   const Operand& operand, FlagsUpdate S, AddSubOp op);
  inline void Orr(const Register& rd, const Register& rn,
                  const Operand& operand);
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    orr(vd, imm8, left_shift);
  }
  inline void Orn(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void And(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  inline void Bic(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Blr(const Register& xn);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Subs(const Register& rd, const Register& rn,
                   const Operand& operand);
  void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);

  // Emits a runtime assert that the stack pointer is aligned.
  void AssertSpAligned();

  // Copy slot_count stack slots from the stack offset specified by src to
  // the stack offset specified by dst. The offsets and count are expressed in
  // slot-sized units. Offset dst must be less than src, or the gap between
  // them must be greater than or equal to slot_count, otherwise the result is
  // unpredictable. The function may corrupt its register arguments. The
  // registers must not alias each other.
  void CopySlots(int dst, Register src, Register slot_count);
  void CopySlots(Register dst, Register src, Register slot_count);
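
  // For example, a minimal sketch that copies slots [2..4] down to [0..2]
  // (x10 and x11 are arbitrary scratch choices here):
  //
  //   __ Mov(x10, 2);             // Source offset, in slots.
  //   __ Mov(x11, 3);             // Number of slots to copy.
  //   __ CopySlots(0, x10, x11);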

  // Copy count double words from the address in register src to the address
  // in register dst. There are two modes for this function:
  // 1) Address dst must be less than src, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. This is the default mode.
  // 2) Address src must be less than dst, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. In this mode, src and dst specify the last (highest)
  //    address of the regions to copy from and to.
  // The case where src == dst is not supported.
  // The function may corrupt its register arguments. The registers must not
  // alias each other.
  enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
  void CopyDoubleWords(Register dst, Register src, Register count,
                       CopyDoubleWordsMode mode = kDstLessThanSrc);
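
  // For example, a forward (default-mode) copy, assuming x0 is below x1:
  //
  //   __ CopyDoubleWords(x0, x1, x2);  // x2 holds the double-word count.
  //
  // For an overlapping upward copy, pass kSrcLessThanDst and point x0/x1 at
  // the last (highest) addresses of the destination and source regions.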

  // Calculate the address of a double word-sized slot at slot_offset from the
  // stack pointer, and write it to dst. Positive slot_offsets are at addresses
  // greater than sp, with slot zero at sp.
  void SlotAddress(Register dst, int slot_offset);
  void SlotAddress(Register dst, Register slot_offset);
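
  // For example, "__ SlotAddress(x2, 3);" computes x2 = sp + 3 * kXRegSize,
  // the address of the fourth slot above the stack pointer.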

  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Operand& imm);

  // Claim or drop stack space without actually accessing memory.
  //
  // In debug mode, both of these will write invalid data into the claimed or
  // dropped space.
  //
  // The stack pointer must be aligned to 16 bytes and the size claimed or
  // dropped must be a multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
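
  // For example, reserving two x-register-sized slots (16 bytes, which keeps
  // sp 16-byte aligned) and releasing them again:
  //
  //   __ Claim(2);
  //   ...
  //   __ Drop(2);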

  // Drop 'count' arguments from the stack, rounded up to a multiple of two,
  // without actually accessing memory.
  // We assume the size of the arguments is the pointer size.
  // An optional mode argument is passed, which can indicate we need to
  // explicitly add the receiver to the count.
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  inline void DropArguments(const Register& count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);
  inline void DropArguments(int64_t count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);

  // Drop 'count' slots from stack, rounded up to a multiple of two, without
  // actually accessing memory.
  inline void DropSlots(int64_t count);

  // Push a single argument, with padding, to the stack.
  inline void PushArgument(const Register& arg);

  // Add and sub macros.
  inline void Add(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Push or pop up to 4 registers of the same width to or from the stack.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // The stack pointer must be aligned to 16 bytes on entry and the total size
  // of the specified registers must also be a multiple of 16 bytes.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  void Push(const Register& src0, const VRegister& src1);
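
  // For example, "__ Push(x0, x1);" lays out the stack exactly like
  // "__ Push(x0); __ Push(x1);" (x1 ends up at the lower address), and the
  // matching restore is "__ Pop(x1, x0);".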

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<HeapObject> object);
  inline void Push(Smi smi);

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) { Push(src); }
  inline void pop(Register dst) { Pop(dst); }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Address wasm_target);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);
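
  // For example, a sketch that saves and restores x0-x3 as one 32-byte block
  // (CPURegList's (type, size, first, last) constructor is assumed here):
  //
  //   CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 3);
  //   __ PushCPURegList(list);
  //   ...
  //   __ PopCPURegList(list);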

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // by which the stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Restore caller saved registers from the stack, and return the number of
  // bytes by which the stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                    PreShiftImmMode mode);

  void CheckPageFlagSet(const Register& object, const Register& scratch,
                        int mask, Label* if_any_set);

  void CheckPageFlagClear(const Register& object, const Register& scratch,
                          int mask, Label* if_all_clear);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern, Label* label);
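
  // For example, a common Smi check (kSmiTagMask covers the tag bit, and
  // 'is_smi' is a hypothetical label):
  //
  //   __ TestAndBranchIfAllClear(value, kSmiTagMask, &is_smi);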

  inline void Brk(int code);

  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = nullptr);

  inline void JumpIfEqual(Register x, int32_t y, Label* dest);
  inline void JumpIfLessThan(Register x, int32_t y, Label* dest);

  inline void Fmov(VRegister fd, VRegister fn);
  inline void Fmov(VRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  inline void Fmov(VRegister fd, double imm);
  inline void Fmov(VRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister fd, T imm) {
    DCHECK(allow_macro_instructions());
    Fmov(fd, static_cast<double>(imm));
  }
  inline void Fmov(Register rd, VRegister fn);

  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not adequate).
  void IndirectCall(Address target, RelocInfo::Mode rmode);

  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode);

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

  inline void Fcvtzs(const Register& rd, const VRegister& fn);
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzs(vd, vn, fbits);
  }

  inline void Fcvtzu(const Register& rd, const VRegister& fn);
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzu(vd, vn, fbits);
  }

  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);

  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Smull(const Register& rd, const Register& rn, const Register& rm);

  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Fadd(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fcmp(const VRegister& fn, const VRegister& fm);
  inline void Fcmp(const VRegister& fn, double value);
  inline void Fabs(const VRegister& fd, const VRegister& fn);
  inline void Fmul(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fsub(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fdiv(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmax(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmin(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Rev(const Register& rd, const Register& rn);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

  inline void Clz(const Register& rd, const Register& rn);

  // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
  // be 16 byte aligned.
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  // The stack pointer must be aligned to 16 bytes.
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
  // stack pointer must be 16 byte aligned.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
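
  // For example, "__ PokePair(x0, x1, 16);" writes x0 at sp + 16 and x1 at
  // sp + 24, without moving the stack pointer.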

  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);

  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);

  inline void Scvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    scvtf(vd, vn, fbits);
  }
  inline void Ucvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    ucvtf(vd, vn, fbits);
  }

  void AssertFPCRState(Register fpcr = NoReg);
  void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
  void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
                    Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);

  inline void Fcvt(const VRegister& fd, const VRegister& fn);

  int ActivationFrameAlignment();

  void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, rn);
  }

  inline void Bl(Label* label);
  inline void Br(const Register& xn);

  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    DCHECK(allow_macro_instructions());
    dup(vd, rn);
  }

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn) \
  V(rshrn2, Rshrn2) \
  V(shl, Shl) \
  V(shll, Shll) \
  V(shll2, Shll2) \
  V(shrn, Shrn) \
  V(shrn2, Shrn2) \
  V(sli, Sli) \
  V(sqrshrn, Sqrshrn) \
  V(sqrshrn2, Sqrshrn2) \
  V(sqrshrun, Sqrshrun) \
  V(sqrshrun2, Sqrshrun2) \
  V(sqshl, Sqshl) \
  V(sqshlu, Sqshlu) \
  V(sqshrn, Sqshrn) \
  V(sqshrn2, Sqshrn2) \
  V(sqshrun, Sqshrun) \
  V(sqshrun2, Sqshrun2) \
  V(sri, Sri) \
  V(srshr, Srshr) \
  V(srsra, Srsra) \
  V(sshll, Sshll) \
  V(sshll2, Sshll2) \
  V(sshr, Sshr) \
  V(ssra, Ssra) \
  V(uqrshrn, Uqrshrn) \
  V(uqrshrn2, Uqrshrn2) \
  V(uqshl, Uqshl) \
  V(uqshrn, Uqshrn) \
  V(uqshrn2, Uqshrn2) \
  V(urshr, Urshr) \
  V(ursra, Ursra) \
  V(ushll, Ushll) \
  V(ushll2, Ushll2) \
  V(ushr, Ushr) \
  V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    DCHECK(allow_macro_instructions()); \
    ASM(vd, vn, shift); \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    umov(rd, vn, vn_index);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
           int index) {
    DCHECK(allow_macro_instructions());
    ext(vd, vn, vm, index);
  }

  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    smov(rd, vn, vn_index);
  }

// Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) override;

  inline void Ret(const Register& xn = lr);

  // Perform a conversion from a double to a signed int64. If the input fits in
  // range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  //
  // Only public for the test code in test-code-stubs-arm64.cc.
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);

  inline void Mrs(const Register& rt, SystemRegister sysreg);

  // Generates function prologue code.
  void Prologue();

  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmgt(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmge(vd, vn, imm);
  }
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmeq(vd, vn, imm);
  }

  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = nullptr,
           Label* is_representable = nullptr);
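
  // For example, "__ Abs(x0, x1, &too_large);" computes the absolute value of
  // x1 into x0 and branches to the (hypothetical) 'too_large' label when x1
  // holds the minimum int64 value, whose magnitude is not representable.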

  inline void Cls(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Fcvtns(const Register& rd, const VRegister& fn);
  inline void Fcvtnu(const Register& rd, const VRegister& fn);
  inline void Fcvtms(const Register& rd, const VRegister& fn);
  inline void Fcvtmu(const Register& rd, const VRegister& fn);
  inline void Fcvtas(const Register& rd, const VRegister& fn);
  inline void Fcvtau(const Register& rd, const VRegister& fn);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(const Register& rd);

  void ResetSpeculationPoisonRegister();

 protected:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size, const CPURegister& src0,
                  const CPURegister& src1, const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count, int size, const CPURegister& dst0,
                 const CPURegister& dst1, const CPURegister& dst2,
                 const CPURegister& dst3);

  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
                               StatusFlags nzcv, Condition cond,
                               ConditionalCompareOp op);

  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
                            const Operand& operand, FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack must be prepared by the caller as for a normal AAPCS64
  // call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);

 private:
#if DEBUG
  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_ = true;
#endif


  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_ = DefaultTmpList();
  CPURegList fptmp_list_ = DefaultFPTmpList();

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);

  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
                      LoadStoreOp op);

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Handle<Code> code_target,
                           Address wasm_target);
};

class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
      : TurboAssembler(options, buffer, size) {}

  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}

  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void Bics(const Register& rd, const Register& rn,
                   const Operand& operand);

  inline void Adcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);

  inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
  STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                   unsigned lsb);
  inline void Fcsel(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, Condition cond);
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn2(vd, vn);
  }
  inline void Fmadd(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fminnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fmsub(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fnmadd(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Fnmsub(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& src);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Msr(SystemRegister sysreg, const Register& rt);
  inline void Nop() { nop(); }
  void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
            const int shift_amount = 0) {
    DCHECK(allow_macro_instructions());
    mvni(vd, imm8, shift, shift_amount);
  }
  inline void Rev(const Register& rd, const Register& rn);
  inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);

  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmle(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmlt(vd, vn, imm);
  }

  void Ld1(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1r(vt, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2r(vt, vt2, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3r(vt, vt2, vt3, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4r(vt, vt2, vt3, vt4, src);
  }
  void St1(const VRegister& vt, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, lane, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1533  const VRegister& vm) {
1534  DCHECK(allow_macro_instructions());
1535  tbx(vd, vn, vn2, vm);
1536  }
1537  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1538  const VRegister& vn3, const VRegister& vm) {
1539  DCHECK(allow_macro_instructions());
1540  tbx(vd, vn, vn2, vn3, vm);
1541  }
1542  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
1543  const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
1544  DCHECK(allow_macro_instructions());
1545  tbx(vd, vn, vn2, vn3, vn4, vm);
1546  }
1547 
1548  void LoadObject(Register result, Handle<Object> object);
1549 
1550  inline void PushSizeRegList(RegList registers, unsigned reg_size,
1551  CPURegister::RegisterType type = CPURegister::kRegister) {
1552  PushCPURegList(CPURegList(type, reg_size, registers));
1553  }
1554  inline void PopSizeRegList(RegList registers, unsigned reg_size,
1555  CPURegister::RegisterType type = CPURegister::kRegister) {
1556  PopCPURegList(CPURegList(type, reg_size, registers));
1557  }
1558  inline void PushXRegList(RegList regs) {
1559  PushSizeRegList(regs, kXRegSizeInBits);
1560  }
1561  inline void PopXRegList(RegList regs) {
1562  PopSizeRegList(regs, kXRegSizeInBits);
1563  }
1564  inline void PushWRegList(RegList regs) {
1565  PushSizeRegList(regs, kWRegSizeInBits);
1566  }
1567  inline void PopWRegList(RegList regs) {
1568  PopSizeRegList(regs, kWRegSizeInBits);
1569  }
1570  inline void PushDRegList(RegList regs) {
1571  PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
1572  }
1573  inline void PopDRegList(RegList regs) {
1574  PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
1575  }
1576  inline void PushSRegList(RegList regs) {
1577  PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
1578  }
1579  inline void PopSRegList(RegList regs) {
1580  PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
1581  }
1582 
1583  // Push the specified register 'count' times.
1584  void PushMultipleTimes(CPURegister src, Register count);
1585 
1586  // Sometimes callers need to push or pop multiple registers in a way that is
1587  // difficult to structure efficiently for fixed Push or Pop calls. This scope
1588  // allows push requests to be queued up, then flushed at once. The
1589  // MacroAssembler will try to generate the most efficient sequence required.
1590  //
1591  // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
1592  // register sizes and types.
1593  class PushPopQueue {
1594  public:
1595  explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) {}
1596 
1597  ~PushPopQueue() {
1598  DCHECK(queued_.empty());
1599  }
1600 
1601  void Queue(const CPURegister& rt) {
1602  size_ += rt.SizeInBytes();
1603  queued_.push_back(rt);
1604  }
1605 
1606  void PushQueued();
1607  void PopQueued();
1608 
1609  private:
1610  MacroAssembler* masm_;
1611  int size_;
1612  std::vector<CPURegister> queued_;
1613  };
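// Usage sketch (editorial addition, not part of the V8 source): assuming a
// MacroAssembler* masm is in scope and the nested class is accessible, a
// mixed set of registers can be queued and flushed as one efficient sequence:
//
//   MacroAssembler::PushPopQueue queue(masm);
//   queue.Queue(x0);     // 64-bit general-purpose register
//   queue.Queue(w1);     // 32-bit general-purpose register
//   queue.Queue(d0);     // 64-bit floating-point register
//   queue.PushQueued();  // emits the push; the queue must be flushed
//                        // before the destructor runs (it DCHECKs empty).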
1614 
1615  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
1616  // values peeked will be adjacent, with the value in 'dst2' being from a
1617  // higher address than 'dst1'. The offset is in bytes. The stack pointer must
1618  // be aligned to 16 bytes.
1619  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
1620 
1621  // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
1622  // register.
1623  inline void ClaimBySMI(const Register& count_smi,
1624  uint64_t unit_size = kXRegSize);
1625  inline void DropBySMI(const Register& count_smi,
1626  uint64_t unit_size = kXRegSize);
1627 
1628  // Compare a register with an operand, and branch to label depending on the
1629  // condition. May corrupt the status flags.
1630  inline void CompareAndBranch(const Register& lhs,
1631  const Operand& rhs,
1632  Condition cond,
1633  Label* label);
1634 
1635  // Insert one or more instructions into the instruction stream that encode
1636  // some caller-defined data. The instructions used will be executable with no
1637  // side effects.
1638  inline void InlineData(uint64_t data);
1639 
1640  // Insert an instrumentation enable marker into the instruction stream.
1641  inline void EnableInstrumentation();
1642 
1643  // Insert an instrumentation disable marker into the instruction stream.
1644  inline void DisableInstrumentation();
1645 
1646  // Insert an instrumentation event marker into the instruction stream. These
1647  // will be picked up by the instrumentation system to annotate an instruction
1648 // profile. The argument marker_name must be a printable two-character string;
1649  // it will be encoded in the event marker.
1650  inline void AnnotateInstrumentation(const char* marker_name);
1651 
1652  // Preserve the callee-saved registers (as defined by AAPCS64).
1653  //
1654  // Higher-numbered registers are pushed before lower-numbered registers, and
1655  // thus get higher addresses.
1656  // Floating-point registers are pushed before general-purpose registers, and
1657  // thus get higher addresses.
1658  //
1659  // Note that registers are not checked for invalid values. Use this method
1660  // only if you know that the GC won't try to examine the values on the stack.
1661  void PushCalleeSavedRegisters();
1662 
1663  // Restore the callee-saved registers (as defined by AAPCS64).
1664  //
1665  // Higher-numbered registers are popped after lower-numbered registers, and
1666  // thus come from higher addresses.
1667  // Floating-point registers are popped after general-purpose registers, and
1668  // thus come from higher addresses.
1669  void PopCalleeSavedRegisters();
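// Usage sketch (editorial addition): the two calls are meant to be paired
// around generated code that may clobber AAPCS64 callee-saved registers:
//
//   masm->PushCalleeSavedRegisters();
//   // ... code that freely uses callee-saved registers ...
//   masm->PopCalleeSavedRegisters();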
1670 
1671  // Helpers ------------------------------------------------------------------
1672 
1673  static int SafepointRegisterStackIndex(int reg_code);
1674 
1675  template<typename Field>
1676  void DecodeField(Register dst, Register src) {
1677  static const int shift = Field::kShift;
1678  static const int setbits = CountSetBits(Field::kMask, 32);
1679  Ubfx(dst, src, shift, setbits);
1680  }
1681 
1682  template<typename Field>
1683  void DecodeField(Register reg) {
1684  DecodeField<Field>(reg, reg);
1685  }
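// Usage sketch (editorial addition): any BitField-style type exposing kShift
// and kMask can serve as Field; Map::ElementsKindBits is used here purely as
// an illustrative example:
//
//   masm->DecodeField<Map::ElementsKindBits>(x0, x1);  // x0 = field of x1
//   masm->DecodeField<Map::ElementsKindBits>(x0);      // decode in place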
1686 
1687  // ---- SMI and Number Utilities ----
1688 
1689  inline void SmiTag(Register dst, Register src);
1690  inline void SmiTag(Register smi);
1691 
1692  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
1693  inline void JumpIfBothSmi(Register value1, Register value2,
1694  Label* both_smi_label,
1695  Label* not_smi_label = nullptr);
1696  inline void JumpIfEitherSmi(Register value1, Register value2,
1697  Label* either_smi_label,
1698  Label* not_smi_label = nullptr);
1699  inline void JumpIfEitherNotSmi(Register value1,
1700  Register value2,
1701  Label* not_smi_label);
1702  inline void JumpIfBothNotSmi(Register value1,
1703  Register value2,
1704  Label* not_smi_label);
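// Usage sketch (editorial addition): a typical SMI fast-path dispatch,
// assuming a MacroAssembler* masm:
//
//   Label not_smi, done;
//   masm->JumpIfNotSmi(x0, &not_smi);
//   // ... fast path: x0 holds a SMI ...
//   masm->B(&done);
//   masm->Bind(&not_smi);
//   // ... slow path: x0 holds a heap object ...
//   masm->Bind(&done);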
1705 
1706  // Abort execution if argument is a smi, enabled via --debug-code.
1707  void AssertNotSmi(Register object,
1708  AbortReason reason = AbortReason::kOperandIsASmi);
1709 
1710  inline void ObjectTag(Register tagged_obj, Register obj);
1711  inline void ObjectUntag(Register untagged_obj, Register obj);
1712 
1713  // Abort execution if argument is not a Constructor, enabled via --debug-code.
1714  void AssertConstructor(Register object);
1715 
1716  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1717  void AssertFunction(Register object);
1718 
1719  // Abort execution if argument is not a JSGeneratorObject (or subclass),
1720  // enabled via --debug-code.
1721  void AssertGeneratorObject(Register object);
1722 
1723  // Abort execution if argument is not a JSBoundFunction,
1724  // enabled via --debug-code.
1725  void AssertBoundFunction(Register object);
1726 
1727  // Abort execution if argument is not undefined or an AllocationSite, enabled
1728  // via --debug-code.
1729  void AssertUndefinedOrAllocationSite(Register object);
1730 
1731  // Try to represent a double as a signed 64-bit int.
1732  // This succeeds if the result compares equal to the input, so inputs of -0.0
1733  // are represented as 0 and handled as a success.
1734  //
1735  // On output the Z flag is set if the operation was successful.
1736  void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
1737  VRegister scratch_d,
1738  Label* on_successful_conversion = nullptr,
1739  Label* on_failed_conversion = nullptr) {
1740  DCHECK(as_int.Is64Bits());
1741  TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
1742  on_failed_conversion);
1743  }
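// Usage sketch (editorial addition): the optional labels let the helper
// branch for you, so the Z flag need not be tested by the caller; x10 is an
// arbitrary 64-bit choice and d1 an arbitrary scratch:
//
//   Label exact, inexact;
//   masm->TryRepresentDoubleAsInt64(x10, d0, d1, &exact, &inexact);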
1744 
1745  // ---- Calling / Jumping helpers ----
1746 
1747  void CallStub(CodeStub* stub);
1748  void TailCallStub(CodeStub* stub);
1749 
1750  void CallRuntime(const Runtime::Function* f,
1751  int num_arguments,
1752  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1753 
1754  // Convenience function: Same as above, but takes the fid instead.
1755  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1756  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1757  CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1758  }
1759 
1760  // Convenience function: Same as above, but takes the fid instead.
1761  void CallRuntime(Runtime::FunctionId fid,
1762  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1763  const Runtime::Function* function = Runtime::FunctionForId(fid);
1764  CallRuntime(function, function->nargs, save_doubles);
1765  }
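// Usage sketch (editorial addition): the fid-only overload looks up the
// declared argument count itself; kStackGuard is used purely as an example:
//
//   masm->CallRuntime(Runtime::kStackGuard);  // nargs taken from the table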
1766 
1767  void TailCallRuntime(Runtime::FunctionId fid);
1768 
1769  // Jump to a runtime routine.
1770  void JumpToExternalReference(const ExternalReference& builtin,
1771  bool builtin_exit_frame = false);
1772 
1773  // Generates a trampoline to jump to the off-heap instruction stream.
1774  void JumpToInstructionStream(Address entry);
1775 
1776  // Registers used through the invocation chain are hard-coded.
1777  // We force passing the parameters to ensure the contracts are correctly
1778  // honoured by the caller.
1779  // 'function' must be x1.
1780  // 'actual' must use an immediate or x0.
1781  // 'expected' must use an immediate or x2.
1782  // 'call_kind' must be x5.
1783  void InvokePrologue(const ParameterCount& expected,
1784  const ParameterCount& actual, Label* done,
1785  InvokeFlag flag, bool* definitely_mismatches);
1786 
1787  // On function call, call into the debugger if necessary.
1788  void CheckDebugHook(Register fun, Register new_target,
1789  const ParameterCount& expected,
1790  const ParameterCount& actual);
1791  void InvokeFunctionCode(Register function, Register new_target,
1792  const ParameterCount& expected,
1793  const ParameterCount& actual, InvokeFlag flag);
1794  // Invoke the JavaScript function in the given register.
1795  // Changes the current context to the context in the function before invoking.
1796  void InvokeFunction(Register function, Register new_target,
1797  const ParameterCount& actual, InvokeFlag flag);
1798  void InvokeFunction(Register function, const ParameterCount& expected,
1799  const ParameterCount& actual, InvokeFlag flag);
1800 
1801  // ---- Code generation helpers ----
1802 
1803  // Frame restart support
1804  void MaybeDropFrames();
1805 
1806  // ---------------------------------------------------------------------------
1807  // Support functions.
1808 
1809  // Compare object type for heap object. heap_object contains a non-Smi
1810  // whose object type should be compared with the given type. This both
1811  // sets the flags and leaves the object type in the type_reg register.
1812  // It leaves the map in the map register (unless the type_reg and map register
1813  // are the same register). It leaves the heap object in the heap_object
1814  // register unless the heap_object register is the same register as one of the
1815  // other registers.
1816  void CompareObjectType(Register heap_object,
1817  Register map,
1818  Register type_reg,
1819  InstanceType type);
1820 
1821 
1822 // Compare object type for heap object, and branch if equal (or not).
1823  // heap_object contains a non-Smi whose object type should be compared with
1824  // the given type. This both sets the flags and leaves the object type in
1825  // the type_reg register. It leaves the map in the map register (unless the
1826  // type_reg and map register are the same register). It leaves the heap
1827  // object in the heap_object register unless the heap_object register is the
1828  // same register as one of the other registers.
1829  void JumpIfObjectType(Register object,
1830  Register map,
1831  Register type_reg,
1832  InstanceType type,
1833  Label* if_cond_pass,
1834  Condition cond = eq);
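// Usage sketch (editorial addition): branch when x0 holds a JSFunction; per
// the comment above, x1 receives the map and x2 the instance type:
//
//   Label is_function;
//   masm->JumpIfObjectType(x0, x1, x2, JS_FUNCTION_TYPE, &is_function);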
1835 
1836  // Compare instance type in a map. map contains a valid map object whose
1837  // object type should be compared with the given type. This both
1838  // sets the flags and leaves the object type in the type_reg register.
1839  void CompareInstanceType(Register map,
1840  Register type_reg,
1841  InstanceType type);
1842 
1843  // Load the elements kind field from a map, and return it in the result
1844  // register.
1845  void LoadElementsKindFromMap(Register result, Register map);
1846 
1847  // Compare the object in a register to a value from the root list.
1848  void CompareRoot(const Register& obj, RootIndex index);
1849 
1850  // Compare the object in a register to a value and jump if they are equal.
1851  void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);
1852 
1853  // Compare the object in a register to a value and jump if they are not equal.
1854  void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
1855 
1856  // Compare the contents of a register with an operand, and branch to true,
1857  // false or fall through, depending on condition.
1858  void CompareAndSplit(const Register& lhs,
1859  const Operand& rhs,
1860  Condition cond,
1861  Label* if_true,
1862  Label* if_false,
1863  Label* fall_through);
1864 
1865  // Test the bits of register defined by bit_pattern, and branch to
1866  // if_any_set, if_all_clear or fall_through accordingly.
1867  void TestAndSplit(const Register& reg,
1868  uint64_t bit_pattern,
1869  Label* if_all_clear,
1870  Label* if_any_set,
1871  Label* fall_through);
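// Usage sketch (editorial addition): route control flow on a single bit,
// binding the fall-through label immediately after the split:
//
//   Label if_clear, if_set, fall_through;
//   masm->TestAndSplit(x0, 1 << 3, &if_clear, &if_set, &fall_through);
//   masm->Bind(&fall_through);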
1872 
1873  // ---------------------------------------------------------------------------
1874  // Frames.
1875 
1876  void ExitFramePreserveFPRegs();
1877  void ExitFrameRestoreFPRegs();
1878 
1879  // Enter exit frame. Exit frames are used when calling C code from generated
1880  // (JavaScript) code.
1881  //
1882  // The only registers modified by this function are the provided scratch
1883  // register, the frame pointer and the stack pointer.
1884  //
1885  // The 'extra_space' argument can be used to allocate some space in the exit
1886  // frame that will be ignored by the GC. This space will be reserved in the
1887  // bottom of the frame immediately above the return address slot.
1888  //
1889  // Set up a stack frame and registers as follows:
1890  // fp[8]: CallerPC (lr)
1891  // fp -> fp[0]: CallerFP (old fp)
1892  // fp[-8]: SPOffset (new sp)
1893  // fp[-16]: CodeObject()
1894  // fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
1895  // sp[8]: Memory reserved for the caller if extra_space != 0.
1896  // Alignment padding, if necessary.
1897  // sp -> sp[0]: Space reserved for the return address.
1898  //
1899  // This function also stores the new frame information in the top frame, so
1900  // that the new frame becomes the current frame.
1901  void EnterExitFrame(bool save_doubles, const Register& scratch,
1902  int extra_space = 0,
1903  StackFrame::Type frame_type = StackFrame::EXIT);
1904 
1905  // Leave the current exit frame, after a C function has returned to generated
1906  // (JavaScript) code.
1907  //
1908  // This effectively unwinds the operation of EnterExitFrame:
1909  // * Preserved doubles are restored (if restore_doubles is true).
1910  // * The frame information is removed from the top frame.
1911  // * The exit frame is dropped.
1912  void LeaveExitFrame(bool save_doubles, const Register& scratch,
1913  const Register& scratch2);
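// Usage sketch (editorial addition): exit frames bracket a call from
// generated code into C; x10/x11 are arbitrary scratch choices here:
//
//   masm->EnterExitFrame(false, x10);        // save_doubles = false
//   // ... set up arguments and call the C function ...
//   masm->LeaveExitFrame(false, x10, x11);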
1914 
1915  // Load the global proxy from the current context.
1916  void LoadGlobalProxy(Register dst);
1917 
1918  // ---------------------------------------------------------------------------
1919  // In-place weak references.
1920  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1921 
1922  // ---------------------------------------------------------------------------
1923  // StatsCounter support
1924 
1925  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1926  Register scratch2);
1927  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1928  Register scratch2);
1929 
1930  // ---------------------------------------------------------------------------
1931  // Garbage collector support (GC).
1932 
1933  // Push and pop the registers that can hold pointers, as defined by the
1934  // RegList constant kSafepointSavedRegisters.
1935  void PushSafepointRegisters();
1936  void PopSafepointRegisters();
1937 
1938  void CheckPageFlag(const Register& object, const Register& scratch, int mask,
1939  Condition cc, Label* condition_met);
1940 
1941  // Notify the garbage collector that we wrote a pointer into an object.
1942  // |object| is the object being stored into, |value| is the object being
1943  // stored. value and scratch registers are clobbered by the operation.
1944  // The offset is the offset from the start of the object, not the offset from
1945  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
1946  void RecordWriteField(
1947  Register object, int offset, Register value, Register scratch,
1948  LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1949  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1950  SmiCheck smi_check = INLINE_SMI_CHECK);
1951 
1952  // For a given |object| notify the garbage collector that the slot |address|
1953  // has been written. |value| is the object being stored. The value and
1954  // address registers are clobbered by the operation.
1955  void RecordWrite(
1956  Register object, Register address, Register value,
1957  LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
1958  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1959  SmiCheck smi_check = INLINE_SMI_CHECK);
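// Usage sketch (editorial addition): a field store followed by its write
// barrier; the offset constant is illustrative:
//
//   masm->Str(x1, FieldMemOperand(x0, JSObject::kElementsOffset));
//   masm->RecordWriteField(x0, JSObject::kElementsOffset, x1, x2,
//                          kLRHasNotBeenSaved, kDontSaveFPRegs);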
1960 
1961  // ---------------------------------------------------------------------------
1962  // Debugging.
1963 
1964  void AssertRegisterIsRoot(
1965  Register reg, RootIndex index,
1966  AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
1967 
1968  // Abort if the specified register contains the invalid color bit pattern.
1969  // The pattern must be in bits [1:0] of 'reg' register.
1970  //
1971  // If emit_debug_code() is false, this emits no code.
1972  void AssertHasValidColor(const Register& reg);
1973 
1974  void LoadNativeContextSlot(int index, Register dst);
1975 
1976  // Like printf, but print at run-time from generated code.
1977  //
1978  // The caller must ensure that arguments for floating-point placeholders
1979  // (such as %e, %f or %g) are VRegisters, and that arguments for integer
1980  // placeholders are Registers.
1981  //
1982  // Format placeholders that refer to more than one argument, or to a specific
1983  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
1984  //
1985  // This function automatically preserves caller-saved registers so that
1986  // calling code can use Printf at any point without having to worry about
1987  // corruption. The preservation mechanism generates a lot of code. If this is
1988  // a problem, preserve the important registers manually and then call
1989  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
1990  // implicitly preserved.
1991  void Printf(const char* format,
1992  CPURegister arg0 = NoCPUReg,
1993  CPURegister arg1 = NoCPUReg,
1994  CPURegister arg2 = NoCPUReg,
1995  CPURegister arg3 = NoCPUReg);
1996 
1997  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
1998  //
1999  // The return code from the system printf call will be returned in x0.
2000  void PrintfNoPreserve(const char* format,
2001  const CPURegister& arg0 = NoCPUReg,
2002  const CPURegister& arg1 = NoCPUReg,
2003  const CPURegister& arg2 = NoCPUReg,
2004  const CPURegister& arg3 = NoCPUReg);
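// Usage sketch (editorial addition): integer placeholders take Registers and
// floating-point placeholders take VRegisters, as described above:
//
//   masm->Printf("count: %d, ratio: %g\n", w0, d0);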
2005 
2006  private:
2007  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
2008  void InNewSpace(Register object,
2009  Condition cond, // eq for new space, ne otherwise.
2010  Label* branch);
2011 
2012  // Try to represent a double as an int so that integer fast-paths may be
2013  // used. Not every valid integer value is guaranteed to be caught.
2014 // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
2015  // is a W or X register.
2016  //
2017  // This does not distinguish between +0 and -0, so if this distinction is
2018  // important it must be checked separately.
2019  //
2020  // On output the Z flag is set if the operation was successful.
2021  void TryRepresentDoubleAsInt(Register as_int, VRegister value,
2022  VRegister scratch_d,
2023  Label* on_successful_conversion = nullptr,
2024  Label* on_failed_conversion = nullptr);
2025 
2026  public:
2027  // Far branches resolving.
2028  //
2029  // The various classes of branch instructions with immediate offsets have
2030  // different ranges. While the Assembler will fail to assemble a branch
2031  // exceeding its range, the MacroAssembler offers a mechanism to resolve
2032  // branches to too distant targets, either by tweaking the generated code to
2033  // use branch instructions with wider ranges or generating veneers.
2034  //
2035  // Currently branches to distant targets are resolved using unconditional
2036 // branch instructions with a range of +-128MB. If that becomes too little
2037  // (!), the mechanism can be extended to generate special veneers for really
2038  // far targets.
2039 };
2040 
2041 
2042 // Use this scope when you need a one-to-one mapping between methods and
2043 // instructions. This scope prevents the MacroAssembler from being called and
2044 // literal pools from being emitted. It also asserts the number of instructions
2045 // emitted is what you specified when creating the scope.
2046 class InstructionAccurateScope {
2047  public:
2048  explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
2049  : tasm_(tasm)
2050 #ifdef DEBUG
2051  ,
2052  size_(count * kInstrSize)
2053 #endif
2054  {
2055  // Before blocking the const pool, see if it needs to be emitted.
2056  tasm_->CheckConstPool(false, true);
2057  tasm_->CheckVeneerPool(false, true);
2058 
2059  tasm_->StartBlockPools();
2060 #ifdef DEBUG
2061  if (count != 0) {
2062  tasm_->bind(&start_);
2063  }
2064  previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
2065  tasm_->set_allow_macro_instructions(false);
2066 #endif
2067  }
2068 
2069  ~InstructionAccurateScope() {
2070  tasm_->EndBlockPools();
2071 #ifdef DEBUG
2072  if (start_.is_bound()) {
2073  DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
2074  }
2075  tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
2076 #endif
2077  }
2078 
2079  private:
2080  TurboAssembler* tasm_;
2081 #ifdef DEBUG
2082  size_t size_;
2083  Label start_;
2084  bool previous_allow_macro_instructions_;
2085 #endif
2086 };
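// Usage sketch (editorial addition): within the scope only raw, lowercase
// assembler mnemonics may be emitted, and in debug builds the instruction
// count is checked against the value given at construction:
//
//   {
//     InstructionAccurateScope scope(tasm, 2);  // expect exactly 2 instrs
//     tasm->add(x0, x0, 1);
//     tasm->sub(x1, x1, 1);
//   }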
2087 
2088 // This scope utility allows scratch registers to be managed safely. The
2089 // TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
2090 // registers. These registers can be allocated on demand, and will be returned
2091 // at the end of the scope.
2092 //
2093 // When the scope ends, the MacroAssembler's lists will be restored to their
2094 // original state, even if the lists were modified by some other means. Note
2095 // that this scope can be nested but the destructors need to run in the reverse
2096 // order of the constructors. We do not have assertions for this.
2097 class UseScratchRegisterScope {
2098  public:
2099  explicit UseScratchRegisterScope(TurboAssembler* tasm)
2100  : available_(tasm->TmpList()),
2101  availablefp_(tasm->FPTmpList()),
2102  old_available_(available_->list()),
2103  old_availablefp_(availablefp_->list()) {
2104  DCHECK_EQ(available_->type(), CPURegister::kRegister);
2105  DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
2106  }
2107 
2108  ~UseScratchRegisterScope();
2109 
2110  // Take a register from the appropriate temps list. It will be returned
2111  // automatically when the scope ends.
2112  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
2113  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
2114  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
2115  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
2116  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
2117  VRegister AcquireV(VectorFormat format) {
2118  return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
2119  }
2120 
2121  Register AcquireSameSizeAs(const Register& reg);
2122  VRegister AcquireSameSizeAs(const VRegister& reg);
2123 
2124  private:
2125  static CPURegister AcquireNextAvailable(CPURegList* available);
2126 
2127  // Available scratch registers.
2128  CPURegList* available_; // kRegister
2129  CPURegList* availablefp_; // kVRegister
2130 
2131  // The state of the available lists at the start of this scope.
2132  RegList old_available_; // kRegister
2133  RegList old_availablefp_; // kVRegister
2134 };
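// Usage sketch (editorial addition): borrow a scratch register; it returns
// to the TmpList() pool automatically when the scope ends:
//
//   {
//     UseScratchRegisterScope temps(tasm);
//     Register scratch = temps.AcquireX();
//     tasm->Mov(scratch, 0x1234);
//   }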
2135 
2136 MemOperand ContextMemOperand(Register context, int index = 0);
2137 MemOperand NativeContextMemOperand();
2138 
2139 // Encode and decode information about patchable inline SMI checks.
2140 class InlineSmiCheckInfo {
2141  public:
2142  explicit InlineSmiCheckInfo(Address info);
2143 
2144  bool HasSmiCheck() const { return smi_check_ != nullptr; }
2145 
2146  const Register& SmiRegister() const {
2147  return reg_;
2148  }
2149 
2150  Instruction* SmiCheck() const {
2151  return smi_check_;
2152  }
2153 
2154  int SmiCheckDelta() const { return smi_check_delta_; }
2155 
2156  // Use MacroAssembler::InlineData to emit information about patchable inline
2157 // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'smi_check'
2158  // indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
2159  //
2160 // The generated patch information can be read using the InlineSmiCheckInfo
2161  // class.
2162  static void Emit(MacroAssembler* masm, const Register& reg,
2163  const Label* smi_check);
2164 
2165  // Emit information to indicate that there is no inline SMI check.
2166  static void EmitNotInlined(MacroAssembler* masm) {
2167  Label unbound;
2168  Emit(masm, NoReg, &unbound);
2169  }
2170 
2171  private:
2172  Register reg_;
2173  int smi_check_delta_;
2174  Instruction* smi_check_;
2175 
2176  // Fields in the data encoded by InlineData.
2177 
2178  // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
2179  // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
2180  // used in a patchable check. The Emit() method checks this.
2181  //
2182  // Note that the total size of the fields is restricted by the underlying
2183  // storage size handled by the BitField class, which is a uint32_t.
2184  class RegisterBits : public BitField<unsigned, 0, 5> {};
2185  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
2186 };
2187 
2188 } // namespace internal
2189 } // namespace v8
2190 
2191 #define ACCESS_MASM(masm) masm->
2192 
2193 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_