V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
assembler-arm.h
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 // A light-weight ARM Assembler
38 // Generates user mode instructions for the ARM architecture up to version 5
39 
40 #ifndef V8_ARM_ASSEMBLER_ARM_H_
41 #define V8_ARM_ASSEMBLER_ARM_H_
42 
43 #include <stdio.h>
44 #include <vector>
45 
46 #include "src/arm/constants-arm.h"
47 #include "src/assembler.h"
48 #include "src/boxed-float.h"
49 #include "src/constant-pool.h"
50 #include "src/double.h"
51 
52 namespace v8 {
53 namespace internal {
54 
55 // clang-format off
56 #define GENERAL_REGISTERS(V) \
57  V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
58  V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
59 
60 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
61  V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
62  V(r8) V(r9)
63 
64 #define FLOAT_REGISTERS(V) \
65  V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
66  V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
67  V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
68  V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
69 
70 #define LOW_DOUBLE_REGISTERS(V) \
71  V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
72  V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
73 
74 #define NON_LOW_DOUBLE_REGISTERS(V) \
75  V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
76  V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
77 
78 #define DOUBLE_REGISTERS(V) \
79  LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
80 
81 #define SIMD128_REGISTERS(V) \
82  V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
83  V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
84 
85 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
86  V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
87  V(d8) V(d9) V(d10) V(d11) V(d12) \
88  V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
89  V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
90 
91 #define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
92  V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
93  V(d8) V(d9) V(d10) V(d11) V(d12) V(d15)
94 
95 #define C_REGISTERS(V) \
96  V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
97  V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
98 // clang-format on
99 
100 // The ARM ABI does not specify the usage of register r9, which may be reserved
101 // as the static base or thread register on some platforms, in which case we
102 // leave it alone. Adjust the value of kR9Available accordingly:
103 const int kR9Available = 1; // 1 if available to us, 0 if reserved
104 
105 // Register list in load/store instructions
106 // Note that the bit values must match those used in actual instruction encoding
107 const int kNumRegs = 16;
108 
109 // Caller-saved/arguments registers
110 const RegList kJSCallerSaved =
111  1 << 0 | // r0 a1
112  1 << 1 | // r1 a2
113  1 << 2 | // r2 a3
114  1 << 3; // r3 a4
115 
116 const int kNumJSCallerSaved = 4;
117 
118 // Callee-saved registers preserved when switching from C to JavaScript
119 const RegList kCalleeSaved =
120  1 << 4 | // r4 v1
121  1 << 5 | // r5 v2
122  1 << 6 | // r6 v3
123  1 << 7 | // r7 v4 (cp in JavaScript code)
124  1 << 8 | // r8 v5 (pp in JavaScript code)
125  kR9Available << 9 | // r9 v6
126  1 << 10 | // r10 v7
127  1 << 11; // r11 v8 (fp in JavaScript code)
128 
129 // When calling into C++ (only for C++ calls that can't cause a GC).
130 // The call code will take care of lr, fp, etc.
131 const RegList kCallerSaved =
132  1 << 0 | // r0
133  1 << 1 | // r1
134  1 << 2 | // r2
135  1 << 3 | // r3
136  1 << 9; // r9
137 
138 const int kNumCalleeSaved = 7 + kR9Available;
139 
140 // Double registers d8 to d15 are callee-saved.
141 const int kNumDoubleCalleeSaved = 8;
142 
143 // Number of registers for which space is reserved in safepoints. Must be a
144 // multiple of 8.
145 // TODO(regis): Only 8 registers may actually be sufficient. Revisit.
146 const int kNumSafepointRegisters = 16;
147 
148 // Define the list of registers actually saved at safepoints.
149 // Note that the number of saved registers may be smaller than the reserved
150 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
151 const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
152 const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
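// A RegList is a plain bit mask indexed by register code, so membership and
// union checks are simple bit operations. Illustrative sketch (the values
// follow from the definitions above):
//
//   bool r5_is_callee_saved = (kCalleeSaved & (1 << 5)) != 0;  // true
//   bool r2_is_callee_saved = (kCalleeSaved & (1 << 2)) != 0;  // false: r2 is caller-saved
//   RegList all_saved = kJSCallerSaved | kCalleeSaved;         // == kSafepointSavedRegisters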
153 
154 enum RegisterCode {
155 #define REGISTER_CODE(R) kRegCode_##R,
156  GENERAL_REGISTERS(REGISTER_CODE)
157 #undef REGISTER_CODE
158  kRegAfterLast
159 };
160 
161 class Register : public RegisterBase<Register, kRegAfterLast> {
162  friend class RegisterBase;
163  explicit constexpr Register(int code) : RegisterBase(code) {}
164 };
165 
166 ASSERT_TRIVIALLY_COPYABLE(Register);
167 static_assert(sizeof(Register) == sizeof(int),
168  "Register can efficiently be passed by value");
169 
170 // r7: context register
171 #define DECLARE_REGISTER(R) \
172  constexpr Register R = Register::from_code<kRegCode_##R>();
173 GENERAL_REGISTERS(DECLARE_REGISTER)
174 #undef DECLARE_REGISTER
175 constexpr Register no_reg = Register::no_reg();
176 
177 constexpr bool kPadArguments = false;
178 constexpr bool kSimpleFPAliasing = false;
179 constexpr bool kSimdMaskRegisters = false;
180 
181 enum SwVfpRegisterCode {
182 #define REGISTER_CODE(R) kSwVfpCode_##R,
183  FLOAT_REGISTERS(REGISTER_CODE)
184 #undef REGISTER_CODE
185  kSwVfpAfterLast
186 };
187 
188 // Representation of a list of non-overlapping VFP registers. This list
189 // represents the data layout of VFP registers as a bitfield:
190 // S registers cover 1 bit
191 // D registers cover 2 bits
192 // Q registers cover 4 bits
193 //
194 // This way, we make sure no registers in the list ever overlap. However, a list
195 // may represent multiple different sets of registers,
196 // e.g. [d0 s2 s3] <=> [s0 s1 d1].
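//
// Worked examples of the resulting masks:
//   s0 -> bit 0      (mask 0x1)
//   d0 -> bits 0-1   (mask 0x3, aliasing s0 and s1)
//   q0 -> bits 0-3   (mask 0xf, aliasing d0/d1 and s0-s3)
//   d8 -> bits 16-17 (mask 0x3 << 16)
// so (d0_mask & q0_mask) != 0 detects the overlap between d0 and q0.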
197 typedef uint64_t VfpRegList;
198 
199 // Single word VFP register.
200 class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
201  public:
202  static constexpr int kSizeInBytes = 4;
203 
204  static void split_code(int reg_code, int* vm, int* m) {
205  DCHECK(from_code(reg_code).is_valid());
206  *m = reg_code & 0x1;
207  *vm = reg_code >> 1;
208  }
209  void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
210  VfpRegList ToVfpRegList() const {
211  DCHECK(is_valid());
212  // Each bit in the list corresponds to a S register.
213  return uint64_t{0x1} << code();
214  }
215 
216  private:
217  friend class RegisterBase;
218  explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
219 };
220 
221 ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
222 static_assert(sizeof(SwVfpRegister) == sizeof(int),
223  "SwVfpRegister can efficiently be passed by value");
224 
225 typedef SwVfpRegister FloatRegister;
226 
227 enum DoubleRegisterCode {
228 #define REGISTER_CODE(R) kDoubleCode_##R,
229  DOUBLE_REGISTERS(REGISTER_CODE)
230 #undef REGISTER_CODE
231  kDoubleAfterLast
232 };
233 
234 // Double word VFP register.
235 class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
236  public:
237  static constexpr int kSizeInBytes = 8;
238 
239  inline static int NumRegisters();
240 
241  static void split_code(int reg_code, int* vm, int* m) {
242  DCHECK(from_code(reg_code).is_valid());
243  *m = (reg_code & 0x10) >> 4;
244  *vm = reg_code & 0x0F;
245  }
246  void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
247  VfpRegList ToVfpRegList() const {
248  DCHECK(is_valid());
249  // A D register overlaps two S registers.
250  return uint64_t{0x3} << (code() * 2);
251  }
252 
253  private:
254  friend class RegisterBase;
255  friend class LowDwVfpRegister;
256  explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
257 };
258 
259 ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
260 static_assert(sizeof(DwVfpRegister) == sizeof(int),
261  "DwVfpRegister can efficiently be passed by value");
262 
263 typedef DwVfpRegister DoubleRegister;
264 
265 
266 // Double word VFP register d0-15.
267 class LowDwVfpRegister
268  : public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
269  public:
270  constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }
271 
272  SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
273  SwVfpRegister high() const {
274  return SwVfpRegister::from_code(code() * 2 + 1);
275  }
276  VfpRegList ToVfpRegList() const {
277  DCHECK(is_valid());
278  // A D register overlaps two S registers.
279  return uint64_t{0x3} << (code() * 2);
280  }
281 
282  private:
283  friend class RegisterBase;
284  explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
285 };
286 
287 enum Simd128RegisterCode {
288 #define REGISTER_CODE(R) kSimd128Code_##R,
289  SIMD128_REGISTERS(REGISTER_CODE)
290 #undef REGISTER_CODE
291  kSimd128AfterLast
292 };
293 
294 // Quad word NEON register.
295 class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
296  public:
297  static void split_code(int reg_code, int* vm, int* m) {
298  DCHECK(from_code(reg_code).is_valid());
299  int encoded_code = reg_code << 1;
300  *m = (encoded_code & 0x10) >> 4;
301  *vm = encoded_code & 0x0F;
302  }
303  void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
304  DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
305  DwVfpRegister high() const {
306  return DwVfpRegister::from_code(code() * 2 + 1);
307  }
308  VfpRegList ToVfpRegList() const {
309  DCHECK(is_valid());
310  // A Q register overlaps four S registers.
311  return uint64_t{0xf} << (code() * 4);
312  }
313 
314  private:
315  friend class RegisterBase;
316  explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
317 };
318 
319 
320 typedef QwNeonRegister QuadRegister;
321 
322 typedef QwNeonRegister Simd128Register;
323 
324 enum CRegisterCode {
325 #define REGISTER_CODE(R) kCCode_##R,
326  C_REGISTERS(REGISTER_CODE)
327 #undef REGISTER_CODE
328  kCAfterLast
329 };
330 
331 // Coprocessor register
332 class CRegister : public RegisterBase<CRegister, kCAfterLast> {
333  friend class RegisterBase;
334  explicit constexpr CRegister(int code) : RegisterBase(code) {}
335 };
336 
337 // Support for the VFP registers s0 to s31 (d0 to d15).
338 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
339 #define DECLARE_FLOAT_REGISTER(R) \
340  constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
341 FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
342 #undef DECLARE_FLOAT_REGISTER
343 
344 #define DECLARE_LOW_DOUBLE_REGISTER(R) \
345  constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
346 LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
347 #undef DECLARE_LOW_DOUBLE_REGISTER
348 
349 #define DECLARE_DOUBLE_REGISTER(R) \
350  constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
351 NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
352 #undef DECLARE_DOUBLE_REGISTER
353 
354 constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();
355 
356 #define DECLARE_SIMD128_REGISTER(R) \
357  constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
358 SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
359 #undef DECLARE_SIMD128_REGISTER
360 
361 // Aliases for double registers.
362 constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
363 constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
364 constexpr LowDwVfpRegister kDoubleRegZero = d13;
365 
366 constexpr CRegister no_creg = CRegister::no_reg();
367 
368 #define DECLARE_C_REGISTER(R) \
369  constexpr CRegister R = CRegister::from_code<kCCode_##R>();
370 C_REGISTERS(DECLARE_C_REGISTER)
371 #undef DECLARE_C_REGISTER
372 
373 // Coprocessor number
374 enum Coprocessor {
375  p0 = 0,
376  p1 = 1,
377  p2 = 2,
378  p3 = 3,
379  p4 = 4,
380  p5 = 5,
381  p6 = 6,
382  p7 = 7,
383  p8 = 8,
384  p9 = 9,
385  p10 = 10,
386  p11 = 11,
387  p12 = 12,
388  p13 = 13,
389  p14 = 14,
390  p15 = 15
391 };
392 
393 // -----------------------------------------------------------------------------
394 // Machine instruction Operands
395 
396 // Class Operand represents a shifter operand in data processing instructions
397 class Operand {
398  public:
399  // immediate
400  V8_INLINE explicit Operand(int32_t immediate,
401  RelocInfo::Mode rmode = RelocInfo::NONE);
402  V8_INLINE static Operand Zero();
403  V8_INLINE explicit Operand(const ExternalReference& f);
404  explicit Operand(Handle<HeapObject> handle);
405  V8_INLINE explicit Operand(Smi value);
406 
407  // rm
408  V8_INLINE explicit Operand(Register rm);
409 
410  // rm <shift_op> shift_imm
411  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
412  V8_INLINE static Operand SmiUntag(Register rm) {
413  return Operand(rm, ASR, kSmiTagSize);
414  }
415  V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) {
416  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
417  return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
418  }
419  V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) {
420  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
421  return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
422  }
423 
424  // rm <shift_op> rs
425  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
426 
427  static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
428  static Operand EmbeddedCode(CodeStub* stub);
429  static Operand EmbeddedStringConstant(const StringConstantBase* str);
430 
431  // Return true if this is a register operand.
432  bool IsRegister() const {
433  return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
434  shift_imm_ == 0;
435  }
436  // Return true if this is a register operand shifted with an immediate.
437  bool IsImmediateShiftedRegister() const {
438  return rm_.is_valid() && !rs_.is_valid();
439  }
440  // Return true if this is a register operand shifted with a register.
441  bool IsRegisterShiftedRegister() const {
442  return rm_.is_valid() && rs_.is_valid();
443  }
444 
445  // Return the number of actual instructions required to implement the given
446  // instruction for this particular operand. This can be a single instruction,
447  // if no load into a scratch register is necessary, or anything between 2 and
448  // 4 instructions when we need to load from the constant pool (depending upon
449  // whether the constant pool entry is in the small or extended section). If
450  // the instruction this operand is used for is a MOV or MVN instruction the
451  // actual instruction to use is required for this calculation. For other
452  // instructions instr is ignored.
453  //
454  // The value returned is only valid as long as no entries are added to the
455  // constant pool between this call and the actual instruction being emitted.
456  int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
457  bool MustOutputRelocInfo(const Assembler* assembler) const;
458 
459  inline int32_t immediate() const {
460  DCHECK(IsImmediate());
461  DCHECK(!IsHeapObjectRequest());
462  return value_.immediate;
463  }
464  bool IsImmediate() const {
465  return !rm_.is_valid();
466  }
467 
468  HeapObjectRequest heap_object_request() const {
469  DCHECK(IsHeapObjectRequest());
470  return value_.heap_object_request;
471  }
472  bool IsHeapObjectRequest() const {
473  DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
474  DCHECK_IMPLIES(is_heap_object_request_,
475  rmode_ == RelocInfo::EMBEDDED_OBJECT ||
476  rmode_ == RelocInfo::CODE_TARGET);
477  return is_heap_object_request_;
478  }
479 
480  Register rm() const { return rm_; }
481  Register rs() const { return rs_; }
482  ShiftOp shift_op() const { return shift_op_; }
483 
484 
485  private:
486  Register rm_ = no_reg;
487  Register rs_ = no_reg;
488  ShiftOp shift_op_;
489  int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
490  union Value {
491  Value() {}
492  HeapObjectRequest heap_object_request; // if is_heap_object_request_
493  int32_t immediate; // otherwise
494  } value_; // valid if rm_ == no_reg
495  bool is_heap_object_request_ = false;
496  RelocInfo::Mode rmode_;
497 
498  friend class Assembler;
499 };
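
// Typical shifter-operand forms as they would appear at an Assembler call
// site (illustrative sketch only):
//
//   add(r0, r1, Operand(r2));           // register operand
//   add(r0, r1, Operand(42));           // immediate operand
//   add(r0, r1, Operand(r2, LSL, 2));   // r2 shifted left by an immediate
//   mov(r0, Operand(r2, LSR, r3));      // r2 shifted right by a register amount
//   mov(r0, Operand::SmiUntag(r1));     // same as Operand(r1, ASR, kSmiTagSize)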
500 
501 
502 // Class MemOperand represents a memory operand in load and store instructions
503 class MemOperand {
504  public:
505  // [rn +/- offset] Offset/NegOffset
506  // [rn +/- offset]! PreIndex/NegPreIndex
507  // [rn], +/- offset PostIndex/NegPostIndex
508  // offset is any signed 32-bit value; offset is first loaded to a scratch
509  // register if it does not fit the addressing mode (12-bit unsigned and sign
510  // bit)
511  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
512 
513  // [rn +/- rm] Offset/NegOffset
514  // [rn +/- rm]! PreIndex/NegPreIndex
515  // [rn], +/- rm PostIndex/NegPostIndex
516  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
517 
518  // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
519  // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
520  // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
521  explicit MemOperand(Register rn, Register rm,
522  ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
523  V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
524  Register key,
525  AddrMode am = Offset) {
526  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
527  return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
528  }
529 
530  void set_offset(int32_t offset) {
531  DCHECK(rm_ == no_reg);
532  offset_ = offset;
533  }
534 
535  uint32_t offset() const {
536  DCHECK(rm_ == no_reg);
537  return offset_;
538  }
539 
540  Register rn() const { return rn_; }
541  Register rm() const { return rm_; }
542  AddrMode am() const { return am_; }
543 
544  bool OffsetIsUint12Encodable() const {
545  return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
546  }
547 
548  private:
549  Register rn_; // base
550  Register rm_; // register offset
551  int32_t offset_; // valid if rm_ == no_reg
552  ShiftOp shift_op_;
553  int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
554  AddrMode am_; // bits P, U, and W
555 
556  friend class Assembler;
557 };
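
// Illustrative addressing-mode sketches (one call site per line):
//
//   ldr(r0, MemOperand(fp, 8));               // [fp + 8]          Offset
//   str(r0, MemOperand(sp, 4, NegPreIndex));  // [sp - 4]!         pre-decrement (push-style)
//   ldr(r0, MemOperand(sp, 4, PostIndex));    // [sp], + 4         post-increment (pop-style)
//   ldr(r0, MemOperand(r1, r2, LSL, 2));      // [r1 + (r2 << 2)]  scaled register offset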
558 
559 
560 // Class NeonMemOperand represents a memory operand in load and
561 // store NEON instructions
562 class NeonMemOperand {
563  public:
564  // [rn {:align}] Offset
565  // [rn {:align}]! PostIndex
566  explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
567 
568  // [rn {:align}], rm PostIndex
569  explicit NeonMemOperand(Register rn, Register rm, int align = 0);
570 
571  Register rn() const { return rn_; }
572  Register rm() const { return rm_; }
573  int align() const { return align_; }
574 
575  private:
576  void SetAlignment(int align);
577 
578  Register rn_; // base
579  Register rm_; // register increment
580  int align_;
581 };
582 
583 
584 // Class NeonListOperand represents a list of NEON registers
585 class NeonListOperand {
586  public:
587  explicit NeonListOperand(DoubleRegister base, int register_count = 1)
588  : base_(base), register_count_(register_count) {}
589  explicit NeonListOperand(QwNeonRegister q_reg)
590  : base_(q_reg.low()), register_count_(2) {}
591  DoubleRegister base() const { return base_; }
592  int register_count() { return register_count_; }
593  int length() const { return register_count_ - 1; }
594  NeonListType type() const {
595  switch (register_count_) {
596  default: UNREACHABLE();
597  // Fall through.
598  case 1: return nlt_1;
599  case 2: return nlt_2;
600  case 3: return nlt_3;
601  case 4: return nlt_4;
602  }
603  }
604  private:
605  DoubleRegister base_;
606  int register_count_;
607 };
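
// Illustrative sketch: a NeonListOperand names the register list of a NEON
// structure load or store, e.g.
//
//   vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r0));  // load d0-d3 from [r0]
//   vld1(Neon8, NeonListOperand(q1), NeonMemOperand(r0));     // q1, i.e. d2:d3
//
// (Neon8 is the element-size constant from constants-arm.h.)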
608 
609 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
610  public:
611  // Create an assembler. Instructions and relocation information are emitted
612  // into a buffer, with the instructions starting from the beginning and the
613  // relocation information starting from the end of the buffer. See CodeDesc
614  // for a detailed comment on the layout (globals.h).
615  //
616  // If the provided buffer is nullptr, the assembler allocates and grows its
617  // own buffer, and buffer_size determines the initial buffer size. The buffer
618  // is owned by the assembler and deallocated upon destruction of the
619  // assembler.
620  //
621  // If the provided buffer is not nullptr, the assembler uses the provided
622  // buffer for code generation and assumes its size to be buffer_size. If the
623  // buffer is too small, a fatal error occurs. No deallocation of the buffer is
624  // done upon destruction of the assembler.
625  Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
626  virtual ~Assembler();
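
 // Illustrative setup sketch (assumes an AssemblerOptions value `options` and
 // an Isolate* `isolate` obtained elsewhere):
 //
 //   Assembler assm(options, nullptr, 256);  // assembler owns a growable buffer
 //   ... emit instructions ...
 //   CodeDesc desc;
 //   assm.GetCode(isolate, &desc);           // emit pending data and fill desc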
627 
628  virtual void AbortedCodeGeneration() {
629  pending_32_bit_constants_.clear();
630  }
631 
632  // GetCode emits any pending (non-emitted) code and fills the descriptor
633  // desc. GetCode() is idempotent; it returns the same result if no other
634  // Assembler functions are invoked in between GetCode() calls.
635  void GetCode(Isolate* isolate, CodeDesc* desc);
636 
637  // Label operations & relative jumps (PPUM Appendix D)
638  //
639  // Takes a branch opcode (cc) and a label (L) and generates
640  // either a backward branch or a forward branch and links it
641  // to the label fixup chain. Usage:
642  //
643  // Label L; // unbound label
644  // j(cc, &L); // forward branch to unbound label
645  // bind(&L); // bind label to the current pc
646  // j(cc, &L); // backward branch to bound label
647  // bind(&L); // illegal: a label may be bound only once
648  //
649  // Note: The same Label can be used for forward and backward branches
650  // but it may be bound only once.
651 
652  void bind(Label* L); // binds an unbound label L to the current code position
653 
654  // Returns the branch offset to the given label from the current code
655  // position, and links the label to the current position if it is still
656  // unbound.
657  int branch_offset(Label* L);
658 
659  // Returns true if the given pc address is the start of a constant pool load
660  // instruction sequence.
661  V8_INLINE static bool is_constant_pool_load(Address pc);
662 
663  // Return the address in the constant pool of the code target address used by
664  // the branch/call instruction at pc, or the object in a mov.
665  V8_INLINE static Address constant_pool_entry_address(Address pc,
666  Address constant_pool);
667 
668  // Read/Modify the code target address in the branch/call instruction at pc.
669  // The isolate argument is unused (and may be nullptr) when skipping flushing.
670  V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
671  V8_INLINE static void set_target_address_at(
672  Address pc, Address constant_pool, Address target,
673  ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
674 
675  // Return the code target address at a call site from the return address
676  // of that call in the instruction stream.
677  V8_INLINE static Address target_address_from_return_address(Address pc);
678 
679  // Given the address of the beginning of a call, return the address
680  // in the instruction stream that the call will return from.
681  V8_INLINE static Address return_address_from_call_start(Address pc);
682 
683  // This sets the branch destination (which is in the constant pool on ARM).
684  // This is for calls and branches within generated code.
685  inline static void deserialization_set_special_target_at(
686  Address constant_pool_entry, Code code, Address target);
687 
688  // Get the size of the special target encoded at 'location'.
689  inline static int deserialization_special_target_size(Address location);
690 
691  // This sets the internal reference at the pc.
692  inline static void deserialization_set_target_internal_reference_at(
693  Address pc, Address target,
694  RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
695 
696  // Here we are patching the address in the constant pool, not the actual call
697  // instruction. The address in the constant pool is the same size as a
698  // pointer.
699  static constexpr int kSpecialTargetSize = kPointerSize;
700 
701  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
702  VfpRegList* GetScratchVfpRegisterList() {
703  return &scratch_vfp_register_list_;
704  }
705 
706  // ---------------------------------------------------------------------------
707  // Code generation
708 
709  // Insert the smallest number of nop instructions
710  // possible to align the pc offset to a multiple
711  // of m. m must be a power of 2 (>= 4).
712  void Align(int m);
713  // Insert the smallest number of zero bytes possible to align the pc offset
714  // to a multiple of m. m must be a power of 2 (>= 2).
715  void DataAlign(int m);
716  // Aligns code to something that's optimal for a jump target for the platform.
717  void CodeTargetAlign();
718 
719  // Branch instructions
720  void b(int branch_offset, Condition cond = al,
721  RelocInfo::Mode rmode = RelocInfo::NONE);
722  void bl(int branch_offset, Condition cond = al,
723  RelocInfo::Mode rmode = RelocInfo::NONE);
724  void blx(int branch_offset); // v5 and above
725  void blx(Register target, Condition cond = al); // v5 and above
726  void bx(Register target, Condition cond = al); // v5 and above, plus v4t
727 
728  // Convenience branch instructions using labels
729  void b(Label* L, Condition cond = al);
730  void b(Condition cond, Label* L) { b(L, cond); }
731  void bl(Label* L, Condition cond = al);
732  void bl(Condition cond, Label* L) { bl(L, cond); }
733  void blx(Label* L); // v5 and above
734 
735  // Data-processing instructions
736 
737  void and_(Register dst, Register src1, const Operand& src2,
738  SBit s = LeaveCC, Condition cond = al);
739  void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
740  Condition cond = al);
741 
742  void eor(Register dst, Register src1, const Operand& src2,
743  SBit s = LeaveCC, Condition cond = al);
744  void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC,
745  Condition cond = al);
746 
747  void sub(Register dst, Register src1, const Operand& src2,
748  SBit s = LeaveCC, Condition cond = al);
749  void sub(Register dst, Register src1, Register src2,
750  SBit s = LeaveCC, Condition cond = al);
751 
752  void rsb(Register dst, Register src1, const Operand& src2,
753  SBit s = LeaveCC, Condition cond = al);
754 
755  void add(Register dst, Register src1, const Operand& src2,
756  SBit s = LeaveCC, Condition cond = al);
757  void add(Register dst, Register src1, Register src2,
758  SBit s = LeaveCC, Condition cond = al);
759 
760  void adc(Register dst, Register src1, const Operand& src2,
761  SBit s = LeaveCC, Condition cond = al);
762 
763  void sbc(Register dst, Register src1, const Operand& src2,
764  SBit s = LeaveCC, Condition cond = al);
765 
766  void rsc(Register dst, Register src1, const Operand& src2,
767  SBit s = LeaveCC, Condition cond = al);
768 
769  void tst(Register src1, const Operand& src2, Condition cond = al);
770  void tst(Register src1, Register src2, Condition cond = al);
771 
772  void teq(Register src1, const Operand& src2, Condition cond = al);
773 
774  void cmp(Register src1, const Operand& src2, Condition cond = al);
775  void cmp(Register src1, Register src2, Condition cond = al);
776 
777  void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
778 
779  void cmn(Register src1, const Operand& src2, Condition cond = al);
780 
781  void orr(Register dst, Register src1, const Operand& src2,
782  SBit s = LeaveCC, Condition cond = al);
783  void orr(Register dst, Register src1, Register src2,
784  SBit s = LeaveCC, Condition cond = al);
785 
786  void mov(Register dst, const Operand& src,
787  SBit s = LeaveCC, Condition cond = al);
788  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
789 
790  // Load the position of the label relative to the generated code object
791  // pointer in a register.
792  void mov_label_offset(Register dst, Label* label);
793 
794  // ARMv7 instructions for loading a 32 bit immediate in two instructions.
795  // The constant for movw and movt should be in the range 0-0xffff.
796  void movw(Register reg, uint32_t immediate, Condition cond = al);
797  void movt(Register reg, uint32_t immediate, Condition cond = al);
798 
799  void bic(Register dst, Register src1, const Operand& src2,
800  SBit s = LeaveCC, Condition cond = al);
801 
802  void mvn(Register dst, const Operand& src,
803  SBit s = LeaveCC, Condition cond = al);
804 
805  // Shift instructions
806 
807  void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
808  Condition cond = al);
809 
810  void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
811  Condition cond = al);
812 
813  void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
814  Condition cond = al);
815 
816  // Multiply instructions
817 
818  void mla(Register dst, Register src1, Register src2, Register srcA,
819  SBit s = LeaveCC, Condition cond = al);
820 
821  void mls(Register dst, Register src1, Register src2, Register srcA,
822  Condition cond = al);
823 
824  void sdiv(Register dst, Register src1, Register src2,
825  Condition cond = al);
826 
827  void udiv(Register dst, Register src1, Register src2, Condition cond = al);
828 
829  void mul(Register dst, Register src1, Register src2,
830  SBit s = LeaveCC, Condition cond = al);
831 
832  void smmla(Register dst, Register src1, Register src2, Register srcA,
833  Condition cond = al);
834 
835  void smmul(Register dst, Register src1, Register src2, Condition cond = al);
836 
837  void smlal(Register dstL, Register dstH, Register src1, Register src2,
838  SBit s = LeaveCC, Condition cond = al);
839 
840  void smull(Register dstL, Register dstH, Register src1, Register src2,
841  SBit s = LeaveCC, Condition cond = al);
842 
843  void umlal(Register dstL, Register dstH, Register src1, Register src2,
844  SBit s = LeaveCC, Condition cond = al);
845 
846  void umull(Register dstL, Register dstH, Register src1, Register src2,
847  SBit s = LeaveCC, Condition cond = al);
848 
849  // Miscellaneous arithmetic instructions
850 
851  void clz(Register dst, Register src, Condition cond = al); // v5 and above
852 
853  // Saturating instructions. v6 and above.
854 
855  // Unsigned saturate.
856  //
857  // Saturate an optionally shifted signed value to an unsigned range.
858  //
859  // usat dst, #satpos, src
860  // usat dst, #satpos, src, lsl #sh
861  // usat dst, #satpos, src, asr #sh
862  //
863  // Register dst will contain:
864  //
865  // 0, if s < 0
866  // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
867  // s, otherwise
868  //
869  // where s is the contents of src after shifting (if used.)
870  void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
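 //
 // Worked example: usat(r0, 8, Operand(r1)) clamps r1 into [0, 255]:
 //   r1 = 300 -> r0 = 255   (saturated to (1 << 8) - 1)
 //   r1 = -7  -> r0 = 0
 //   r1 = 42  -> r0 = 42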
871 
872  // Bitfield manipulation instructions. v7 and above.
873 
874  void ubfx(Register dst, Register src, int lsb, int width,
875  Condition cond = al);
876 
877  void sbfx(Register dst, Register src, int lsb, int width,
878  Condition cond = al);
879 
880  void bfc(Register dst, int lsb, int width, Condition cond = al);
881 
882  void bfi(Register dst, Register src, int lsb, int width,
883  Condition cond = al);
884 
885  void pkhbt(Register dst, Register src1, const Operand& src2,
886  Condition cond = al);
887 
888  void pkhtb(Register dst, Register src1, const Operand& src2,
889  Condition cond = al);
890 
891  void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
892  void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
893  Condition cond = al);
894  void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
895  void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
896  Condition cond = al);
897 
898  void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
899  void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
900  Condition cond = al);
901  void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
902  void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
903  void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
904  Condition cond = al);
905 
906  // Reverse the bits in a register.
907  void rbit(Register dst, Register src, Condition cond = al);
908  void rev(Register dst, Register src, Condition cond = al);
909 
910  // Status register access instructions
911 
912  void mrs(Register dst, SRegister s, Condition cond = al);
913  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
914 
915  // Load/Store instructions
916  void ldr(Register dst, const MemOperand& src, Condition cond = al);
917  void str(Register src, const MemOperand& dst, Condition cond = al);
918  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
919  void strb(Register src, const MemOperand& dst, Condition cond = al);
920  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
921  void strh(Register src, const MemOperand& dst, Condition cond = al);
922  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
923  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
924  void ldrd(Register dst1,
925  Register dst2,
926  const MemOperand& src, Condition cond = al);
927  void strd(Register src1,
928  Register src2,
929  const MemOperand& dst, Condition cond = al);
930 
931  // Load literal from a pc relative address.
932  void ldr_pcrel(Register dst, int imm12, Condition cond = al);
933 
934  // Load/Store exclusive instructions
935  void ldrex(Register dst, Register src, Condition cond = al);
936  void strex(Register src1, Register src2, Register dst, Condition cond = al);
937  void ldrexb(Register dst, Register src, Condition cond = al);
938  void strexb(Register src1, Register src2, Register dst, Condition cond = al);
939  void ldrexh(Register dst, Register src, Condition cond = al);
940  void strexh(Register src1, Register src2, Register dst, Condition cond = al);
941  void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
942  void strexd(Register res, Register src1, Register src2, Register dst,
943  Condition cond = al);
944 
945  // Preload instructions
946  void pld(const MemOperand& address);
947 
948  // Load/Store multiple instructions
949  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
950  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
951 
952  // Exception-generating instructions and debugging support
953  void stop(const char* msg,
954  Condition cond = al,
955  int32_t code = kDefaultStopCode);
956 
957  void bkpt(uint32_t imm16); // v5 and above
958  void svc(uint32_t imm24, Condition cond = al);
959 
960  // Synchronization instructions.
961  // On ARMv6, an equivalent CP15 operation will be used.
962  void dmb(BarrierOption option);
963  void dsb(BarrierOption option);
964  void isb(BarrierOption option);
965 
966  // Conditional speculation barrier.
967  void csdb();
968 
969  // Coprocessor instructions
970 
971  void cdp(Coprocessor coproc, int opcode_1,
972  CRegister crd, CRegister crn, CRegister crm,
973  int opcode_2, Condition cond = al);
974 
975  void cdp2(Coprocessor coproc, int opcode_1,
976  CRegister crd, CRegister crn, CRegister crm,
977  int opcode_2); // v5 and above
978 
979  void mcr(Coprocessor coproc, int opcode_1,
980  Register rd, CRegister crn, CRegister crm,
981  int opcode_2 = 0, Condition cond = al);
982 
983  void mcr2(Coprocessor coproc, int opcode_1,
984  Register rd, CRegister crn, CRegister crm,
985  int opcode_2 = 0); // v5 and above
986 
987  void mrc(Coprocessor coproc, int opcode_1,
988  Register rd, CRegister crn, CRegister crm,
989  int opcode_2 = 0, Condition cond = al);
990 
991  void mrc2(Coprocessor coproc, int opcode_1,
992  Register rd, CRegister crn, CRegister crm,
993  int opcode_2 = 0); // v5 and above
994 
995  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
996  LFlag l = Short, Condition cond = al);
997  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
998  LFlag l = Short, Condition cond = al);
999 
1000  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
1001  LFlag l = Short); // v5 and above
1002  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
1003  LFlag l = Short); // v5 and above
1004 
1005  // Support for VFP.
1006  // All these APIs support S0 to S31 and D0 to D31.
1007 
1008  void vldr(const DwVfpRegister dst,
1009  const Register base,
1010  int offset,
1011  const Condition cond = al);
1012  void vldr(const DwVfpRegister dst,
1013  const MemOperand& src,
1014  const Condition cond = al);
1015 
1016  void vldr(const SwVfpRegister dst,
1017  const Register base,
1018  int offset,
1019  const Condition cond = al);
1020  void vldr(const SwVfpRegister dst,
1021  const MemOperand& src,
1022  const Condition cond = al);
1023 
1024  void vstr(const DwVfpRegister src,
1025  const Register base,
1026  int offset,
1027  const Condition cond = al);
1028  void vstr(const DwVfpRegister src,
1029  const MemOperand& dst,
1030  const Condition cond = al);
1031 
1032  void vstr(const SwVfpRegister src,
1033  const Register base,
1034  int offset,
1035  const Condition cond = al);
1036  void vstr(const SwVfpRegister src,
1037  const MemOperand& dst,
1038  const Condition cond = al);
1039 
1040  void vldm(BlockAddrMode am,
1041  Register base,
1042  DwVfpRegister first,
1043  DwVfpRegister last,
1044  Condition cond = al);
1045 
1046  void vstm(BlockAddrMode am,
1047  Register base,
1048  DwVfpRegister first,
1049  DwVfpRegister last,
1050  Condition cond = al);
1051 
1052  void vldm(BlockAddrMode am,
1053  Register base,
1054  SwVfpRegister first,
1055  SwVfpRegister last,
1056  Condition cond = al);
1057 
1058  void vstm(BlockAddrMode am,
1059  Register base,
1060  SwVfpRegister first,
1061  SwVfpRegister last,
1062  Condition cond = al);
1063 
1064  void vmov(const SwVfpRegister dst, Float32 imm);
1065  void vmov(const DwVfpRegister dst,
1066  Double imm,
1067  const Register extra_scratch = no_reg);
1068  void vmov(const SwVfpRegister dst,
1069  const SwVfpRegister src,
1070  const Condition cond = al);
1071  void vmov(const DwVfpRegister dst,
1072  const DwVfpRegister src,
1073  const Condition cond = al);
1074  void vmov(const DwVfpRegister dst,
1075  const Register src1,
1076  const Register src2,
1077  const Condition cond = al);
1078  void vmov(const Register dst1,
1079  const Register dst2,
1080  const DwVfpRegister src,
1081  const Condition cond = al);
1082  void vmov(const SwVfpRegister dst,
1083  const Register src,
1084  const Condition cond = al);
1085  void vmov(const Register dst,
1086  const SwVfpRegister src,
1087  const Condition cond = al);
1088  void vcvt_f64_s32(const DwVfpRegister dst,
1089  const SwVfpRegister src,
1090  VFPConversionMode mode = kDefaultRoundToZero,
1091  const Condition cond = al);
1092  void vcvt_f32_s32(const SwVfpRegister dst,
1093  const SwVfpRegister src,
1094  VFPConversionMode mode = kDefaultRoundToZero,
1095  const Condition cond = al);
1096  void vcvt_f64_u32(const DwVfpRegister dst,
1097  const SwVfpRegister src,
1098  VFPConversionMode mode = kDefaultRoundToZero,
1099  const Condition cond = al);
1100  void vcvt_f32_u32(const SwVfpRegister dst,
1101  const SwVfpRegister src,
1102  VFPConversionMode mode = kDefaultRoundToZero,
1103  const Condition cond = al);
1104  void vcvt_s32_f32(const SwVfpRegister dst,
1105  const SwVfpRegister src,
1106  VFPConversionMode mode = kDefaultRoundToZero,
1107  const Condition cond = al);
1108  void vcvt_u32_f32(const SwVfpRegister dst,
1109  const SwVfpRegister src,
1110  VFPConversionMode mode = kDefaultRoundToZero,
1111  const Condition cond = al);
1112  void vcvt_s32_f64(const SwVfpRegister dst,
1113  const DwVfpRegister src,
1114  VFPConversionMode mode = kDefaultRoundToZero,
1115  const Condition cond = al);
1116  void vcvt_u32_f64(const SwVfpRegister dst,
1117  const DwVfpRegister src,
1118  VFPConversionMode mode = kDefaultRoundToZero,
1119  const Condition cond = al);
1120  void vcvt_f64_f32(const DwVfpRegister dst,
1121  const SwVfpRegister src,
1122  VFPConversionMode mode = kDefaultRoundToZero,
1123  const Condition cond = al);
1124  void vcvt_f32_f64(const SwVfpRegister dst,
1125  const DwVfpRegister src,
1126  VFPConversionMode mode = kDefaultRoundToZero,
1127  const Condition cond = al);
1128  void vcvt_f64_s32(const DwVfpRegister dst,
1129  int fraction_bits,
1130  const Condition cond = al);
1131 
1132  void vmrs(const Register dst, const Condition cond = al);
1133  void vmsr(const Register dst, const Condition cond = al);
1134 
1135  void vneg(const DwVfpRegister dst,
1136  const DwVfpRegister src,
1137  const Condition cond = al);
1138  void vneg(const SwVfpRegister dst, const SwVfpRegister src,
1139  const Condition cond = al);
1140  void vabs(const DwVfpRegister dst,
1141  const DwVfpRegister src,
1142  const Condition cond = al);
1143  void vabs(const SwVfpRegister dst, const SwVfpRegister src,
1144  const Condition cond = al);
1145  void vadd(const DwVfpRegister dst,
1146  const DwVfpRegister src1,
1147  const DwVfpRegister src2,
1148  const Condition cond = al);
1149  void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
1150  const SwVfpRegister src2, const Condition cond = al);
1151  void vsub(const DwVfpRegister dst,
1152  const DwVfpRegister src1,
1153  const DwVfpRegister src2,
1154  const Condition cond = al);
1155  void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
1156  const SwVfpRegister src2, const Condition cond = al);
1157  void vmul(const DwVfpRegister dst,
1158  const DwVfpRegister src1,
1159  const DwVfpRegister src2,
1160  const Condition cond = al);
1161  void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
1162  const SwVfpRegister src2, const Condition cond = al);
1163  void vmla(const DwVfpRegister dst,
1164  const DwVfpRegister src1,
1165  const DwVfpRegister src2,
1166  const Condition cond = al);
1167  void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
1168  const SwVfpRegister src2, const Condition cond = al);
1169  void vmls(const DwVfpRegister dst,
1170  const DwVfpRegister src1,
1171  const DwVfpRegister src2,
1172  const Condition cond = al);
1173  void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
1174  const SwVfpRegister src2, const Condition cond = al);
1175  void vdiv(const DwVfpRegister dst,
1176  const DwVfpRegister src1,
1177  const DwVfpRegister src2,
1178  const Condition cond = al);
1179  void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
1180  const SwVfpRegister src2, const Condition cond = al);
1181  void vcmp(const DwVfpRegister src1,
1182  const DwVfpRegister src2,
1183  const Condition cond = al);
1184  void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
1185  const Condition cond = al);
1186  void vcmp(const DwVfpRegister src1,
1187  const double src2,
1188  const Condition cond = al);
1189  void vcmp(const SwVfpRegister src1, const float src2,
1190  const Condition cond = al);
1191 
1192  void vmaxnm(const DwVfpRegister dst,
1193  const DwVfpRegister src1,
1194  const DwVfpRegister src2);
1195  void vmaxnm(const SwVfpRegister dst,
1196  const SwVfpRegister src1,
1197  const SwVfpRegister src2);
1198  void vminnm(const DwVfpRegister dst,
1199  const DwVfpRegister src1,
1200  const DwVfpRegister src2);
1201  void vminnm(const SwVfpRegister dst,
1202  const SwVfpRegister src1,
1203  const SwVfpRegister src2);
1204 
1205  // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
1206  void vsel(const Condition cond,
1207  const DwVfpRegister dst,
1208  const DwVfpRegister src1,
1209  const DwVfpRegister src2);
1210  void vsel(const Condition cond,
1211  const SwVfpRegister dst,
1212  const SwVfpRegister src1,
1213  const SwVfpRegister src2);
1214 
1215  void vsqrt(const DwVfpRegister dst,
1216  const DwVfpRegister src,
1217  const Condition cond = al);
1218  void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
1219  const Condition cond = al);
1220 
1221  // ARMv8 rounding instructions.
1222  void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
1223  void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
1224  void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
1225  void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
1226  void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
1227  void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
1228  void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
1229  void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
1230  void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
1231  const Condition cond = al);
1232  void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
1233  const Condition cond = al);
1234 
1235  // Support for NEON.
1236 
1237  // All these APIs support D0 to D31 and Q0 to Q15.
1238  void vld1(NeonSize size,
1239  const NeonListOperand& dst,
1240  const NeonMemOperand& src);
1241  void vst1(NeonSize size,
1242  const NeonListOperand& src,
1243  const NeonMemOperand& dst);
1244  // dt represents the narrower type
1245  void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
1246  // dt represents the narrower type.
1247  void vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src);
1248 
1249  // Only unconditional core <-> scalar moves are currently supported.
1250  void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
1251  void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
1252 
1253  void vmov(QwNeonRegister dst, QwNeonRegister src);
1254  void vdup(NeonSize size, QwNeonRegister dst, Register src);
1255  void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
1256  void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);
1257 
1258  void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
1259  void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
1260  void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
1261  void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);
1262 
1263  void vmvn(QwNeonRegister dst, QwNeonRegister src);
1264  void vswp(DwVfpRegister dst, DwVfpRegister src);
1265  void vswp(QwNeonRegister dst, QwNeonRegister src);
1266  void vabs(QwNeonRegister dst, QwNeonRegister src);
1267  void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
1268  void vneg(QwNeonRegister dst, QwNeonRegister src);
1269  void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
1270 
1271  void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1272  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
1273  void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1274  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1275  void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1276  void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1277  void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
1278  QwNeonRegister src2);
1279  void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
1280  QwNeonRegister src2);
1281  void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1282  void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
1283  QwNeonRegister src2);
1284  void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
1285  QwNeonRegister src2);
1286  void vmul(QwNeonRegister dst, QwNeonRegister src1,
1287  QwNeonRegister src2);
1288  void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
1289  QwNeonRegister src2);
1290  void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1291  void vmin(NeonDataType dt, QwNeonRegister dst,
1292  QwNeonRegister src1, QwNeonRegister src2);
1293  void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1294  void vmax(NeonDataType dt, QwNeonRegister dst,
1295  QwNeonRegister src1, QwNeonRegister src2);
1296  void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
1297  void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
1298  DwVfpRegister src2);
1299  void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
1300  DwVfpRegister src2);
1301  void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
1302  DwVfpRegister src2);
1303  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
1304  void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
1305  void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
1306  void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
1307  // vrecpe and vrsqrte only support floating point lanes.
1308  void vrecpe(QwNeonRegister dst, QwNeonRegister src);
1309  void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
1310  void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1311  void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1312  void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
1313  QwNeonRegister src2);
1314  void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1315  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
1316  QwNeonRegister src2);
1317  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1318  void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
1319  QwNeonRegister src2);
1320  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
1321  void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
1322  QwNeonRegister src2);
1323  void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
1324  int bytes);
1325  void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
1326  void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
1327  void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
1328  void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
1329  void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
1330  void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
1331  void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
1332  void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
1333  void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
1334  void vtbl(DwVfpRegister dst, const NeonListOperand& list,
1335  DwVfpRegister index);
1336  void vtbx(DwVfpRegister dst, const NeonListOperand& list,
1337  DwVfpRegister index);
1338 
1339  // Pseudo instructions
1340 
1341  // Different nop operations are used by the code generator to detect certain
1342  // states of the generated code.
1343  enum NopMarkerTypes {
1344  NON_MARKING_NOP = 0,
1345  DEBUG_BREAK_NOP,
1346  // IC markers.
1347  PROPERTY_ACCESS_INLINED,
1348  PROPERTY_ACCESS_INLINED_CONTEXT,
1349  PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
1350  // Helper values.
1351  LAST_CODE_MARKER,
1352  FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
1353  };
1354 
1355  void nop(int type = 0); // 0 is the default non-marking type.
1356 
1357  void push(Register src, Condition cond = al) {
1358  str(src, MemOperand(sp, 4, NegPreIndex), cond);
1359  }
1360 
1361  void pop(Register dst, Condition cond = al) {
1362  ldr(dst, MemOperand(sp, 4, PostIndex), cond);
1363  }
1364 
1365  void pop();
1366 
1367  void vpush(QwNeonRegister src, Condition cond = al) {
1368  vstm(db_w, sp, src.low(), src.high(), cond);
1369  }
1370 
1371  void vpush(DwVfpRegister src, Condition cond = al) {
1372  vstm(db_w, sp, src, src, cond);
1373  }
1374 
1375  void vpush(SwVfpRegister src, Condition cond = al) {
1376  vstm(db_w, sp, src, src, cond);
1377  }
1378 
1379  void vpop(DwVfpRegister dst, Condition cond = al) {
1380  vldm(ia_w, sp, dst, dst, cond);
1381  }
1382 
1383  // Jump unconditionally to given label.
1384  void jmp(Label* L) { b(L, al); }
1385 
1386  // Check the code size generated from label to here.
1387  int SizeOfCodeGeneratedSince(Label* label) {
1388  return pc_offset() - label->pos();
1389  }
1390 
1391  // Check the number of instructions generated from label to here.
1392  int InstructionsGeneratedSince(Label* label) {
1393  return SizeOfCodeGeneratedSince(label) / kInstrSize;
1394  }
1395 
1396  // Check whether an immediate fits an addressing mode 1 instruction.
1397  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
1398 
1399  // Check whether an immediate fits an addressing mode 2 instruction.
1400  bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
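 //
 // Addressing mode 1 immediates are an 8-bit value rotated right by an even
 // amount, so for example 0xFF, 0xFF00 and 0x3F000000 fit in a single
 // instruction, while 0x12345678 does not and has to be materialized via
 // movw/movt or a constant pool load instead.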
1401 
1402  // Class for scoping postponing the constant pool generation.
1403  class BlockConstPoolScope {
1404  public:
1405  explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1406  assem_->StartBlockConstPool();
1407  }
1408  ~BlockConstPoolScope() {
1409  assem_->EndBlockConstPool();
1410  }
1411 
1412  private:
1413  Assembler* assem_;
1414 
1415  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
1416  };
1417 
1418  // Record a comment relocation entry that can be used by a disassembler.
1419  // Use --code-comments to enable.
1420  void RecordComment(const char* msg);
1421 
1422  // Record a deoptimization reason that can be used by a log or cpu profiler.
1423  // Use --trace-deopt to enable.
1424  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1425  int id);
1426 
1427  // Record the emission of a constant pool.
1428  //
1429  // The emission of the constant pool depends on the size of the code
1430  // generated and the number of RelocInfo entries recorded.
1431  // The Debug mechanism needs to map code offsets between two versions of a
1432  // function, compiled with and without debugger support (see for example
1433  // Debug::PrepareForBreakPoints()).
1434  // Compiling functions with debugger support generates additional code
1435  // (DebugCodegen::GenerateSlot()). This may affect the emission of the
1436  // constant pools and cause the version of the code with debugger support to
1437  // have constant pools generated in different places.
1438  // Recording the position and size of emitted constant pools makes it
1439  // possible to correctly compute the offset mappings between the different
1440  // versions of a function in all situations.
1441  //
1442  // The parameter indicates the size of the constant pool (in bytes), including
1443  // the marker and branch over the data.
1444  void RecordConstPool(int size);
1445 
1446  // Writes a single byte, word, or double word of data in the code stream. Used
1447  // for inline tables, e.g., jump-tables. CheckConstantPool() should be
1448  // called before any use of db/dd/dq/dp to ensure that constant pools
1449  // are not emitted as part of the tables generated.
1450  void db(uint8_t data);
1451  void dd(uint32_t data);
1452  void dq(uint64_t data);
1453  void dp(uintptr_t data) { dd(data); }
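  // Illustrative sketch, not part of the original header: emitting an inline
  // table of 32-bit values, flushing pending constants first so no constant
  // pool is interleaved with the table. `assm` and the table contents are
  // assumptions for illustration.
  //
  //   assm.CheckConstPool(true, false);  // Force emission of pending constants.
  //   for (uint32_t entry : {0u, 4u, 8u}) assm.dd(entry);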
1454 
1455  // Read/patch instructions
1456  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
1457  void instr_at_put(int pos, Instr instr) {
1458  *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
1459  }
1460  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
1461  static void instr_at_put(Address pc, Instr instr) {
1462  *reinterpret_cast<Instr*>(pc) = instr;
1463  }
1464  static Condition GetCondition(Instr instr);
1465  static bool IsLdrRegisterImmediate(Instr instr);
1466  static bool IsVldrDRegisterImmediate(Instr instr);
1467  static int GetLdrRegisterImmediateOffset(Instr instr);
1468  static int GetVldrDRegisterImmediateOffset(Instr instr);
1469  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
1470  static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
1471  static bool IsStrRegisterImmediate(Instr instr);
1472  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
1473  static bool IsAddRegisterImmediate(Instr instr);
1474  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
1475  static Register GetRd(Instr instr);
1476  static Register GetRn(Instr instr);
1477  static Register GetRm(Instr instr);
1478  static bool IsPush(Instr instr);
1479  static bool IsPop(Instr instr);
1480  static bool IsStrRegFpOffset(Instr instr);
1481  static bool IsLdrRegFpOffset(Instr instr);
1482  static bool IsStrRegFpNegOffset(Instr instr);
1483  static bool IsLdrRegFpNegOffset(Instr instr);
1484  static bool IsLdrPcImmediateOffset(Instr instr);
1485  static bool IsVldrDPcImmediateOffset(Instr instr);
1486  static bool IsBlxReg(Instr instr);
1487  static bool IsBlxIp(Instr instr);
1488  static bool IsTstImmediate(Instr instr);
1489  static bool IsCmpRegister(Instr instr);
1490  static bool IsCmpImmediate(Instr instr);
1491  static Register GetCmpImmediateRegister(Instr instr);
1492  static int GetCmpImmediateRawImmediate(Instr instr);
1493  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
1494  static bool IsMovImmed(Instr instr);
1495  static bool IsOrrImmed(Instr instr);
1496  static bool IsMovT(Instr instr);
1497  static Instr GetMovTPattern();
1498  static bool IsMovW(Instr instr);
1499  static Instr GetMovWPattern();
1500  static Instr EncodeMovwImmediate(uint32_t immediate);
1501  static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
1502  static int DecodeShiftImm(Instr instr);
1503  static Instr PatchShiftImm(Instr instr, int immed);
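  // Illustrative sketch, not part of the original header: reading an already
  // emitted instruction at a pc offset `pos` and patching its immediate
  // offset. `assm` and `pos` are assumptions for illustration.
  //
  //   Instr instr = assm.instr_at(pos);
  //   if (Assembler::IsLdrRegisterImmediate(instr)) {
  //     int offset = Assembler::GetLdrRegisterImmediateOffset(instr);
  //     assm.instr_at_put(
  //         pos, Assembler::SetLdrRegisterImmediateOffset(instr, offset + 4));
  //   }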
1504 
1505  // Constants in pools are accessed via pc relative addressing, which can
1506  // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
1507  // PC-relative loads, thereby defining a maximum distance between the
1508  // instruction and the accessed constant.
1509  static constexpr int kMaxDistToIntPool = 4 * KB;
1510  // All relocations could be integer ones, so the integer pool distance acts as the limit.
1511  static constexpr int kMinNumPendingConstants = 4;
1512  static constexpr int kMaxNumPending32Constants =
1513  kMaxDistToIntPool / kInstrSize;
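  // Editorial note: with kMaxDistToIntPool = 4 KB and kInstrSize = 4 bytes,
  // kMaxNumPending32Constants evaluates to 4096 / 4 = 1024 pending entries.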
1514 
1515  // Postpone the generation of the constant pool for the specified number of
1516  // instructions.
1517  void BlockConstPoolFor(int instructions);
1518 
1519  // Check whether it is time to emit a constant pool.
1520  void CheckConstPool(bool force_emit, bool require_jump);
1521 
1522  void MaybeCheckConstPool() {
1523  if (pc_offset() >= next_buffer_check_) {
1524  CheckConstPool(false, true);
1525  }
1526  }
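  // Illustrative sketch, not part of the original header: holding the pool
  // back over a short fixed sequence and then giving the assembler a cheap
  // chance to flush pending constants. `assm` is a hypothetical Assembler
  // instance.
  //
  //   assm.BlockConstPoolFor(2);  // No pool within the next two instructions.
  //   assm.ldr(r0, MemOperand(r1, 0));
  //   assm.ldr(r2, MemOperand(r1, 4));
  //   assm.MaybeCheckConstPool();  // Emits a pool only when one is due.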
1527 
1528  // Move a 32-bit immediate into a register, potentially via the constant pool.
1529  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
1530 
1531  // Get the code target object for a pc-relative call or jump.
1532  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
1533  Address pc_) const;
1534 
1535  protected:
1536  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
1537 
1538  // Decode branch instruction at pos and return branch target pos
1539  int target_at(int pos);
1540 
1541  // Patch branch instruction at pos to branch to given branch target pos
1542  void target_at_put(int pos, int target_pos);
1543 
1544  // Prevent constant pool emission until EndBlockConstPool is called.
1545  // Calls to this function can be nested but must be followed by an equal
1546  // number of calls to EndBlockConstPool.
1547  void StartBlockConstPool() {
1548  if (const_pool_blocked_nesting_++ == 0) {
1549  // Prevent constant pool checks happening by setting the next check to
1550  // the biggest possible offset.
1551  next_buffer_check_ = kMaxInt;
1552  }
1553  }
1554 
1555  // Resume constant pool emission. Needs to be called as many times as
1556  // StartBlockConstPool to have an effect.
1557  void EndBlockConstPool() {
1558  if (--const_pool_blocked_nesting_ == 0) {
1559 #ifdef DEBUG
1560  // Max pool start (if we need a jump and an alignment).
1561  int start = pc_offset() + kInstrSize + 2 * kPointerSize;
1562  // Check the constant pool hasn't been blocked for too long.
1563  DCHECK(pending_32_bit_constants_.empty() ||
1564  (start < first_const_pool_32_use_ + kMaxDistToIntPool));
1565 #endif
1566  // Two cases:
1567  // * no_const_pool_before_ >= next_buffer_check_ and the emission is
1568  // still blocked
1569  // * no_const_pool_before_ < next_buffer_check_ and the next emit will
1570  // trigger a check.
1571  next_buffer_check_ = no_const_pool_before_;
1572  }
1573  }
1574 
1575  bool is_const_pool_blocked() const {
1576  return (const_pool_blocked_nesting_ > 0) ||
1577  (pc_offset() < no_const_pool_before_);
1578  }
1579 
1580  bool VfpRegisterIsAvailable(DwVfpRegister reg) {
1581  DCHECK(reg.is_valid());
1582  return IsEnabled(VFP32DREGS) ||
1583  (reg.code() < LowDwVfpRegister::kNumRegisters);
1584  }
1585 
1586  bool VfpRegisterIsAvailable(QwNeonRegister reg) {
1587  DCHECK(reg.is_valid());
1588  return IsEnabled(VFP32DREGS) ||
1589  (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
1590  }
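  // Editorial note: under these checks, d0-d15 (and q0-q7) are always usable,
  // while d16-d31 (q8-q15) require the VFP32DREGS feature to be enabled.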
1591 
1592  inline void emit(Instr x);
1593 
1594  // Code generation
1595  // The relocation writer's position is at least kGap bytes below the end of
1596  // the generated instructions. This is so that multi-instruction sequences do
1597  // not have to check for overflow. The same is true for writes of large
1598  // relocation info entries.
1599  static constexpr int kGap = 32;
1600 
1601  // Relocation info generation
1602  // Each relocation is encoded as a variable size value
1603  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1604  RelocInfoWriter reloc_info_writer;
1605 
1606  // ConstantPoolEntry records are used during code generation as temporary
1607  // containers for constants and code target addresses until they are emitted
1608  // to the constant pool. They are held in a separate buffer until a constant
1609  // pool is emitted.
1610  // If every instruction in a long sequence is accessing the pool, we need one
1611  // pending relocation entry per instruction.
1612 
1613  // The buffers of pending constant pool entries.
1614  std::vector<ConstantPoolEntry> pending_32_bit_constants_;
1615 
1616  // Scratch registers available for use by the Assembler.
1617  RegList scratch_register_list_;
1618  VfpRegList scratch_vfp_register_list_;
1619 
1620  private:
1621  // Avoid overflows for displacements etc.
1622  static const int kMaximalBufferSize = 512 * MB;
1623 
1624  int next_buffer_check_; // pc offset of next buffer check
1625 
1626  // Constant pool generation
1627  // Pools are emitted in the instruction stream, preferably after unconditional
1628  // jumps or after returns from functions (in dead code locations).
1629  // If a long code sequence does not contain unconditional jumps, it is
1630  // necessary to emit the constant pool before the pool gets too far from the
1631  // location it is accessed from. In this case, we emit a jump over the emitted
1632  // constant pool.
1633  // Constants in the pool may be addresses of functions that get relocated;
1634  // if so, a relocation info entry is associated with the constant pool entry.
1635 
1636  // Repeated checking whether the constant pool should be emitted is rather
1637  // expensive. By default we only check again once a number of instructions
1638  // has been generated. That also means that the sizing of the buffers is not
1639  // an exact science, and that we rely on some slop to not overrun buffers.
1640  static constexpr int kCheckPoolIntervalInst = 32;
1641  static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
1642 
1643  // Emission of the constant pool may be blocked in some code sequences.
1644  int const_pool_blocked_nesting_; // Block emission if this is not zero.
1645  int no_const_pool_before_; // Block emission before this pc offset.
1646 
1647  // Keep track of the first instruction requiring a constant pool entry
1648  // since the previous constant pool was emitted.
1649  int first_const_pool_32_use_;
1650 
1651  // The bound position; before this we cannot do instruction elimination.
1652  int last_bound_pos_;
1653 
1654  inline void CheckBuffer();
1655  void GrowBuffer();
1656 
1657  // Instruction generation
1658  void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
1659  // Attempt to encode operand |x| for instruction |instr| and return true on
1660  // success. The result will be encoded in |instr| directly. This method may
1661  // change the opcode if deemed beneficial, for instance, MOV may be turned
1662  // into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this method
1663  // may fail is that the operand is an immediate that cannot be encoded.
1664  bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);
1665 
1666  void AddrMode2(Instr instr, Register rd, const MemOperand& x);
1667  void AddrMode3(Instr instr, Register rd, const MemOperand& x);
1668  void AddrMode4(Instr instr, Register rn, RegList rl);
1669  void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);
1670 
1671  // Labels
1672  void print(const Label* L);
1673  void bind_to(Label* L, int pos);
1674  void next(Label* L);
1675 
1676  // Record reloc info for current pc_
1677  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1678  void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
1679  intptr_t value);
1680  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
1681 
1682  friend class RelocInfo;
1683  friend class BlockConstPoolScope;
1684  friend class EnsureSpace;
1685  friend class UseScratchRegisterScope;
1686 };
1687 
1688 class EnsureSpace {
1689  public:
1690  V8_INLINE explicit EnsureSpace(Assembler* assembler);
1691 };
1692 
1693 class PatchingAssembler : public Assembler {
1694  public:
1695  PatchingAssembler(const AssemblerOptions& options, byte* address,
1696  int instructions);
1697  ~PatchingAssembler();
1698 
1699  void Emit(Address addr);
1700  void PadWithNops();
1701 };
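// Illustrative sketch, not part of the original header: overwriting a fixed
// number of instructions in place. The option value, address, and patched
// sequence are assumptions for illustration; PadWithNops() can fill any
// instruction slots that are left unused.
//
//   PatchingAssembler patcher(AssemblerOptions{},
//                             reinterpret_cast<byte*>(addr), 2);
//   patcher.mov(r0, Operand(0));
//   patcher.PadWithNops();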
1702 
1703 // This scope utility allows scratch registers to be managed safely. The
1704 // Assembler's GetScratchRegisterList() is used as a pool of scratch
1705 // registers. These registers can be allocated on demand, and will be returned
1706 // at the end of the scope.
1707 //
1708 // When the scope ends, the Assembler's list will be restored to its original
1709 // state, even if the list is modified by some other means. Note that this scope
1710 // can be nested, but the destructors need to run in the reverse order of the
1711 // constructors. We do not have assertions for this.
1712 class UseScratchRegisterScope {
1713  public:
1714  explicit UseScratchRegisterScope(Assembler* assembler);
1715  ~UseScratchRegisterScope();
1716 
1717  // Take a register from the list and return it.
1718  Register Acquire();
1719  SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
1720  LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
1721  DwVfpRegister AcquireD() {
1722  DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
1723  DCHECK(assembler_->VfpRegisterIsAvailable(reg));
1724  return reg;
1725  }
1726  QwNeonRegister AcquireQ() {
1727  QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
1728  DCHECK(assembler_->VfpRegisterIsAvailable(reg));
1729  return reg;
1730  }
1731 
1732  // Check if we have registers available to acquire.
1733  bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
1734  bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }
1735 
1736  private:
1737  friend class Assembler;
1738  friend class TurboAssembler;
1739 
1740  template <typename T>
1741  bool CanAcquireVfp() const;
1742 
1743  template <typename T>
1744  T AcquireVfp();
1745 
1746  Assembler* assembler_;
1747  // Available scratch registers at the start of this scope.
1748  RegList old_available_;
1749  VfpRegList old_available_vfp_;
1750 };
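// Illustrative sketch, not part of the original header: acquiring a temporary
// register for the duration of a scope. `assm` is a hypothetical Assembler
// instance.
//
//   {
//     UseScratchRegisterScope temps(&assm);
//     Register scratch = temps.Acquire();
//     assm.ldr(scratch, MemOperand(r0, 0));
//     assm.str(scratch, MemOperand(r1, 0));
//   }  // `scratch` is returned to the assembler's scratch list here.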
1751 
1752 // Define {RegisterName} methods for the register types.
1753 DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
1754 DEFINE_REGISTER_NAMES(SwVfpRegister, FLOAT_REGISTERS);
1755 DEFINE_REGISTER_NAMES(DwVfpRegister, DOUBLE_REGISTERS);
1756 DEFINE_REGISTER_NAMES(LowDwVfpRegister, LOW_DOUBLE_REGISTERS);
1757 DEFINE_REGISTER_NAMES(QwNeonRegister, SIMD128_REGISTERS);
1758 DEFINE_REGISTER_NAMES(CRegister, C_REGISTERS);
1759 
1760 } // namespace internal
1761 } // namespace v8
1762 
1763 #endif // V8_ARM_ASSEMBLER_ARM_H_