V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instructions-arm64.h
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
6 #define V8_ARM64_INSTRUCTIONS_ARM64_H_
7 
8 #include "src/arm64/constants-arm64.h"
9 #include "src/arm64/utils-arm64.h"
10 #include "src/globals.h"
11 #include "src/utils.h"
12 
13 namespace v8 {
14 namespace internal {
15 
16 struct AssemblerOptions;
17 
18 // ISA constants. --------------------------------------------------------------
19 
20 typedef uint32_t Instr;
21 
22 #if defined(V8_OS_WIN)
23 extern "C" {
24 #endif
25 
26 extern const float16 kFP16PositiveInfinity;
27 extern const float16 kFP16NegativeInfinity;
28 extern const float kFP32PositiveInfinity;
29 extern const float kFP32NegativeInfinity;
30 extern const double kFP64PositiveInfinity;
31 extern const double kFP64NegativeInfinity;
32 
33 // This value is a signalling NaN as both a double and as a float (taking the
34 // least-significant word).
35 extern const double kFP64SignallingNaN;
36 extern const float kFP32SignallingNaN;
37 
38 // A similar value, but as a quiet NaN.
39 extern const double kFP64QuietNaN;
40 extern const float kFP32QuietNaN;
41 
42 // The default NaN values (for FPCR.DN=1).
43 extern const double kFP64DefaultNaN;
44 extern const float kFP32DefaultNaN;
45 extern const float16 kFP16DefaultNaN;
46 
47 #if defined(V8_OS_WIN)
48 } // end of extern "C"
49 #endif
50 
51 unsigned CalcLSDataSize(LoadStoreOp op);
52 unsigned CalcLSPairDataSize(LoadStorePairOp op);
53 
54 enum ImmBranchType {
55  UnknownBranchType = 0,
56  CondBranchType = 1,
57  UncondBranchType = 2,
58  CompareBranchType = 3,
59  TestBranchType = 4
60 };
61 
62 enum AddrMode {
63  Offset,
64  PreIndex,
65  PostIndex
66 };
67 
68 enum FPRounding {
69  // The first four values are encodable directly by FPCR<RMode>.
70  FPTieEven = 0x0,
71  FPPositiveInfinity = 0x1,
72  FPNegativeInfinity = 0x2,
73  FPZero = 0x3,
74 
75  // The final rounding modes are only available when explicitly specified by
76  // the instruction (such as with fcvta). They cannot be set in FPCR.
77  FPTieAway,
78  FPRoundOdd
79 };
80 
81 enum Reg31Mode {
82  Reg31IsStackPointer,
83  Reg31IsZeroRegister
84 };
85 
86 // Instructions. ---------------------------------------------------------------
87 
88 class Instruction {
89  public:
90  V8_INLINE Instr InstructionBits() const {
91  return *reinterpret_cast<const Instr*>(this);
92  }
93 
94  V8_INLINE void SetInstructionBits(Instr new_instr) {
95  *reinterpret_cast<Instr*>(this) = new_instr;
96  }
97 
98  int Bit(int pos) const {
99  return (InstructionBits() >> pos) & 1;
100  }
101 
102  uint32_t Bits(int msb, int lsb) const {
103  return unsigned_bitextract_32(msb, lsb, InstructionBits());
104  }
105 
106  int32_t SignedBits(int msb, int lsb) const {
107  int32_t bits = *(reinterpret_cast<const int32_t*>(this));
108  return signed_bitextract_32(msb, lsb, bits);
109  }
110 
111  Instr Mask(uint32_t mask) const {
112  return InstructionBits() & mask;
113  }
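 // Illustration (a sketch, using A64 field positions): in data-processing
 // encodings Rd occupies bits 4:0 and Rn bits 9:5, so these helpers recover
 // register numbers directly; the DEFINE_GETTER block below builds the named
 // accessors on the same primitives:
 //   int rd = instr->Bits(4, 0);      // same value as the generated Rd()
 //   int rn = instr->Bits(9, 5);      // same value as the generated Rn()
 //   bool is_64bit = instr->Bit(31);  // sf bit in many integer encodings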
114 
115  V8_INLINE const Instruction* following(int count = 1) const {
116  return InstructionAtOffset(count * static_cast<int>(kInstrSize));
117  }
118 
119  V8_INLINE Instruction* following(int count = 1) {
120  return InstructionAtOffset(count * static_cast<int>(kInstrSize));
121  }
122 
123  V8_INLINE const Instruction* preceding(int count = 1) const {
124  return following(-count);
125  }
126 
127  V8_INLINE Instruction* preceding(int count = 1) {
128  return following(-count);
129  }
130 
131 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
132  int32_t Name() const { return Func(HighBit, LowBit); }
133  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
134  #undef DEFINE_GETTER
135 
136  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
137  // formed from ImmPCRelLo and ImmPCRelHi.
138  int ImmPCRel() const {
139  DCHECK(IsPCRelAddressing());
140  int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
141  int width = ImmPCRelLo_width + ImmPCRelHi_width;
142  return signed_bitextract_32(width - 1, 0, offset);
143  }
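 // Illustration (ADR; a sketch): ImmPCRelLo is the 2-bit immlo field in bits
 // 30:29 and ImmPCRelHi the 19-bit immhi field in bits 23:5, so the combined
 // value is a signed 21-bit byte offset, giving ADR a +/-1MB reach:
 //   if (instr->IsAdr()) {
 //     int64_t byte_offset = instr->ImmPCRel();  // same as ImmPCOffset() here
 //   }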
144 
145  uint64_t ImmLogical();
146  unsigned ImmNEONabcdefgh() const;
147  float ImmFP32();
148  double ImmFP64();
149  float ImmNEONFP32() const;
150  double ImmNEONFP64() const;
151 
152  unsigned SizeLS() const {
153  return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
154  }
155 
156  unsigned SizeLSPair() const {
157  return CalcLSPairDataSize(
158  static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
159  }
160 
161  int NEONLSIndex(int access_size_shift) const {
162  int q = NEONQ();
163  int s = NEONS();
164  int size = NEONLSSize();
165  int index = (q << 3) | (s << 2) | size;
166  return index >> access_size_shift;
167  }
168 
169  // Helpers.
170  bool IsCondBranchImm() const {
171  return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
172  }
173 
174  bool IsUncondBranchImm() const {
175  return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
176  }
177 
178  bool IsCompareBranch() const {
179  return Mask(CompareBranchFMask) == CompareBranchFixed;
180  }
181 
182  bool IsTestBranch() const {
183  return Mask(TestBranchFMask) == TestBranchFixed;
184  }
185 
186  bool IsImmBranch() const {
187  return BranchType() != UnknownBranchType;
188  }
189 
190  static float Imm8ToFP32(uint32_t imm8) {
191  // Imm8: abcdefgh (8 bits)
192  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
193  // where B is b ^ 1
194  uint32_t bits = imm8;
195  uint32_t bit7 = (bits >> 7) & 0x1;
196  uint32_t bit6 = (bits >> 6) & 0x1;
197  uint32_t bit5_to_0 = bits & 0x3f;
198  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
199 
200  return bit_cast<float>(result);
201  }
202 
203  static double Imm8ToFP64(uint32_t imm8) {
204  // Imm8: abcdefgh (8 bits)
205  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
206  // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
207  // where B is b ^ 1
208  uint32_t bits = imm8;
209  uint64_t bit7 = (bits >> 7) & 0x1;
210  uint64_t bit6 = (bits >> 6) & 0x1;
211  uint64_t bit5_to_0 = bits & 0x3f;
212  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
213 
214  return bit_cast<double>(result);
215  }
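 // Worked example: the 8-bit FMOV immediate 0x70 (abcdefgh = 0111'0000)
 // expands to 0x3F800000 as a single and 0x3FF0000000000000 as a double,
 // i.e. 1.0 in both widths:
 //   float f = Instruction::Imm8ToFP32(0x70);   // 1.0f
 //   double d = Instruction::Imm8ToFP64(0x70);  // 1.0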
216 
217  bool IsLdrLiteral() const {
218  return Mask(LoadLiteralFMask) == LoadLiteralFixed;
219  }
220 
221  bool IsLdrLiteralX() const {
222  return Mask(LoadLiteralMask) == LDR_x_lit;
223  }
224 
225  bool IsPCRelAddressing() const {
226  return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
227  }
228 
229  bool IsAdr() const {
230  return Mask(PCRelAddressingMask) == ADR;
231  }
232 
233  bool IsBrk() const { return Mask(ExceptionMask) == BRK; }
234 
235  bool IsUnresolvedInternalReference() const {
236  // Unresolved internal references are encoded as two consecutive brk
237  // instructions.
238  return IsBrk() && following()->IsBrk();
239  }
240 
241  bool IsLogicalImmediate() const {
242  return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
243  }
244 
245  bool IsAddSubImmediate() const {
246  return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
247  }
248 
249  bool IsAddSubShifted() const {
250  return Mask(AddSubShiftedFMask) == AddSubShiftedFixed;
251  }
252 
253  bool IsAddSubExtended() const {
254  return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
255  }
256 
257  // Match any loads or stores, including pairs.
258  bool IsLoadOrStore() const {
259  return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
260  }
261 
262  // Match any loads, including pairs.
263  bool IsLoad() const;
264  // Match any stores, including pairs.
265  bool IsStore() const;
266 
267  // Indicate whether Rd can be the stack pointer or the zero register. This
268  // does not check that the instruction actually has an Rd field.
269  Reg31Mode RdMode() const {
270  // The following instructions use sp or wsp as Rd:
271  // Add/sub (immediate) when not setting the flags.
272  // Add/sub (extended) when not setting the flags.
273  // Logical (immediate) when not setting the flags.
274  // Otherwise, r31 is the zero register.
275  if (IsAddSubImmediate() || IsAddSubExtended()) {
276  if (Mask(AddSubSetFlagsBit)) {
277  return Reg31IsZeroRegister;
278  } else {
279  return Reg31IsStackPointer;
280  }
281  }
282  if (IsLogicalImmediate()) {
283  // Of the logical (immediate) instructions, only ANDS (and its aliases)
284  // can set the flags. The others can all write into sp.
285  // Note that some logical operations are not available to
286  // immediate-operand instructions, so we have to combine two masks here.
287  if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
288  return Reg31IsZeroRegister;
289  } else {
290  return Reg31IsStackPointer;
291  }
292  }
293  return Reg31IsZeroRegister;
294  }
295 
296  // Indicate whether Rn can be the stack pointer or the zero register. This
297  // does not check that the instruction actually has an Rn field.
298  Reg31Mode RnMode() const {
299  // The following instructions use sp or wsp as Rn:
300  // All loads and stores.
301  // Add/sub (immediate).
302  // Add/sub (extended).
303  // Otherwise, r31 is the zero register.
304  if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
305  return Reg31IsStackPointer;
306  }
307  return Reg31IsZeroRegister;
308  }
309 
310  ImmBranchType BranchType() const {
311  if (IsCondBranchImm()) {
312  return CondBranchType;
313  } else if (IsUncondBranchImm()) {
314  return UncondBranchType;
315  } else if (IsCompareBranch()) {
316  return CompareBranchType;
317  } else if (IsTestBranch()) {
318  return TestBranchType;
319  } else {
320  return UnknownBranchType;
321  }
322  }
323 
324  static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
325  switch (branch_type) {
326  case UncondBranchType:
327  return ImmUncondBranch_width;
328  case CondBranchType:
329  return ImmCondBranch_width;
330  case CompareBranchType:
331  return ImmCmpBranch_width;
332  case TestBranchType:
333  return ImmTestBranch_width;
334  default:
335  UNREACHABLE();
336  }
337  }
338 
339  // The range of the branch instruction, expressed as 'instr +- range'.
340  static int32_t ImmBranchRange(ImmBranchType branch_type) {
341  return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 -
342  kInstrSize;
343  }
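 // Example values (assuming kInstrSize == 4 and the immediate widths from
 // constants-arm64.h: imm26 for B/BL, imm19 for B.cond/CBZ/CBNZ, imm14 for
 // TBZ/TBNZ):
 //   ImmBranchRange(UncondBranchType);   // +/-128MB - 4
 //   ImmBranchRange(CondBranchType);     // +/-1MB - 4
 //   ImmBranchRange(CompareBranchType);  // +/-1MB - 4
 //   ImmBranchRange(TestBranchType);     // +/-32KB - 4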
344 
345  int ImmBranch() const {
346  switch (BranchType()) {
347  case CondBranchType: return ImmCondBranch();
348  case UncondBranchType: return ImmUncondBranch();
349  case CompareBranchType: return ImmCmpBranch();
350  case TestBranchType: return ImmTestBranch();
351  default: UNREACHABLE();
352  }
353  return 0;
354  }
355 
356  int ImmUnresolvedInternalReference() const {
357  DCHECK(IsUnresolvedInternalReference());
358  // Unresolved references are encoded as two consecutive brk instructions.
359  // The associated immediate is made of the two 16-bit payloads.
360  int32_t high16 = ImmException();
361  int32_t low16 = following()->ImmException();
362  return (high16 << 16) | low16;
363  }
364 
365  bool IsUnconditionalBranch() const {
366  return Mask(UnconditionalBranchMask) == B;
367  }
368 
369  bool IsBranchAndLink() const { return Mask(UnconditionalBranchMask) == BL; }
370 
371  bool IsBranchAndLinkToRegister() const {
372  return Mask(UnconditionalBranchToRegisterMask) == BLR;
373  }
374 
375  bool IsMovz() const {
376  return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
377  (Mask(MoveWideImmediateMask) == MOVZ_w);
378  }
379 
380  bool IsMovk() const {
381  return (Mask(MoveWideImmediateMask) == MOVK_x) ||
382  (Mask(MoveWideImmediateMask) == MOVK_w);
383  }
384 
385  bool IsMovn() const {
386  return (Mask(MoveWideImmediateMask) == MOVN_x) ||
387  (Mask(MoveWideImmediateMask) == MOVN_w);
388  }
389 
390  bool IsNop(int n) {
391  // A marking nop is an instruction
392  // mov r<n>, r<n>
393  // which is encoded as
394  // orr r<n>, xzr, r<n>
395  return (Mask(LogicalShiftedMask) == ORR_x) &&
396  (Rd() == Rm()) &&
397  (Rd() == n);
398  }
399 
400  // Find the PC offset encoded in this instruction. 'this' may be a branch or
401  // a PC-relative addressing instruction.
402  // The offset returned is unscaled.
403  int64_t ImmPCOffset();
404 
405  // Find the target of this instruction. 'this' may be a branch or a
406  // PC-relative addressing instruction.
407  Instruction* ImmPCOffsetTarget();
408 
409  static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
410  bool IsTargetInImmPCOffsetRange(Instruction* target);
411  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
412  // a PC-relative addressing instruction.
413  void SetImmPCOffsetTarget(const AssemblerOptions& options,
414  Instruction* target);
415  void SetUnresolvedInternalReferenceImmTarget(const AssemblerOptions& options,
416  Instruction* target);
417  // Patch a literal load instruction to load from 'source'.
418  void SetImmLLiteral(Instruction* source);
419 
420  uintptr_t LiteralAddress() {
421  int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
422  return reinterpret_cast<uintptr_t>(this) + offset;
423  }
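 // Illustration (a sketch): ImmLLiteral is the signed 19-bit word offset of
 // an LDR (literal) instruction and kLoadLiteralScaleLog2 is 2, so the
 // literal sits at pc + imm19 * 4:
 //   if (instr->IsLdrLiteralX()) {
 //     uint64_t value = *reinterpret_cast<uint64_t*>(instr->LiteralAddress());
 //   }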
424 
425  enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
426 
427  V8_INLINE const Instruction* InstructionAtOffset(
428  int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
429  // The FUZZ_disasm test relies on no check being done.
430  DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
431  return this + offset;
432  }
433 
434  V8_INLINE Instruction* InstructionAtOffset(
435  int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
436  // The FUZZ_disasm test relies on no check being done.
437  DCHECK(check == NO_CHECK || IsAligned(offset, kInstrSize));
438  return this + offset;
439  }
440 
441  template<typename T> V8_INLINE static Instruction* Cast(T src) {
442  return reinterpret_cast<Instruction*>(src);
443  }
444 
445  V8_INLINE ptrdiff_t DistanceTo(Instruction* target) {
446  return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
447  }
448 
449 
450  static const int ImmPCRelRangeBitwidth = 21;
451  static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
452  void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
453  void SetBranchImmTarget(Instruction* target);
454 };
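// Usage sketch: Instruction is an overlay type, so given a code address pc it
// is reinterpreted rather than constructed, and code is walked in kInstrSize
// steps:
//   Instruction* instr = Instruction::Cast(pc);
//   if (instr->IsImmBranch()) {
//     Instruction* target = instr->ImmPCOffsetTarget();
//     // ... inspect or retarget the branch ...
//   }
//   instr = instr->following();  // the next 4-byte instruction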
455 
456 // Functions for handling NEON vector format information.
457 enum VectorFormat {
458  kFormatUndefined = 0xffffffff,
459  kFormat8B = NEON_8B,
460  kFormat16B = NEON_16B,
461  kFormat4H = NEON_4H,
462  kFormat8H = NEON_8H,
463  kFormat2S = NEON_2S,
464  kFormat4S = NEON_4S,
465  kFormat1D = NEON_1D,
466  kFormat2D = NEON_2D,
467 
468  // Scalar formats. We add the scalar bit to distinguish between scalar and
469  // vector enumerations; the bit is always set in the encoding of scalar ops
470  // and always clear for vector ops. Although kFormatD and kFormat1D appear
471  // to be the same, their meaning is subtly different. The first is a scalar
472  // operation, the second a vector operation that only affects one lane.
473  kFormatB = NEON_B | NEONScalar,
474  kFormatH = NEON_H | NEONScalar,
475  kFormatS = NEON_S | NEONScalar,
476  kFormatD = NEON_D | NEONScalar
477 };
478 
479 VectorFormat VectorFormatHalfWidth(VectorFormat vform);
480 VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
481 VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
482 VectorFormat VectorFormatHalfLanes(VectorFormat vform);
483 VectorFormat ScalarFormatFromLaneSize(int lanesize);
484 VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
485 VectorFormat VectorFormatFillQ(VectorFormat vform);
486 VectorFormat ScalarFormatFromFormat(VectorFormat vform);
487 unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
488 unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
489 int LaneSizeInBytesFromFormat(VectorFormat vform);
490 unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
491 int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
492 int LaneCountFromFormat(VectorFormat vform);
493 int MaxLaneCountFromFormat(VectorFormat vform);
494 bool IsVectorFormat(VectorFormat vform);
495 int64_t MaxIntFromFormat(VectorFormat vform);
496 int64_t MinIntFromFormat(VectorFormat vform);
497 uint64_t MaxUintFromFormat(VectorFormat vform);
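// Illustration of the helpers above (values follow from the ARM64 register
// model: a Q register is 128 bits, a D register 64 bits):
//   LaneCountFromFormat(kFormat4S);           // 4
//   LaneSizeInBitsFromFormat(kFormat4S);      // 32
//   RegisterSizeInBitsFromFormat(kFormat4S);  // 128
//   RegisterSizeInBitsFromFormat(kFormat8B);  // 64
//   VectorFormatDoubleWidth(kFormat4H);       // kFormat4S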
498 
499 // Where Instruction looks at instructions generated by the Assembler,
500 // InstructionSequence looks at instruction sequences generated by the
501 // MacroAssembler.
502 class InstructionSequence : public Instruction {
503  public:
504  static InstructionSequence* At(Address address) {
505  return reinterpret_cast<InstructionSequence*>(address);
506  }
507 
508  // Sequences generated by MacroAssembler::InlineData().
509  bool IsInlineData() const;
510  uint64_t InlineData() const;
511 };
512 
513 
514 // Simulator/Debugger debug instructions ---------------------------------------
515 // Each debug marker is represented by a HLT instruction. The immediate comment
516 // field in the instruction is used to identify the type of debug marker. Each
517 // marker encodes arguments in a different way, as described below.
518 
519 // Indicate to the Debugger that the instruction is a redirected call.
520 const Instr kImmExceptionIsRedirectedCall = 0xca11;
521 
522 // Represent unreachable code. This is used as a guard in parts of the code that
523 // should not be reachable, such as in data encoded inline in the instructions.
524 const Instr kImmExceptionIsUnreachable = 0xdebf;
525 
526 // A pseudo 'printf' instruction. The arguments will be passed to the platform
527 // printf method.
528 const Instr kImmExceptionIsPrintf = 0xdeb1;
529 // Most parameters are stored in ARM64 registers as if the printf
530 // pseudo-instruction was a call to the real printf method:
531 // x0: The format string.
532 // x1-x7: Optional arguments.
533 // d0-d7: Optional arguments.
534 //
535 // Also, the argument layout is described inline in the instructions:
536 // - arg_count: The number of arguments.
537 // - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
538 //
539 // Floating-point and integer arguments are passed in separate sets of registers
540 // in AAPCS64 (even for varargs functions), so it is not possible to determine
541 // the type of each argument without some information about the values that were
542 // passed in. This information could be retrieved from the printf format string,
543 // but the format string is not trivial to parse so we encode the relevant
544 // information with the HLT instruction.
545 const unsigned kPrintfArgCountOffset = 1 * kInstrSize;
546 const unsigned kPrintfArgPatternListOffset = 2 * kInstrSize;
547 const unsigned kPrintfLength = 3 * kInstrSize;
548 
549 const unsigned kPrintfMaxArgCount = 4;
550 
551 // The argument pattern is a set of two-bit-fields, each with one of the
552 // following values:
553 enum PrintfArgPattern {
554  kPrintfArgW = 1,
555  kPrintfArgX = 2,
556  // There is no kPrintfArgS because floats are always converted to doubles in C
557  // varargs calls.
558  kPrintfArgD = 3
559 };
560 static const unsigned kPrintfArgPatternBits = 2;
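// Layout sketch implied by the offsets above (illustrative; the sequence is
// emitted by the MacroAssembler). For two arguments passed as a W register
// and a D register, arg_count is 2 and the pattern list packs one two-bit
// PrintfArgPattern per argument, first argument in the lowest bits:
//   hlt  kImmExceptionIsPrintf
//   dc32 2                                              // kPrintfArgCountOffset
//   dc32 kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits)  // pattern list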
561 
562 // A pseudo 'debug' instruction.
563 const Instr kImmExceptionIsDebug = 0xdeb0;
564 // Parameters are inlined in the code after a debug pseudo-instruction:
565 // - Debug code.
566 // - Debug parameters.
567 // - Debug message string. This is a nullptr-terminated ASCII string, padded to
568 // kInstrSize so that subsequent instructions are correctly aligned.
569 // - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
570 // string data.
571 const unsigned kDebugCodeOffset = 1 * kInstrSize;
572 const unsigned kDebugParamsOffset = 2 * kInstrSize;
573 const unsigned kDebugMessageOffset = 3 * kInstrSize;
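// Layout sketch implied by the offsets above (illustrative; emitted for the
// __ debug() calls shown in the examples below):
//   hlt  kImmExceptionIsDebug
//   dc32 code                    // kDebugCodeOffset
//   dc32 parameters              // kDebugParamsOffset
//   "message\0", padded to a multiple of kInstrSize   // kDebugMessageOffset
//   hlt  kImmExceptionIsUnreachable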
574 
575 // Debug parameters.
576 // Used without a TRACE_ option, the Debugger will print the arguments only
577 // once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
578 // before every instruction for the specified LOG_ parameters.
579 //
580 // TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
581 // others that were not specified.
582 //
583 // For example:
584 //
585 // __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
586 // will print the registers and fp registers only once.
587 //
588 // __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
589 // starts disassembling the code.
590 //
591 // __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
592 // adds the general purpose registers to the trace.
593 //
594 // __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
595 // stops tracing the registers.
596 const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
597 enum DebugParameters {
598  NO_PARAM = 0,
599  BREAK = 1 << 0,
600  LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
601  LOG_REGS = 1 << 2, // Log general purpose registers.
602  LOG_VREGS = 1 << 3, // Log NEON and floating-point registers.
603  LOG_SYS_REGS = 1 << 4, // Log the status flags.
604  LOG_WRITE = 1 << 5, // Log any memory write.
605 
606  LOG_NONE = 0,
607  LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
608  LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
609 
610  // Trace control.
611  TRACE_ENABLE = 1 << 6,
612  TRACE_DISABLE = 2 << 6,
613  TRACE_OVERRIDE = 3 << 6
614 };
615 
616 enum NEONFormat {
617  NF_UNDEF = 0,
618  NF_8B = 1,
619  NF_16B = 2,
620  NF_4H = 3,
621  NF_8H = 4,
622  NF_2S = 5,
623  NF_4S = 6,
624  NF_1D = 7,
625  NF_2D = 8,
626  NF_B = 9,
627  NF_H = 10,
628  NF_S = 11,
629  NF_D = 12
630 };
631 
632 static const unsigned kNEONFormatMaxBits = 6;
633 
634 struct NEONFormatMap {
635  // The bit positions in the instruction to consider.
636  uint8_t bits[kNEONFormatMaxBits];
637 
638  // Mapping from concatenated bits to format.
639  NEONFormat map[1 << kNEONFormatMaxBits];
640 };
641 
642 class NEONFormatDecoder {
643  public:
644  enum SubstitutionMode { kPlaceholder, kFormat };
645 
646  // Construct a format decoder with increasingly specific format maps for each
647  // substitution. If no format map is specified, the default is the integer
648  // format map.
649  explicit NEONFormatDecoder(const Instruction* instr);
650  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
651  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
652  const NEONFormatMap* format1);
653  NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
654  const NEONFormatMap* format1, const NEONFormatMap* format2);
655 
656  // Set the format mapping for all or individual substitutions.
657  void SetFormatMaps(const NEONFormatMap* format0,
658  const NEONFormatMap* format1 = nullptr,
659  const NEONFormatMap* format2 = nullptr);
660  void SetFormatMap(unsigned index, const NEONFormatMap* format);
661 
662  // Substitute %s in the input string with the placeholder string for each
663  // register, i.e. "'B", "'H", etc.
664  const char* SubstitutePlaceholders(const char* string);
665 
666  // Substitute %s in the input string with a new string based on the
667  // substitution mode.
668  const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
669  SubstitutionMode mode1 = kFormat,
670  SubstitutionMode mode2 = kFormat);
671 
672  // Append a "2" to a mnemonic string based on the state of the Q bit.
673  const char* Mnemonic(const char* mnemonic);
674 
675  VectorFormat GetVectorFormat(int format_index = 0);
676  VectorFormat GetVectorFormat(const NEONFormatMap* format_map);
677 
678  // Built in mappings for common cases.
679 
680  // The integer format map uses three bits (Q, size<1:0>) to encode the
681  // "standard" set of NEON integer vector formats.
682  static const NEONFormatMap* IntegerFormatMap() {
683  static const NEONFormatMap map = {
684  {23, 22, 30},
685  {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
686  return &map;
687  }
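 // Illustration (a sketch): the bits are listed most-significant first, so
 // the index formed here is (size<1> << 2) | (size<0> << 1) | Q. An
 // instruction with size == 0b10 and Q == 1 selects entry 0b101, i.e. NF_4S:
 //   NEONFormatDecoder dec(instr, NEONFormatDecoder::IntegerFormatMap());
 //   VectorFormat vf = dec.GetVectorFormat();  // kFormat4S in that case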
688 
689  // The long integer format map uses two bits (size<1:0>) to encode the
690  // long set of NEON integer vector formats. These are used in narrow, wide
691  // and long operations.
692  static const NEONFormatMap* LongIntegerFormatMap() {
693  static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
694  return &map;
695  }
696 
697  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
698  // formats: NF_2S, NF_4S, NF_2D.
699  static const NEONFormatMap* FPFormatMap() {
700  // The FP format map assumes two bits (Q, size<0>) are used to encode the
701  // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
702  static const NEONFormatMap map = {{22, 30},
703  {NF_2S, NF_4S, NF_UNDEF, NF_2D}};
704  return &map;
705  }
706 
707  // The load/store format map uses three bits (Q, 11, 10) to encode the
708  // set of NEON vector formats.
709  static const NEONFormatMap* LoadStoreFormatMap() {
710  static const NEONFormatMap map = {
711  {11, 10, 30},
712  {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
713  return &map;
714  }
715 
716  // The logical format map uses one bit (Q) to encode the NEON vector format:
717  // NF_8B, NF_16B.
718  static const NEONFormatMap* LogicalFormatMap() {
719  static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
720  return &map;
721  }
722 
723  // The triangular format map uses between two and five bits to encode the NEON
724  // vector format:
725  // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
726  // x1000->2S, x1001->4S, 10001->2D, all others undefined.
727  static const NEONFormatMap* TriangularFormatMap() {
728  static const NEONFormatMap map = {
729  {19, 18, 17, 16, 30},
730  {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
731  NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
732  NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
733  NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
734  return &map;
735  }
736 
737  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
738  // formats: NF_B, NF_H, NF_S, NF_D.
739  static const NEONFormatMap* ScalarFormatMap() {
740  static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
741  return &map;
742  }
743 
744  // The long scalar format map uses two bits (size<1:0>) to encode the longer
745  // NEON scalar formats: NF_H, NF_S, NF_D.
746  static const NEONFormatMap* LongScalarFormatMap() {
747  static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
748  return &map;
749  }
750 
751  // The FP scalar format map assumes one bit (size<0>) is used to encode the
752  // NEON FP scalar formats: NF_S, NF_D.
753  static const NEONFormatMap* FPScalarFormatMap() {
754  static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
755  return &map;
756  }
757 
758  // The triangular scalar format map uses between one and four bits to encode
759  // the NEON FP scalar formats:
760  // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
761  static const NEONFormatMap* TriangularScalarFormatMap() {
762  static const NEONFormatMap map = {
763  {19, 18, 17, 16},
764  {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
765  NF_B, NF_S, NF_B, NF_H, NF_B}};
766  return &map;
767  }
768 
769  private:
770  // Get a pointer to a string that represents the format or placeholder for
771  // the specified substitution index, based on the format map and instruction.
772  const char* GetSubstitute(int index, SubstitutionMode mode);
773 
774  // Get the NEONFormat enumerated value for bits obtained from the
775  // instruction based on the specified format mapping.
776  NEONFormat GetNEONFormat(const NEONFormatMap* format_map);
777 
778  // Convert a NEONFormat into a string.
779  static const char* NEONFormatAsString(NEONFormat format);
780 
781  // Convert a NEONFormat into a register placeholder string.
782  static const char* NEONFormatAsPlaceholder(NEONFormat format);
783 
784  // Select bits from instrbits_ defined by the bits array, concatenate them,
785  // and return the value.
786  uint8_t PickBits(const uint8_t bits[]);
787 
788  Instr instrbits_;
789  const NEONFormatMap* formats_[3];
790  char form_buffer_[64];
791  char mne_buffer_[16];
792 };
793 } // namespace internal
794 } // namespace v8
795 
796 
797 #endif // V8_ARM64_INSTRUCTIONS_ARM64_H_