V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
instructions-arm64.cc
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"

namespace v8 {
namespace internal {

bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q:
        return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q:
        return true;
      default: return false;
    }
  }
}


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  DCHECK_LE(width, 64);
  rotate &= 63;
  return ((value & ((1ULL << rotate) - 1ULL)) << (width - rotate)) |
         (value >> rotate);
}
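
// Illustrative examples (values chosen for this reference, not from the
// original source): RotateRight(0b0011, 1, 4) == 0b1001, and
// RotateRight(0xF, 2, 64) == 0xC000000000000003.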


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  uint64_t result = value & ((1ULL << width) - 1ULL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
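
// Illustrative example: RepeatBitsAcrossReg(32, 0x3, 4) tiles the 4-bit
// pattern 0b0011 across a W register, producing 0x33333333.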


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64   UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32   UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16   UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8   UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4   UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2   UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
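  // Worked example (illustrative): N=1, imms=0b000011, immr=0b000010 selects
  // a 64-bit pattern whose low S+1 = 4 bits are set, rotated right by R = 2,
  // giving 0xC000000000000003.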

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1ULL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
}

uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
}

float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }

double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }

float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }

double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}

unsigned CalcLSDataSize(LoadStoreOp op) {
  DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
            kInstrSize * 8);
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}
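
// Illustrative example (assuming the standard ARM64 encodings): LDR_x has a
// "size" field of 0b11, so the result is 3 (an 8-byte access); LDR_q has
// size 0 but opc >= 2, so the result is kQRegSizeLog2 = 4 (16 bytes).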

unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
  static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
  switch (op) {
    case STP_q:
    case LDP_q:
      return kQRegSizeLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d:
      return kXRegSizeLog2;
    default:
      return kWRegSizeLog2;
  }
}


int64_t Instruction::ImmPCOffset() {
  int64_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstrSizeLog2;
  } else if (IsUnresolvedInternalReference()) {
    // Internal references are always word-aligned.
    offset = ImmUnresolvedInternalReference() << kInstrSizeLog2;
  } else {
    // Load literal (offset from PC).
    DCHECK(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstrSizeLog2;
  }
  return offset;
}
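
// Illustrative example: a branch whose ImmBranch() field holds 4 encodes a
// byte offset of 4 << kInstrSizeLog2 = 16, i.e. four instructions forward.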


Instruction* Instruction::ImmPCOffsetTarget() {
  return InstructionAtOffset(ImmPCOffset());
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     ptrdiff_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}


bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}

void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
                                       Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(options, target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else if (IsUnresolvedInternalReference()) {
    SetUnresolvedInternalReferenceImmTarget(options, target);
  } else {
    // Load literal (offset from PC).
    SetImmLLiteral(target);
  }
}

void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
                                    Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  DCHECK(IsAdr());

  ptrdiff_t target_offset = DistanceTo(target);
  Instr imm;
  if (Instruction::IsValidPCRelOffset(target_offset)) {
    imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
    SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
  } else {
    PatchingAssembler patcher(options, reinterpret_cast<byte*>(this),
                              PatchingAssembler::kAdrFarPatchableNInstrs);
    patcher.PatchAdrFar(target_offset);
  }
}


void Instruction::SetBranchImmTarget(Instruction* target) {
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(
      IsValidImmPCOffset(BranchType(), DistanceTo(target) >> kInstrSizeLog2));
  int offset = static_cast<int>(DistanceTo(target) >> kInstrSizeLog2);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}

void Instruction::SetUnresolvedInternalReferenceImmTarget(
    const AssemblerOptions& options, Instruction* target) {
  DCHECK(IsUnresolvedInternalReference());
  DCHECK(IsAligned(DistanceTo(target), kInstrSize));
  DCHECK(is_int32(DistanceTo(target) >> kInstrSizeLog2));
  int32_t target_offset =
      static_cast<int32_t>(DistanceTo(target) >> kInstrSizeLog2);
  uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
  uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);

  PatchingAssembler patcher(options, reinterpret_cast<byte*>(this), 2);
  patcher.brk(high16);
  patcher.brk(low16);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  DCHECK(IsLdrLiteral());
  DCHECK(IsAligned(DistanceTo(source), kInstrSize));
  DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
  Instr imm = Assembler::ImmLLiteral(
      static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
  return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  DCHECK(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}
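
// Illustrative example: "movz xzr, #0x1234" satisfies IsInlineData(), and
// InlineData() would return 0x1234.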

VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
  DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
         vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H:
      return kFormat8B;
    case kFormat4S:
      return kFormat4H;
    case kFormat2D:
      return kFormat2S;
    case kFormatH:
      return kFormatB;
    case kFormatS:
      return kFormatH;
    case kFormatD:
      return kFormatS;
    default:
      UNREACHABLE();
  }
}

VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
  DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
         vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B:
      return kFormat8H;
    case kFormat4H:
      return kFormat4S;
    case kFormat2S:
      return kFormat2D;
    case kFormatB:
      return kFormatH;
    case kFormatH:
      return kFormatS;
    case kFormatS:
      return kFormatD;
    default:
      UNREACHABLE();
  }
}

VectorFormat VectorFormatFillQ(VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return kFormat2D;
    default:
      UNREACHABLE();
  }
}

VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
  switch (vform) {
    case kFormat4H:
      return kFormat8B;
    case kFormat8H:
      return kFormat16B;
    case kFormat2S:
      return kFormat4H;
    case kFormat4S:
      return kFormat8H;
    case kFormat1D:
      return kFormat2S;
    case kFormat2D:
      return kFormat4S;
    default:
      UNREACHABLE();
  }
}

VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
  DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B:
      return kFormat16B;
    case kFormat4H:
      return kFormat8H;
    case kFormat2S:
      return kFormat4S;
    default:
      UNREACHABLE();
  }
}

VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
  DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B:
      return kFormat8B;
    case kFormat8H:
      return kFormat4H;
    case kFormat4S:
      return kFormat2S;
    default:
      UNREACHABLE();
  }
}

VectorFormat ScalarFormatFromLaneSize(int laneSize) {
  switch (laneSize) {
    case 8:
      return kFormatB;
    case 16:
      return kFormatH;
    case 32:
      return kFormatS;
    case 64:
      return kFormatD;
    default:
      UNREACHABLE();
  }
}

VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
  return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}

unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}

unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  DCHECK_NE(vform, kFormatUndefined);
  switch (vform) {
    case kFormatB:
      return kBRegSizeInBits;
    case kFormatH:
      return kHRegSizeInBits;
    case kFormatS:
      return kSRegSizeInBits;
    case kFormatD:
      return kDRegSizeInBits;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D:
      return kDRegSizeInBits;
    default:
      return kQRegSizeInBits;
  }
}

unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  DCHECK_NE(vform, kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 8;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 64;
    default:
      UNREACHABLE();
  }
}

int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}

int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  DCHECK_NE(vform, kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 0;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 3;
    default:
      UNREACHABLE();
  }
}

int LaneCountFromFormat(VectorFormat vform) {
  DCHECK_NE(vform, kFormatUndefined);
  switch (vform) {
    case kFormat16B:
      return 16;
    case kFormat8B:
    case kFormat8H:
      return 8;
    case kFormat4H:
    case kFormat4S:
      return 4;
    case kFormat2S:
    case kFormat2D:
      return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD:
      return 1;
    default:
      UNREACHABLE();
  }
}

int MaxLaneCountFromFormat(VectorFormat vform) {
  DCHECK_NE(vform, kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 2;
    default:
      UNREACHABLE();
  }
}

// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  DCHECK_NE(vform, kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD:
      return false;
    default:
      return true;
  }
}

int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}

int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}

uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
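
// Illustrative example: for kFormat4H (16-bit lanes), MaxIntFromFormat()
// returns 0x7FFF, MinIntFromFormat() returns -0x8000, and
// MaxUintFromFormat() returns 0xFFFF.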

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(IntegerFormatMap());
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(format);
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(format0, format1);
}

NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
                                     const NEONFormatMap* format0,
                                     const NEONFormatMap* format1,
                                     const NEONFormatMap* format2) {
  instrbits_ = instr->InstructionBits();
  SetFormatMaps(format0, format1, format2);
}

void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
                                      const NEONFormatMap* format1,
                                      const NEONFormatMap* format2) {
  DCHECK_NOT_NULL(format0);
  formats_[0] = format0;
  formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
  formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
}

void NEONFormatDecoder::SetFormatMap(unsigned index,
                                     const NEONFormatMap* format) {
  DCHECK_LT(index, arraysize(formats_));
  DCHECK_NOT_NULL(format);
  formats_[index] = format;
}

const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
  return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}

const char* NEONFormatDecoder::Substitute(const char* string,
                                          SubstitutionMode mode0,
                                          SubstitutionMode mode1,
                                          SubstitutionMode mode2) {
  snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
           GetSubstitute(1, mode1), GetSubstitute(2, mode2));
  return form_buffer_;
}

const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
  if ((instrbits_ & NEON_Q) != 0) {
    snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
    return mne_buffer_;
  }
  return mnemonic;
}

VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
  return GetVectorFormat(formats_[format_index]);
}

VectorFormat NEONFormatDecoder::GetVectorFormat(
    const NEONFormatMap* format_map) {
  static const VectorFormat vform[] = {
      kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
      kFormat2S,        kFormat4S, kFormat1D,  kFormat2D, kFormatB,
      kFormatH,         kFormatS,  kFormatD};
  DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
  return vform[GetNEONFormat(format_map)];
}

const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
  if (mode == kFormat) {
    return NEONFormatAsString(GetNEONFormat(formats_[index]));
  }
  DCHECK_EQ(mode, kPlaceholder);
  return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}

NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
  return format_map->map[PickBits(format_map->bits)];
}

const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
  static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
                                  "2s",        "4s", "1d",  "2d", "b",
                                  "h",         "s",  "d"};
  DCHECK_LT(format, arraysize(formats));
  return formats[format];
}

const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
  DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
         (format == NF_D) || (format == NF_UNDEF));
  static const char* formats[] = {
      "undefined", "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined", "'B",
      "'H",        "'S",        "'D"};
  return formats[format];
}

uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
  uint8_t result = 0;
  for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
    if (bits[b] == 0) break;
    result <<= 1;
    result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
  }
  return result;
}
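
// Illustrative example: for a map with bits == {30, 23} (zero-terminated),
// PickBits() returns (bit 30 << 1) | bit 23 of the instruction, indexing one
// of four entries in the map.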
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64