#if V8_TARGET_ARCH_ARM64

#include "src/arm64/assembler-arm64.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
#include "src/string-constants.h"

void CpuFeatures::ProbeImpl(bool cross_compile) {
  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;
}

void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}
CPURegister CPURegList::PopLowestIndex() {
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  DCHECK((1LL << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}

CPURegister CPURegList::PopHighestIndex() {
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  DCHECK((1LL << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}
void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kVRegister) {
    Remove(GetCalleeSavedV(RegisterSizeInBits()));
  } else {
    DCHECK_EQ(type(), CPURegister::kNoRegister);
  }
}
CPURegList CPURegList::GetCalleeSaved(int size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}

CPURegList CPURegList::GetCalleeSavedV(int size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}

CPURegList CPURegList::GetCallerSaved(int size) {
#if defined(V8_OS_WIN)
  // x18 is reserved as the platform register on Windows.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17);
#else
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
#endif
  list.Combine(lr);
  return list;
}

CPURegList CPURegList::GetCallerSavedV(int size) {
  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
  return list;
}

CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));
#if !defined(V8_OS_WIN)
  // x18 is the platform register on Windows and is not safepoint-saved there.
  list.Combine(18);
#endif
  return list;
}

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
bool RelocInfo::IsCodedSpecially() {
  // Being specially coded on ARM64 means that the target is an immediate
  // branch rather than a load from the constant pool.
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  if (instr->IsLdrLiteralX()) {
    return false;
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return true;
  }
}
bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  // The deoptimization id is loaded by the movz that precedes the call.
  Instruction* movz_instr = reinterpret_cast<Instruction*>(pc_)->preceding();
  DCHECK(movz_instr->IsMovz());
  uint64_t imm = static_cast<uint64_t>(movz_instr->ImmMoveWide())
                 << (16 * movz_instr->ShiftMoveWide());
  DCHECK_LE(imm, INT_MAX);
  return static_cast<int>(imm);
}
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  if (instr->IsLdrLiteralX()) {
    return static_cast<uint32_t>(
        Memory<Address>(Assembler::target_pointer_address_at(pc_)));
  } else {
    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
    return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
  }
}
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].bit();
    } else if (regs[i].IsVRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].bit();
    } else {
      DCHECK(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
      CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
      CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}
bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
                   const VRegister& reg3, const VRegister& reg4) {
  DCHECK(reg1.IsValid());
  return (!reg2.IsValid() || reg2.IsSameFormat(reg1)) &&
         (!reg3.IsValid() || reg3.IsSameFormat(reg1)) &&
         (!reg4.IsValid() || reg4.IsSameFormat(reg1));
}
bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
                    const VRegister& reg3, const VRegister& reg4) {
  DCHECK(reg1.IsValid());
  if (!reg2.IsValid()) {
    DCHECK(!reg3.IsValid() && !reg4.IsValid());
    return true;
  } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) {
    return false;
  }

  if (!reg3.IsValid()) {
    DCHECK(!reg4.IsValid());
    return true;
  } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) {
    return false;
  }

  if (!reg4.IsValid()) {
    return true;
  } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) {
    return false;
  }

  return true;
}
void Immediate::InitializeHandle(Handle<HeapObject> handle) {
  value_ = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::EMBEDDED_OBJECT;
}
bool Operand::NeedsRelocation(const Assembler* assembler) const {
  RelocInfo::Mode rmode = immediate_.rmode();

  if (RelocInfo::IsOnlyForSerializer(rmode)) {
    return assembler->options().record_reloc_info_for_serialization;
  }

  return !RelocInfo::IsNone(rmode);
}
bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
                               int offset) {
  auto existing = entry_map.find(data);
  if (existing == entry_map.end()) {
    entry_map[data] = static_cast<int>(entries_.size());
    entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
    return true;
  }
  int index = existing->second;
  entries_[index].second.push_back(offset);
  return false;
}
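// AddSharedEntry returns true when |data| was not yet in |entry_map| (a new
// pool entry was created for it) and false when the use at |offset| was simply
// attached to an existing entry. RecordEntry() below uses this result to
// decide whether relocation info still needs to be written for the
// instruction that references the constant.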
bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
  DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
         mode != RelocInfo::VENEER_POOL &&
         mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
         mode != RelocInfo::DEOPT_INLINING_ID &&
         mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);

  bool write_reloc_info = true;

  uint64_t raw_data = static_cast<uint64_t>(data);
  int offset = assm_->pc_offset();

  if (RelocInfo::IsShareableRelocMode(mode)) {
    write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
  } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
    // A zero data value is a placeholder and must not be shared.
    write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
  } else {
    entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
  }

  if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
    // Request constant pool emission after the next instruction.
    assm_->SetNextConstPoolCheckIn(1);
  }

  return write_reloc_info;
}
int ConstPool::DistanceToFirstUse() {
  DCHECK_GE(first_use_, 0);
  return assm_->pc_offset() - first_use_;
}

int ConstPool::MaxPcOffset() {
  // If the pool is empty, we can never get out of range.
  if (IsEmpty()) return kMaxInt;
  // Entries are not necessarily emitted in the order they are added, so in the
  // worst case the first constant pool use will be accessing the last entry.
  return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}

int ConstPool::WorstCaseSize() {
  if (IsEmpty()) return 0;
  // Worst-case prologue is four instructions (branch over the pool, marker,
  // guard, alignment nop); all entries are 64-bit for now.
  return 4 * kInstrSize + EntryCount() * kPointerSize;
}

int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
  if (IsEmpty()) return 0;
  // Prologue: an optional branch over the pool, the two-instruction marker and
  // guard, and a nop if the entries need 64-bit alignment.
  int prologue_size = require_jump ? kInstrSize : 0;
  prologue_size += 2 * kInstrSize;
  prologue_size +=
      IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;
  // All entries are 64-bit for now.
  return prologue_size + EntryCount() * kPointerSize;
}
void ConstPool::Emit(bool require_jump) {
  DCHECK(!assm_->is_const_pool_blocked());
  // Prevent recursive pool emission while the pool itself is being written.
  Assembler::BlockPoolsScope block_pools(assm_);

  int size = SizeIfEmittedAtCurrentPc(require_jump);
  Label size_check;
  assm_->bind(&size_check);

  assm_->RecordConstPool(size);

  // Emit an optional branch over the pool.
  Label after_pool;
  if (require_jump) assm_->b(&after_pool);

  // Emit the header (size marker and crash guard), then the entries.
  assm_->RecordComment("[ Constant Pool");
  EmitMarker();
  EmitGuard();
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) {
    assm_->bind(&after_pool);
  }

  DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(size));
}
void ConstPool::Clear() {
  shared_entries_.clear();
  handle_to_index_map_.clear();
  entries_.clear();
  first_use_ = -1;
}
void ConstPool::EmitMarker() {
  // A constant pool size is expressed in number of 32-bit words. Currently all
  // entries are 64-bit, plus one word for the crash guard and possibly one for
  // alignment.
  int word_count = EntryCount() * 2 + 1 +
                   (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
  assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
              Assembler::Rt(xzr));
}
MemOperand::PairResult MemOperand::AreConsistentForPair(
    const MemOperand& operandA, const MemOperand& operandB,
    int access_size_log2) {
  DCHECK_GE(access_size_log2, 0);
  DCHECK_LE(access_size_log2, 3);
  // Step one: check that they share the same base, that the mode is Offset
  // and that the offset is a multiple of the access size.
  if (!operandA.base().Is(operandB.base()) ||
      (operandA.addrmode() != Offset) ||
      (operandB.addrmode() != Offset) ||
      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
    return kNotPair;
  }
  // Step two: check that the offsets are contiguous and in range for ldp/stp.
  if ((operandB.offset() == operandA.offset() + (1LL << access_size_log2)) &&
      is_int7(operandA.offset() >> access_size_log2)) {
    return kPairAB;
  }
  if ((operandA.offset() == operandB.offset() + (1LL << access_size_log2)) &&
      is_int7(operandB.offset() >> access_size_log2)) {
    return kPairBA;
  }
  return kNotPair;
}
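// Illustrative use only (a sketch, not code from this file): a peephole pass
// merging two adjacent 64-bit stores might check
//   if (MemOperand::AreConsistentForPair(a, b, 3) == MemOperand::kPairAB) {
//     // emit a single stp in place of the two str instructions
//   }
// The result only states whether the operands describe a contiguous, in-range
// pair (in either order); it does not perform any rewriting itself.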
void ConstPool::EmitGuard() {
#ifdef DEBUG
  // The pool guard must directly follow the marker emitted by EmitMarker().
  Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
  DCHECK(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif
  assm_->EmitPoolGuard();
}
void ConstPool::EmitEntries() {
  DCHECK(IsAligned(assm_->pc_offset(), 8));

  for (const auto& entry : entries_) {
    for (const auto& pc : entry.second) {
      Instruction* instr = assm_->InstructionAt(pc);

      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
      instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
    }

    assm_->dc64(entry.first);
  }
  Clear();
}
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size),
      constpool_(this),
      unresolved_branches_() {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}

Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK_EQ(const_pool_blocked_nesting_, 0);
  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
}
void Assembler::Reset() {
#ifdef DEBUG
  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  DCHECK_EQ(const_pool_blocked_nesting_, 0);
  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  ReserveCodeTargetSpace(64);
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  constpool_.Clear();
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        Handle<HeapObject> object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        set_target_address_at(pc, 0 /* unused */, object.address());
        break;
      }
      case HeapObjectRequest::kCodeStub: {
        request.code_stub()->set_isolate(isolate);
        Instruction* instr = reinterpret_cast<Instruction*>(pc);
        DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
        DCHECK_EQ(instr->ImmPCOffset() % kInstrSize, 0);
        UpdateCodeTarget(instr->ImmPCOffset() >> kInstrSizeLog2,
                         request.code_stub()->GetCode());
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        set_target_address_at(pc, 0 /* unused */,
                              str->AllocateStringConstant(isolate).address());
        break;
      }
    }
  }
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(constpool_.IsEmpty());

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up the code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size =
        static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                         reloc_info_writer.pos());
    desc->constant_pool_size = 0;
    desc->unwinding_info_size = 0;
    desc->unwinding_info = nullptr;
  }
}
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
void Assembler::CheckLabelLinkChain(Label const* label) {
#ifdef DEBUG
  if (label->is_linked()) {
    static const int kMaxLinksToCheck = 64;  // Avoid O(n2) behaviour.
    int links_checked = 0;
    int64_t linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      if (++links_checked > kMaxLinksToCheck) break;
      Instruction* link = InstructionAt(linkoffset);
      int64_t linkpcoffset = link->ImmPCOffset();
      int64_t prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}
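// Branches (and other pc-relative instructions) waiting on an unbound label
// form a chain threaded through their own immediate offset fields:
// label->pos() is the buffer offset of the most recently linked instruction,
// each link's ImmPCOffset() points back to the previous link, and a link whose
// offset points at itself terminates the chain. The helpers below walk and
// edit that chain.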
void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  DCHECK(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  DCHECK(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(
          static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
    }
  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the
    // chain.
    prev_link->SetImmPCOffsetTarget(options(), prev_link);
  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(options(), next_link);
    } else if (label_veneer != nullptr) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(options(), prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(options(), label_veneer);
        link = next_link;
      }
    } else {
      // Without a veneer, the previous link must still be able to reach the
      // next one directly.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
    }
  }

  CheckLabelLinkChain(label);
}
void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.
  DCHECK(!label->is_near_linked());
  DCHECK(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // On each iteration, the last link in the chain is patched to point to the
  // label and then removed from the chain, until none remain.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());

    CheckLabelLinkChain(label);

    DCHECK_GE(linkoffset, 0);
    DCHECK(linkoffset < pc_offset());
    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    DCHECK_GE(prevlinkoffset, 0);

    // Update the link to point to the label.
    if (link->IsUnresolvedInternalReference()) {
      // Internal references do not get patched to an instruction but directly
      // to an address.
      internal_reference_positions_.push_back(linkoffset);
      PatchingAssembler patcher(options(), reinterpret_cast<byte*>(link), 2);
      patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
    } else {
      link->SetImmPCOffsetTarget(options(),
                                 reinterpret_cast<Instruction*>(pc_));
    }

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  DCHECK(label->is_bound());
  DCHECK(!label->is_linked());
}
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  DCHECK_EQ(sizeof(*pc_), 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so referring instructions link directly to it.
    offset = label->pos() - pc_offset();
    DCHECK_LE(offset, 0);
  } else {
    if (label->is_linked()) {
      // Add the referring instruction onto the end of the label's link chain.
      offset = label->pos() - pc_offset();
      DCHECK_NE(offset, kStartOfLabelLinkChain);
    } else {
      // The label is unused, so the referring instruction starts a new chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }
  return offset;
}
void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction* link = InstructionAt(link_offset);
    link_pcoffset = static_cast<int>(link->ImmPCOffset());

    // Remove the far-branch bookkeeping for this link, if any.
    if (link->IsImmBranch()) {
      int max_reachable_pc =
          static_cast<int>(InstructionOffset(link) +
                           Instruction::ImmBranchRange(link->BranchType()));
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be resolved when it is bound, so delete any
    // associated far-branch info first.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}
void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks from happening by setting the next check to
    // the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}

void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    DCHECK(pc_offset() < constpool_.MaxPcOffset());
    // Either emission is still blocked by no_const_pool_before_, or the next
    // emit will trigger a check.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}

bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}
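// Pool emission is typically suppressed around any sequence whose exact layout
// matters. A sketch of the usual pattern (the scope object is assumed here to
// bracket the Start/End calls above; ConstPool::Emit() relies on it):
//   {
//     Assembler::BlockPoolsScope scope(assm);  // blocks const and veneer pools
//     // ... emit a sequence that must not be interrupted by a pool ...
//   }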
bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is 'ldr xzr, #<size of pool>'.
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);

  // It is still worth asserting that the marker is complete ('blr xzr').
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == kZeroRegCode));

  return result;
}
int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the constants.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message = reinterpret_cast<const char*>(
        instr->InstructionAtOffset(kDebugMessageOffset));
    int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
    return RoundUp(size, kInstrSize) / kInstrSize;
  }
  // Same for printf support.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstrSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}
void Assembler::EmitPoolGuard() {
  // Only one instruction may be generated here; callers rely on the size.
  Emit(BLR | Rn(xzr));
}

void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}

void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}
void Assembler::br(const Register& xn) {
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}

void Assembler::blr(const Register& xn) {
  DCHECK(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  DCHECK(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}

void Assembler::ret(const Register& xn) {
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}

void Assembler::b(int imm26) { Emit(B | ImmUncondBranch(imm26)); }

void Assembler::b(Label* label) { b(LinkAndGetInstructionOffsetTo(label)); }

void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}

void Assembler::b(Label* label, Condition cond) {
  b(LinkAndGetInstructionOffsetTo(label), cond);
}

void Assembler::bl(int imm26) { Emit(BL | ImmUncondBranch(imm26)); }

void Assembler::bl(Label* label) { bl(LinkAndGetInstructionOffsetTo(label)); }

void Assembler::cbz(const Register& rt, int imm19) {
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbz(const Register& rt, Label* label) {
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::cbnz(const Register& rt, int imm19) {
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}

void Assembler::cbnz(const Register& rt, Label* label) {
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}

void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}

void Assembler::adr(const Register& rd, int imm21) {
  DCHECK(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}

void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}
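// Typical use of the label-based overloads above (an illustrative sketch, not
// code from this file; the '__' macro and register choice are assumptions):
//   Label done;
//   __ cbz(x0, &done);  // forward branch, linked through the label chain
//   // ...
//   __ bind(&done);     // patches every linked branch to point here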
void Assembler::add(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}

void Assembler::adds(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}

void Assembler::cmn(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}

void Assembler::sub(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}

void Assembler::subs(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}

void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}

void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}

void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}

void Assembler::adc(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}

void Assembler::adcs(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}

void Assembler::sbc(const Register& rd, const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}

void Assembler::sbcs(const Register& rd, const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}

void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}

void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}

void Assembler::and_(const Register& rd, const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}

void Assembler::ands(const Register& rd, const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}

void Assembler::tst(const Register& rn, const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}

void Assembler::bic(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}

void Assembler::bics(const Register& rd, const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}

void Assembler::orr(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}

void Assembler::orn(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}

void Assembler::eor(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}

void Assembler::eon(const Register& rd, const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}

void Assembler::lslv(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::lsrv(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::asrv(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::rorv(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}
void Assembler::bfm(const Register& rd, const Register& rn, int immr,
                    int imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N | ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}

void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
                     int imms) {
  DCHECK(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N | ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}

void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
                     int imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N | ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}

void Assembler::extr(const Register& rd, const Register& rn,
                     const Register& rm, int lsb) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.SizeInBits()) | Rn(rn) |
       Rd(rd));
}

void Assembler::csel(const Register& rd, const Register& rn,
                     const Register& rm, Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}

void Assembler::csinc(const Register& rd, const Register& rn,
                      const Register& rm, Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}

void Assembler::csinv(const Register& rd, const Register& rn,
                      const Register& rm, Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}

void Assembler::csneg(const Register& rd, const Register& rn,
                      const Register& rm, Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}

void Assembler::cset(const Register& rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, NegateCondition(cond));
}

void Assembler::csetm(const Register& rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, NegateCondition(cond));
}

void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinc(rd, rn, rn, NegateCondition(cond));
}

void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinv(rd, rn, rn, NegateCondition(cond));
}

void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csneg(rd, rn, rn, NegateCondition(cond));
}

void Assembler::ConditionalSelect(const Register& rd, const Register& rn,
                                  const Register& rm, Condition cond,
                                  ConditionalSelectOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}

void Assembler::ccmn(const Register& rn, const Operand& operand,
                     StatusFlags nzcv, Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}

void Assembler::ccmp(const Register& rn, const Operand& operand,
                     StatusFlags nzcv, Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}

void Assembler::DataProcessing3Source(const Register& rd, const Register& rn,
                                      const Register& rm, const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}
void Assembler::mul(const Register& rd, const Register& rn,
                    const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}

void Assembler::madd(const Register& rd, const Register& rn,
                     const Register& rm, const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}

void Assembler::mneg(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}

void Assembler::msub(const Register& rd, const Register& rn,
                     const Register& rm, const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}

void Assembler::smaddl(const Register& rd, const Register& rn,
                       const Register& rm, const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}

void Assembler::smsubl(const Register& rd, const Register& rn,
                       const Register& rm, const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}

void Assembler::umaddl(const Register& rd, const Register& rn,
                       const Register& rm, const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}

void Assembler::umsubl(const Register& rd, const Register& rn,
                       const Register& rm, const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}

void Assembler::smull(const Register& rd, const Register& rn,
                      const Register& rm) {
  DCHECK(rd.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}

void Assembler::smulh(const Register& rd, const Register& rn,
                      const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}

void Assembler::sdiv(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::udiv(const Register& rd, const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}

void Assembler::rbit(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}

void Assembler::rev16(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}

void Assembler::rev32(const Register& rd, const Register& rn) {
  DCHECK(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV32_x);
}

void Assembler::rev(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}

void Assembler::clz(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
}

void Assembler::cls(const Register& rd, const Register& rn) {
  DataProcessing1Source(rd, rn, CLS);
}
void Assembler::ldp(const CPURegister& rt, const CPURegister& rt2,
                    const MemOperand& src) {
  LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}

void Assembler::stp(const CPURegister& rt, const CPURegister& rt2,
                    const MemOperand& dst) {
  LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}

void Assembler::ldpsw(const Register& rt, const Register& rt2,
                      const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStorePair(rt, rt2, src, LDPSW_x);
}

void Assembler::LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                              const MemOperand& addr, LoadStorePairOp op) {
  // 'rt' and 'rt2' can only be aliased for stores.
  DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
  DCHECK(AreSameSizeAndType(rt, rt2));
  DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
  int offset = static_cast<int>(addr.offset());

  Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                ImmLSPair(offset, CalcLSPairDataSize(op));

  Instr addrmodeop;
  if (addr.IsImmediateOffset()) {
    addrmodeop = LoadStorePairOffsetFixed;
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    DCHECK(!rt2.Is(addr.base()));
    DCHECK_NE(addr.offset(), 0);
    if (addr.IsPreIndex()) {
      addrmodeop = LoadStorePairPreIndexFixed;
    } else {
      DCHECK(addr.IsPostIndex());
      addrmodeop = LoadStorePairPostIndexFixed;
    }
  }
  Emit(addrmodeop | memop);
}
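// The three addressing modes accepted above map onto distinct fixed encodings:
// an immediate offset leaves the base register unchanged, while the pre- and
// post-index forms write the updated address back to the base, which is why
// the base must not alias rt/rt2 in those modes (see the DCHECKs above).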
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRB_w);
}

void Assembler::strb(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRB_w);
}

void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}

void Assembler::ldrh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, LDRH_w);
}

void Assembler::strh(const Register& rt, const MemOperand& dst) {
  LoadStore(rt, dst, STRH_w);
}

void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
  LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}

void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, LoadOpFor(rt));
}

void Assembler::str(const CPURegister& rt, const MemOperand& src) {
  LoadStore(rt, src, StoreOpFor(rt));
}

void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
  DCHECK(rt.Is64Bits());
  LoadStore(rt, src, LDRSW_x);
}

void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
  // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
  // constant pool. It should not be emitted.
  DCHECK(!rt.IsZero());
  Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}
Operand Operand::EmbeddedNumber(double number) {
  int32_t smi;
  if (DoubleToSmiInteger(number, &smi)) {
    return Operand(Immediate(Smi::FromInt(smi)));
  }
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.heap_object_request_.emplace(number);
  DCHECK(result.IsHeapObjectRequest());
  return result;
}

Operand Operand::EmbeddedCode(CodeStub* stub) {
  Operand result(0, RelocInfo::CODE_TARGET);
  result.heap_object_request_.emplace(stub);
  DCHECK(result.IsHeapObjectRequest());
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.heap_object_request_.emplace(str);
  DCHECK(result.IsHeapObjectRequest());
  return result;
}

void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
  if (operand.IsHeapObjectRequest()) {
    RequestHeapObject(operand.heap_object_request());
    ldr(rt, operand.immediate_for_heap_object_request());
  } else {
    ldr(rt, operand.immediate());
  }
}

void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
  // Currently we only support 64-bit literals.
  DCHECK(rt.Is64Bits());

  RecordRelocInfo(imm.rmode(), imm.value());
  BlockConstPoolFor(1);
  // The load will be patched when the constant pool is emitted; patching code
  // expects a load literal with offset 0.
  ldr_pcrel(rt, 0);
}
void Assembler::ldar(const Register& rt, const Register& rn) {
  DCHECK(rn.Is64Bits());
  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
  Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::ldaxr(const Register& rt, const Register& rn) {
  DCHECK(rn.Is64Bits());
  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
  Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::stlr(const Register& rt, const Register& rn) {
  DCHECK(rn.Is64Bits());
  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
  Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::stlxr(const Register& rs, const Register& rt,
                      const Register& rn) {
  DCHECK(rn.Is64Bits());
  DCHECK(!rs.Is(rt) && !rs.Is(rn));
  LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
  Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::ldarb(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::ldaxrb(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::stlrb(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::stlxrb(const Register& rs, const Register& rt,
                       const Register& rn) {
  DCHECK(rs.Is32Bits());
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  DCHECK(!rs.Is(rt) && !rs.Is(rn));
  Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::ldarh(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::ldaxrh(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::stlrh(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::stlxrh(const Register& rs, const Register& rt,
                       const Register& rn) {
  DCHECK(rs.Is32Bits());
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  DCHECK(!rs.Is(rt) && !rs.Is(rn));
  Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}

void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn,
                                const VRegister& vm, NEON3DifferentOp vop) {
  DCHECK(AreSameFormat(vn, vm));
  DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
         (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
         (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
         (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
  Instr format, op = vop;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vn);
  } else {
    format = VFormat(vn);
  }
  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
}

void Assembler::NEON3DifferentW(const VRegister& vd, const VRegister& vn,
                                const VRegister& vm, NEON3DifferentOp vop) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
         (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
         (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
  Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
}

void Assembler::NEON3DifferentHN(const VRegister& vd, const VRegister& vn,
                                 const VRegister& vm, NEON3DifferentOp vop) {
  DCHECK(AreSameFormat(vm, vn));
  DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
         (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
         (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
  Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
}
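// The NEON "long" and "halving-narrow" 3-operand instructions below are
// generated with X-macro lists: each V(...) entry names the mnemonic, its
// opcode and the condition its operands must satisfy, and DEFINE_ASM_FUNC
// expands every entry into a thin wrapper around NEON3DifferentL /
// NEON3DifferentHN.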
#define NEON_3DIFF_LONG_LIST(V)                                                \
  V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B())                             \
  V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B())                          \
  V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD())                              \
  V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ())                            \
  V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD())                              \
  V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ())                            \
  V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD())                              \
  V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ())                            \
  V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD())                              \
  V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ())                            \
  V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD())                              \
  V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ())                            \
  V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD())                              \
  V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ())                            \
  V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD())                              \
  V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ())                            \
  V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD())                              \
  V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ())                            \
  V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD())                              \
  V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ())                            \
  V(smull, NEON_SMULL, vn.IsVector() && vn.IsD())                              \
  V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ())                            \
  V(umull, NEON_UMULL, vn.IsVector() && vn.IsD())                              \
  V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ())                            \
  V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD())                              \
  V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ())                            \
  V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD())                              \
  V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ())                            \
  V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD())                              \
  V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ())                            \
  V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())   \
  V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
  V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())   \
  V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
  V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())   \
  V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())

#define DEFINE_ASM_FUNC(FN, OP, AS)                            \
  void Assembler::FN(const VRegister& vd, const VRegister& vn, \
                     const VRegister& vm) {                    \
    DCHECK(AS);                                                \
    NEON3DifferentL(vd, vn, vm, OP);                           \
  }
NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

#define NEON_3DIFF_HN_LIST(V)        \
  V(addhn, NEON_ADDHN, vd.IsD())     \
  V(addhn2, NEON_ADDHN2, vd.IsQ())   \
  V(raddhn, NEON_RADDHN, vd.IsD())   \
  V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
  V(subhn, NEON_SUBHN, vd.IsD())     \
  V(subhn2, NEON_SUBHN2, vd.IsQ())   \
  V(rsubhn, NEON_RSUBHN, vd.IsD())   \
  V(rsubhn2, NEON_RSUBHN2, vd.IsQ())

#define DEFINE_ASM_FUNC(FN, OP, AS)                            \
  void Assembler::FN(const VRegister& vd, const VRegister& vn, \
                     const VRegister& vm) {                    \
    DCHECK(AS);                                                \
    NEON3DifferentHN(vd, vn, vm, OP);                          \
  }
NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::NEONPerm(const VRegister& vd, const VRegister& vn,
                         const VRegister& vm, NEONPermOp op) {
  DCHECK(AreSameFormat(vd, vn, vm));
  DCHECK(!vd.Is1D());
  Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}
void Assembler::trn1(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm) {
  NEONPerm(vd, vn, vm, NEON_TRN1);
}

void Assembler::trn2(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm) {
  NEONPerm(vd, vn, vm, NEON_TRN2);
}

void Assembler::uzp1(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm) {
  NEONPerm(vd, vn, vm, NEON_UZP1);
}

void Assembler::uzp2(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm) {
  NEONPerm(vd, vn, vm, NEON_UZP2);
}

void Assembler::zip1(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm) {
  NEONPerm(vd, vn, vm, NEON_ZIP1);
}

void Assembler::zip2(const VRegister& vd, const VRegister& vn,
                     const VRegister& vm) {
  NEONPerm(vd, vn, vm, NEON_ZIP2);
}

void Assembler::NEONShiftImmediate(const VRegister& vd, const VRegister& vn,
                                   NEONShiftImmediateOp op, int immh_immb) {
  DCHECK(AreSameFormat(vd, vn));
  Instr q, scalar;
  if (vn.IsScalar()) {
    q = NEON_Q;
    scalar = NEONScalar;
  } else {
    q = vd.IsD() ? 0 : NEON_Q;
    scalar = 0;
  }
  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
}

void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
                                       const VRegister& vn, int shift,
                                       NEONShiftImmediateOp op) {
  int laneSizeInBits = vn.LaneSizeInBits();
  DCHECK((shift >= 0) && (shift < laneSizeInBits));
  NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
}

void Assembler::NEONShiftRightImmediate(const VRegister& vd,
                                        const VRegister& vn, int shift,
                                        NEONShiftImmediateOp op) {
  int laneSizeInBits = vn.LaneSizeInBits();
  DCHECK((shift >= 1) && (shift <= laneSizeInBits));
  NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
}

void Assembler::NEONShiftImmediateL(const VRegister& vd, const VRegister& vn,
                                    int shift, NEONShiftImmediateOp op) {
  int laneSizeInBits = vn.LaneSizeInBits();
  DCHECK((shift >= 0) && (shift < laneSizeInBits));
  int immh_immb = (laneSizeInBits + shift) << 16;

  DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
         (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
         (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
  Instr q;
  q = vn.IsD() ? 0 : NEON_Q;
  Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
}

void Assembler::NEONShiftImmediateN(const VRegister& vd, const VRegister& vn,
                                    int shift, NEONShiftImmediateOp op) {
  Instr q, scalar;
  int laneSizeInBits = vd.LaneSizeInBits();
  DCHECK((shift >= 1) && (shift <= laneSizeInBits));
  int immh_immb = (2 * laneSizeInBits - shift) << 16;

  if (vn.IsScalar()) {
    DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
           (vd.Is1S() && vn.Is1D()));
    q = NEON_Q;
    scalar = NEONScalar;
  } else {
    DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
           (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
           (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
    scalar = 0;
    q = vd.IsD() ? 0 : NEON_Q;
  }
  Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
}
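// NEON immediate shifts encode the lane size and the shift amount together in
// the immh:immb field (shifted into place by '<< 16' above): left and
// lengthening shifts use (lane_size_in_bits + shift), while right and
// narrowing shifts use (2 * lane_size_in_bits - shift), as computed by the
// helpers above.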
void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
}

void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
}

void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) {
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
}

void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) {
  NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
}

void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) {
  NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
}

void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsD());
  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
}

void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsQ());
  NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
}

void Assembler::sxtl(const VRegister& vd, const VRegister& vn) {
  sshll(vd, vn, 0);
}

void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) {
  sshll2(vd, vn, 0);
}

void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsD());
  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
}

void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsQ());
  NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
}

void Assembler::uxtl(const VRegister& vd, const VRegister& vn) {
  ushll(vd, vn, 0);
}

void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) {
  ushll2(vd, vn, 0);
}

void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
}

void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
}

void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
}

void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
}

void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
}

void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
}

void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
}

void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
}

void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
}
void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsD());
  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
}

void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
}

void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsD());
  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
}

void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
}

void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
}

void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
}

void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
}

void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
}

void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
}

void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
}

void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
}

void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn,
                          int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
}

void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
}

void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
}

void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
}

void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK(vn.IsVector() && vd.IsQ());
  NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
}

void Assembler::uaddw(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_UADDW);
}

void Assembler::uaddw2(const VRegister& vd, const VRegister& vn,
                       const VRegister& vm) {
  DCHECK(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
}

void Assembler::saddw(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_SADDW);
}

void Assembler::saddw2(const VRegister& vd, const VRegister& vn,
                       const VRegister& vm) {
  DCHECK(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
}

void Assembler::usubw(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_USUBW);
}

void Assembler::usubw2(const VRegister& vd, const VRegister& vn,
                       const VRegister& vm) {
  DCHECK(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
}

void Assembler::ssubw(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK(vm.IsD());
  NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
}

void Assembler::ssubw2(const VRegister& vd, const VRegister& vn,
                       const VRegister& vm) {
  DCHECK(vm.IsQ());
  NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
}
void Assembler::mov(const Register& rd, const Register& rm) {
  // Moves involving the stack pointer are encoded as add immediate with a
  // second operand of zero. Otherwise, orr with the zero register is used.
  if (rd.IsSP() || rm.IsSP()) {
    add(rd, rm, 0);
  } else {
    orr(rd, AppropriateZeroRegFor(rd), rm);
  }
}

void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) {
  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vd.LaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1: format = NEON_16B; break;
    case 2: format = NEON_8H; break;
    case 4: format = NEON_4S; break;
    default:
      DCHECK_EQ(lane_size, 8);
      format = NEON_2D;
      break;
  }

  DCHECK((0 <= vd_index) &&
         (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
}

void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) {
  DCHECK_GE(vn.SizeInBytes(), 4);
  umov(rd, vn, vn_index);
}

void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) {
  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
  // number of lanes, and T is b, h or s.
  int lane_size = vn.LaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1: format = NEON_16B; break;
    case 2: format = NEON_8H; break;
    default:
      DCHECK_EQ(lane_size, 4);
      DCHECK(rd.IsX());
      format = NEON_4S;
      break;
  }
  Instr q = rd.IsW() ? 0 : NEON_Q;
  DCHECK((0 <= vn_index) &&
         (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
}
2339 void Assembler::cls(
const VRegister& vd,
const VRegister& vn) {
2340 DCHECK(AreSameFormat(vd, vn));
2341 DCHECK(!vd.Is1D() && !vd.Is2D());
2342 Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
2345 void Assembler::clz(
const VRegister& vd,
const VRegister& vn) {
2346 DCHECK(AreSameFormat(vd, vn));
2347 DCHECK(!vd.Is1D() && !vd.Is2D());
2348 Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
2351 void Assembler::cnt(
const VRegister& vd,
const VRegister& vn) {
2352 DCHECK(AreSameFormat(vd, vn));
2353 DCHECK(vd.Is8B() || vd.Is16B());
2354 Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
2357 void Assembler::rev16(
const VRegister& vd,
const VRegister& vn) {
2358 DCHECK(AreSameFormat(vd, vn));
2359 DCHECK(vd.Is8B() || vd.Is16B());
2360 Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
2363 void Assembler::rev32(
const VRegister& vd,
const VRegister& vn) {
2364 DCHECK(AreSameFormat(vd, vn));
2365 DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
2366 Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
2369 void Assembler::rev64(
const VRegister& vd,
const VRegister& vn) {
2370 DCHECK(AreSameFormat(vd, vn));
2371 DCHECK(!vd.Is1D() && !vd.Is2D());
2372 Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
2375 void Assembler::ursqrte(
const VRegister& vd,
const VRegister& vn) {
2376 DCHECK(AreSameFormat(vd, vn));
2377 DCHECK(vd.Is2S() || vd.Is4S());
2378 Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
2381 void Assembler::urecpe(
const VRegister& vd,
const VRegister& vn) {
2382 DCHECK(AreSameFormat(vd, vn));
2383 DCHECK(vd.Is2S() || vd.Is4S());
2384 Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
void Assembler::NEONAddlp(const VRegister& vd, const VRegister& vn,
                          NEON2RegMiscOp op) {
  DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) || (op == NEON_SADALP) ||
         (op == NEON_UADALP));

  DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
         (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
         (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
}

void Assembler::saddlp(const VRegister& vd, const VRegister& vn) {
  NEONAddlp(vd, vn, NEON_SADDLP);
}

void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) {
  NEONAddlp(vd, vn, NEON_UADDLP);
}

void Assembler::sadalp(const VRegister& vd, const VRegister& vn) {
  NEONAddlp(vd, vn, NEON_SADALP);
}

void Assembler::uadalp(const VRegister& vd, const VRegister& vn) {
  NEONAddlp(vd, vn, NEON_UADALP);
}
void Assembler::NEONAcrossLanesL(const VRegister& vd, const VRegister& vn,
                                 NEONAcrossLanesOp op) {
  DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
         (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
         (vn.Is4S() && vd.Is1D()));
  Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
}

void Assembler::saddlv(const VRegister& vd, const VRegister& vn) {
  NEONAcrossLanesL(vd, vn, NEON_SADDLV);
}

void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) {
  NEONAcrossLanesL(vd, vn, NEON_UADDLV);
}

void Assembler::NEONAcrossLanes(const VRegister& vd, const VRegister& vn,
                                NEONAcrossLanesOp op) {
  DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
         (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
         (vn.Is4S() && vd.Is1S()));
  if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
    Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
  } else {
    Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
  }
}
#define NEON_ACROSSLANES_LIST(V)      \
  V(fmaxv, NEON_FMAXV, vd.Is1S())     \
  V(fminv, NEON_FMINV, vd.Is1S())     \
  V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
  V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
  V(addv, NEON_ADDV, true)            \
  V(smaxv, NEON_SMAXV, true)          \
  V(sminv, NEON_SMINV, true)          \
  V(umaxv, NEON_UMAXV, true)          \
  V(uminv, NEON_UMINV, true)

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
    DCHECK(AS);                                                  \
    NEONAcrossLanes(vd, vn, OP);                                 \
  }
NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) {
  ins(vd, vd_index, rn);
}
void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) {
  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vn.LaneSizeInBytes();
  NEONFormatField format;
  Instr q = 0;
  switch (lane_size) {
    case 1: format = NEON_16B; DCHECK(rd.IsW()); break;
    case 2: format = NEON_8H; DCHECK(rd.IsW()); break;
    case 4: format = NEON_4S; DCHECK(rd.IsW()); break;
    default:
      DCHECK_EQ(lane_size, 8);
      DCHECK(rd.IsX());
      format = NEON_2D;
      q = NEON_Q;
      break;
  }

  DCHECK((0 <= vn_index) &&
         (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
}

void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) {
  DCHECK(vd.IsScalar());
  dup(vd, vn, vn_index);
}

void Assembler::dup(const VRegister& vd, const Register& rn) {
  DCHECK(!vd.Is1D());
  DCHECK_EQ(vd.Is2D(), rn.IsX());
  Instr q = vd.IsD() ? 0 : NEON_Q;
  Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
}
void Assembler::ins(const VRegister& vd, int vd_index, const VRegister& vn,
                    int vn_index) {
  DCHECK(AreSameFormat(vd, vn));
  // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vd.LaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1: format = NEON_16B; break;
    case 2: format = NEON_8H; break;
    case 4: format = NEON_4S; break;
    default:
      DCHECK_EQ(lane_size, 8);
      format = NEON_2D;
      break;
  }

  DCHECK((0 <= vd_index) &&
         (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  DCHECK((0 <= vn_index) &&
         (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
  Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
       ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
}
void Assembler::NEONTable(const VRegister& vd, const VRegister& vn,
                          const VRegister& vm, NEONTableOp op) {
  DCHECK(vd.Is16B() || vd.Is8B());
  DCHECK(vn.Is16B());
  DCHECK(AreSameFormat(vd, vm));
  Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
}
void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBL_1v);
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vm) {
  USE(vn2);
  DCHECK(AreSameFormat(vn, vn2));
  DCHECK(AreConsecutive(vn, vn2));
  NEONTable(vd, vn, vm, NEON_TBL_2v);
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vm) {
  USE(vn2);
  USE(vn3);
  DCHECK(AreSameFormat(vn, vn2, vn3));
  DCHECK(AreConsecutive(vn, vn2, vn3));
  NEONTable(vd, vn, vm, NEON_TBL_3v);
}

void Assembler::tbl(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vn4, const VRegister& vm) {
  USE(vn2);
  USE(vn3);
  USE(vn4);
  DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
  DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
  NEONTable(vd, vn, vm, NEON_TBL_4v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vm) {
  NEONTable(vd, vn, vm, NEON_TBX_1v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vm) {
  USE(vn2);
  DCHECK(AreSameFormat(vn, vn2));
  DCHECK(AreConsecutive(vn, vn2));
  NEONTable(vd, vn, vm, NEON_TBX_2v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vm) {
  USE(vn2);
  USE(vn3);
  DCHECK(AreSameFormat(vn, vn2, vn3));
  DCHECK(AreConsecutive(vn, vn2, vn3));
  NEONTable(vd, vn, vm, NEON_TBX_3v);
}

void Assembler::tbx(const VRegister& vd, const VRegister& vn,
                    const VRegister& vn2, const VRegister& vn3,
                    const VRegister& vn4, const VRegister& vm) {
  USE(vn2);
  USE(vn3);
  USE(vn4);
  DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
  DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
  NEONTable(vd, vn, vm, NEON_TBX_4v);
}
void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn,
                    int vn_index) {
  ins(vd, vd_index, vn, vn_index);
}

void Assembler::mvn(const Register& rd, const Operand& operand) {
  orn(rd, AppropriateZeroRegFor(rd), operand);
}

void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
  DCHECK(rt.Is64Bits());
  Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
}

void Assembler::msr(SystemRegister sysreg, const Register& rt) {
  DCHECK(rt.Is64Bits());
  Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
}

void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); }
Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
  Instr addr_field = RnSP(addr.base());

  if (addr.IsPostIndex()) {
    static_assert(NEONLoadStoreMultiStructPostIndex ==
                      static_cast<NEONLoadStoreMultiStructPostIndexOp>(
                          NEONLoadStoreSingleStructPostIndex),
                  "Opcodes must match for NEON post index memop.");

    addr_field |= NEONLoadStoreMultiStructPostIndex;
    if (addr.offset() == 0) {
      addr_field |= RmNot31(addr.regoffset());
    } else {
      // The immediate post index addressing mode is indicated by rm = 31.
      // The immediate is implied by the number of vector registers used.
      addr_field |= (0x1F << Rm_offset);
    }
  } else {
    DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
  }
  return addr_field;
}
void Assembler::LoadStoreStructVerify(const VRegister& vt,
                                      const MemOperand& addr, Instr op) {
#ifdef DEBUG
  // Assert that addressing mode is either offset (with immediate 0), post
  // index by immediate of the size of the register list, or post index by a
  // value in a core register.
  if (addr.IsImmediateOffset()) {
    DCHECK_EQ(addr.offset(), 0);
  } else {
    int offset = vt.SizeInBytes();
    switch (op) {
      case NEON_LD1_1v:
      case NEON_ST1_1v:
        offset *= 1;
        break;
      case NEONLoadStoreSingleStructLoad1:
      case NEONLoadStoreSingleStructStore1:
      case NEON_LD1R:
        offset = (offset / vt.LaneCount()) * 1;
        break;
      case NEON_LD1_2v:
      case NEON_ST1_2v:
      case NEON_LD2:
      case NEON_ST2:
        offset *= 2;
        break;
      case NEONLoadStoreSingleStructLoad2:
      case NEONLoadStoreSingleStructStore2:
      case NEON_LD2R:
        offset = (offset / vt.LaneCount()) * 2;
        break;
      case NEON_LD1_3v:
      case NEON_ST1_3v:
      case NEON_LD3:
      case NEON_ST3:
        offset *= 3;
        break;
      case NEONLoadStoreSingleStructLoad3:
      case NEONLoadStoreSingleStructStore3:
      case NEON_LD3R:
        offset = (offset / vt.LaneCount()) * 3;
        break;
      case NEON_LD1_4v:
      case NEON_ST1_4v:
      case NEON_LD4:
      case NEON_ST4:
        offset *= 4;
        break;
      case NEONLoadStoreSingleStructLoad4:
      case NEONLoadStoreSingleStructStore4:
      case NEON_LD4R:
        offset = (offset / vt.LaneCount()) * 4;
        break;
      default:
        UNREACHABLE();
    }
    DCHECK(!addr.regoffset().Is(NoReg) || addr.offset() == offset);
  }
#else
  USE(vt);
  USE(addr);
  USE(op);
#endif
}
void Assembler::LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
                                NEONLoadStoreMultiStructOp op) {
  LoadStoreStructVerify(vt, addr, op);
  DCHECK(vt.IsVector() || vt.Is1D());
  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
}

void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
                                              const MemOperand& addr,
                                              NEONLoadStoreSingleStructOp op) {
  LoadStoreStructVerify(vt, addr, op);
  Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
}
void Assembler::ld1(const VRegister& vt, const MemOperand& src) {
  LoadStoreStruct(vt, src, NEON_LD1_1v);
}

void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
                    const MemOperand& src) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, src, NEON_LD1_2v);
}

void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, src, NEON_LD1_3v);
}

void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const VRegister& vt4,
                    const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_LD1_4v);
}

void Assembler::ld2(const VRegister& vt, const VRegister& vt2,
                    const MemOperand& src) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, src, NEON_LD2);
}

void Assembler::ld2(const VRegister& vt, const VRegister& vt2, int lane,
                    const MemOperand& src) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
}

void Assembler::ld2r(const VRegister& vt, const VRegister& vt2,
                     const MemOperand& src) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
}
void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, src, NEON_LD3);
}

void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, int lane, const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
}

void Assembler::ld3r(const VRegister& vt, const VRegister& vt2,
                     const VRegister& vt3, const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
}

void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const VRegister& vt4,
                    const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_LD4);
}

void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const VRegister& vt4, int lane,
                    const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
}

void Assembler::ld4r(const VRegister& vt, const VRegister& vt2,
                     const VRegister& vt3, const VRegister& vt4,
                     const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
}
void Assembler::st1(const VRegister& vt, const MemOperand& src) {
  LoadStoreStruct(vt, src, NEON_ST1_1v);
}

void Assembler::st1(const VRegister& vt, const VRegister& vt2,
                    const MemOperand& src) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, src, NEON_ST1_2v);
}

void Assembler::st1(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, src, NEON_ST1_3v);
}

void Assembler::st1(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const VRegister& vt4,
                    const MemOperand& src) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_ST1_4v);
}
void Assembler::st2(const VRegister& vt, const VRegister& vt2,
                    const MemOperand& dst) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStruct(vt, dst, NEON_ST2);
}

void Assembler::st2(const VRegister& vt, const VRegister& vt2, int lane,
                    const MemOperand& dst) {
  USE(vt2);
  DCHECK(AreSameFormat(vt, vt2));
  DCHECK(AreConsecutive(vt, vt2));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
}

void Assembler::st3(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const MemOperand& dst) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStruct(vt, dst, NEON_ST3);
}

void Assembler::st3(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, int lane, const MemOperand& dst) {
  USE(vt2);
  USE(vt3);
  DCHECK(AreSameFormat(vt, vt2, vt3));
  DCHECK(AreConsecutive(vt, vt2, vt3));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
}

void Assembler::st4(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const VRegister& vt4,
                    const MemOperand& dst) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, dst, NEON_ST4);
}

void Assembler::st4(const VRegister& vt, const VRegister& vt2,
                    const VRegister& vt3, const VRegister& vt4, int lane,
                    const MemOperand& dst) {
  USE(vt2);
  USE(vt3);
  USE(vt4);
  DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
}
void Assembler::LoadStoreStructSingle(const VRegister& vt, uint32_t lane,
                                      const MemOperand& addr,
                                      NEONLoadStoreSingleStructOp op) {
  LoadStoreStructVerify(vt, addr, op);

  // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  unsigned lane_size = vt.LaneSizeInBytes();
  DCHECK_LT(lane, kQRegSize / lane_size);

  // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
  // S and size fields.
  lane *= lane_size;

  // Encodings for S[0]/D[0] and S[2]/D[1] are distinguished using the least-
  // significant bit of the relevant vector lane index.
  if (lane_size == 8) lane++;

  Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
  Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
  Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;

  Instr instr = op;
  switch (lane_size) {
    case 1:
      instr |= NEONLoadStoreSingle_b;
      break;
    case 2:
      instr |= NEONLoadStoreSingle_h;
      break;
    case 4:
      instr |= NEONLoadStoreSingle_s;
      break;
    default:
      DCHECK_EQ(lane_size, 8U);
      instr |= NEONLoadStoreSingle_d;
  }

  Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
}
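// Worked example (illustrative): for ld1 {v0.s}[3], lane_size is 4 and lane is
// 3, so lane * lane_size = 12 = 0b1100. The low two bits land in the size
// field (00), bit 2 becomes S (1) and bit 3 becomes Q (1), which is exactly
// how the AArch64 single-structure encoding addresses 32-bit lane 3.
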
void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src) {
  LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
}

void Assembler::ld1r(const VRegister& vt, const MemOperand& src) {
  LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
}

void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst) {
  LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
}

void Assembler::dmb(BarrierDomain domain, BarrierType type) {
  Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::dsb(BarrierDomain domain, BarrierType type) {
  Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
}

void Assembler::isb() {
  Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}

void Assembler::csdb() { hint(CSDB); }
void Assembler::fmov(const VRegister& vd, double imm) {
  if (vd.IsScalar()) {
    DCHECK(vd.Is1D());
    Emit(FMOV_d_imm | Rd(vd) | ImmFP(imm));
  } else {
    DCHECK(vd.Is2D());
    Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
    Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
  }
}

void Assembler::fmov(const VRegister& vd, float imm) {
  if (vd.IsScalar()) {
    DCHECK(vd.Is1S());
    Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm));
  } else {
    DCHECK(vd.Is2S() || vd.Is4S());
    Instr op = NEONModifiedImmediate_MOVI;
    Instr q = vd.Is4S() ? NEON_Q : 0;
    Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
  }
}
void Assembler::fmov(const Register& rd, const VRegister& fn) {
  DCHECK_EQ(rd.SizeInBits(), fn.SizeInBits());
  FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
  Emit(op | Rd(rd) | Rn(fn));
}

void Assembler::fmov(const VRegister& vd, const Register& rn) {
  DCHECK_EQ(vd.SizeInBits(), rn.SizeInBits());
  FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
  Emit(op | Rd(vd) | Rn(rn));
}

void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
  DCHECK_EQ(vd.SizeInBits(), vn.SizeInBits());
  Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
}

void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
  DCHECK((index == 1) && vd.Is1D() && rn.IsX());
  USE(index);
  Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
}

void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
  DCHECK((index == 1) && vn.Is1D() && rd.IsX());
  USE(index);
  Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
}
void Assembler::fmadd(const VRegister& fd, const VRegister& fn,
                      const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
}

void Assembler::fmsub(const VRegister& fd, const VRegister& fn,
                      const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
}

void Assembler::fnmadd(const VRegister& fd, const VRegister& fn,
                       const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
}

void Assembler::fnmsub(const VRegister& fd, const VRegister& fn,
                       const VRegister& fm, const VRegister& fa) {
  FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
}

void Assembler::fnmul(const VRegister& vd, const VRegister& vn,
                      const VRegister& vm) {
  DCHECK(AreSameSizeAndType(vd, vn, vm));
  Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
  Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}
void Assembler::fcmp(const VRegister& fn, const VRegister& fm) {
  DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
  Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
}

void Assembler::fcmp(const VRegister& fn, double value) {
  USE(value);
  // Although the fcmp instruction can strictly only take an immediate value of
  // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
  // matter in this comparison.
  DCHECK_EQ(value, 0.0);
  Emit(FPType(fn) | FCMP_zero | Rn(fn));
}

void Assembler::fccmp(const VRegister& fn, const VRegister& fm,
                      StatusFlags nzcv, Condition cond) {
  DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
  Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
}

void Assembler::fcsel(const VRegister& fd, const VRegister& fn,
                      const VRegister& fm, Condition cond) {
  DCHECK_EQ(fd.SizeInBits(), fn.SizeInBits());
  DCHECK_EQ(fd.SizeInBits(), fm.SizeInBits());
  Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
}
void Assembler::NEONFPConvertToInt(const Register& rd, const VRegister& vn,
                                   Instr op) {
  Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
}

void Assembler::NEONFPConvertToInt(const VRegister& vd, const VRegister& vn,
                                   Instr op) {
  if (vn.IsScalar()) {
    DCHECK((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
    op |= NEON_Q | NEONScalar;
  }
  Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
}

void Assembler::fcvt(const VRegister& vd, const VRegister& vn) {
  FPDataProcessing1SourceOp op;
  if (vd.Is1D()) {
    DCHECK(vn.Is1S() || vn.Is1H());
    op = vn.Is1S() ? FCVT_ds : FCVT_dh;
  } else if (vd.Is1S()) {
    DCHECK(vn.Is1D() || vn.Is1H());
    op = vn.Is1D() ? FCVT_sd : FCVT_sh;
  } else {
    DCHECK(vd.Is1H());
    DCHECK(vn.Is1D() || vn.Is1S());
    op = vn.Is1D() ? FCVT_hd : FCVT_hs;
  }
  FPDataProcessing1Source(vd, vn, op);
}
void Assembler::fcvtl(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
}

void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
  Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
}

void Assembler::fcvtn(const VRegister& vd, const VRegister& vn) {
  DCHECK((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
}

void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
  Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
  Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
}

void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn) {
  Instr format = 1 << NEONSize_offset;
  if (vd.IsScalar()) {
    DCHECK(vd.Is1S() && vn.Is1D());
    Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
  } else {
    DCHECK(vd.Is2S() && vn.Is2D());
    Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
  }
}

void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.Is4S() && vn.Is2D());
  Instr format = 1 << NEONSize_offset;
  Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
}
#define NEON_FP2REGMISC_FCVT_LIST(V) \
  V(fcvtnu, NEON_FCVTNU, FCVTNU)     \
  V(fcvtns, NEON_FCVTNS, FCVTNS)     \
  V(fcvtpu, NEON_FCVTPU, FCVTPU)     \
  V(fcvtps, NEON_FCVTPS, FCVTPS)     \
  V(fcvtmu, NEON_FCVTMU, FCVTMU)     \
  V(fcvtms, NEON_FCVTMS, FCVTMS)     \
  V(fcvtau, NEON_FCVTAU, FCVTAU)     \
  V(fcvtas, NEON_FCVTAS, FCVTAS)

#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP)                     \
  void Assembler::FN(const Register& rd, const VRegister& vn) {  \
    NEONFPConvertToInt(rd, vn, SCA_OP);                          \
  }                                                              \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
    NEONFPConvertToInt(vd, vn, VEC_OP);                          \
  }
NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
#undef DEFINE_ASM_FUNCS

void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits) {
  DCHECK_GE(fbits, 0);
  if (fbits == 0) {
    NEONFP2RegMisc(vd, vn, NEON_SCVTF);
  } else {
    DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
  }
}

void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits) {
  DCHECK_GE(fbits, 0);
  if (fbits == 0) {
    NEONFP2RegMisc(vd, vn, NEON_UCVTF);
  } else {
    DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
  }
}

void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits) {
  DCHECK_GE(fbits, 0);
  if (fbits == 0) {
    Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
  } else {
    Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(vd));
  }
}

void Assembler::ucvtf(const VRegister& fd, const Register& rn, int fbits) {
  DCHECK_GE(fbits, 0);
  if (fbits == 0) {
    Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
  } else {
    Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
         Rd(fd));
  }
}
void Assembler::NEON3Same(const VRegister& vd, const VRegister& vn,
                          const VRegister& vm, NEON3SameOp vop) {
  DCHECK(AreSameFormat(vd, vn, vm));
  DCHECK(vd.IsVector() || !vd.IsQ());

  Instr format, op = vop;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    format = VFormat(vd);
  }

  Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
}

void Assembler::NEONFP3Same(const VRegister& vd, const VRegister& vn,
                            const VRegister& vm, Instr op) {
  DCHECK(AreSameFormat(vd, vn, vm));
  Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
}
#define NEON_FP2REGMISC_LIST(V)                 \
  V(fabs, NEON_FABS, FABS)                      \
  V(fneg, NEON_FNEG, FNEG)                      \
  V(fsqrt, NEON_FSQRT, FSQRT)                   \
  V(frintn, NEON_FRINTN, FRINTN)                \
  V(frinta, NEON_FRINTA, FRINTA)                \
  V(frintp, NEON_FRINTP, FRINTP)                \
  V(frintm, NEON_FRINTM, FRINTM)                \
  V(frintx, NEON_FRINTX, FRINTX)                \
  V(frintz, NEON_FRINTZ, FRINTZ)                \
  V(frinti, NEON_FRINTI, FRINTI)                \
  V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
  V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)

#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)                      \
  void Assembler::FN(const VRegister& vd, const VRegister& vn) { \
    Instr op;                                                    \
    if (vd.IsScalar()) {                                         \
      DCHECK(vd.Is1S() || vd.Is1D());                            \
      op = SCA_OP;                                               \
    } else {                                                     \
      DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());               \
      op = VEC_OP;                                               \
    }                                                            \
    NEONFP2RegMisc(vd, vn, op);                                  \
  }
NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift) {
  DCHECK((vd.Is8H() && vn.Is8B() && shift == 8) ||
         (vd.Is4S() && vn.Is4H() && shift == 16) ||
         (vd.Is2D() && vn.Is2S() && shift == 32));
  USE(shift);
  Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
}

void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift) {
  USE(shift);
  DCHECK((vd.Is8H() && vn.Is16B() && shift == 8) ||
         (vd.Is4S() && vn.Is8H() && shift == 16) ||
         (vd.Is2D() && vn.Is4S() && shift == 32));
  Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
}
void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
                               NEON2RegMiscOp vop, double value) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK_EQ(value, 0.0);
  USE(value);

  Instr op = vop;
  if (vd.IsScalar()) {
    DCHECK(vd.Is1S() || vd.Is1D());
    op |= NEON_Q | NEONScalar;
  } else {
    DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());
  }

  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
}
void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value) {
  NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
}

void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value) {
  NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
}

void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value) {
  NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
}

void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value) {
  NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
}

void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value) {
  NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
}

void Assembler::frecpx(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar());
  DCHECK(AreSameFormat(vd, vn));
  DCHECK(vd.Is1S() || vd.Is1D());
  Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
}
void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits) {
  DCHECK(vn.Is1S() || vn.Is1D());
  DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
  if (fbits == 0) {
    Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
  } else {
    Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
         Rd(rd));
  }
}

void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits) {
  DCHECK_GE(fbits, 0);
  if (fbits == 0) {
    NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
  } else {
    DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
  }
}

void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits) {
  DCHECK(vn.Is1S() || vn.Is1D());
  DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
  if (fbits == 0) {
    Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
  } else {
    Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
         Rd(rd));
  }
}

void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits) {
  DCHECK_GE(fbits, 0);
  if (fbits == 0) {
    NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
  } else {
    DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
    NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
  }
}
void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
                               Instr op) {
  DCHECK(AreSameFormat(vd, vn));
  Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
}

void Assembler::NEON2RegMisc(const VRegister& vd, const VRegister& vn,
                             NEON2RegMiscOp vop, int value) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK_EQ(value, 0);
  USE(value);

  Instr format, op = vop;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    format = VFormat(vd);
  }

  Emit(format | op | Rn(vn) | Rd(vd));
}
void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
}

void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
}

void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
}

void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
}

void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
}
3453 #define NEON_3SAME_LIST(V) \ 3454 V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \ 3455 V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \ 3456 V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \ 3457 V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \ 3458 V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \ 3459 V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \ 3460 V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \ 3461 V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \ 3462 V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \ 3463 V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \ 3464 V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \ 3465 V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \ 3466 V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \ 3467 V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ 3468 V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \ 3469 V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ 3470 V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ 3471 V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ 3472 V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \ 3473 V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ 3474 V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \ 3475 V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ 3476 V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ 3477 V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ 3478 V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ 3479 V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \ 3480 V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \ 3481 V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \ 3482 V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \ 3483 V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \ 3484 V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \ 3485 V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \ 3486 V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \ 3487 V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \ 3488 V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \ 3489 V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \ 3490 V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \ 3491 V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \ 3492 V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \ 3493 V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \ 3494 V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \ 3495 V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \ 3496 V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \ 3497 V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \ 3498 V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \ 3499 V(uqadd, NEON_UQADD, true) \ 3500 V(sqadd, NEON_SQADD, true) \ 3501 V(uqsub, NEON_UQSUB, true) \ 3502 V(sqsub, NEON_SQSUB, true) \ 3503 V(sqshl, NEON_SQSHL, true) \ 3504 V(uqshl, NEON_UQSHL, true) \ 3505 V(sqrshl, NEON_SQRSHL, true) \ 3506 V(uqrshl, NEON_UQRSHL, true) 3508 #define DEFINE_ASM_FUNC(FN, OP, AS) \ 3509 void Assembler::FN(const VRegister& vd, const VRegister& vn, \ 3510 const VRegister& vm) { \ 3512 NEON3Same(vd, vn, vm, OP); \ 3514 NEON_3SAME_LIST(DEFINE_ASM_FUNC)
3515 #undef DEFINE_ASM_FUNC 3517 #define NEON_FP3SAME_LIST_V2(V) \ 3518 V(fadd, NEON_FADD, FADD) \ 3519 V(fsub, NEON_FSUB, FSUB) \ 3520 V(fmul, NEON_FMUL, FMUL) \ 3521 V(fdiv, NEON_FDIV, FDIV) \ 3522 V(fmax, NEON_FMAX, FMAX) \ 3523 V(fmaxnm, NEON_FMAXNM, FMAXNM) \ 3524 V(fmin, NEON_FMIN, FMIN) \ 3525 V(fminnm, NEON_FMINNM, FMINNM) \ 3526 V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \ 3527 V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \ 3528 V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \ 3529 V(fabd, NEON_FABD, NEON_FABD_scalar) \ 3530 V(fmla, NEON_FMLA, 0) \ 3531 V(fmls, NEON_FMLS, 0) \ 3532 V(facge, NEON_FACGE, NEON_FACGE_scalar) \ 3533 V(facgt, NEON_FACGT, NEON_FACGT_scalar) \ 3534 V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \ 3535 V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \ 3536 V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \ 3537 V(faddp, NEON_FADDP, 0) \ 3538 V(fmaxp, NEON_FMAXP, 0) \ 3539 V(fminp, NEON_FMINP, 0) \ 3540 V(fmaxnmp, NEON_FMAXNMP, 0) \ 3541 V(fminnmp, NEON_FMINNMP, 0) 3543 #define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \ 3544 void Assembler::FN(const VRegister& vd, const VRegister& vn, \ 3545 const VRegister& vm) { \ 3547 if ((SCA_OP != 0) && vd.IsScalar()) { \ 3548 DCHECK(vd.Is1S() || vd.Is1D()); \ 3551 DCHECK(vd.IsVector()); \ 3552 DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S()); \ 3555 NEONFP3Same(vd, vn, vm, op); \ 3557 NEON_FP3SAME_LIST_V2(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::addp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1D() && vn.Is2D()));
  Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::faddp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fmaxp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fminp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
}

void Assembler::fminnmp(const VRegister& vd, const VRegister& vn) {
  DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
  Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
}
void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift) {
  NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
}

void Assembler::mov(const VRegister& vd, const VRegister& vn) {
  DCHECK(AreSameFormat(vd, vn));
  if (vd.IsD()) {
    orr(vd.V8B(), vn.V8B(), vn.V8B());
  } else {
    DCHECK(vd.IsQ());
    orr(vd.V16B(), vn.V16B(), vn.V16B());
  }
}

void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift) {
  NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
}
void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
                     const int shift_amount) {
  DCHECK((shift == LSL) || (shift == MSL));
  if (vd.Is2D() || vd.Is1D()) {
    DCHECK_EQ(shift_amount, 0);
    int imm8 = 0;
    for (int i = 0; i < 8; ++i) {
      int byte = (imm >> (i * 8)) & 0xFF;
      DCHECK((byte == 0) || (byte == 0xFF));
      if (byte == 0xFF) {
        imm8 |= (1 << i);
      }
    }
    Instr q = vd.Is2D() ? NEON_Q : 0;
    Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
         ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
  } else if (shift == LSL) {
    NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
                            NEONModifiedImmediate_MOVI);
  } else {
    NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
                            NEONModifiedImmediate_MOVI);
  }
}

void Assembler::mvn(const VRegister& vd, const VRegister& vn) {
  DCHECK(AreSameFormat(vd, vn));
  if (vd.IsD()) {
    not_(vd.V8B(), vn.V8B());
  } else {
    DCHECK(vd.IsQ());
    not_(vd.V16B(), vn.V16B());
  }
}

void Assembler::mvni(const VRegister& vd, const int imm8, Shift shift,
                     const int shift_amount) {
  DCHECK((shift == LSL) || (shift == MSL));
  if (shift == LSL) {
    NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
  } else {
    NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
  }
}
void Assembler::NEONFPByElement(const VRegister& vd, const VRegister& vn,
                                const VRegister& vm, int vm_index,
                                NEONByIndexedElementOp vop) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) ||
         (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) ||
         (vd.Is1D() && vm.Is1D()));
  DCHECK((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)));

  Instr op = vop;
  int index_num_bits = vm.Is1S() ? 2 : 1;
  if (vd.IsScalar()) {
    op |= NEON_Q | NEONScalar;
  }

  Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) |
       Rn(vn) | Rd(vd));
}

void Assembler::NEONByElement(const VRegister& vd, const VRegister& vn,
                              const VRegister& vm, int vm_index,
                              NEONByIndexedElementOp vop) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) ||
         (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) ||
         (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
  DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
         (vm.Is1S() && (vm_index < 4)));

  Instr format, op = vop;
  int index_num_bits = vm.Is1H() ? 3 : 2;
  if (vd.IsScalar()) {
    op |= NEONScalar | NEON_Q;
    format = SFormat(vn);
  } else {
    format = VFormat(vn);
  }
  Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
       Rd(vd));
}

void Assembler::NEONByElementL(const VRegister& vd, const VRegister& vn,
                               const VRegister& vm, int vm_index,
                               NEONByIndexedElementOp vop) {
  DCHECK((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
         (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
         (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
         (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
         (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
         (vd.Is1D() && vn.Is1S() && vm.Is1S()));

  DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
         (vm.Is1S() && (vm_index < 4)));

  Instr format, op = vop;
  int index_num_bits = vm.Is1H() ? 3 : 2;
  if (vd.IsScalar()) {
    op |= NEONScalar | NEON_Q;
    format = SFormat(vn);
  } else {
    format = VFormat(vn);
  }
  Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) |
       Rd(vd));
}
#define NEON_BYELEMENT_LIST(V)               \
  V(mul, NEON_MUL_byelement, vn.IsVector())  \
  V(mla, NEON_MLA_byelement, vn.IsVector())  \
  V(mls, NEON_MLS_byelement, vn.IsVector())  \
  V(sqdmulh, NEON_SQDMULH_byelement, true)   \
  V(sqrdmulh, NEON_SQRDMULH_byelement, true)

#define DEFINE_ASM_FUNC(FN, OP, AS)                             \
  void Assembler::FN(const VRegister& vd, const VRegister& vn,  \
                     const VRegister& vm, int vm_index) {       \
    DCHECK(AS);                                                 \
    NEONByElement(vd, vn, vm, vm_index, OP);                    \
  }
NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

#define NEON_FPBYELEMENT_LIST(V) \
  V(fmul, NEON_FMUL_byelement)   \
  V(fmla, NEON_FMLA_byelement)   \
  V(fmls, NEON_FMLS_byelement)   \
  V(fmulx, NEON_FMULX_byelement)

#define DEFINE_ASM_FUNC(FN, OP)                                 \
  void Assembler::FN(const VRegister& vd, const VRegister& vn,  \
                     const VRegister& vm, int vm_index) {       \
    NEONFPByElement(vd, vn, vm, vm_index, OP);                  \
  }
NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

#define NEON_BYELEMENT_LONG_LIST(V)                              \
  V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD())  \
  V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
  V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD())  \
  V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
  V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD())  \
  V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
  V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD())      \
  V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ())     \
  V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD())      \
  V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ())     \
  V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD())      \
  V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ())     \
  V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD())      \
  V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ())     \
  V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD())      \
  V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ())     \
  V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD())      \
  V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())

#define DEFINE_ASM_FUNC(FN, OP, AS)                             \
  void Assembler::FN(const VRegister& vd, const VRegister& vn,  \
                     const VRegister& vm, int vm_index) {       \
    DCHECK(AS);                                                 \
    NEONByElementL(vd, vn, vm, vm_index, OP);                   \
  }
NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

void Assembler::suqadd(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_SUQADD);
}

void Assembler::usqadd(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_USQADD);
}

void Assembler::abs(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_ABS);
}

void Assembler::sqabs(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_SQABS);
}

void Assembler::neg(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() || vd.Is1D());
  NEON2RegMisc(vd, vn, NEON_NEG);
}

void Assembler::sqneg(const VRegister& vd, const VRegister& vn) {
  NEON2RegMisc(vd, vn, NEON_SQNEG);
}
void Assembler::NEONXtn(const VRegister& vd, const VRegister& vn,
                        NEON2RegMiscOp vop) {
  Instr format, op = vop;
  if (vd.IsScalar()) {
    DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
           (vd.Is1S() && vn.Is1D()));
    op |= NEON_Q | NEONScalar;
    format = SFormat(vd);
  } else {
    DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
           (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
           (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
    format = VFormat(vd);
  }
  Emit(format | op | Rn(vn) | Rd(vd));
}
void Assembler::xtn(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsD());
  NEONXtn(vd, vn, NEON_XTN);
}

void Assembler::xtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_XTN);
}

void Assembler::sqxtn(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_SQXTN);
}

void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_SQXTN);
}

void Assembler::sqxtun(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_SQXTUN);
}

void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_SQXTUN);
}

void Assembler::uqxtn(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsScalar() || vd.IsD());
  NEONXtn(vd, vn, NEON_UQXTN);
}

void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn) {
  DCHECK(vd.IsVector() && vd.IsQ());
  NEONXtn(vd, vn, NEON_UQXTN);
}
void Assembler::not_(const VRegister& vd, const VRegister& vn) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK(vd.Is8B() || vd.Is16B());
  Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
}

void Assembler::rbit(const VRegister& vd, const VRegister& vn) {
  DCHECK(AreSameFormat(vd, vn));
  DCHECK(vd.Is8B() || vd.Is16B());
  Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
}

void Assembler::ext(const VRegister& vd, const VRegister& vn,
                    const VRegister& vm, int index) {
  DCHECK(AreSameFormat(vd, vn, vm));
  DCHECK(vd.Is8B() || vd.Is16B());
  DCHECK((0 <= index) && (index < vd.LaneCount()));
  Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
}
void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index) {
  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
  // number of lanes, and T is b, h, s or d.
  int lane_size = vn.LaneSizeInBytes();
  NEONFormatField format;
  switch (lane_size) {
    case 1: format = NEON_16B; break;
    case 2: format = NEON_8H; break;
    case 4: format = NEON_4S; break;
    default:
      DCHECK_EQ(lane_size, 8);
      format = NEON_2D;
      break;
  }

  Instr q, scalar;
  if (vd.IsScalar()) {
    q = NEON_Q;
    scalar = NEONScalar;
  } else {
    DCHECK(!vd.Is1D());
    q = vd.IsD() ? 0 : NEON_Q;
    scalar = 0;
  }
  Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) |
       Rd(vd));
}
void Assembler::dcptr(Label* label) {
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated; label->pos() is
    // the offset of the target from the start of the buffer, which is emitted
    // as data.
    internal_reference_positions_.push_back(pc_offset());
    dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos()));
  } else {
    int32_t offset;
    if (label->is_linked()) {
      // The label is linked, so the internal reference is added onto the end
      // of the label's link chain.
      offset = label->pos() - pc_offset();
      DCHECK_NE(offset, kStartOfLabelLinkChain);
    } else {
      // The label is unused, so it now becomes linked and the internal
      // reference is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());

    // Internal references are not instructions, so while unbound they are
    // encoded as two consecutive brk instructions carrying the 32-bit offset.
    offset >>= kInstrSizeLog2;
    DCHECK(is_int32(offset));
    uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
    uint32_t low16 = unsigned_bitextract_32(15, 0, offset);

    brk(high16);
    brk(low16);
  }
}
uint32_t Assembler::FPToImm8(double imm) {
  DCHECK(IsImmFP64(imm));
  // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //       0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = bit_cast<uint64_t>(imm);
  // bit7: a000.0000
  uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint64_t bit5_to_0 = (bits >> 48) & 0x3F;

  return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
}

Instr Assembler::ImmFP(double imm) { return FPToImm8(imm) << ImmFP_offset; }
Instr Assembler::ImmNEONFP(double imm) {
  return ImmNEONabcdefgh(FPToImm8(imm));
}
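// Background note: the 8-bit value produced here is the standard AArch64
// floating-point "minifloat" immediate, covering values of the form
// (-1)^s * n/16 * 2^r with n in [16, 31] and r in [-3, 4], i.e. magnitudes
// from 0.125 to 31.0. For example, 0.5 = 16/16 * 2^-1 is encodable, while 0.3
// is not and has to be materialised some other way.
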
void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
                         MoveWideImmediateOp mov_op) {
  // Ignore the top 32 bits of an immediate if we're moving to a W register.
  if (rd.Is32Bits()) {
    // Check that the top 32 bits are zero (a positive 32-bit number) or top
    // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
    DCHECK(((imm >> kWRegSizeInBits) == 0) ||
           ((imm >> (kWRegSizeInBits - 1)) == 0x1FFFFFFFF));
    imm &= kWRegMask;
  }

  if (shift >= 0) {
    // Explicit shift specified.
    DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
    DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
    shift /= 16;
  } else {
    // Calculate a new immediate and shift combination to encode the immediate
    // argument.
    shift = 0;
    if ((imm & ~0xFFFFULL) == 0) {
      // Nothing to do.
    } else if ((imm & ~(0xFFFFULL << 16)) == 0) {
      imm >>= 16;
      shift = 1;
    } else if ((imm & ~(0xFFFFULL << 32)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 32;
      shift = 2;
    } else if ((imm & ~(0xFFFFULL << 48)) == 0) {
      DCHECK(rd.Is64Bits());
      imm >>= 48;
      shift = 3;
    }
  }

  DCHECK(is_uint16(imm));

  Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
       ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
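// Example (derived from the shift selection above): MoveWide(x0, 0xABCD0000,
// -1, MOVZ) finds that only halfword 1 of the immediate is non-zero, so it
// emits movz x0, #0xABCD, lsl #16 (imm16 = 0xABCD, hw = 1).
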
void Assembler::AddSub(const Register& rd, const Register& rn,
                       const Operand& operand, FlagsUpdate S, AddSubOp op) {
  DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmAddSub(immediate));
    Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
    Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
         ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
  } else if (operand.IsShiftedRegister()) {
    DCHECK_EQ(operand.reg().SizeInBits(), rd.SizeInBits());
    DCHECK_NE(operand.shift(), ROR);

    // For instructions of the form:
    //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
    //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
    //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
    //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
    // or their 64-bit register equivalents, convert the operand from shifted
    // to extended register mode, and emit an add/sub extended instruction.
    if (rn.IsSP() || rd.IsSP()) {
      DCHECK(!(rd.IsSP() && (S == SetFlags)));
      DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                               AddSubExtendedFixed | op);
    } else {
      DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
    }
  } else {
    DCHECK(operand.IsExtendedRegister());
    DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
  }
}
void Assembler::AddSubWithCarry(const Register& rd, const Register& rn,
                                const Operand& operand, FlagsUpdate S,
                                AddSubWithCarryOp op) {
  DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
  DCHECK_EQ(rd.SizeInBits(), operand.reg().SizeInBits());
  DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}

void Assembler::hlt(int code) {
  DCHECK(is_uint16(code));
  Emit(HLT | ImmException(code));
}

void Assembler::brk(int code) {
  DCHECK(is_uint16(code));
  Emit(BRK | ImmException(code));
}
void Assembler::EmitStringData(const char* string) {
  size_t len = strlen(string) + 1;
  DCHECK_LE(RoundUp(len, kInstrSize), static_cast<size_t>(kGap));
  EmitData(string, static_cast<int>(len));
  // Pad with zero bytes until pc_ is aligned with the instruction size.
  const char pad[] = {'\0', '\0', '\0', '\0'};
  static_assert(sizeof(pad) == kInstrSize,
                "Size of padding must match instruction size.");
  EmitData(pad, RoundUp(pc_offset(), kInstrSize) - pc_offset());
}
void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
  if (options().enable_simulator_code) {
    // The arguments to the debug marker need to be contiguous in memory, so
    // make sure we don't try to emit pools.
    BlockPoolsScope scope(this);

    Label start;
    bind(&start);

    // Refer to instructions-arm64.h for a description of the marker and its
    // arguments.
    hlt(kImmExceptionIsDebug);
    DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugCodeOffset);
    dc32(code);
    DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugParamsOffset);
    dc32(params);
    DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugMessageOffset);
    EmitStringData(message);
    hlt(kImmExceptionIsUnreachable);

    return;
  }
#else
  // Make sure we haven't dynamically enabled simulator code when there is no
  // simulator built in.
  DCHECK(!options().enable_simulator_code);
#endif

  if (params & BREAK) {
    brk(0);
  }
}
void Assembler::Logical(const Register& rd, const Register& rn,
                        const Operand& operand, LogicalOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    DCHECK_NE(immediate, 0);
    DCHECK_NE(immediate, -1);
    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else {
    DCHECK(operand.IsShiftedRegister());
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }
}
void Assembler::LogicalImmediate(const Register& rd, const Register& rn,
                                 unsigned n, unsigned imm_s, unsigned imm_r,
                                 LogicalOp op) {
  unsigned reg_size = rd.SizeInBits();
  Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
       ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
       Rn(rn));
}
void Assembler::ConditionalCompare(const Register& rn, const Operand& operand,
                                   StatusFlags nzcv, Condition cond,
                                   ConditionalCompareOp op) {
  Instr ccmpop;
  DCHECK(!operand.NeedsRelocation(this));
  if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    DCHECK(IsImmConditionalCompare(immediate));
    ccmpop = ConditionalCompareImmediateFixed | op |
             ImmCondCmp(static_cast<unsigned>(immediate));
  } else {
    DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
    ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
  }
  Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}
void Assembler::DataProcessing1Source(const Register& rd, const Register& rn,
                                      DataProcessing1SourceOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Emit(SF(rn) | op | Rn(rn) | Rd(rd));
}

void Assembler::FPDataProcessing1Source(const VRegister& vd,
                                        const VRegister& vn,
                                        FPDataProcessing1SourceOp op) {
  Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
}

void Assembler::FPDataProcessing2Source(const VRegister& fd,
                                        const VRegister& fn,
                                        const VRegister& fm,
                                        FPDataProcessing2SourceOp op) {
  DCHECK(fd.SizeInBits() == fn.SizeInBits());
  DCHECK(fd.SizeInBits() == fm.SizeInBits());
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
}

void Assembler::FPDataProcessing3Source(const VRegister& fd,
                                        const VRegister& fn,
                                        const VRegister& fm,
                                        const VRegister& fa,
                                        FPDataProcessing3SourceOp op) {
  DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
  Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
}
void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8,
                                        const int left_shift,
                                        NEONModifiedImmediateOp op) {
  DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() ||
         vd.Is4S());
  DCHECK((left_shift == 0) || (left_shift == 8) || (left_shift == 16) ||
         (left_shift == 24));
  DCHECK(is_uint8(imm8));

  int cmode_1, cmode_2, cmode_3;
  if (vd.Is8B() || vd.Is16B()) {
    DCHECK_EQ(op, NEONModifiedImmediate_MOVI);
    cmode_1 = 1;
    cmode_2 = 1;
    cmode_3 = 1;
  } else {
    cmode_1 = (left_shift >> 3) & 1;
    cmode_2 = left_shift >> 4;
    cmode_3 = 0;
    if (vd.Is4H() || vd.Is8H()) {
      DCHECK((left_shift == 0) || (left_shift == 8));
      cmode_3 = 1;
    }
  }
  int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);

  Instr q = vd.IsQ() ? NEON_Q : 0;

  Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
}
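// Background note: the cmode assembled above matches the AArch64
// modified-immediate encoding. Byte forms always use cmode 0xE, halfword
// forms use 10x0 (x selecting LSL #0 or #8) and word forms use 0xx0, where
// the two x bits carry the LSL amount in multiples of eight.
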
void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
                                        const int shift_amount,
                                        NEONModifiedImmediateOp op) {
  DCHECK(vd.Is2S() || vd.Is4S());
  DCHECK((shift_amount == 8) || (shift_amount == 16));
  DCHECK(is_uint8(imm8));

  int cmode_0 = (shift_amount >> 4) & 1;
  int cmode = 0xC | cmode_0;

  Instr q = vd.IsQ() ? NEON_Q : 0;

  Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
}
void Assembler::EmitShift(const Register& rd, const Register& rn, Shift shift,
                          unsigned shift_amount) {
  switch (shift) {
    case LSL:
      lsl(rd, rn, shift_amount);
      break;
    case LSR:
      lsr(rd, rn, shift_amount);
      break;
    case ASR:
      asr(rd, rn, shift_amount);
      break;
    case ROR:
      ror(rd, rn, shift_amount);
      break;
    default:
      UNREACHABLE();
  }
}

void Assembler::EmitExtendShift(const Register& rd, const Register& rn,
                                Extend extend, unsigned left_shift) {
  DCHECK(rd.SizeInBits() >= rn.SizeInBits());
  unsigned reg_size = rd.SizeInBits();
  // Use the correct size of register.
  Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
  // Bits extracted are high_bit:0.
  unsigned high_bit = (8 << (extend & 0x3)) - 1;
  // Number of bits left in the result that are not introduced by the shift.
  unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);

  if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
    switch (extend) {
      case UXTB:
      case UXTH:
      case UXTW:
        ubfm(rd, rn_, non_shift_bits, high_bit);
        break;
      case SXTB:
      case SXTH:
      case SXTW:
        sbfm(rd, rn_, non_shift_bits, high_bit);
        break;
      case UXTX:
      case SXTX: {
        DCHECK_EQ(rn.SizeInBits(), kXRegSizeInBits);
        // Nothing to extend. Just shift.
        lsl(rd, rn_, left_shift);
        break;
      }
      default:
        UNREACHABLE();
    }
  } else {
    // No need to extend as the extended bits would be shifted away.
    lsl(rd, rn_, left_shift);
  }
}
void Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
                                        const Operand& operand, FlagsUpdate S,
                                        Instr op) {
  DCHECK(operand.IsShiftedRegister());
  DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  DCHECK(!operand.NeedsRelocation(this));
  Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.shift()) |
       ImmDPShift(operand.shift_amount()) | Rm(operand.reg()) | Rn(rn) |
       Rd(rd));
}

void Assembler::DataProcExtendedRegister(const Register& rd,
                                         const Register& rn,
                                         const Operand& operand, FlagsUpdate S,
                                         Instr op) {
  DCHECK(!operand.NeedsRelocation(this));
  Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
  Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
       ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
       dest_reg | RnSP(rn));
}

bool Assembler::IsImmAddSub(int64_t immediate) {
  return is_uint12(immediate) ||
         (is_uint12(immediate >> 12) && ((immediate & 0xFFF) == 0));
}
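// Example: 0xFFF and 0xFFF000 are both encodable add/sub immediates (the
// latter as 0xFFF shifted left by 12), while 0x1001 is not and is typically
// materialised into a scratch register by the macro assembler instead.
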
void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
                          LoadStoreOp op) {
  Instr memop = op | Rt(rt) | RnSP(addr.base());

  if (addr.IsImmediateOffset()) {
    unsigned size = CalcLSDataSize(op);
    if (IsImmLSScaled(addr.offset(), size)) {
      int offset = static_cast<int>(addr.offset());
      // Use the scaled addressing mode.
      Emit(LoadStoreUnsignedOffsetFixed | memop |
           ImmLSUnsigned(offset >> size));
    } else if (IsImmLSUnscaled(addr.offset())) {
      int offset = static_cast<int>(addr.offset());
      // Use the unscaled addressing mode.
      Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  } else if (addr.IsRegisterOffset()) {
    Extend ext = addr.extend();
    Shift shift = addr.shift();
    unsigned shift_amount = addr.shift_amount();

    // LSL is encoded in the option field as UXTX.
    if (shift == LSL) {
      ext = UXTX;
    }

    // Shifts are encoded in one bit, indicating a left shift by the memory
    // access size.
    DCHECK((shift_amount == 0) ||
           (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
    Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
         ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
  } else {
    // Pre-index and post-index modes.
    DCHECK(!rt.Is(addr.base()));
    if (IsImmLSUnscaled(addr.offset())) {
      int offset = static_cast<int>(addr.offset());
      if (addr.IsPreIndex()) {
        Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
      } else {
        DCHECK(addr.IsPostIndex());
        Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
      }
    } else {
      // This case is handled in the macro assembler.
      UNREACHABLE();
    }
  }
}
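// Background note: the immediate forms used above mirror the AArch64 load/
// store encodings: a scaled unsigned 12-bit offset (LDR/STR unsigned offset),
// an unscaled signed 9-bit offset (LDUR/STUR), and signed 9-bit pre/post-index
// writeback forms. Offsets outside those ranges are split up by the macro
// assembler before reaching this function.
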
4422 bool Assembler::IsImmLSUnscaled(
int64_t offset) {
4423 return is_int9(offset);
4426 bool Assembler::IsImmLSScaled(
int64_t offset,
unsigned size) {
4427 bool offset_is_size_multiple = (((offset >> size) << size) == offset);
4428 return offset_is_size_multiple && is_uint12(offset >> size);
4431 bool Assembler::IsImmLSPair(
int64_t offset,
unsigned size) {
4432 bool offset_is_size_multiple = (((offset >> size) << size) == offset);
4433 return offset_is_size_multiple && is_int7(offset >> size);
bool Assembler::IsImmLLiteral(int64_t offset) {
  int inst_size = static_cast<int>(kInstrSizeLog2);
  bool offset_is_inst_multiple =
      (((offset >> inst_size) << inst_size) == offset);
  DCHECK_GT(offset, 0);
  offset >>= kLoadLiteralScaleLog2;
  return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
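// ImmLLiteral_width is 19 bits and offsets are in 4-byte units, so a literal
// load can reach roughly 1MB; the DCHECK above additionally restricts this
// assembler to forward (positive) offsets.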
bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n,
                             unsigned* imm_s, unsigned* imm_r) {
  DCHECK((n != nullptr) && (imm_s != nullptr) && (imm_r != nullptr));
  DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
  // A logical immediate is a contiguous stretch of set bits, rotated right by
  // imm_r and repeated across the register every d bits (d a power of two).
  bool negate = false;
  if (value & 1) {
    // If the low bit is 1, invert the value and remember that we did, so the
    // outputs can be adjusted at the end.
    negate = true;
    value = ~value;
  }
  if (width == kWRegSizeInBits) {
    // Handle 32-bit immediates by repeating the value into a 64-bit word; the
    // most-significant 32 bits may not be zero, so shift the value left first.
    value <<= kWRegSizeInBits;
    value |= value >> kWRegSizeInBits;
  }

  // a is the lowest set bit, b the first bit above the lowest stretch of set
  // bits, and c the lowest set bit of the next stretch.
  uint64_t a = LargestPowerOf2Divisor(value);
  uint64_t value_plus_a = value + a;
  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
  uint64_t value_plus_a_minus_b = value_plus_a - b;
  uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);

  int d, clz_a, out_n;
  uint64_t mask;
  if (c != 0) {
    // More than one stretch of set bits: d is the distance from a to c, and
    // the N bit of the output is zero.
    clz_a = CountLeadingZeros(a, kXRegSizeInBits);
    int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
    d = clz_a - clz_c;
    mask = ((uint64_t{1} << d) - 1);
    out_n = 0;
  } else if (a == 0) {
    // The input was zero (or all ones before the inversion above); fail.
    return false;
  } else {
    // A single stretch of set bits: d spans the whole register and N is set.
    clz_a = CountLeadingZeros(a, kXRegSizeInBits);
    d = 64;
    mask = ~uint64_t{0};
    out_n = 1;
  }

  // The repeat period must be a power of two, and the stretch must fit in it.
  if (!base::bits::IsPowerOfTwo(d)) {
    return false;
  }
  if (((b - a) & ~mask) != 0) {
    return false;
  }

  // Build the candidate (b - a repeated every d bits) with a multiplier
  // selected by CLZ(d), and check that it reproduces the input.
  static const uint64_t multipliers[] = {
    0x0000000000000001UL,
    0x0000000100000001UL,
    0x0001000100010001UL,
    0x0101010101010101UL,
    0x1111111111111111UL,
    0x5555555555555555UL,
  };
  int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
  DCHECK((multiplier_idx >= 0) &&
         (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
  uint64_t multiplier = multipliers[multiplier_idx];
  uint64_t candidate = (b - a) * multiplier;
  if (value != candidate) {
    return false;
  }

  // Count the set bits in the basic stretch (clz(0) == -1 handles stretches
  // that reach the top of the word) and compute the rotation.
  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
  int s = clz_a - clz_b;
  int r;
  if (negate) {
    // Compensate for the inversion: set bits become clear bits, and the
    // rotation is based on position b rather than a.
    s = d - s;
    r = (clz_b + 1) & (d - 1);
  } else {
    r = (clz_a + 1) & (d - 1);
  }

  *n = out_n;
  *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
  *imm_r = r;
  return true;
}
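// Worked example: 0x00FF00FF00FF00FF (eight set bits repeated every 16 bits)
// gives a = 1, b = 0x100, c = 0x10000, so d = 16 and b - a = 0xFF; multiplying
// by 0x0001000100010001 reproduces the input, and the function returns true
// with N = 0, imm_s = 0b100111 and imm_r = 0.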
bool Assembler::IsImmConditionalCompare(int64_t immediate) {
  return is_uint5(immediate);
}
bool Assembler::IsImmFP32(float imm) {
  // Valid values will have the form: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  uint32_t bits = bit_cast<uint32_t>(imm);
  // bits[19..0] are cleared.
  if ((bits & 0x7FFFF) != 0) return false;
  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3E00;
  if (b_pattern != 0 && b_pattern != 0x3E00) return false;
  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) return false;
  return true;
}
bool Assembler::IsImmFP64(double imm) {
  // Valid values will have the form:
  // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000.0000.0000.0000.0000
  uint64_t bits = bit_cast<uint64_t>(imm);
  // bits[47..0] are cleared.
  if ((bits & 0xFFFFFFFFFFFFL) != 0) return false;
  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3FC0;
  if (b_pattern != 0 && b_pattern != 0x3FC0) return false;
  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) return false;
  return true;
}
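// Both checks implement the FMOV (immediate) format: values of the form
// +/-(1 + m/16) * 2^e with a 4-bit fraction m and an exponent e in [-3, 4].
// For example 0.5, 1.0, 2.0 and 31.0 are encodable, while 0.0 and 0.1 are not
// and must be materialized some other way.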
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute the new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }

  // Some internal data structures overflow for very large buffers.
  if (desc.buffer_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  byte* buffer = reinterpret_cast<byte*>(buffer_);

  // Set up the new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.instr_size = pc_offset();
  desc.reloc_size =
      static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer;
  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
                      (buffer + buffer_size_);
  memmove(desc.buffer, buffer, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ = pc_ + pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references.
  for (auto pos : internal_reference_positions_) {
    intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
    *p += pc_delta;
  }

  // Pending relocation entries are relative, so they do not need relocating.
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
                                ConstantPoolMode constant_pool_mode) {
  if ((rmode == RelocInfo::COMMENT) ||
      (rmode == RelocInfo::INTERNAL_REFERENCE) ||
      (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
      (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
      (rmode == RelocInfo::DEOPT_INLINING_ID) ||
      (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
    // These modes do not need an entry in the constant pool.
    DCHECK(RelocInfo::IsComment(rmode) || RelocInfo::IsDeoptReason(rmode) ||
           RelocInfo::IsDeoptId(rmode) || RelocInfo::IsDeoptPosition(rmode) ||
           RelocInfo::IsInternalReference(rmode) ||
           RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
  } else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
    bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
    if (!new_constpool_entry) return;
  }

  if (!ShouldRecordRelocInfo(rmode)) return;

  // We do not try to reuse pool constants.
  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());

  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
  reloc_info_writer.Write(&rinfo);
}
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
  if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
  b(offset);
}

void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
  if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
  bl(offset);
}
void Assembler::near_call(HeapObjectRequest request) {
  RequestHeapObject(request);
  int index = AddCodeTarget(Handle<Code>());
  RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
  bl(index);
}
void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    no_const_pool_before_ = pc_limit;
    DCHECK(pc_limit < constpool_.MaxPcOffset());
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Short instruction sequences protected by BlockConstPoolScope must not be
  // broken up by constant pool emission.
  if (is_const_pool_blocked()) {
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (constpool_.IsEmpty()) {
    SetNextConstPoolCheckIn(kCheckConstPoolInterval);
    return;
  }

  // Emit the pool if emission is forced, the first use is getting too far
  // away, or the pool has accumulated too many entries.
  int dist = constpool_.DistanceToFirstUse();
  int count = constpool_.EntryCount();
  if (!force_emit &&
      (dist < kApproxMaxDistToConstPool) &&
      (count < kApproxMaxPoolEntryCount)) {
    return;
  }

  // Emit veneers for branches that would go out of range during emission.
  int worst_case_size = constpool_.WorstCaseSize();
  CheckVeneerPool(false, require_jump,
                  kVeneerDistanceMargin + worst_case_size);

  // Make sure the code buffer is large enough (including the gap to the
  // relocation information) before emitting the constant pool.
  int needed_space = worst_case_size + kGap + 1 * kInstrSize;
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  Label size_check;
  bind(&size_check);
  constpool_.Emit(require_jump);
  DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
         static_cast<unsigned>(worst_case_size));

  SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  int protection_offset = 2 * kInstrSize;
  return pc_offset() > max_reachable_pc - margin - protection_offset -
         static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
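// The margin is padded by the worst case: every currently unresolved branch
// may itself need a veneer (kMaxVeneerCodeSize bytes) before this one is
// emitted, plus two instructions for the branch around the pool and the guard.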
void Assembler::RecordVeneerPool(int location_offset, int size) {
  RelocInfo rinfo(reinterpret_cast<Address>(buffer_) + location_offset,
                  RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
  reloc_info_writer.Write(&rinfo);
}
void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
  BlockPoolsScope scope(this);
  RecordComment("[ Veneers");

  // The exact size of the veneer pool cannot be known in advance, so remember
  // the current position and record the size once the pool has been emitted.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    b(&end);
  }

  EmitVeneersGuard();

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    if (force_emit || ShouldEmitVeneer(it->first, margin)) {
      Instruction* branch = InstructionAt(it->second.pc_offset_);
      Label* label = it->second.label_;

#ifdef DEBUG
      bind(&veneer_size_check);
#endif
      // Patch the branch to point to the current position, and emit a branch
      // to the label.
      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
      RemoveBranchFromLabelLinkChain(branch, label, veneer);
      branch->SetImmPCOffsetTarget(options(), veneer);
      b(label);
#ifdef DEBUG
      DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
             static_cast<uint64_t>(kMaxVeneerCodeSize));
      veneer_size_check.Unuse();
#endif

      it_to_delete = it++;
      unresolved_branches_.erase(it_to_delete);
    } else {
      it++;
    }
  }

  // Record the veneer pool size.
  int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  bind(&end);

  RecordComment("]");
}
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                int margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
    return;
  }

  DCHECK(pc_offset() < unresolved_branches_first_limit());

  // Short instruction sequences protected by BlockVeneerPoolScope must not be
  // broken up by veneer pool emission.
  if (is_veneer_pool_blocked()) {
    DCHECK(!force_emit);
    return;
  }

  if (!require_jump) {
    // Prefer emitting veneers protected by an existing instruction.
    margin *= kVeneerNoProtectionFactor;
  }
  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
        unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}
int Assembler::buffer_space() const {
  return static_cast<int>(reloc_info_writer.pos() - pc_);
}
void Assembler::RecordConstPool(int size) {
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
  // The code at the patching site is expected to be:
  //   adr  rd, 0
  //   nop  (adr_far)
  //   nop  (adr_far)
  //   movz scratch, 0

  // Verify the expected code.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    CHECK(InstructionAt((i + 1) * kInstrSize)->IsNop(ADR_FAR_NOP));
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
  CHECK(expected_movz->IsMovz() &&
        (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();

  // Patch the sequence to load the full 48-bit offset.
  Register rd = Register::XRegFromCode(rd_code);
  Register scratch = Register::XRegFromCode(scratch_code);
  adr(rd, target_offset & 0xFFFF);
  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
  DCHECK_EQ(target_offset >> 48, 0);
  add(rd, rd, scratch);
}
void PatchingAssembler::PatchSubSp(uint32_t immediate) {
  // The code at the patching site is expected to be an add/sub immediate
  // writing to sp; replace it with the requested subtraction.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAddSubImmediate());
  sub(sp, sp, immediate);
}

#endif  // V8_TARGET_ARCH_ARM64