#if V8_TARGET_ARCH_ARM64

#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
#include "src/frames-inl.h"
#include "src/macro-assembler-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

#include "src/arm64/macro-assembler-arm64.h"

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}
CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); }

CPURegList TurboAssembler::DefaultFPTmpList() {
  return CPURegList(fp_scratch1, fp_scratch2);
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion) const {
  int bytes = 0;
  auto list = kCallerSaved;

#if defined(V8_OS_WIN)
  DCHECK_EQ(list.Count() % 2, 1);
  if (exclusion.Is(no_reg)) {
    bytes += kXRegSizeInBits / 8;
  } else {
    bytes -= kXRegSizeInBits / 8;
  }
#else
  DCHECK_EQ(list.Count() % 2, 0);
#endif  // V8_OS_WIN

  bytes += list.Count() * kXRegSizeInBits / 8;

  if (fp_mode == kSaveFPRegs) {
    DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
    bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
  }
  return bytes;
}
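// Helpers for spilling and reloading the caller-saved X and V registers
// around calls into C code. On Windows ARM64 the caller-saved list has an odd
// length (x18 is reserved as the platform register), so padreg or the
// excluded register's slot is used to keep every push 16-byte aligned.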
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                    Register exclusion) {
  int bytes = 0;
  auto list = kCallerSaved;

#if defined(V8_OS_WIN)
  if (!exclusion.Is(no_reg)) {
    list.Remove(exclusion);
  } else {
    list.Combine(padreg);
  }
#else
  if (!exclusion.Is(no_reg)) {
    list.Remove(exclusion);
    list.Combine(padreg);
  }
#endif  // V8_OS_WIN

  DCHECK_EQ(list.Count() % 2, 0);
  PushCPURegList(list);
  bytes += list.Count() * kXRegSizeInBits / 8;

  if (fp_mode == kSaveFPRegs) {
    DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
    PushCPURegList(kCallerSavedV);
    bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
  }
  return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
    PopCPURegList(kCallerSavedV);
    bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
  }

  auto list = kCallerSaved;

#if defined(V8_OS_WIN)
  if (!exclusion.Is(no_reg)) {
    list.Remove(exclusion);
  } else {
    list.Combine(padreg);
  }
#else
  if (!exclusion.Is(no_reg)) {
    list.Remove(exclusion);
    list.Combine(padreg);
  }
#endif  // V8_OS_WIN

  DCHECK_EQ(list.Count() % 2, 0);
  PopCPURegList(list);
  bytes += list.Count() * kXRegSizeInBits / 8;
  return bytes;
}
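// LogicalMacro lowers AND/ORR/EOR/BIC-style operations with arbitrary
// operands: relocatable immediates are loaded into a scratch register first,
// encodable immediates are emitted directly, extended-register operands are
// materialized with EmitExtendShift, and plain shifted registers are passed
// straight through to the Logical() emitter.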
void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
                                  const Operand& operand, LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    Logical(rd, rn, temp, op);
  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }

    DCHECK(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all-clear and all-set immediates (bodies elided here).
    if (immediate == 0) {
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xFFFFFFFFL))) {
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize it using a scratch register.
      Register temp = temps.AcquireSameSizeAs(rn);

      // If rn is the stack pointer, the immediate must not be pre-shifted.
      PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);

      if (rd.IsSP()) {
        // If rd is the stack pointer we cannot use it as the destination
        // register, so use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(sp, temp);
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }
  } else if (operand.IsExtendedRegister()) {
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    DCHECK_LE(operand.shift_amount(), 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);
  } else {
    // The operand can be encoded in the instruction.
    DCHECK(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}
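// Mov(Register, uint64_t) synthesizes an arbitrary 64-bit constant. It first
// tries a single-instruction encoding via TryOneInstrMoveImmediate (movz,
// movn or an ORR-encodable logical immediate) and otherwise builds the value
// halfword by halfword with movz/movn followed by movk, skipping halfwords
// that already match the chosen background pattern (all zeros or all ones).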
void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
  DCHECK(allow_macro_instructions());
  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  DCHECK(!rd.IsZero());

  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.SizeInBits();

    // A move-zero or move-inverted is generated for the first non-ignored
    // halfword, and a move-keep for subsequent halfwords. If more halfwords
    // are 0xFFFF than 0x0000 it is cheaper to start from an inverted move.
    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xFFFFL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    DCHECK_EQ(reg_size % 16, 0);
    bool first_mov_done = false;
    for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xFFFFL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xFFFFL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    DCHECK(first_mov_done);

    // Move the temporary back if the destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
    }
  }
}
void TurboAssembler::Mov(const Register& rd, const Operand& operand,
                         DiscardMoveMode discard_mode) {
  DCHECK(allow_macro_instructions());
  DCHECK(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation(this)) {
    if (FLAG_embedded_builtins) {
      if (root_array_available_ && options().isolate_independent_code) {
        if (operand.ImmediateRMode() == RelocInfo::EXTERNAL_REFERENCE) {
          Address addr = static_cast<Address>(operand.ImmediateValue());
          ExternalReference reference = bit_cast<ExternalReference>(addr);
          IndirectLoadExternalReference(rd, reference);
          return;
        } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
          Handle<HeapObject> x(
              reinterpret_cast<Address*>(operand.ImmediateValue()));
          IndirectLoadConstant(rd, x);
          return;
        }
      }
    }
    Ldr(dst, operand.immediate());
  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.ImmediateValue());
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());
  } else {
    // Emit a register move only if the registers are distinct, or if they are
    // not X registers. Note that mov(w0, w0) is not a no-op because it clears
    // the top word of x0, so a move between identical W registers is only
    // discarded when the caller allows kDiscardForSameWReg.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    DCHECK(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}
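// The Movi*Helper functions build NEON vector immediates. Each helper tries
// the movi/mvni byte, shifted and MSL encodings available for its lane size
// and falls back to moving the constant through a general-purpose scratch
// register when no immediate encoding matches.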
void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
  DCHECK(is_uint16(imm));
  int byte1 = (imm & 0xFF);
  int byte2 = ((imm >> 8) & 0xFF);
  if (byte1 == byte2) {
    movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
  } else if (byte1 == 0) {
    movi(vd, byte2, LSL, 8);
  } else if (byte2 == 0) {
    movi(vd, byte1);
  } else if (byte1 == 0xFF) {
    mvni(vd, ~byte2 & 0xFF, LSL, 8);
  } else if (byte2 == 0xFF) {
    mvni(vd, ~byte1 & 0xFF);
  } else {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireW();
    Mov(temp, imm);
    dup(vd, temp);
  }
}
427 void TurboAssembler::Movi32bitHelper(
const VRegister& vd, uint64_t imm) {
428 DCHECK(is_uint32(imm));
430 uint8_t bytes[
sizeof(imm)];
431 memcpy(bytes, &imm,
sizeof(imm));
435 bool all0orff =
true;
436 for (
int i = 0;
i < 4; ++
i) {
437 if ((bytes[
i] != 0) && (bytes[
i] != 0xFF)) {
443 if (all0orff ==
true) {
444 movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
450 for (
int i = 0;
i < 4;
i++) {
451 if ((imm & (0xFF << (
i * 8))) == imm) {
452 movi(vd, bytes[
i], LSL,
i * 8);
458 for (
int i = 0;
i < 4;
i++) {
460 if ((imm & mask) == mask) {
461 mvni(vd, ~bytes[
i] & 0xFF, LSL,
i * 8);
467 if ((imm & 0xFF00FFFF) == 0x0000FFFF) {
468 movi(vd, bytes[2], MSL, 16);
473 if ((imm & 0xFFFF00FF) == 0x000000FF) {
474 movi(vd, bytes[1], MSL, 8);
479 if ((imm & 0xFF00FFFF) == 0xFF000000) {
480 mvni(vd, ~bytes[2] & 0xFF, MSL, 16);
484 if ((imm & 0xFFFF00FF) == 0xFFFF0000) {
485 mvni(vd, ~bytes[1] & 0xFF, MSL, 8);
490 if (((imm >> 16) & 0xFFFF) == (imm & 0xFFFF)) {
491 Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);
497 UseScratchRegisterScope temps(
this);
498 Register temp = temps.AcquireW();
504 void TurboAssembler::Movi64bitHelper(
const VRegister& vd, uint64_t imm) {
507 bool all0orff =
true;
508 for (
int i = 0;
i < 8; ++
i) {
509 int byteval = (imm >> (
i * 8)) & 0xFF;
510 if (byteval != 0 && byteval != 0xFF) {
515 if (all0orff ==
true) {
522 if (((imm >> 32) & 0xFFFFFFFF) == (imm & 0xFFFFFFFF)) {
523 Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);
529 UseScratchRegisterScope temps(
this);
530 Register temp = temps.AcquireX();
533 mov(vd.D(), 0, temp);
540 void TurboAssembler::Movi(
const VRegister& vd, uint64_t imm, Shift shift,
542 DCHECK(allow_macro_instructions());
543 if (shift_amount != 0 || shift != LSL) {
544 movi(vd, imm, shift, shift_amount);
545 }
else if (vd.Is8B() || vd.Is16B()) {
547 DCHECK(is_uint8(imm));
549 }
else if (vd.Is4H() || vd.Is8H()) {
551 Movi16bitHelper(vd, imm);
552 }
else if (vd.Is2S() || vd.Is4S()) {
554 Movi32bitHelper(vd, imm);
557 Movi64bitHelper(vd, imm);
561 void TurboAssembler::Movi(
const VRegister& vd, uint64_t hi, uint64_t lo) {
563 DCHECK(vd.Is128Bits());
564 UseScratchRegisterScope temps(
this);
566 Register temp = temps.AcquireX();
568 Ins(vd.V2D(), 1, temp);
void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
  DCHECK(allow_macro_instructions());

  if (operand.NeedsRelocation(this)) {
    Ldr(rd, operand.immediate());
    mvn(rd, rd);
  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.ImmediateValue());
  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);
  } else {
    mvn(rd, operand);
  }
}
unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  DCHECK_EQ(reg_size % 8, 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xFFFF) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}

// The movz instruction can generate immediates containing an arbitrary 16-bit
// halfword, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}

// The movn instruction can generate immediates containing an arbitrary 16-bit
// halfword, with remaining bits set, eg. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}
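// ConditionalCompareMacro and Csel accept arbitrary operands: encodable
// immediates and unshifted registers go straight to the underlying
// instruction, anything else is first moved into a scratch register of the
// same size as rn.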
void TurboAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv, Condition cond,
                                             ConditionalCompareOp op) {
  DCHECK((cond != al) && (cond != nv));
  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);
  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.ImmediateValue()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);
  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}
void TurboAssembler::Csel(const Register& rd, const Register& rn,
                          const Operand& operand, Condition cond) {
  DCHECK(allow_macro_instructions());
  DCHECK(!rd.IsZero());
  DCHECK((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using the zero
    // register.
    int64_t imm = operand.ImmediateValue();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, imm);
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}
bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.SizeInBits();
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move not instruction. Movn can't
    // write to the stack pointer.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}

Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm,
                                                  PreShiftImmMode mode) {
  int reg_size = dst.SizeInBits();
  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    if (mode == kLimitShiftForSP) {
      // Operations involving sp only allow a limited post-shift, so cap the
      // pre-shift accordingly.
      shift_low = std::min(shift_low, 4);
    }
    int64_t imm_low = imm >> shift_low;

    // Pre-shift the immediate to the most-significant bits of the register.
    int shift_high = CountLeadingZeros(imm, reg_size);
    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);

    if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
      // The immediate fits in the destination's low bits: return a
      // left-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
      // The immediate fits in the destination's high bits: return a
      // right-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      // Use the generic move operation to set up the immediate.
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}
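// AddSubMacro handles the general add/sub forms: a 64-bit "add x, x, #0" that
// leaves the flags alone is dropped as a no-op, immediates that do not fit
// the 12-bit (optionally shifted) encoding are pre-shifted via
// MoveImmediateForShiftedOp where that is safe with respect to sp, and
// ROR-shifted register operands are materialized in a scratch register.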
void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
                                 const Operand& operand, FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation(this)) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() &&
              !IsImmAddSub(operand.ImmediateValue())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      PreShiftImmMode mode = kAnyShift;

      // If the destination or source register is the stack pointer, the
      // amount of pre-shifting is limited (or disallowed when flags are set).
      if (rd.Is(sp)) {
        mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
      } else if (rn.Is(sp)) {
        mode = kLimitShiftForSP;
      }

      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}
void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand, FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation(this)) {
    Register temp = temps.AcquireX();
    Ldr(temp, operand.immediate());
    AddSubWithCarryMacro(rd, rn, temp, S, op);
  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register).
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
    DCHECK(operand.shift() != ROR);
    DCHECK(is_uintn(operand.shift_amount(),
                    rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                                       : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);
  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    DCHECK_LE(operand.shift_amount(), 4);
    DCHECK(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);
  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}
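// LoadStoreMacro and LoadStorePairMacro extend the reach of the immediate
// addressing modes: offsets that cannot be encoded in the scaled, unscaled or
// pair forms are either computed into a scratch register or folded into the
// base register for pre-/post-indexed accesses.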
void TurboAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr, LoadStoreOp op) {
  int64_t offset = addr.offset();
  unsigned size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}
void TurboAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  DCHECK(!addr.IsRegisterOffset());

  int64_t offset = addr.offset();
  unsigned size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      DCHECK(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}
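// Branch helpers. B, Cbz/Cbnz and Tbz/Tbnz check whether the target label is
// within range of the short-branch encoding; if not, they emit an inverted
// short branch over an unconditional b, and unbound far targets are recorded
// in unresolved_branches_ so the veneer pool can patch them later.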
905 bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
906 Label* label, ImmBranchType b_type) {
907 bool need_longer_range =
false;
913 if (label->is_bound() || label->is_linked()) {
915 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
917 if (!need_longer_range && !label->is_bound()) {
918 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
919 unresolved_branches_.insert(
920 std::pair<int, FarBranchInfo>(max_reachable_pc,
921 FarBranchInfo(pc_offset(), label)));
923 next_veneer_pool_check_ =
924 Min(next_veneer_pool_check_,
925 max_reachable_pc - kVeneerDistanceCheckMargin);
927 return need_longer_range;
930 void TurboAssembler::Adr(
const Register& rd, Label* label, AdrHint hint) {
931 DCHECK(allow_macro_instructions());
932 DCHECK(!rd.IsZero());
934 if (hint == kAdrNear) {
939 DCHECK_EQ(hint, kAdrFar);
940 if (label->is_bound()) {
941 int label_offset = label->pos() - pc_offset();
942 if (Instruction::IsValidPCRelOffset(label_offset)) {
945 DCHECK_LE(label_offset, 0);
946 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
947 adr(rd, min_adr_offset);
948 Add(rd, rd, label_offset - min_adr_offset);
951 UseScratchRegisterScope temps(
this);
952 Register scratch = temps.AcquireX();
954 InstructionAccurateScope scope(
955 this, PatchingAssembler::kAdrFarPatchableNInstrs);
957 for (
int i = 0;
i < PatchingAssembler::kAdrFarPatchableNNops; ++
i) {
void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:
        B(label);
        break;
      case reg_zero:
        Cbz(reg, label);
        break;
      case reg_not_zero:
        Cbnz(reg, label);
        break;
      case reg_bit_clear:
        Tbz(reg, bit, label);
        break;
      case reg_bit_set:
        Tbnz(reg, bit, label);
        break;
      default:
        UNREACHABLE();
    }
  }
}
983 void TurboAssembler::B(Label* label, Condition cond) {
984 DCHECK(allow_macro_instructions());
985 DCHECK((cond != al) && (cond != nv));
988 bool need_extra_instructions =
989 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
991 if (need_extra_instructions) {
992 b(&done, NegateCondition(cond));
1000 void TurboAssembler::Tbnz(
const Register& rt,
unsigned bit_pos, Label* label) {
1001 DCHECK(allow_macro_instructions());
1004 bool need_extra_instructions =
1005 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
1007 if (need_extra_instructions) {
1008 tbz(rt, bit_pos, &done);
1011 tbnz(rt, bit_pos, label);
1016 void TurboAssembler::Tbz(
const Register& rt,
unsigned bit_pos, Label* label) {
1017 DCHECK(allow_macro_instructions());
1020 bool need_extra_instructions =
1021 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
1023 if (need_extra_instructions) {
1024 tbnz(rt, bit_pos, &done);
1027 tbz(rt, bit_pos, label);
1032 void TurboAssembler::Cbnz(
const Register& rt, Label* label) {
1033 DCHECK(allow_macro_instructions());
1036 bool need_extra_instructions =
1037 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
1039 if (need_extra_instructions) {
1048 void TurboAssembler::Cbz(
const Register& rt, Label* label) {
1049 DCHECK(allow_macro_instructions());
1052 bool need_extra_instructions =
1053 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
1055 if (need_extra_instructions) {
1067 void TurboAssembler::Abs(
const Register& rd,
const Register& rm,
1068 Label* is_not_representable, Label* is_representable) {
1069 DCHECK(allow_macro_instructions());
1070 DCHECK(AreSameSizeAndType(rd, rm));
1078 if ((is_not_representable !=
nullptr) && (is_representable !=
nullptr)) {
1079 B(is_not_representable, vs);
1080 B(is_representable);
1081 }
else if (is_not_representable !=
nullptr) {
1082 B(is_not_representable, vs);
1083 }
else if (is_representable !=
nullptr) {
1084 B(is_representable, vc);
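// Stack operations. sp must stay 16-byte aligned whenever it is used to
// access memory, so the Push/Pop helpers below always transfer an even number
// of X-sized slots (using padreg where necessary) and batch up to four
// registers per stp/ldp sequence.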
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();
  DCHECK_EQ(0, (size * count) % 16);

  PushHelper(count, size, src0, src1, src2, src3);
}

void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();
  DCHECK_EQ(0, (size * count) % 16);

  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}
1116 void TurboAssembler::Pop(
const CPURegister& dst0,
const CPURegister& dst1,
1117 const CPURegister& dst2,
const CPURegister& dst3) {
1120 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
1121 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1122 DCHECK(dst0.IsValid());
1124 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
1125 int size = dst0.SizeInBytes();
1126 DCHECK_EQ(0, (size * count) % 16);
1128 PopHelper(count, size, dst0, dst1, dst2, dst3);
1131 void TurboAssembler::Pop(
const CPURegister& dst0,
const CPURegister& dst1,
1132 const CPURegister& dst2,
const CPURegister& dst3,
1133 const CPURegister& dst4,
const CPURegister& dst5,
1134 const CPURegister& dst6,
const CPURegister& dst7) {
1137 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
1138 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
1139 DCHECK(dst0.IsValid());
1141 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
1142 int size = dst0.SizeInBytes();
1143 DCHECK_EQ(0, (size * count) % 16);
1145 PopHelper(4, size, dst0, dst1, dst2, dst3);
1146 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
1149 void TurboAssembler::Push(
const Register& src0,
const VRegister& src1) {
1150 int size = src0.SizeInBytes() + src1.SizeInBytes();
1151 DCHECK_EQ(0, size % 16);
1154 str(src1, MemOperand(sp, -size, PreIndex));
1156 str(src0, MemOperand(sp, src1.SizeInBytes()));
1159 void MacroAssembler::PushPopQueue::PushQueued() {
1160 DCHECK_EQ(0, size_ % 16);
1161 if (queued_.empty())
return;
1163 size_t count = queued_.size();
1165 while (index < count) {
1168 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
1169 int batch_index = 0;
1171 batch[batch_index++] = queued_[index++];
1172 }
while ((batch_index < 4) && (index < count) &&
1173 batch[0].IsSameSizeAndType(queued_[index]));
1175 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
1176 batch[0], batch[1], batch[2], batch[3]);
1183 void MacroAssembler::PushPopQueue::PopQueued() {
1184 DCHECK_EQ(0, size_ % 16);
1185 if (queued_.empty())
return;
1187 size_t count = queued_.size();
1189 while (index < count) {
1192 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
1193 int batch_index = 0;
1195 batch[batch_index++] = queued_[index++];
1196 }
while ((batch_index < 4) && (index < count) &&
1197 batch[0].IsSameSizeAndType(queued_[index]));
1199 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
1200 batch[0], batch[1], batch[2], batch[3]);
1206 void TurboAssembler::PushCPURegList(CPURegList registers) {
1207 int size = registers.RegisterSizeInBytes();
1208 DCHECK_EQ(0, (size * registers.Count()) % 16);
1211 while (!registers.IsEmpty()) {
1212 int count_before = registers.Count();
1213 const CPURegister& src0 = registers.PopHighestIndex();
1214 const CPURegister& src1 = registers.PopHighestIndex();
1215 const CPURegister& src2 = registers.PopHighestIndex();
1216 const CPURegister& src3 = registers.PopHighestIndex();
1217 int count = count_before - registers.Count();
1218 PushHelper(count, size, src0, src1, src2, src3);
1222 void TurboAssembler::PopCPURegList(CPURegList registers) {
1223 int size = registers.RegisterSizeInBytes();
1224 DCHECK_EQ(0, (size * registers.Count()) % 16);
1227 while (!registers.IsEmpty()) {
1228 int count_before = registers.Count();
1229 const CPURegister& dst0 = registers.PopLowestIndex();
1230 const CPURegister& dst1 = registers.PopLowestIndex();
1231 const CPURegister& dst2 = registers.PopLowestIndex();
1232 const CPURegister& dst3 = registers.PopLowestIndex();
1233 int count = count_before - registers.Count();
1234 PopHelper(count, size, dst0, dst1, dst2, dst3);
1238 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1239 UseScratchRegisterScope temps(
this);
1240 Register temp = temps.AcquireSameSizeAs(count);
1242 if (FLAG_optimize_for_size) {
1245 Subs(temp, count, 1);
1250 Subs(temp, temp, 1);
1251 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1256 Label loop, leftover2, leftover1, done;
1258 Subs(temp, count, 4);
1263 Subs(temp, temp, 4);
1264 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1269 Tbz(count, 1, &leftover1);
1270 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1274 Tbz(count, 0, &done);
1275 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1281 void TurboAssembler::PushHelper(
int count,
int size,
const CPURegister& src0,
1282 const CPURegister& src1,
1283 const CPURegister& src2,
1284 const CPURegister& src3) {
1286 InstructionAccurateScope scope(
this);
1288 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1289 DCHECK(size == src0.SizeInBytes());
1295 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1296 str(src0, MemOperand(sp, -1 * size, PreIndex));
1299 DCHECK(src2.IsNone() && src3.IsNone());
1300 stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
1303 DCHECK(src3.IsNone());
1304 stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
1305 str(src0, MemOperand(sp, 2 * size));
1311 stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
1312 stp(src1, src0, MemOperand(sp, 2 * size));
1319 void TurboAssembler::PopHelper(
int count,
int size,
const CPURegister& dst0,
1320 const CPURegister& dst1,
const CPURegister& dst2,
1321 const CPURegister& dst3) {
1323 InstructionAccurateScope scope(
this);
1325 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1326 DCHECK(size == dst0.SizeInBytes());
1332 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1333 ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
1336 DCHECK(dst2.IsNone() && dst3.IsNone());
1337 ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
1340 DCHECK(dst3.IsNone());
1341 ldr(dst2, MemOperand(sp, 2 * size));
1342 ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
1349 ldp(dst2, dst3, MemOperand(sp, 2 * size));
1350 ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
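// Poke/Peek store to and load from a stack slot at a byte offset from sp
// without moving sp; the *Pair variants use stp/ldp to transfer two registers
// at once.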
1357 void TurboAssembler::Poke(
const CPURegister& src,
const Operand& offset) {
1358 if (offset.IsImmediate()) {
1359 DCHECK_GE(offset.ImmediateValue(), 0);
1360 }
else if (emit_debug_code()) {
1362 Check(le, AbortReason::kStackAccessBelowStackPointer);
1365 Str(src, MemOperand(sp, offset));
1368 void TurboAssembler::Peek(
const CPURegister& dst,
const Operand& offset) {
1369 if (offset.IsImmediate()) {
1370 DCHECK_GE(offset.ImmediateValue(), 0);
1371 }
else if (emit_debug_code()) {
1373 Check(le, AbortReason::kStackAccessBelowStackPointer);
1376 Ldr(dst, MemOperand(sp, offset));
1379 void TurboAssembler::PokePair(
const CPURegister& src1,
const CPURegister& src2,
1381 DCHECK(AreSameSizeAndType(src1, src2));
1382 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1383 Stp(src1, src2, MemOperand(sp, offset));
1387 void MacroAssembler::PeekPair(
const CPURegister& dst1,
1388 const CPURegister& dst2,
1390 DCHECK(AreSameSizeAndType(dst1, dst2));
1391 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1392 Ldp(dst1, dst2, MemOperand(sp, offset));
1396 void MacroAssembler::PushCalleeSavedRegisters() {
1398 InstructionAccurateScope scope(
this);
1400 MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
1416 void MacroAssembler::PopCalleeSavedRegisters() {
1418 InstructionAccurateScope scope(
this);
1420 MemOperand tos(sp, 2 * kXRegSize, PostIndex);
1435 void TurboAssembler::AssertSpAligned() {
1436 if (emit_debug_code()) {
1437 HardAbortScope hard_abort(
this);
1440 UseScratchRegisterScope scope(
this);
1441 Register temp = scope.AcquireX();
1444 Check(eq, AbortReason::kUnexpectedStackPointer);
1448 void TurboAssembler::CopySlots(
int dst, Register src, Register slot_count) {
1449 DCHECK(!src.IsZero());
1450 UseScratchRegisterScope scope(
this);
1451 Register dst_reg = scope.AcquireX();
1452 SlotAddress(dst_reg, dst);
1453 SlotAddress(src, src);
1454 CopyDoubleWords(dst_reg, src, slot_count);
1457 void TurboAssembler::CopySlots(Register dst, Register src,
1458 Register slot_count) {
1459 DCHECK(!dst.IsZero() && !src.IsZero());
1460 SlotAddress(dst, dst);
1461 SlotAddress(src, src);
1462 CopyDoubleWords(dst, src, slot_count);
1465 void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
1466 CopyDoubleWordsMode mode) {
1467 DCHECK(!AreAliased(dst, src, count));
1469 if (emit_debug_code()) {
1470 Register pointer1 = dst;
1471 Register pointer2 = src;
1472 if (mode == kSrcLessThanDst) {
1477 Label pointer1_below_pointer2;
1478 Subs(pointer1, pointer1, pointer2);
1479 B(lt, &pointer1_below_pointer2);
1480 Cmp(pointer1, count);
1481 Check(ge, AbortReason::kOffsetOutOfRange);
1482 Bind(&pointer1_below_pointer2);
1483 Add(pointer1, pointer1, pointer2);
1485 static_assert(kPointerSize == kDRegSize,
1486 "pointers must be the same size as doubles");
1488 int direction = (mode == kDstLessThanSrc) ? 1 : -1;
1489 UseScratchRegisterScope scope(
this);
1490 VRegister temp0 = scope.AcquireD();
1491 VRegister temp1 = scope.AcquireD();
1493 Label pairs, loop, done;
1495 Tbz(count, 0, &pairs);
1496 Ldr(temp0, MemOperand(src, direction * kPointerSize, PostIndex));
1497 Sub(count, count, 1);
1498 Str(temp0, MemOperand(dst, direction * kPointerSize, PostIndex));
1501 if (mode == kSrcLessThanDst) {
1503 Sub(dst, dst, kPointerSize);
1504 Sub(src, src, kPointerSize);
1508 Ldp(temp0, temp1, MemOperand(src, 2 * direction * kPointerSize, PostIndex));
1509 Sub(count, count, 2);
1510 Stp(temp0, temp1, MemOperand(dst, 2 * direction * kPointerSize, PostIndex));
1519 void TurboAssembler::SlotAddress(Register dst,
int slot_offset) {
1520 Add(dst, sp, slot_offset << kPointerSizeLog2);
1523 void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
1524 Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
1527 void TurboAssembler::AssertFPCRState(Register fpcr) {
1528 if (emit_debug_code()) {
1529 Label unexpected_mode, done;
1530 UseScratchRegisterScope temps(
this);
1531 if (fpcr.IsNone()) {
1532 fpcr = temps.AcquireX();
1538 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1540 STATIC_ASSERT(FPTieEven == 0);
1541 Tst(fpcr, RMode_mask);
1544 Bind(&unexpected_mode);
1545 Abort(AbortReason::kUnexpectedFPCRMode);
1551 void TurboAssembler::CanonicalizeNaN(
const VRegister& dst,
1552 const VRegister& src) {
1558 Fsub(dst, src, fp_zero);
1561 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
1565 MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
1569 void MacroAssembler::LoadObject(Register result, Handle<Object>
object) {
1570 AllowDeferredHandleDereference heap_object_check;
1571 if (object->IsHeapObject()) {
1572 Mov(result, Handle<HeapObject>::cast(
object));
1574 Mov(result, Operand(Smi::cast(*
object)));
1578 void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); }
1580 void TurboAssembler::Swap(Register lhs, Register rhs) {
1581 DCHECK(lhs.IsSameSizeAndType(rhs));
1582 DCHECK(!lhs.Is(rhs));
1583 UseScratchRegisterScope temps(
this);
1584 Register temp = temps.AcquireX();
1590 void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
1591 DCHECK(lhs.IsSameSizeAndType(rhs));
1592 DCHECK(!lhs.Is(rhs));
1593 UseScratchRegisterScope temps(
this);
1594 VRegister temp = VRegister::no_reg();
1596 temp = temps.AcquireS();
1597 }
else if (lhs.IsD()) {
1598 temp = temps.AcquireD();
1601 temp = temps.AcquireQ();
1608 void TurboAssembler::AssertSmi(Register
object, AbortReason reason) {
1609 if (emit_debug_code()) {
1610 STATIC_ASSERT(kSmiTag == 0);
1611 Tst(
object, kSmiTagMask);
1616 void MacroAssembler::AssertNotSmi(Register
object, AbortReason reason) {
1617 if (emit_debug_code()) {
1618 STATIC_ASSERT(kSmiTag == 0);
1619 Tst(
object, kSmiTagMask);
1624 void MacroAssembler::AssertConstructor(Register
object) {
1625 if (emit_debug_code()) {
1626 AssertNotSmi(
object, AbortReason::kOperandIsASmiAndNotAConstructor);
1628 UseScratchRegisterScope temps(
this);
1629 Register temp = temps.AcquireX();
1631 Ldr(temp, FieldMemOperand(
object, HeapObject::kMapOffset));
1632 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1633 Tst(temp, Operand(Map::IsConstructorBit::kMask));
1635 Check(ne, AbortReason::kOperandIsNotAConstructor);
1639 void MacroAssembler::AssertFunction(Register
object) {
1640 if (emit_debug_code()) {
1641 AssertNotSmi(
object, AbortReason::kOperandIsASmiAndNotAFunction);
1643 UseScratchRegisterScope temps(
this);
1644 Register temp = temps.AcquireX();
1646 CompareObjectType(
object, temp, temp, JS_FUNCTION_TYPE);
1647 Check(eq, AbortReason::kOperandIsNotAFunction);
1652 void MacroAssembler::AssertBoundFunction(Register
object) {
1653 if (emit_debug_code()) {
1654 AssertNotSmi(
object, AbortReason::kOperandIsASmiAndNotABoundFunction);
1656 UseScratchRegisterScope temps(
this);
1657 Register temp = temps.AcquireX();
1659 CompareObjectType(
object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1660 Check(eq, AbortReason::kOperandIsNotABoundFunction);
1664 void MacroAssembler::AssertGeneratorObject(Register
object) {
1665 if (!emit_debug_code())
return;
1666 AssertNotSmi(
object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
1669 UseScratchRegisterScope temps(
this);
1670 Register temp = temps.AcquireX();
1671 Ldr(temp, FieldMemOperand(
object, HeapObject::kMapOffset));
1675 CompareInstanceType(temp, temp, JS_GENERATOR_OBJECT_TYPE);
1679 Cmp(temp, JS_ASYNC_FUNCTION_OBJECT_TYPE);
1683 Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
1687 Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1690 void MacroAssembler::AssertUndefinedOrAllocationSite(Register
object) {
1691 if (emit_debug_code()) {
1692 UseScratchRegisterScope temps(
this);
1693 Register scratch = temps.AcquireX();
1694 Label done_checking;
1695 AssertNotSmi(
object);
1696 JumpIfRoot(
object, RootIndex::kUndefinedValue, &done_checking);
1697 Ldr(scratch, FieldMemOperand(
object, HeapObject::kMapOffset));
1698 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1699 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1700 Bind(&done_checking);
1704 void TurboAssembler::AssertPositiveOrZero(Register value) {
1705 if (emit_debug_code()) {
1707 int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1708 Tbz(value, sign_bit, &done);
1709 Abort(AbortReason::kUnexpectedNegativeValue);
1714 void MacroAssembler::CallStub(CodeStub* stub) {
1715 DCHECK(AllowThisStubCall(stub));
1716 Call(stub->GetCode(), RelocInfo::CODE_TARGET);
1719 void MacroAssembler::TailCallStub(CodeStub* stub) {
1720 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1723 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1725 const Runtime::Function* f = Runtime::FunctionForId(fid);
1731 Mov(x1, ExternalReference::Create(f));
1732 DCHECK(!AreAliased(centry, x0, x1));
1733 Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
1737 void MacroAssembler::CallRuntime(
const Runtime::Function* f,
1739 SaveFPRegsMode save_doubles) {
1745 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1748 Mov(x0, num_arguments);
1749 Mov(x1, ExternalReference::Create(f));
1752 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1753 Call(code, RelocInfo::CODE_TARGET);
1756 void MacroAssembler::JumpToExternalReference(
const ExternalReference& builtin,
1757 bool builtin_exit_frame) {
1759 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1760 kArgvOnStack, builtin_exit_frame);
1761 Jump(code, RelocInfo::CODE_TARGET);
1764 void MacroAssembler::JumpToInstructionStream(Address entry) {
1765 Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1766 Br(kOffHeapTrampolineRegister);
1769 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1770 const Runtime::Function*
function = Runtime::FunctionForId(fid);
1771 DCHECK_EQ(1, function->result_size);
1772 if (function->nargs >= 0) {
1777 Mov(x0, function->nargs);
1779 JumpToExternalReference(ExternalReference::Create(fid));
int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
  return base::OS::ActivationFrameAlignment();
#else   // V8_HOST_ARCH_ARM64
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args) {
  CallCFunction(function, num_of_reg_args, 0);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_of_reg_args,
                                   int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);
  CallCFunction(temp, num_of_reg_args, num_of_double_args);
}
1812 static const int kRegisterPassedArguments = 8;
1814 void TurboAssembler::CallCFunction(Register
function,
int num_of_reg_args,
1815 int num_of_double_args) {
1816 DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
1817 DCHECK(has_frame());
1825 if (num_of_double_args > 0) {
1826 DCHECK_LE(num_of_reg_args, 1);
1827 DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
1834 if (num_of_reg_args > kRegisterPassedArguments) {
1836 int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
1841 void TurboAssembler::LoadFromConstantsTable(Register destination,
1842 int constant_index) {
1843 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
1844 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
1846 FieldMemOperand(destination,
1847 FixedArray::kHeaderSize + constant_index * kPointerSize));
1850 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
1851 Ldr(destination, MemOperand(kRootRegister, offset));
1854 void TurboAssembler::LoadRootRegisterOffset(Register destination,
1857 Mov(destination, kRootRegister);
1859 Add(destination, kRootRegister, offset);
1863 void TurboAssembler::Jump(Register target, Condition cond) {
1864 if (cond == nv)
return;
1866 if (cond != al) B(NegateCondition(cond), &done);
1871 void TurboAssembler::JumpHelper(
int64_t offset, RelocInfo::Mode rmode,
1873 if (cond == nv)
return;
1875 if (cond != al) B(NegateCondition(cond), &done);
1876 if (CanUseNearCallOrJump(rmode)) {
1877 DCHECK(IsNearCallOffset(offset));
1878 near_jump(static_cast<int>(offset), rmode);
1880 UseScratchRegisterScope temps(
this);
1881 Register temp = temps.AcquireX();
1882 uint64_t imm =
reinterpret_cast<uint64_t
>(pc_) + offset * kInstrSize;
1883 Mov(temp, Immediate(imm, rmode));
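// Jumps and calls prefer the PC-relative near forms (b/bl with a 26-bit
// instruction offset) whenever the target is known to be in range; otherwise
// the target address is materialized in a scratch register and reached with
// br/blr.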
1895 static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
1900 if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
1901 offset -=
reinterpret_cast<int64_t>(pc);
1902 DCHECK_EQ(offset % kInstrSize, 0);
1903 offset = offset /
static_cast<int>(kInstrSize);
1909 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
1911 JumpHelper(CalculateTargetOffset(target, rmode, pc_), rmode, cond);
1914 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1916 DCHECK(RelocInfo::IsCodeTarget(rmode));
1917 if (FLAG_embedded_builtins) {
1918 if (root_array_available_ && options().isolate_independent_code &&
1919 !Builtins::IsIsolateIndependentBuiltin(*code)) {
1925 UseScratchRegisterScope temps(
this);
1926 Register scratch = temps.AcquireX();
1927 IndirectLoadConstant(scratch, code);
1928 Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
1929 Jump(scratch, cond);
1931 }
else if (options().inline_offheap_trampolines) {
1932 int builtin_index = Builtins::kNoBuiltinId;
1933 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
1934 Builtins::IsIsolateIndependent(builtin_index)) {
1936 RecordCommentForOffHeapTrampoline(builtin_index);
1937 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1938 UseScratchRegisterScope temps(
this);
1939 Register scratch = temps.AcquireX();
1940 EmbeddedData d = EmbeddedData::FromBlob();
1941 Address entry = d.InstructionStartOfBuiltin(builtin_index);
1942 Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1943 Jump(scratch, cond);
1948 if (CanUseNearCallOrJump(rmode)) {
1949 JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
1951 Jump(code.address(), rmode, cond);
1955 void TurboAssembler::Call(Register target) {
1956 BlockPoolsScope scope(
this);
1960 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
1961 BlockPoolsScope scope(
this);
1963 if (CanUseNearCallOrJump(rmode)) {
1964 int64_t offset = CalculateTargetOffset(target, rmode, pc_);
1965 DCHECK(IsNearCallOffset(offset));
1966 near_call(static_cast<int>(offset), rmode);
1968 IndirectCall(target, rmode);
1972 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
1973 BlockPoolsScope scope(
this);
1975 if (FLAG_embedded_builtins) {
1976 if (root_array_available_ && options().isolate_independent_code &&
1977 !Builtins::IsIsolateIndependentBuiltin(*code)) {
1983 UseScratchRegisterScope temps(
this);
1984 Register scratch = temps.AcquireX();
1985 IndirectLoadConstant(scratch, code);
1986 Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
1989 }
else if (options().inline_offheap_trampolines) {
1990 int builtin_index = Builtins::kNoBuiltinId;
1991 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
1992 Builtins::IsIsolateIndependent(builtin_index)) {
1994 RecordCommentForOffHeapTrampoline(builtin_index);
1995 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1996 UseScratchRegisterScope temps(
this);
1997 Register scratch = temps.AcquireX();
1998 EmbeddedData d = EmbeddedData::FromBlob();
1999 Address entry = d.InstructionStartOfBuiltin(builtin_index);
2000 Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
2006 if (CanUseNearCallOrJump(rmode)) {
2007 near_call(AddCodeTarget(code), rmode);
2009 IndirectCall(code.address(), rmode);
2013 void TurboAssembler::Call(ExternalReference target) {
2014 UseScratchRegisterScope temps(
this);
2015 Register temp = temps.AcquireX();
2020 void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) {
2021 UseScratchRegisterScope temps(
this);
2022 Register temp = temps.AcquireX();
2023 Mov(temp, Immediate(target, rmode));
2027 bool TurboAssembler::IsNearCallOffset(
int64_t offset) {
2028 return is_int26(offset);
2031 void TurboAssembler::CallForDeoptimization(Address target,
int deopt_id,
2032 RelocInfo::Mode rmode) {
2033 DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
2035 BlockPoolsScope scope(
this);
2041 UseScratchRegisterScope temps(
this);
2042 Register temp = temps.AcquireX();
2043 DCHECK(temp.Is(x16));
2046 DCHECK(is_uint16(deopt_id));
2047 movz(temp, deopt_id);
2049 static_cast<int64_t>(options().code_range_start);
2050 DCHECK_EQ(offset % kInstrSize, 0);
2051 offset = offset /
static_cast<int>(kInstrSize);
2052 DCHECK(IsNearCallOffset(offset));
2053 near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
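// Double-to-integer conversion helpers. The inline fast path issues fcvtzs
// and then checks that the result is usable (exact, or not saturated,
// depending on the helper); only when that check fails does TruncateDoubleToI
// call out to the DoubleToI builtin (or the corresponding WASM stub).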
2056 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
2057 VRegister scratch_d,
2058 Label* on_successful_conversion,
2059 Label* on_failed_conversion) {
2061 Fcvtzs(as_int, value);
2062 Scvtf(scratch_d, as_int);
2063 Fcmp(value, scratch_d);
2065 if (on_successful_conversion) {
2066 B(on_successful_conversion, eq);
2068 if (on_failed_conversion) {
2069 B(on_failed_conversion, ne);
2073 void TurboAssembler::PrepareForTailCall(
const ParameterCount& callee_args_count,
2074 Register caller_args_count_reg,
2075 Register scratch0, Register scratch1) {
2077 if (callee_args_count.is_reg()) {
2078 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2081 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2088 Register dst_reg = scratch0;
2089 Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
2090 Add(dst_reg, dst_reg, StandardFrameConstants::kCallerSPOffset + kPointerSize);
2093 Add(dst_reg, dst_reg, 15);
2094 Bic(dst_reg, dst_reg, 15);
2096 Register src_reg = caller_args_count_reg;
2098 if (callee_args_count.is_reg()) {
2099 Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
2100 Add(src_reg, src_reg, kPointerSize);
2102 Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
2107 Add(src_reg, src_reg, 15);
2108 Bic(src_reg, src_reg, 15);
2110 if (FLAG_debug_code) {
2111 Cmp(src_reg, dst_reg);
2112 Check(lo, AbortReason::kStackAccessBelowStackPointer);
2117 Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
2118 Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2125 Register tmp_reg = scratch1;
2129 Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
2130 Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
2139 void MacroAssembler::InvokePrologue(
const ParameterCount& expected,
2140 const ParameterCount& actual, Label* done,
2142 bool* definitely_mismatches) {
2143 bool definitely_matches =
false;
2144 *definitely_mismatches =
false;
2145 Label regular_invoke;
2156 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2157 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2159 if (expected.is_immediate()) {
2160 DCHECK(actual.is_immediate());
2161 Mov(x0, actual.immediate());
2162 if (expected.immediate() == actual.immediate()) {
2163 definitely_matches =
true;
2166 if (expected.immediate() ==
2167 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2172 definitely_matches =
true;
2174 *definitely_mismatches =
true;
2176 Mov(x2, expected.immediate());
2181 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2182 : Operand(actual.reg());
2185 Cmp(expected.reg(), actual_op);
  B(eq, &regular_invoke);
2191 if (!definitely_matches) {
2192 Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
2193 if (flag == CALL_FUNCTION) {
2195 if (!*definitely_mismatches) {
2201 Jump(adaptor, RelocInfo::CODE_TARGET);
  Bind(&regular_invoke);
2207 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
2208 const ParameterCount& expected,
2209 const ParameterCount& actual) {
2212 Mov(x4, ExternalReference::debug_hook_on_function_call_address(isolate()));
2213 Ldrsb(x4, MemOperand(x4));
2214 Cbz(x4, &skip_hook);
2218 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2219 : Operand(actual.reg());
2221 Ldr(x4, MemOperand(sp, x4, LSL, kPointerSizeLog2));
2222 FrameScope frame(
this,
2223 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2225 Register expected_reg = padreg;
2226 Register actual_reg = padreg;
2227 if (expected.is_reg()) expected_reg = expected.reg();
2228 if (actual.is_reg()) actual_reg = actual.reg();
2229 if (!new_target.is_valid()) new_target = padreg;
2232 SmiTag(expected_reg);
2234 Push(expected_reg, actual_reg, new_target, fun);
2236 CallRuntime(Runtime::kDebugOnFunctionCall);
2239 Pop(fun, new_target, actual_reg, expected_reg);
2240 SmiUntag(actual_reg);
2241 SmiUntag(expected_reg);
2246 void MacroAssembler::InvokeFunctionCode(Register
function, Register new_target,
2247 const ParameterCount& expected,
2248 const ParameterCount& actual,
2251 DCHECK(flag == JUMP_FUNCTION || has_frame());
2252 DCHECK(
function.is(x1));
2253 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2256 CheckDebugHook(
function, new_target, expected, actual);
2259 if (!new_target.is_valid()) {
2260 LoadRoot(x3, RootIndex::kUndefinedValue);
2264 bool definitely_mismatches =
false;
2265 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches);
2270 if (!definitely_mismatches) {
2274 Register code = kJavaScriptCallCodeStartRegister;
2275 Ldr(code, FieldMemOperand(
function, JSFunction::kCodeOffset));
2276 Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
2277 if (flag == CALL_FUNCTION) {
2280 DCHECK(flag == JUMP_FUNCTION);
2290 void MacroAssembler::InvokeFunction(Register
function, Register new_target,
2291 const ParameterCount& actual,
2294 DCHECK(flag == JUMP_FUNCTION || has_frame());
2298 DCHECK(
function.is(x1));
2300 Register expected_reg = x2;
2302 Ldr(cp, FieldMemOperand(
function, JSFunction::kContextOffset));
2306 Ldr(expected_reg, FieldMemOperand(
function,
2307 JSFunction::kSharedFunctionInfoOffset));
2309 FieldMemOperand(expected_reg,
2310 SharedFunctionInfo::kFormalParameterCountOffset));
2312 ParameterCount expected(expected_reg);
2313 InvokeFunctionCode(
function, new_target, expected, actual, flag);
2316 void MacroAssembler::InvokeFunction(Register
function,
2317 const ParameterCount& expected,
2318 const ParameterCount& actual,
2321 DCHECK(flag == JUMP_FUNCTION || has_frame());
2325 DCHECK(
function.Is(x1));
2328 Ldr(cp, FieldMemOperand(
function, JSFunction::kContextOffset));
2330 InvokeFunctionCode(
function, no_reg, expected, actual, flag);
2333 void TurboAssembler::TryConvertDoubleToInt64(Register result,
2334 DoubleRegister double_input,
2343 Fcvtzs(result.X(), double_input);
2352 Ccmp(result.X(), -1, VFlag, vc);
2357 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2359 DoubleRegister double_input,
2360 StubCallMode stub_mode) {
2365 TryConvertDoubleToInt64(result, double_input, &done);
2368 Push(lr, double_input);
2371 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2372 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
2374 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
2376 Ldr(result, MemOperand(sp, 0));
2378 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2383 Uxtw(result.W(), result.W());
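// Frame construction. Prologue pushes lr, fp, cp and the closure register,
// EnterFrame pushes lr, fp and a frame-type marker (padded so sp stays
// 16-byte aligned), and both then point fp at the fixed part of the new
// frame. EnterExitFrame additionally records fp and cp in per-isolate slots
// so the runtime can walk the stack.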
2386 void TurboAssembler::Prologue() {
2387 Push(lr, fp, cp, x1);
2388 Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
2391 void TurboAssembler::EnterFrame(StackFrame::Type type) {
2392 UseScratchRegisterScope temps(
this);
2394 if (type == StackFrame::INTERNAL) {
2395 Register type_reg = temps.AcquireX();
2396 Mov(type_reg, StackFrame::TypeToMarker(type));
2398 Push(lr, fp, type_reg, type_reg);
2399 const int kFrameSize =
2400 TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize;
2401 Add(fp, sp, kFrameSize);
2406 }
else if (type == StackFrame::WASM_COMPILED ||
2407 type == StackFrame::WASM_COMPILE_LAZY) {
2408 Register type_reg = temps.AcquireX();
2409 Mov(type_reg, StackFrame::TypeToMarker(type));
2412 Push(type_reg, padreg);
2418 DCHECK_EQ(type, StackFrame::CONSTRUCT);
2419 Register type_reg = temps.AcquireX();
2420 Mov(type_reg, StackFrame::TypeToMarker(type));
2424 Push(lr, fp, type_reg, cp);
2428 Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2436 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2444 void MacroAssembler::ExitFramePreserveFPRegs() {
2445 DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
2446 PushCPURegList(kCallerSavedV);
2450 void MacroAssembler::ExitFrameRestoreFPRegs() {
2453 CPURegList saved_fp_regs = kCallerSavedV;
2454 DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
2456 int offset = ExitFrameConstants::kLastExitFrameField;
2457 while (!saved_fp_regs.IsEmpty()) {
2458 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2459 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2460 offset -= 2 * kDRegSize;
2461 Ldp(dst1, dst0, MemOperand(fp, offset));
2465 void MacroAssembler::EnterExitFrame(
bool save_doubles,
const Register& scratch,
2467 StackFrame::Type frame_type) {
2468 DCHECK(frame_type == StackFrame::EXIT ||
2469 frame_type == StackFrame::BUILTIN_EXIT);
2474 Mov(scratch, StackFrame::TypeToMarker(frame_type));
2476 Mov(scratch, CodeObject());
2477 Push(scratch, padreg);
2484 STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
2485 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2486 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2487 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
2488 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2489 STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kPaddingOffset);
2493 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
2494 Str(fp, MemOperand(scratch));
2496 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
2497 Str(cp, MemOperand(scratch));
2499 STATIC_ASSERT((-4 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
2501 ExitFramePreserveFPRegs();
2505 int slots_to_claim = RoundUp(extra_space + 1, 2);
2510 Claim(slots_to_claim, kXRegSize);
2524 Add(scratch, sp, kXRegSize);
2525 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2530 void MacroAssembler::LeaveExitFrame(
bool restore_doubles,
2531 const Register& scratch,
2532 const Register& scratch2) {
2533 if (restore_doubles) {
2534 ExitFrameRestoreFPRegs();
2539 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
2540 Ldr(cp, MemOperand(scratch));
2542 if (emit_debug_code()) {
2544 Mov(scratch2, Operand(Context::kInvalidContext));
2545 Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
2547 Str(scratch2, MemOperand(scratch));
2551 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
2552 Str(xzr, MemOperand(scratch));
2562 void MacroAssembler::LoadGlobalProxy(Register dst) {
2563 LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
2566 void MacroAssembler::LoadWeakValue(Register out, Register in,
2567 Label* target_if_cleared) {
2568 CompareAndBranch(in.W(), Operand(kClearedWeakHeapObjectLower32), eq,
2571 and_(out, in, Operand(~kWeakHeapObjectMask));
2574 void MacroAssembler::IncrementCounter(StatsCounter* counter,
int value,
2575 Register scratch1, Register scratch2) {
2576 DCHECK_NE(value, 0);
2577 if (FLAG_native_code_counters && counter->Enabled()) {
2578 Mov(scratch2, ExternalReference::Create(counter));
2579 Ldr(scratch1.W(), MemOperand(scratch2));
2580 Add(scratch1.W(), scratch1.W(), value);
2581 Str(scratch1.W(), MemOperand(scratch2));
2586 void MacroAssembler::DecrementCounter(StatsCounter* counter,
int value,
2587 Register scratch1, Register scratch2) {
2588 IncrementCounter(counter, -value, scratch1, scratch2);
2591 void MacroAssembler::MaybeDropFrames() {
2593 Mov(x1, ExternalReference::debug_restart_fp_address(isolate()));
2594 Ldr(x1, MemOperand(x1));
2596 Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
void MacroAssembler::JumpIfObjectType(Register object, Register map,
                                      Register type_reg, InstanceType type,
                                      Label* if_cond_pass, Condition cond) {
  CompareObjectType(object, map, type_reg, type);
  B(cond, if_cond_pass);
}
// Sets the condition flags and leaves the object's instance type in type_reg.
void MacroAssembler::CompareObjectType(Register object, Register map,
                                       Register type_reg, InstanceType type) {
  Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}

void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
                                         InstanceType type) {
  Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  Cmp(type_reg, type);
}
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
  // Load the map's "bit field 2" and decode the elements kind from it.
  Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(result);
}
void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  DCHECK(!AreAliased(obj, temp));
  LoadRoot(temp, index);
  Cmp(obj, temp);
}

void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
                                Label* if_equal) {
  CompareRoot(obj, index);
  B(eq, if_equal);
}

void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index,
                                   Label* if_not_equal) {
  CompareRoot(obj, index);
  B(ne, if_not_equal);
}
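
// CompareAndSplit and TestAndSplit generate a minimal branch sequence for a
// three-way split: whenever a target label equals fall_through, the
// corresponding branch is omitted.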
void MacroAssembler::CompareAndSplit(const Register& lhs, const Operand& rhs,
                                     Condition cond, Label* if_true,
                                     Label* if_false, Label* fall_through) {
  if ((if_true == if_false) && (if_false == fall_through)) {
    // Fall through.
  } else if (if_true == if_false) {
    B(if_true);
  } else if (if_false == fall_through) {
    CompareAndBranch(lhs, rhs, cond, if_true);
  } else if (if_true == fall_through) {
    CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
  } else {
    CompareAndBranch(lhs, rhs, cond, if_true);
    B(if_false);
  }
}

void MacroAssembler::TestAndSplit(const Register& reg, uint64_t bit_pattern,
                                  Label* if_all_clear, Label* if_any_set,
                                  Label* fall_through) {
  if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
    // Fall through.
  } else if (if_all_clear == if_any_set) {
    B(if_all_clear);
  } else if (if_all_clear == fall_through) {
    TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
  } else if (if_any_set == fall_through) {
    TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
  } else {
    TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
    B(if_all_clear);
  }
}
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame() || !stub->SometimesSetsUpAFrame();
}
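
// Safepoint registers are saved as one contiguous block; stack slots are also
// claimed (and later dropped) for the registers that are not saved, so the
// block always spans kNumSafepointRegisters slots.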
void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  DCHECK_EQ(num_unsaved % 2, 0);
  DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
  PopXRegList(kSafepointSavedRegisters);
  Drop(num_unsaved);
}

void MacroAssembler::PushSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  DCHECK_EQ(num_unsaved % 2, 0);
  DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
  Claim(num_unsaved);
  PushXRegList(kSafepointSavedRegisters);
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // Make sure the safepoint registers list is what we expect.
  DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6FFCFFFF);

  // x16 and x17 (ip0 and ip1) are not saved, so registers above them map to a
  // slot two positions below their register code.
  if ((reg_code >= 0) && (reg_code <= 15)) {
    return reg_code;
  } else if ((reg_code >= 18) && (reg_code <= 30)) {
    // Skip ip0 and ip1.
    return reg_code - 2;
  } else {
    // This register has no safepoint register slot.
    UNREACHABLE();
  }
}
void MacroAssembler::CheckPageFlag(const Register& object,
                                   const Register& scratch, int mask,
                                   Condition cc, Label* condition_met) {
  And(scratch, object, ~kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  if (cc == eq) {
    TestAndBranchIfAnySet(scratch, mask, condition_met);
  } else {
    TestAndBranchIfAllClear(scratch, mask, condition_met);
  }
}
void TurboAssembler::CheckPageFlagSet(const Register& object,
                                      const Register& scratch, int mask,
                                      Label* if_any_set) {
  And(scratch, object, ~kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAnySet(scratch, mask, if_any_set);
}

void TurboAssembler::CheckPageFlagClear(const Register& object,
                                        const Register& scratch, int mask,
                                        Label* if_all_clear) {
  And(scratch, object, ~kPageAlignmentMask);
  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  TestAndBranchIfAllClear(scratch, mask, if_all_clear);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register scratch,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below catch
  // stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so it must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(scratch, object, offset - kHeapObjectTag);
  if (emit_debug_code()) {
    Label ok;
    Tst(scratch, kPointerSize - 1);
    B(eq, &ok);
    Abort(AbortReason::kUnalignedCellInWriteBarrier);
    Bind(&ok);
  }

  RecordWrite(object, scratch, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  Bind(&done);

  // Clobber the clobbered input registers when running with the debug-code
  // flag turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}
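
// SaveRegisters / RestoreRegisters convert a RegList bit mask into a
// CPURegList (always including lr) and push or pop it in one go.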
void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  CPURegList regs(lr);
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs.Combine(Register::XRegFromCode(i));
    }
  }

  PushCPURegList(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  CPURegList regs(lr);
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs.Combine(Register::XRegFromCode(i));
    }
  }

  PopCPURegList(regs);
}
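
// The first two CallRecordWriteStub overloads forward to the third one, which
// saves the call descriptor's allocatable registers, moves object and address
// into the descriptor's parameter registers (via a push/pop pair so the values
// survive even if the registers overlap), and then calls either the
// RecordWrite builtin or the given WASM stub address.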
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  Push(object, address);
  Pop(slot_parameter, object_parameter);

  Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
  if (code_target.is_null()) {
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}
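
// RecordWrite emits the write barrier: it skips the stub call when the stored
// value is a smi or when the page flags show that no remembered-set update is
// needed, and otherwise calls the RecordWrite stub, saving and restoring lr
// around the call if it has not already been saved.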
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
  DCHECK(!AreAliased(object, value));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Ldr(temp, MemOperand(address));
    Cmp(temp, value);
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  // First, check if a write barrier is even needed. The tests below catch
  // stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  CheckPageFlagClear(value,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersToHereAreInterestingMask, &done);
  CheckPageFlagClear(object,
                     value,  // Used as scratch.
                     MemoryChunk::kPointersFromHereAreInterestingMask, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    Push(padreg, lr);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    Pop(lr, padreg);
  }

  Bind(&done);

  // Count the number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
                   value);

  // Clobber the clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}

void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}
void MacroAssembler::AssertRegisterIsRoot(Register reg, RootIndex index,
                                          AbortReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}
void TurboAssembler::Check(Condition cond, AbortReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}
void TurboAssembler::Abort(AbortReason reason) {
  RecordComment("Abort message: ");
  RecordComment(GetAbortReason(reason));

  // Avoid emitting a call to a builtin if requested.
  if (trap_on_abort()) {
    Brk(0);
    return;
  }

  // We need some scratch registers for the MacroAssembler; make sure we have
  // some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(MacroAssembler::DefaultTmpList());

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    Mov(w0, static_cast<int>(reason));
    Call(ExternalReference::abort_with_reason());
    return;
  }

  // Assert failures emitted while generating the abort sequence below must
  // abort hard, otherwise Abort would recurse through Assert.
  HardAbortScope hard_aborts(this);

  Mov(x1, Smi::FromInt(static_cast<int>(reason)));

  if (!has_frame()) {
    // Claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }

  TmpList()->set_list(old_tmp_list);
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  Ldr(dst, NativeContextMemOperand());
  Ldr(dst, ContextMemOperand(dst, index));
}
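
// PrintfNoPreserve prints its arguments without preserving caller-saved
// registers or NZCV: it moves the arguments into the procedure-call-standard
// vararg registers, loads x0 with the address of the format string (emitted
// directly into the instruction stream), and then calls printf.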
void MacroAssembler::PrintfNoPreserve(const char* format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  DCHECK(!kCallerSaved.IncludesAliasOf(sp));

  // The provided arguments, and their proper procedure-call standard registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  CPURegList tmp_list = kCallerSaved;
  tmp_list.Remove(x0);  // Used to pass the format string.
  tmp_list.Remove(kPCSVarargs);
  tmp_list.Remove(arg0, arg1, arg2, arg3);

  CPURegList fp_tmp_list = kCallerSavedV;
  fp_tmp_list.Remove(kPCSVarargsFP);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  // Override the MacroAssembler's scratch register list. The lists will be
  // reset automatically at the end of the UseScratchRegisterScope.
  UseScratchRegisterScope temps(this);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Copies of the printf vararg registers that we can pop from.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. Printf is a debugging aid, so keep it simple: move
  // each input that isn't already in the right place to a scratch register,
  // then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsVRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      DCHECK(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = args[i].Reg();
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        VRegister old_arg = args[i].VReg();
        VRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    DCHECK(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(pcs[i].Reg(), args[i].Reg(), kDiscardForSameWReg);
    } else {
      DCHECK(pcs[i].IsVRegister());
      if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
        Fmov(pcs[i].VReg(), args[i].VReg());
      } else {
        Fcvt(pcs[i].VReg(), args[i].VReg());
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard. The
  // string is encoded directly in the instruction stream, so branch over it
  // and mark it as unreachable data.
  Label format_address;
  Adr(x0, &format_address);

  {
    BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  CallPrintf(arg_count, pcs);
}
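
// CallPrintf emits the actual call. Under the simulator the call is encoded as
// a pseudo-instruction (hlt) followed by inline data describing the argument
// types, so the simulator can marshal the values for the host's printf.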
void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
// A call to printf needs special handling for the simulator, since the system
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
  {
    InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
    hlt(kImmExceptionIsPrintf);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (args[i].IsRegister()) {
        arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        DCHECK(args[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Call(ExternalReference::printf_function());
#endif
}
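
// Printf, unlike PrintfNoPreserve, preserves all caller-saved registers and
// the NZCV flags, and it accepts the stack pointer as an argument by
// materialising the pre-push value of sp in a scratch register.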
void MacroAssembler::Printf(const char* format, CPURegister arg0,
                            CPURegister arg1, CPURegister arg2,
                            CPURegister arg3) {
  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedV);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedV;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  {
    UseScratchRegisterScope temps(this);
    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = sp.Aliases(arg0);
    bool arg1_sp = sp.Aliases(arg1);
    bool arg2_sp = sp.Aliases(arg2);
    bool arg3_sp = sp.Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, sp,
          kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
      if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
      if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
      if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
    }

    // Preserve NZCV.
    {
      UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Mrs(tmp, NZCV);
      Push(tmp, xzr);
    }

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    {
      UseScratchRegisterScope temps(this);
      Register tmp = temps.AcquireX();
      Pop(xzr, tmp);
      Msr(NZCV, tmp);
    }
  }

  PopCPURegList(kCallerSavedV);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}
UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}

Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}

VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return VRegister::Create(code, reg.SizeInBits());
}

CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  DCHECK(!AreAliased(result, xzr, sp));
  return result;
}
MemOperand ContextMemOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}

MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
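
// InlineSmiCheckInfo::Emit records, as inline data in the instruction stream,
// which register was smi-checked and how many instructions earlier the check
// was emitted, so that the InlineSmiCheckInfo constructor below can recover
// the checked register and the location of the check from a code address.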
void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    DCHECK(smi_check->is_bound());
    DCHECK(reg.Is64Bits());

    // Encode the register in the lowest bits and the offset to the smi check
    // in the remaining bits; both are packed into a uint32_t via BitField.
    uint32_t delta =
        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    DCHECK(!smi_check->is_bound());

    // An InlineData with no payload marks an unbound smi check.
    __ InlineData(0);
  }
}

InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_delta_(0), smi_check_(nullptr) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  DCHECK(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // BitField can only decode 32-bit values.
    DCHECK(is_uint32(payload));
    if (payload != 0) {
      uint32_t payload32 = static_cast<uint32_t>(payload);
      int reg_code = RegisterBits::decode(payload32);
      reg_ = Register::XRegFromCode(reg_code);
      smi_check_delta_ = DeltaBits::decode(payload32);
      DCHECK_NE(0, smi_check_delta_);
      smi_check_ = inline_data->preceding(smi_check_delta_);
    }
  }
}
void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
  adr(rd, -pc_offset());
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  Mov(kSpeculationPoisonRegister, -1);
}

#endif  // V8_TARGET_ARCH_ARM64