#if V8_TARGET_ARCH_ARM

#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/arm/macro-assembler-arm.h"
#endif

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    // Create a handle to the self-reference marker so that nested code
    // generation can disambiguate self-references in the code object.
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
  }

  return bytes;
}
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  stm(db_w, sp, list);
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    SaveFPRegs(sp, lr);
    bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
  }

  return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    RestoreFPRegs(sp, lr);
    bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  ldm(ia_w, sp, list);
  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}
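// Usage sketch (illustrative, not part of the original code): the three
// functions above form a matched set. A caller that must preserve all
// caller-saved state except r0 around a C call might emit:
//
//   int bytes = PushCallerSaved(kSaveFPRegs, r0);
//   CallCFunction(ExternalReference::abort_with_reason(), 1);
//   PopCallerSaved(kSaveFPRegs, r0);
//
// RequiredStackSizeForCallerSaved() computes the same byte count without
// emitting code, e.g. for stack-limit checks.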
void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));

  // The ldr call below could end up clobbering ip when the offset does not fit
  // into 12 bits (and thus needs to be loaded from the constant pool). In that
  // case, temporarily use another register as the target.
  const uint32_t offset =
      FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
  const bool could_clobber_ip = !is_uint12(offset);

  Register reg = destination;
  if (could_clobber_ip) {
    Push(r7);
    reg = r7;
  }

  LoadRoot(reg, RootIndex::kBuiltinsConstantsTable);
  ldr(destination, MemOperand(reg, offset));

  if (could_clobber_ip) {
    DCHECK(reg == r7);
    Pop(r7);
  }
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  ldr(destination, MemOperand(kRootRegister, offset));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  if (offset == 0) {
    Move(destination, kRootRegister);
  } else {
    add(destination, kRootRegister, Operand(offset));
  }
}
void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }

void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond);
}
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (FLAG_embedded_builtins) {
    int builtin_index = Builtins::kNoBuiltinId;
    bool target_is_isolate_independent_builtin =
        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index);
    if (target_is_isolate_independent_builtin &&
        options().use_pc_relative_calls_and_jumps) {
      int32_t code_target_index = AddCodeTarget(code);
      b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
      return;
    } else if (root_array_available_ && options().isolate_independent_code) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      IndirectLoadConstant(scratch, code);
      add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
      Jump(scratch, cond);
      return;
    } else if (target_is_isolate_independent_builtin &&
               options().inline_offheap_trampolines) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      // Use ip directly instead of using UseScratchRegisterScope, as we do
      // not preserve scratch registers across calls.
      mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
      Jump(ip, cond);
      return;
    }
  }
  // 'code' is always generated ARM code, never THUMB code.
  Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
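// Note on the dispatch above: with FLAG_embedded_builtins, a jump to an
// isolate-independent builtin can be emitted three ways, in decreasing order
// of preference: (1) a pc-relative branch to a builtin in the same blob,
// (2) an indirect load of the code object via the builtins constants table
// when generating isolate-independent code, or (3) an inline trampoline that
// targets the off-heap instruction start. Anything else falls through to an
// ordinary relocatable code-target jump. Call() below mirrors this logic.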
void TurboAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  blx(target, cond);
}

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
                          TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  // Check if we have to emit the constant pool before we block it.
  if (check_constant_pool) MaybeCheckConstPool();
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

  // Use ip directly instead of using UseScratchRegisterScope, as we do not
  // preserve scratch registers across calls.
  mov(ip, Operand(target, rmode));
  blx(ip, cond);

  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  if (FLAG_embedded_builtins) {
    int builtin_index = Builtins::kNoBuiltinId;
    bool target_is_isolate_independent_builtin =
        isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index);
    if (target_is_isolate_independent_builtin &&
        options().use_pc_relative_calls_and_jumps) {
      int32_t code_target_index = AddCodeTarget(code);
      bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
      return;
    } else if (root_array_available_ && options().isolate_independent_code) {
      // Use ip directly instead of using UseScratchRegisterScope, as we do
      // not preserve scratch registers across calls.
      IndirectLoadConstant(ip, code);
      add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
      Call(ip, cond);
      return;
    } else if (target_is_isolate_independent_builtin &&
               options().inline_offheap_trampolines) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      // Use ip directly instead of using UseScratchRegisterScope, as we do
      // not preserve scratch registers across calls.
      mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
      Call(ip, cond);
      return;
    }
  }
  // 'code' is always generated ARM code, never THUMB code.
  Call(code.address(), rmode, cond, mode);
}
void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }

void TurboAssembler::Drop(int count, Condition cond) {
  DCHECK_GE(count, 0);
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}

void TurboAssembler::Drop(Register count, Condition cond) {
  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}

void TurboAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}

void TurboAssembler::Call(Label* target) { bl(target); }
void TurboAssembler::Push(Handle<HeapObject> handle) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(handle));
  push(scratch);
}

void TurboAssembler::Push(Smi smi) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(smi));
  push(scratch);
}
void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }

void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(dst, value);
      return;
    }
  }
  mov(dst, Operand(value));
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, reference);
      return;
    }
  }
  mov(dst, Operand(reference));
}
void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  if (dst != src) {
    mov(dst, src, LeaveCC, cond);
  }
}

void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
                          Condition cond) {
  if (dst != src) {
    vmov(dst, src, cond);
  }
}

void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
                          Condition cond) {
  if (dst != src) {
    vmov(dst, src, cond);
  }
}

void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
  if (dst != src) {
    vmov(dst, src);
  }
}
void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, srcdst0);
  mov(srcdst0, srcdst1);
  mov(srcdst1, scratch);
}

void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  DCHECK(VfpRegisterIsAvailable(srcdst0));
  DCHECK(VfpRegisterIsAvailable(srcdst1));

  if (CpuFeatures::IsSupported(NEON)) {
    vswp(srcdst0, srcdst1);
  } else {
    UseScratchRegisterScope temps(this);
    DwVfpRegister scratch = temps.AcquireD();
    vmov(scratch, srcdst0);
    vmov(srcdst0, srcdst1);
    vmov(srcdst1, scratch);
  }
}

void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  vswp(srcdst0, srcdst1);
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(this, ARMv7);
    mls(dst, src1, src2, srcA, cond);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(srcA != scratch);
    mul(scratch, src1, src2, LeaveCC, cond);
    sub(dst, srcA, scratch, LeaveCC, cond);
  }
}
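// mls (multiply-and-subtract) computes dst = srcA - src1 * src2. Pre-ARMv7
// cores lack the instruction, so the fallback above materializes the product
// in a scratch register first; the DCHECK guards against srcA being allocated
// to that same scratch register.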
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.InstructionsRequired(this) == 1) &&
             !src2.MustOutputRelocInfo(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
    CpuFeatureScope scope(this, ARMv7);
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    CpuFeatureScope scope(this, ARMv7);
    ubfx(dst, src1, lsb, width, cond);
  }
}

void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    CpuFeatureScope scope(this, ARMv7);
    sbfx(dst, src1, lsb, width, cond);
  }
}
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    CpuFeatureScope scope(this, ARMv7);
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}
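// The masks used by the non-ARMv7 paths above select the bitfield
// [lsb, lsb + width): for example, lsb == 2 and width == 3 gives
// (1 << 5) - 1 - ((1 << 2) - 1) == 0b11100. Ubfx then shifts the field down,
// Sbfx sign-extends it with a left/right shift pair, and Bfc clears it with
// bic.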
void MacroAssembler::Load(Register dst, const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    ldrsb(dst, src);
  } else if (r.IsUInteger8()) {
    ldrb(dst, src);
  } else if (r.IsInteger16()) {
    ldrsh(dst, src);
  } else if (r.IsUInteger16()) {
    ldrh(dst, src);
  } else {
    ldr(dst, src);
  }
}

void MacroAssembler::Store(Register src, const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    strb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    strh(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    str(src, dst);
  }
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                              Condition cond) {
  ldr(destination,
      MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand(kPointerSize - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}
void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  stm(db_w, sp, regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  ldm(ia_w, sp, regs);
}
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  if (code_target.is_null()) {
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  if (emit_debug_code()) {
    {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      ldr(scratch, MemOperand(address));
      cmp(scratch, value);
    }
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  {
    UseScratchRegisterScope temps(this);
    IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
                     temps.Acquire(), value);
  }

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    if (marker_reg.code() > fp.code()) {
      stm(db_w, sp, fp.bit() | lr.bit());
      mov(fp, Operand(sp));
      Push(marker_reg);
    } else {
      stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
      add(fp, sp, Operand(kPointerSize));
    }
  } else {
    stm(db_w, sp, fp.bit() | lr.bit());
    mov(fp, sp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
  stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
                    fp.bit() | lr.bit());
  int offset = -StandardFrameConstants::kContextOffset;
  offset += function_reg.is_valid() ? kPointerSize : 0;
  add(fp, sp, Operand(offset));
}
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of contiguous register values starting from r0.
  DCHECK_EQ(kSafepointSavedRegisters, (1 << kNumSafepointSavedRegisters) - 1);
  // Safepoints expect a block of kNumSafepointRegisters values on the stack,
  // so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
  stm(db_w, sp, kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  ldm(ia_w, sp, kSafepointSavedRegisters);
  add(sp, sp, Operand(num_unsaved * kPointerSize));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
  return reg_code;
}
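// VFPCanonicalizeNaN (below) relies on VFP arithmetic quieting NaNs: any
// operation on a signalling NaN input yields the corresponding quiet NaN.
// Subtracting 0.0 leaves every other value unchanged, including -0.0
// (-0.0 - 0.0 == -0.0, whereas -0.0 + 0.0 == +0.0, which is why vsub is
// used rather than vadd).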
void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  vsub(dst, src, kDoubleRegZero, cond);
}
void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const SwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const float src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const SwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const float src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}
void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(NeonS32, dst, src, 1);
  }
}

void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(NeonS32, dst, 1, src);
  }
}

void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(NeonS32, dst, src, 0);
  }
}

void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(NeonS32, dst, 0, src);
  }
}
void TurboAssembler::VmovExtended(Register dst, int src_code) {
  DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
  if (src_code & 0x1) {
    VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
  } else {
    VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
  }
}

void TurboAssembler::VmovExtended(int dst_code, Register src) {
  DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
  if (dst_code & 0x1) {
    VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
  } else {
    VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
  }
}
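// In the VmovExtended family, an "extended" s-register code may exceed s31:
// codes 0..31 name real s-registers, while codes 32..63 name the low and high
// halves of d16..d31, which have no s-register aliases. A code c maps to
// d-register c / 2, lane c & 1, which is the arithmetic the helpers above and
// below perform.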
void TurboAssembler::VmovExtended(int dst_code, int src_code) {
  if (src_code == dst_code) return;

  if (src_code < SwVfpRegister::kNumRegisters &&
      dst_code < SwVfpRegister::kNumRegisters) {
    // src and dst are both s-registers.
    vmov(SwVfpRegister::from_code(dst_code),
         SwVfpRegister::from_code(src_code));
    return;
  }
  DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
  DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
  int dst_offset = dst_code & 1;
  int src_offset = src_code & 1;
  if (CpuFeatures::IsSupported(NEON)) {
    UseScratchRegisterScope temps(this);
    DwVfpRegister scratch = temps.AcquireD();
    // On Neon we can shift and insert from d-registers.
    if (src_offset == dst_offset) {
      // Offsets are the same, use vdup to copy the source to the opposite
      // lane.
      vdup(Neon32, scratch, src_d_reg, src_offset);
      // Here we are extending the lifetime of scratch.
      src_d_reg = scratch;
      src_offset = dst_offset ^ 1;
    }
    if (dst_offset) {
      if (dst_d_reg == src_d_reg) {
        vdup(Neon32, dst_d_reg, src_d_reg, 0);
      } else {
        vsli(Neon64, dst_d_reg, src_d_reg, 32);
      }
    } else {
      if (dst_d_reg == src_d_reg) {
        vdup(Neon32, dst_d_reg, src_d_reg, 1);
      } else {
        vsri(Neon64, dst_d_reg, src_d_reg, 32);
      }
    }
    return;
  }

  // Without Neon, use the scratch registers to move src and/or dst into
  // s-registers.
  UseScratchRegisterScope temps(this);
  LowDwVfpRegister d_scratch = temps.AcquireLowD();
  LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
  int s_scratch_code = d_scratch.low().code();
  int s_scratch_code2 = d_scratch2.low().code();
  if (src_code < SwVfpRegister::kNumRegisters) {
    // src is an s-register, dst is not.
    vmov(d_scratch, dst_d_reg);
    vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
         SwVfpRegister::from_code(src_code));
    vmov(dst_d_reg, d_scratch);
  } else if (dst_code < SwVfpRegister::kNumRegisters) {
    // dst is an s-register, src is not.
    vmov(d_scratch, src_d_reg);
    vmov(SwVfpRegister::from_code(dst_code),
         SwVfpRegister::from_code(s_scratch_code + src_offset));
  } else {
    // Neither src nor dst is an s-register.
    vmov(d_scratch, src_d_reg);
    vmov(d_scratch2, dst_d_reg);
    vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
         SwVfpRegister::from_code(s_scratch_code2 + src_offset));
    vmov(dst_d_reg, d_scratch2);
  }
}
void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
  if (dst_code < SwVfpRegister::kNumRegisters) {
    vldr(SwVfpRegister::from_code(dst_code), src);
  } else {
    UseScratchRegisterScope temps(this);
    LowDwVfpRegister scratch = temps.AcquireLowD();
    // Load the whole d-register, patch the requested lane, and write it back.
    int dst_s_code = scratch.low().code() + (dst_code & 1);
    vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
    vldr(SwVfpRegister::from_code(dst_s_code), src);
    vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
  }
}

void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
  if (src_code < SwVfpRegister::kNumRegisters) {
    vstr(SwVfpRegister::from_code(src_code), dst);
  } else {
    UseScratchRegisterScope temps(this);
    LowDwVfpRegister scratch = temps.AcquireLowD();
    int src_s_code = scratch.low().code() + (src_code & 1);
    vmov(scratch, DwVfpRegister::from_code(src_code / 2));
    vstr(SwVfpRegister::from_code(src_s_code), dst);
  }
}
void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
                                 NeonDataType dt, int lane) {
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_word = byte >> kDoubleSizeLog2;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  DwVfpRegister double_source =
      DwVfpRegister::from_code(src.code() * 2 + double_word);
  vmov(dt, dst, double_source, double_lane);
}

void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
                                 NeonDataType dt, int lane) {
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  vmov(dt, dst, src, double_lane);
}

void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
                                 int lane) {
  int s_code = src.code() * 4 + lane;
  VmovExtended(dst.code(), s_code);
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 Register src_lane, NeonDataType dt, int lane) {
  Move(dst, src);
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_word = byte >> kDoubleSizeLog2;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  DwVfpRegister double_dst =
      DwVfpRegister::from_code(dst.code() * 2 + double_word);
  vmov(dt, double_dst, double_lane, src_lane);
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 SwVfpRegister src_lane, int lane) {
  Move(dst, src);
  int s_code = dst.code() * 4 + lane;
  VmovExtended(s_code, src_lane.code());
}
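// Worked example for the lane arithmetic above: extracting lane 5 of a
// NeonS16 vector gives size == 1, byte == 10, double_word == 1 (the upper
// d-register of the q-register), double_byte == 2 and double_lane == 1,
// i.e. the second 16-bit lane of the upper d-register.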
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_high, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  lsl(dst_high, src_low, Operand(scratch));
  mov(dst_low, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsl(dst_high, src_high, Operand(shift));
  orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
  lsl(dst_low, src_low, Operand(shift));
  bind(&done);
}

void TurboAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 0) {
    Move(dst_high, src_high);
    Move(dst_low, src_low);
  } else if (shift == 32) {
    Move(dst_high, src_low);
    Move(dst_low, Operand(0));
  } else if (shift >= 32) {
    shift &= 0x1F;
    lsl(dst_high, src_low, Operand(shift));
    mov(dst_low, Operand(0));
  } else {
    lsl(dst_high, src_high, Operand(shift));
    orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
    lsl(dst_low, src_low, Operand(shift));
  }
}
void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  lsr(dst_low, src_high, Operand(scratch));
  mov(dst_high, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  lsr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  if (shift == 32) {
    mov(dst_low, src_high);
    mov(dst_high, Operand(0));
  } else if (shift > 32) {
    shift &= 0x1F;
    lsr(dst_low, src_high, Operand(shift));
    mov(dst_high, Operand(0));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    lsr(dst_high, src_high, Operand(shift));
  }
}
void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  asr(dst_low, src_high, Operand(scratch));
  asr(dst_high, src_high, Operand(31));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  asr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  if (shift == 32) {
    mov(dst_low, src_high);
    asr(dst_high, src_high, Operand(31));
  } else if (shift > 32) {
    shift &= 0x1F;
    asr(dst_low, src_high, Operand(shift));
    asr(dst_high, src_high, Operand(31));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    asr(dst_high, src_high, Operand(shift));
  }
}
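// All six pair-shift helpers decompose a 64-bit shift into 32-bit operations.
// For a left shift by 0 < shift < 32, for instance:
//   dst_high = (src_high << shift) | (src_low >> (32 - shift));
//   dst_low  = src_low << shift;
// and for shift >= 32 the low word becomes zero while the high word receives
// src_low shifted by (shift & 0x1F). The register variants compute the same
// split at run time, branching on whether the shift amount is below 32.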
void TurboAssembler::StubPrologue(StackFrame::Type type) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(scratch);
}

void TurboAssembler::Prologue() { PushStandardFrame(r1); }

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  // r0-r3: preserved
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(scratch);
}

int TurboAssembler::LeaveFrame(StackFrame::Type type) {
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  int frame_ends = pc_offset();
  ldm(ia_w, sp, fp.bit() | lr.bit());
  return frame_ends;
}
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(scratch);
  // Reserve room for saved entry sp and code object.
  sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
  if (emit_debug_code()) {
    mov(scratch, Operand::Zero());
    str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  Move(scratch, CodeObject());
  str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                          isolate()));
  str(fp, MemOperand(scratch));
  Move(scratch,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  str(cp, MemOperand(scratch));

  // Optionally save all double registers.
  if (save_doubles) {
    SaveFPRegs(sp, scratch);
  }

  // Reserve place for the return address and stack space and align the frame
  // preparing for calling the runtime function.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
  if (frame_alignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }

  // Set the exit frame sp value to point just before the return address
  // location.
  add(scratch, sp, Operand(kPointerSize));
  str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  return base::OS::ActivationFrameAlignment();
#else   // V8_HOST_ARCH_ARM
  // If we are using the simulator then we should always align to the expected
  // alignment. The simulator is used to generate snapshots, so the target
  // alignment is controlled by a flag.
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, scratch);
  }

  // Clear top frame.
  mov(r3, Operand::Zero());
  Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                          isolate()));
  str(r3, MemOperand(scratch));

  // Restore current context from top and clear it in debug mode.
  Move(scratch,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  ldr(cp, MemOperand(scratch));
#ifdef DEBUG
  mov(r3, Operand(Context::kInvalidContext));
  Move(scratch,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  str(r3, MemOperand(scratch));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  ldm(ia_w, sp, fp.bit() | lr.bit());
  if (argument_count.is_valid()) {
    if (argument_count_is_length) {
      add(sp, sp, argument_count);
    } else {
      add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
    }
  }
}
void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}

// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
}
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of destination area where we will put the arguments
  // after we drop the current frame. We add kPointerSize to count the
  // receiver argument which is not included in the formal parameters count.
  Register dst_reg = scratch0;
  add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
  add(dst_reg, dst_reg,
      Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
    add(src_reg, src_reg, Operand(kPointerSize));
  } else {
    add(src_reg, sp,
        Operand((callee_args_count.immediate() + 1) * kPointerSize));
  }

  if (FLAG_debug_code) {
    cmp(src_reg, dst_reg);
    Check(lo, AbortReason::kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop, entry;
  b(&entry);
  bind(&loop);
  ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
  str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
  bind(&entry);
  cmp(sp, src_reg);
  b(ne, &loop);

  // Leave current frame.
  mov(sp, dst_reg);
}
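// The copy loop above moves the callee's arguments into the caller's argument
// area working backwards, from the last word down to sp, because the source
// and destination areas can overlap when the argument counts differ; copying
// from the end avoids clobbering words that have not yet been moved.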
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to the contract with
  // ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  DCHECK(actual.is_immediate() || actual.reg() == r0);
  DCHECK(expected.is_immediate() || expected.reg() == r2);

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r0, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaption code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r0, Operand(actual.immediate()));
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
    if (flag == CALL_FUNCTION) {
      Call(adaptor);
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;

  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Move(r4, debug_hook_active);
  ldrsb(r4, MemOperand(r4));
  cmp(r4, Operand(0));
  b(eq, &skip_hook);

  {
    // Load receiver to pass it later to the DebugOnFunctionCall hook.
    if (actual.is_reg()) {
      mov(r4, actual.reg());
    } else {
      mov(r4, Operand(actual.immediate()));
    }
    ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2));
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun);
    Push(fun);
    Push(r4);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        const ParameterCount& expected,
                                        const ParameterCount& actual,
                                        InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK(function == r1);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);

  // On function call, call into the debugger if necessary.
  CheckDebugHook(function, new_target, expected, actual);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    LoadRoot(r3, RootIndex::kUndefinedValue);
  }

  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
  if (!definitely_mismatches) {
    // We call indirectly through the code field in the function to
    // allow recompilation to take effect without changing any of the
    // call sites.
    Register code = kJavaScriptCallCodeStartRegister;
    ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
    add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
    if (flag == CALL_FUNCTION) {
      Call(code);
    } else {
      DCHECK(flag == JUMP_FUNCTION);
      Jump(code);
    }

    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(fun == r1);

  Register expected_reg = r2;
  Register temp_reg = r4;

  ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  ldrh(expected_reg,
       FieldMemOperand(temp_reg,
                       SharedFunctionInfo::kFormalParameterCountOffset));

  ParameterCount expected(expected_reg);
  InvokeFunctionCode(fun, new_target, expected, actual, flag);
}

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  // Contract with called JS functions requires that function is passed in r1.
  DCHECK(function == r1);

  // Get the function and set up the context.
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  InvokeFunctionCode(r1, no_reg, expected, actual, flag);
}
void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  Move(r1, restart_fp);
  ldr(r1, MemOperand(r1));
  tst(r1, r1);
  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
       ne);
}
void MacroAssembler::PushStackHandler() {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

  Push(Smi::zero());  // Padding.
  // Link the current handler as the next handler.
  mov(r6, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
                                            isolate())));
  ldr(r5, MemOperand(r6));
  push(r5);
  // Set this new handler as the current one.
  str(sp, MemOperand(r6));
}

void MacroAssembler::PopStackHandler() {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  pop(r1);
  mov(scratch, Operand(ExternalReference::Create(
                   IsolateAddressId::kHandlerAddress, isolate())));
  str(r1, MemOperand(scratch));
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}
void MacroAssembler::CompareObjectType(Register object, Register map,
                                       Register type_reg, InstanceType type) {
  UseScratchRegisterScope temps(this);
  const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;

  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, temp, type);
}

void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
                                         InstanceType type) {
  ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}

void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  DCHECK(obj != scratch);
  LoadRoot(scratch, index);
  cmp(obj, scratch);
}
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, CAN_INLINE_TARGET_ADDRESS,
       false);
}

void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}

bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DwVfpRegister double_input,
                                           LowDwVfpRegister double_scratch) {
  DCHECK(double_input != double_scratch);
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);
}
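// The conversion above is exactness-checked by round-tripping: the double is
// converted to int32, converted back to double, and compared against the
// original. The caller reads the flags set by VFPCompareAndSetFlags
// (eq means the conversion was exact).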
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DwVfpRegister double_input,
                                                Label* done) {
  UseScratchRegisterScope temps(this);
  SwVfpRegister single_scratch = SwVfpRegister::no_reg();
  if (temps.CanAcquireVfp<SwVfpRegister>()) {
    single_scratch = temps.AcquireS();
  } else {
    // Re-use the input as a scratch register. However, we can only do this if
    // the input register is d0-d15 as there are no s32+ registers.
    DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
    LowDwVfpRegister double_scratch =
        LowDwVfpRegister::from_code(double_input.code());
    single_scratch = double_scratch.low();
  }
  vcvt_s32_f64(single_scratch, double_input);
  vmov(result, single_scratch);

  Register scratch = temps.Acquire();
  // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
  sub(scratch, result, Operand(1));
  cmp(scratch, Operand(0x7FFFFFFE));
  b(lt, done);
}
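// The sub/cmp pair above filters exactly the two saturated vcvt outputs:
// after subtracting 1, 0x7FFFFFFF becomes 0x7FFFFFFE (equal, not less-than)
// and 0x80000000 becomes 0x7FFFFFFF (greater). Every other result compares
// signed-less-than 0x7FFFFFFE, so the branch to done is taken precisely when
// the conversion did not saturate.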
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DwVfpRegister double_input,
                                       StubCallMode stub_mode) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(lr);
  sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
  vstr(double_input, MemOperand(sp, 0));

  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
    Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  }
  ldr(result, MemOperand(sp, 0));

  add(sp, sp, Operand(kDoubleSize));
  pop(lr);

  bind(&done);
}

void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
                                           Register centry) {
  const Runtime::Function* f = Runtime::FunctionForId(fid);
  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(f->nargs));
  Move(r1, ExternalReference::Create(f));
  DCHECK(!AreAliased(centry, r0, r1));
  add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
  Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack. r0 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r0, Operand(num_arguments));
  Move(r1, ExternalReference::Create(f));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // TODO(1236192): As above, the number of arguments is constant for most
    // runtime routines and should eventually not be passed at all.
    mov(r0, Operand(function->nargs));
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             bool builtin_exit_frame) {
#if defined(__thumb__)
  // Thumb mode builtin.
  DCHECK_EQ(builtin.address() & 1, 1);
#endif
  Move(r1, builtin);
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::JumpToInstructionStream(Address entry) {
  mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
  Jump(kOffHeapTrampolineRegister);
}

void MacroAssembler::LoadWeakValue(Register out, Register in,
                                   Label* target_if_cleared) {
  cmp(in, Operand(kClearedWeakHeapObjectLower32));
  b(eq, target_if_cleared);

  and_(out, in, Operand(~kWeakHeapObjectMask));
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Move(scratch2, ExternalReference::Create(counter));
    ldr(scratch1, MemOperand(scratch2));
    add(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}

void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    Move(scratch2, ExternalReference::Create(counter));
    ldr(scratch1, MemOperand(scratch2));
    sub(scratch1, scratch1, Operand(value));
    str(scratch1, MemOperand(scratch2));
  }
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
  if (emit_debug_code()) Check(cond, reason);
}

void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}

void TurboAssembler::Check(Condition cond, AbortReason reason) {
  Label L;
  b(cond, &L);
  Abort(reason);
  // Will not return here.
  bind(&L);
}

void TurboAssembler::Abort(AbortReason reason) {
  Label abort_start;
  bind(&abort_start);
  const char* msg = GetAbortReason(reason);
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    stop(msg);
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
    PrepareCallCFunction(1, 0, r1);
    Move(r1, ExternalReference::abort_with_reason());
    // Use Call directly to avoid any unneeded overhead. The function won't
    // return anyway.
    Call(r1);
    return;
  }

  Move(r1, Smi::FromInt(static_cast<int>(reason)));

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // Will not return here.
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
  LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  ldr(dst, NativeContextMemOperand());
  ldr(dst, ContextMemOperand(dst, index));
}

void TurboAssembler::InitializeRootRegister() {
  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
  mov(kRootRegister, Operand(isolate_root));
}
void MacroAssembler::SmiTag(Register reg, SBit s) {
  add(reg, reg, Operand(reg), s);
}

void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
  add(dst, src, Operand(src), s);
}

void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
                                       Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}

void MacroAssembler::SmiTst(Register value) {
  tst(value, Operand(kSmiTagMask));
}
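// Smis on 32-bit ARM are tagged by a left shift of one bit (tag 0 in the low
// bit), so SmiTag can be expressed as add(reg, reg, reg): adding a value to
// itself doubles it, and the SBit variant also sets the overflow flag when
// the value does not fit in 31 bits. SmiUntag with SetCC conversely shifts
// right and moves the tag bit into the carry flag, which UntagAndJumpIfSmi
// branches on.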
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
  tst(value, Operand(kSmiTagMask));
  b(eq, smi_label);
}

void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  cmp(x, Operand(y));
  b(eq, dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  cmp(x, Operand(y));
  b(lt, dest);
}

void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
  tst(value, Operand(kSmiTagMask));
  b(ne, not_smi_label);
}

void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmi);
  }
}

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(eq, AbortReason::kOperandIsNotASmi);
  }
}
void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
    tst(object, Operand(Map::IsConstructorBit::kMask));
    pop(object);
    Check(ne, AbortReason::kOperandIsNotAConstructor);
  }
}

void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
    push(object);
    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
    pop(object);
    Check(eq, AbortReason::kOperandIsNotAFunction);
  }
}
void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
    push(object);
    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
    pop(object);
    Check(eq, AbortReason::kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!emit_debug_code()) return;
  tst(object, Operand(kSmiTagMask));
  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

  // Load map.
  Register map = object;
  push(object);
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));

  // Check if JSGeneratorObject.
  Label do_check;
  Register instance_type = object;
  CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
  b(eq, &do_check);

  // Check if JSAsyncFunctionObject (see MacroAssembler::CompareInstanceType).
  cmp(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
  b(eq, &do_check);

  // Check if JSAsyncGeneratorObject.
  cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));

  bind(&do_check);
  // Restore generator object to register and perform assertion.
  pop(object);
  Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, RootIndex::kUndefinedValue);
    b(eq, &done_checking);
    ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
    Assert(eq, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}
void TurboAssembler::CheckFor32DRegs(Register scratch) {
  Move(scratch, ExternalReference::cpu_features());
  ldr(scratch, MemOperand(scratch));
  tst(scratch, Operand(1u << VFP32DREGS));
}

void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
  CheckFor32DRegs(scratch);
  vstm(db_w, location, d16, d31, ne);
  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
  vstm(db_w, location, d0, d15);
}

void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
  CheckFor32DRegs(scratch);
  vldm(ia_w, location, d0, d15);
  vldm(ia_w, location, d16, d31, ne);
  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}
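// CheckFor32DRegs leaves the Z flag set when only 16 d-registers are present
// (the VFP32DREGS bit is clear), so SaveFPRegs and RestoreFPRegs can
// predicate the d16-d31 transfers on ne and the compensating stack
// adjustment on eq, avoiding branches entirely.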
template <typename T>
void TurboAssembler::FloatMaxHelper(T result, T left, T right,
                                    Label* out_of_line) {
  // This trivial case is caught sooner, so there is no need to check for it.
  DCHECK(left != right);

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(this, ARMv8);
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    vmaxnm(result, left, right);
  } else {
    Label done;
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    // Avoid a conditional instruction if the result register is unique.
    bool aliased_result_reg = result == left || result == right;
    Move(result, right, aliased_result_reg ? mi : al);
    Move(result, left, gt);
    b(ne, &done);
    // Left and right are equal, but check for +/-0.
    VFPCompareAndSetFlags(left, 0.0);
    b(eq, out_of_line);
    // The arguments are equal and not zero, so it doesn't matter which input
    // we pick. We have already moved one input into the result (if it didn't
    // already alias) so there's nothing more to do.
    bind(&done);
  }
}

template <typename T>
void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
  DCHECK(left != right);

  // ARMv8: At least one of left and right is a NaN.
  // Anything else: At least one of left and right is a NaN, or both left and
  // right are zeroes with unknown sign.

  // If left and right are +/-0, select the one with the most positive sign.
  // If left or right are NaN, vadd propagates the appropriate one.
  vadd(result, left, right);
}
template <typename T>
void TurboAssembler::FloatMinHelper(T result, T left, T right,
                                    Label* out_of_line) {
  // This trivial case is caught sooner, so there is no need to check for it.
  DCHECK(left != right);

  if (CpuFeatures::IsSupported(ARMv8)) {
    CpuFeatureScope scope(this, ARMv8);
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    vminnm(result, left, right);
  } else {
    Label done;
    VFPCompareAndSetFlags(left, right);
    b(vs, out_of_line);
    // Avoid a conditional instruction if the result register is unique.
    bool aliased_result_reg = result == left || result == right;
    Move(result, left, aliased_result_reg ? mi : al);
    Move(result, right, gt);
    b(ne, &done);
    // Left and right are equal, but check for +/-0.
    VFPCompareAndSetFlags(left, 0.0);
    // If the arguments are equal and not zero, it doesn't matter which input
    // we pick. We have already moved one input into the result (if it didn't
    // already alias) so there's nothing more to do.
    b(ne, &done);
    // At this point, both left and right are either 0 or -0. The minimum is
    // computed as -((-L) + (-R)), which for two registers is most efficiently
    // expressed as -((-L) - R).
    if (left == result) {
      DCHECK(right != result);
      vneg(result, left);
      vsub(result, result, right);
      vneg(result, result);
    } else {
      DCHECK(left != result);
      vneg(result, right);
      vsub(result, result, left);
      vneg(result, result);
    }
    bind(&done);
  }
}

template <typename T>
void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
  DCHECK(left != right);

  // At least one of left and right is a NaN. Use vadd to propagate the NaN
  // appropriately. +/-0 is handled inline.
  vadd(result, left, right);
}
void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
                              SwVfpRegister right, Label* out_of_line) {
  FloatMaxHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
                              SwVfpRegister right, Label* out_of_line) {
  FloatMinHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
                              DwVfpRegister right, Label* out_of_line) {
  FloatMaxHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
                              DwVfpRegister right, Label* out_of_line) {
  FloatMinHelper(result, left, right, out_of_line);
}

void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                                       SwVfpRegister right) {
  FloatMaxOutOfLineHelper(result, left, right);
}

void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                                       SwVfpRegister right) {
  FloatMinOutOfLineHelper(result, left, right);
}

void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                                       DwVfpRegister right) {
  FloatMaxOutOfLineHelper(result, left, right);
}

void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                                       DwVfpRegister right) {
  FloatMinOutOfLineHelper(result, left, right);
}
static const int kRegisterPassedArguments = 4;

int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use all double
    // registers to pass doubles.
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    }
  } else {
    // In the soft floating point calling convention, every double argument is
    // passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}
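// Worked example for the soft-float path: a call with two integer arguments
// and two doubles counts as 2 + 2 * 2 = 6 register arguments. With only four
// argument registers (r0-r3), the last two words spill, so the function
// returns 2.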
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    UseScratchRegisterScope temps(this);
    if (!scratch.is_valid()) scratch = temps.Acquire();
    // Make stack end at alignment and make room for stack arguments and the
    // original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else if (stack_passed_arguments > 0) {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
  DCHECK(src == d0);
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src);
  }
}

// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
  MovToFloatParameter(src);
}

void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
                                          DwVfpRegister src2) {
  DCHECK(src1 == d0);
  DCHECK(src2 == d1);
  if (!use_eabi_hardfloat()) {
    vmov(r0, r1, src1);
    vmov(r2, r3, src2);
  }
}
void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Move(scratch, function);
  CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}
void TurboAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or allow
  // preemption, so the return address in the link register stays correct.
  Call(function);
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met) {
  DCHECK(cc == eq || cc == ne);
  Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}
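// CheckPageFlag exploits the fact that heap pages are power-of-two aligned:
// clearing the low kPageSizeBits bits of any address inside a page (the Bfc
// above) yields the MemoryChunk header address, from which the page flags
// can be loaded directly.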
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  // We can use the register pc - 8 for the address of the current instruction.
  sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  mov(kSpeculationPoisonRegister, Operand(-1));
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM