#if V8_TARGET_ARCH_MIPS

#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/mips/macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    // Unlike TurboAssembler, which can be used off the main thread and may
    // not allocate, macro assembler creates its own copy of the
    // self-reference marker in order to disambiguate between self-references
    // during nested code generation.
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}

static inline bool IsZero(const Operand& rt) {
  if (rt.is_reg()) {
    return rt.rm() == zero_reg;
  } else {
    return rt.immediate() == 0;
  }
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
  }

  return bytes;
}
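// PushCallerSaved/PopCallerSaved are used in pairs around code that clobbers
// the JS caller-saved registers; both return the number of bytes they move sp
// by, which matches RequiredStackSizeForCallerSaved for the same arguments.
// Illustrative use only (the register choice here is an example):
//   int bytes = PushCallerSaved(kSaveFPRegs, v0);  // Keep v0 intact.
//   ... code that may clobber caller-saved registers ...
//   bytes += PopCallerSaved(kSaveFPRegs, v0);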
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPush(list);
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    MultiPushFPU(kCallerSavedFPU);
    bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
  }

  return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    MultiPopFPU(kCallerSavedFPU);
    bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPop(list);
  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  lw(destination,
     MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}

void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                              Condition cond, Register src1,
                              const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  lw(destination,
     MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Push(ra, fp, marker_reg);
    Addu(fp, sp, Operand(kPointerSize));
  } else {
    Push(ra, fp);
    mov(fp, sp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  int offset = -StandardFrameConstants::kContextOffset;
  if (function_reg.is_valid()) {
    Push(ra, fp, cp, function_reg);
    offset += kPointerSize;
  } else {
    Push(ra, fp, cp);
  }
  Addu(fp, sp, Operand(offset));
}
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the stack,
  // so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  if (num_unsaved > 0) {
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding, which means
  // that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      RAStatus ra_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Addu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Label ok;
    And(t8, dst, Operand(kPointerSize - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
  }
}
void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPush(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPop(regs);
}
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);
  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  // The parameter registers may alias the inputs, so move the inputs through
  // the stack.
  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  if (code_target.is_null()) {
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, RAStatus ra_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    lw(scratch, MemOperand(address));
    Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
           Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below catch
  // stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  {
    // Count number of write barriers in generated code.
    isolate()->counters()->write_barriers_static()->Increment();
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
                     scratch, value);
  }

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
  }
}
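// ---------------------------------------------------------------------------
// Instruction macros.
//
// The arithmetic and logical macro instructions below all follow one pattern
// for their Operand argument: a register operand is used directly; a small
// immediate that fits the instruction's immediate field is encoded in the
// immediate form (addiu, andi, ...); any other immediate is first
// materialized into a scratch register with li(), which also handles
// relocation, and the register form is used. For example (illustrative),
// Addu(a0, a1, Operand(0x12345678)) expands to lui/ori into a scratch
// register followed by addu.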
void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      addiu(rd, rs, rt.immediate());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      addu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
      // No subiu instruction; use addiu(x, y, -imm) instead.
      addiu(rd, rs, -rt.immediate());
    } else if (!(-rt.immediate() & kHiMask) && !MustUseReg(rt.rmode())) {
      // Use load -imm and addu when loading -imm generates one instruction.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, -rt.immediate());
      addu(rd, rs, scratch);
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      subu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (IsMipsArchVariant(kLoongson)) {
      mult(rs, rt.rm());
      mflo(rd);
    } else {
      mul(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (IsMipsArchVariant(kLoongson)) {
      mult(rs, scratch);
      mflo(rd);
    } else {
      mul(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs,
                         const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, rt.rm());
      mflo(rd_lo);
      mfhi(rd_hi);
    } else {
      if (rd_lo == rs) {
        DCHECK(rd_hi != rs);
        DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm());
        muh(rd_hi, rs, rt.rm());
        mul(rd_lo, rs, rt.rm());
      } else {
        DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm());
        mul(rd_lo, rs, rt.rm());
        muh(rd_hi, rs, rt.rm());
      }
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, scratch);
      mflo(rd_lo);
      mfhi(rd_hi);
    } else {
      if (rd_lo == rs) {
        DCHECK(rd_hi != rs);
        DCHECK(rd_hi != scratch && rd_lo != scratch);
        muh(rd_hi, rs, scratch);
        mul(rd_lo, rs, scratch);
      } else {
        DCHECK(rd_hi != scratch && rd_lo != scratch);
        mul(rd_lo, rs, scratch);
        muh(rd_hi, rs, scratch);
      }
    }
  }
}
void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs,
                          const Operand& rt) {
  Register reg = no_reg;
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  if (rt.is_reg()) {
    reg = rt.rm();
  } else {
    DCHECK(rs != scratch);
    reg = scratch;
    li(reg, rt);
  }

  if (!IsMipsArchVariant(kMips32r6)) {
    multu(rs, reg);
    mflo(rd_lo);
    mfhi(rd_hi);
  } else {
    if (rd_lo == rs) {
      DCHECK(rd_hi != rs);
      DCHECK(rd_hi != reg && rd_lo != reg);
      muhu(rd_hi, rs, reg);
      mulu(rd_lo, rs, reg);
    } else {
      DCHECK(rd_hi != reg && rd_lo != reg);
      mulu(rd_lo, rs, reg);
      muhu(rd_hi, rs, reg);
    }
  }
}
void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      mult(rs, scratch);
      mfhi(rd);
    } else {
      muh(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    mult(rs, scratch);
  }
}

void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      multu(rs, scratch);
      mfhi(rd);
    } else {
      muhu(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    multu(rs, scratch);
  }
}
void TurboAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    div(rs, scratch);
  }
}

void TurboAssembler::Div(Register rem, Register res, Register rs,
                         const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mflo(res);
      mfhi(rem);
    } else {
      div(res, rs, rt.rm());
      mod(rem, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, scratch);
      mflo(res);
      mfhi(rem);
    } else {
      div(res, rs, scratch);
      mod(rem, rs, scratch);
    }
  }
}

void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, scratch);
      mflo(res);
    } else {
      div(res, rs, scratch);
    }
  }
}
void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      div(rs, scratch);
      mfhi(rd);
    } else {
      mod(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, scratch);
      mfhi(rd);
    } else {
      modu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    divu(rs, scratch);
  }
}

void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (!IsMipsArchVariant(kMips32r6)) {
      divu(rs, scratch);
      mflo(res);
    } else {
      divu(res, rs, scratch);
    }
  }
}
void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      andi(rd, rs, rt.immediate());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      and_(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      ori(rd, rs, rt.immediate());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      or_(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      xori(rd, rs, rt.immediate());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      xor_(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    nor(rd, rs, scratch);
  }
}

void TurboAssembler::Neg(Register rs, const Operand& rt) {
  subu(rs, zero_reg, rt.rm());
}
void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      slti(rd, rs, rt.immediate());
    } else {
      // li handles the relocation.
      BlockTrampolinePoolScope block_trampoline_pool(this);
      UseScratchRegisterScope temps(this);
      Register scratch = rd == at ? t8 : temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      slt(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    const uint32_t int16_min = std::numeric_limits<int16_t>::min();
    if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
      // Imm range is: [0, 32767].
      sltiu(rd, rs, rt.immediate());
    } else if (is_uint15(rt.immediate() - int16_min) &&
               !MustUseReg(rt.rmode())) {
      // Imm range is: [max_unsigned - 32767, max_unsigned].
      sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      BlockTrampolinePoolScope block_trampoline_pool(this);
      UseScratchRegisterScope temps(this);
      Register scratch = rd == at ? t8 : temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      sltu(rd, rs, scratch);
    }
  }
}
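// The remaining comparison macros are derived from slt/sltu: swapping the
// operands computes "greater than" ("rs > rt" is "rt < rs"), and
// xori(rd, rd, 1) negates a 0/1 flag, so for example Sge(rd, rs, rt) is
// computed as !(rs < rt) and Sle(rd, rs, rt) as !(rt < rs).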
void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    DCHECK(rs != scratch);
    li(scratch, rt);
    slt(rd, scratch, rs);
  }
  xori(rd, rd, 1);
}

void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    DCHECK(rs != scratch);
    li(scratch, rt);
    sltu(rd, scratch, rs);
  }
  xori(rd, rd, 1);
}

void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
  Slt(rd, rs, rt);
  xori(rd, rd, 1);
}

void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
  Sltu(rd, rs, rt);
  xori(rd, rd, 1);
}

void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    DCHECK(rs != scratch);
    li(scratch, rt);
    slt(rd, scratch, rs);
  }
}

void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    DCHECK(rs != scratch);
    li(scratch, rt);
    sltu(rd, scratch, rs);
  }
}
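// Rotate right: MIPS32r2/r6 provide rotr/rotrv directly; on older variants
// the rotation is synthesized from two shifts and an OR, i.e.
// rd = (rs >> n) | (rs << (32 - n)).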
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    if (rt.is_reg()) {
      rotrv(rd, rs, rt.rm());
    } else {
      rotr(rd, rs, rt.immediate() & 0x1F);
    }
  } else {
    if (rt.is_reg()) {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      UseScratchRegisterScope temps(this);
      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
      subu(scratch, zero_reg, rt.rm());
      sllv(scratch, rs, scratch);
      srlv(rd, rs, rt.rm());
      or_(rd, rd, scratch);
    } else {
      if (rt.immediate() == 0) {
        srl(rd, rs, 0);
      } else {
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        srl(scratch, rs, rt.immediate() & 0x1F);
        sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F);
        or_(rd, rd, scratch);
      }
    }
  }
}
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  if (IsMipsArchVariant(kLoongson)) {
    lw(zero_reg, rs);
  } else {
    pref(hint, rs);
  }
}

void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                         Register scratch) {
  DCHECK(sa >= 1 && sa <= 31);
  if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
    lsa(rd, rt, rs, sa - 1);
  } else {
    Register tmp = rd == rt ? scratch : rd;
    DCHECK(tmp != rt);
    sll(tmp, rs, sa);
    Addu(rd, rt, tmp);
  }
}
void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bnvc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bovc(rs, rt, L);
  }
}

void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bovc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bnvc(rs, rt, L);
  }
}
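// Byte swapping: on MIPS32r2/r6 a 32-bit swap is wsbh (swap bytes within
// halfwords) followed by a 16-bit rotate, and a 16-bit swap is wsbh plus a
// sign- or zero-extension of the low halfword. On r1/Loongson the same result
// is assembled byte by byte from shift/and/or sequences.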
void TurboAssembler::ByteSwapSigned(Register dest, Register src,
                                    int operand_size) {
  DCHECK(operand_size == 2 || operand_size == 4);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    if (operand_size == 2) {
      wsbh(dest, src);
      seh(dest, dest);
    } else {
      wsbh(dest, src);
      rotr(dest, dest, 16);
    }
  } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
    if (operand_size == 2) {
      DCHECK(src != at && dest != at);
      srl(at, src, 8);
      andi(at, at, 0xFF);
      sll(dest, src, 8);
      or_(dest, dest, at);

      // Sign-extension.
      sll(dest, dest, 16);
      sra(dest, dest, 16);
    } else {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Register tmp = at;
      Register tmp2 = t8;
      DCHECK(dest != tmp && dest != tmp2);
      DCHECK(src != tmp && src != tmp2);

      andi(tmp2, src, 0xFF);
      sll(tmp, tmp2, 24);

      andi(tmp2, src, 0xFF00);
      sll(tmp2, tmp2, 8);
      or_(tmp, tmp, tmp2);

      srl(tmp2, src, 8);
      andi(tmp2, tmp2, 0xFF00);
      or_(tmp, tmp, tmp2);

      srl(tmp2, src, 24);
      or_(dest, tmp, tmp2);
    }
  }
}

void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
                                      int operand_size) {
  DCHECK_EQ(operand_size, 2);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    wsbh(dest, src);
    andi(dest, dest, 0xFFFF);
  } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
    DCHECK(src != at && dest != at);
    srl(at, src, 8);
    andi(at, at, 0xFF);
    sll(dest, src, 8);
    or_(dest, dest, at);

    // Zero-extension.
    andi(dest, dest, 0xFFFF);
  }
}
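// Unaligned memory accesses. MIPS32r6 supports unaligned lw/sw/lh/sh in
// hardware, so the U* helpers below compile to the plain instruction there.
// Older variants use the lwl/lwr (swl/swr) pair for 32-bit accesses and
// explicit byte loads/stores for 16-bit accesses, with kMipsLwlOffset and
// kMipsLwrOffset capturing the endianness-dependent byte offsets.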
void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
  DCHECK(rd != at);
  DCHECK(rs.rm() != at);
  if (IsMipsArchVariant(kMips32r6)) {
    lw(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
    MemOperand source = rs;
    // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
    if (rd != source.rm()) {
      lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
      lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
    } else {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
      lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
      mov(rd, scratch);
    }
  }
}

void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
  DCHECK(rd != at);
  DCHECK(rs.rm() != at);
  DCHECK(rd != rs.rm());
  if (IsMipsArchVariant(kMips32r6)) {
    sw(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
    MemOperand source = rs;
    // Adjust offset for two accesses and check if offset + 3 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
    swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
    swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
  }
}
void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
  DCHECK(rd != at);
  DCHECK(rs.rm() != at);
  if (IsMipsArchVariant(kMips32r6)) {
    lh(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    MemOperand source = rs;
    // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lb(rd, MemOperand(source.rm(), source.offset() + 1));
      lbu(scratch, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
      lb(rd, source);
      lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
#endif
    } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(scratch, source);
      lb(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
      lb(rd, source);
#endif
    }
    sll(rd, rd, 8);
    or_(rd, rd, scratch);
  }
}

void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
  DCHECK(rd != at);
  DCHECK(rs.rm() != at);
  if (IsMipsArchVariant(kMips32r6)) {
    lhu(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    MemOperand source = rs;
    // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(rd, MemOperand(source.rm(), source.offset() + 1));
      lbu(scratch, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(rd, source);
      lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
#endif
    } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(scratch, source);
      lbu(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
      lbu(rd, source);
#endif
    }
    sll(rd, rd, 8);
    or_(rd, rd, scratch);
  }
}
void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
  DCHECK(rd != at);
  DCHECK(rs.rm() != at);
  DCHECK(rs.rm() != scratch);
  DCHECK(scratch != at);
  if (IsMipsArchVariant(kMips32r6)) {
    sh(rd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    MemOperand source = rs;
    // Adjust offset for two accesses and check if offset + 1 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);

    if (scratch != rd) {
      mov(scratch, rd);
    }

#if defined(V8_TARGET_LITTLE_ENDIAN)
    sb(scratch, source);
    srl(scratch, scratch, 8);
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
    srl(scratch, scratch, 8);
    sb(scratch, source);
#endif
  }
}
void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (IsMipsArchVariant(kMips32r6)) {
    lwc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    Ulw(scratch, rs);
    mtc1(scratch, fd);
  }
}

void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (IsMipsArchVariant(kMips32r6)) {
    swc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    mfc1(scratch, fd);
    Usw(scratch, rs);
  }
}

void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(scratch != at);
  if (IsMipsArchVariant(kMips32r6)) {
    Ldc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
    mtc1(scratch, fd);
    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
    Mthc1(scratch, fd);
  }
}

void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(scratch != at);
  if (IsMipsArchVariant(kMips32r6)) {
    Sdc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    mfc1(scratch, fd);
    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
    Mfhc1(scratch, fd);
    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
  }
}
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
    MemOperand tmp = src;
    AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
    lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
    if (IsFp32Mode()) {  // fp32 mode.
      FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
      lwc1(nextfpreg,
           MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
    } else {
      DCHECK(IsFp64Mode() || IsFpxxMode());
      // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6.
      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(src.rm() != scratch);
      lw(scratch,
         MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
      Mthc1(scratch, fd);
    }
  }
  CheckTrampolinePoolQuick(1);
}

void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
    MemOperand tmp = src;
    AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
    swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
    if (IsFp32Mode()) {  // fp32 mode.
      FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
      swc1(nextfpreg,
           MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
    } else {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      DCHECK(IsFp64Mode() || IsFpxxMode());
      // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6.
      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
      DCHECK(src.rm() != t8);
      Mfhc1(t8, fd);
      sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
    }
  }
  CheckTrampolinePoolQuick(1);
}
void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
  bool is_one_instruction = IsMipsArchVariant(kMips32r6)
                                ? is_int9(rs.offset())
                                : is_int16(rs.offset());
  if (is_one_instruction) {
    ll(rd, rs);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    li(scratch, rs.offset());
    addu(scratch, scratch, rs.rm());
    ll(rd, MemOperand(scratch, 0));
  }
}

void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
  bool is_one_instruction = IsMipsArchVariant(kMips32r6)
                                ? is_int9(rs.offset())
                                : is_int16(rs.offset());
  if (is_one_instruction) {
    sc(rd, rs);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    li(scratch, rs.offset());
    addu(scratch, scratch, rs.rm());
    sc(rd, MemOperand(scratch, 0));
  }
}
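// li() materializes constants. With OPTIMIZE_SIZE and no relocation info it
// emits the shortest sequence (a single addiu, ori, or lui, or a lui+ori
// pair). Otherwise it always emits the full two-instruction lui/ori pair so
// the code can later be patched with a different value of unknown shape.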
void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(dst, value);
      return;
    }
  }
  li(dst, Operand(value), mode);
}

void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, value);
      return;
    }
  }
  li(dst, Operand(value), mode);
}

void TurboAssembler::li(Register dst, const StringConstantBase* string,
                        LiFlags mode) {
  li(dst, Operand::EmbeddedStringConstant(string), mode);
}

void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need relocation info.
    if (is_int16(j.immediate())) {
      addiu(rd, zero_reg, j.immediate());
    } else if (!(j.immediate() & kHiMask)) {
      ori(rd, zero_reg, j.immediate());
    } else {
      lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask);
      if (j.immediate() & kImm16Mask) {
        ori(rd, rd, (j.immediate() & kImm16Mask));
      }
    }
  } else {
    int32_t immediate;
    if (j.IsHeapObjectRequest()) {
      RequestHeapObject(j.heap_object_request());
      immediate = 0;
    } else {
      immediate = j.immediate();
    }

    if (MustUseReg(j.rmode())) {
      RecordRelocInfo(j.rmode(), immediate);
    }
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.

    lui(rd, (immediate >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (immediate & kImm16Mask));
  }
}
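// MultiPush/MultiPop move a RegList (a bitmask over register codes) to and
// from the stack. Pushing iterates from the highest register code down, so
// the highest-numbered register ends up at the highest address; popping
// iterates upwards, restoring in the opposite order. Illustrative use:
//   MultiPush(a0.bit() | a1.bit() | ra.bit());
//   ... clobbering code ...
//   MultiPop(a0.bit() | a1.bit() | ra.bit());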
void TurboAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sw(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}

void TurboAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addiu(sp, sp, stack_offset);
}

void TurboAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Subu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}

void TurboAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addiu(sp, sp, stack_offset);
}
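// 64-bit integer arithmetic on register pairs. AddPair computes the carry out
// of the low word with Sltu (the unsigned sum wrapped around iff it is
// smaller than an addend) and adds it into the high word; SubPair computes
// the borrow the same way before subtracting.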
void TurboAssembler::AddPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high,
                             Register scratch1, Register scratch2) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch3 = t8;
  Addu(scratch1, left_low, right_low);
  Sltu(scratch3, scratch1, left_low);
  Addu(scratch2, left_high, right_high);
  Addu(dst_high, scratch2, scratch3);
  Move(dst_low, scratch1);
}

void TurboAssembler::SubPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high,
                             Register scratch1, Register scratch2) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch3 = t8;
  Sltu(scratch3, left_low, right_low);
  Subu(scratch1, left_low, right_low);
  Subu(scratch2, left_high, right_high);
  Subu(dst_high, scratch2, scratch3);
  Move(dst_low, scratch1);
}

void TurboAssembler::AndPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high) {
  And(dst_low, left_low, right_low);
  And(dst_high, left_high, right_high);
}

void TurboAssembler::OrPair(Register dst_low, Register dst_high,
                            Register left_low, Register left_high,
                            Register right_low, Register right_high) {
  Or(dst_low, left_low, right_low);
  Or(dst_high, left_high, right_high);
}

void TurboAssembler::XorPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high) {
  Xor(dst_low, left_low, right_low);
  Xor(dst_high, left_high, right_high);
}
void TurboAssembler::MulPair(Register dst_low, Register dst_high,
                             Register left_low, Register left_high,
                             Register right_low, Register right_high,
                             Register scratch1, Register scratch2) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch3 = t8;
  Mulu(scratch2, scratch1, left_low, right_low);
  Mul(scratch3, left_low, right_high);
  Addu(scratch2, scratch2, scratch3);
  Mul(scratch3, left_high, right_low);
  Addu(dst_high, scratch2, scratch3);
  Move(dst_low, scratch1);
}
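// 64-bit shifts on register pairs. For a variable shift amount the low and
// high words are combined through a scratch register with sllv/srlv, and bit
// 5 of the (masked) shift amount decides whether the two words must
// additionally be swapped; that swap uses movn where available and an
// explicit branch on Loongson/r6.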
void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift, Register scratch1,
                             Register scratch2) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label done;
  Register scratch3 = t8;
  And(scratch3, shift, 0x3F);
  sllv(dst_low, src_low, scratch3);
  Nor(scratch2, zero_reg, scratch3);
  srl(scratch1, src_low, 1);
  srlv(scratch1, scratch1, scratch2);
  sllv(dst_high, src_high, scratch3);
  Or(dst_high, dst_high, scratch1);
  And(scratch1, scratch3, 32);
  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
    Branch(&done, eq, scratch1, Operand(zero_reg));
    mov(dst_high, dst_low);
    mov(dst_low, zero_reg);
  } else {
    movn(dst_high, dst_low, scratch1);
    movn(dst_low, zero_reg, scratch1);
  }
  bind(&done);
}

void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift, Register scratch) {
  shift = shift & 0x3F;
  if (shift == 0) {
    mov(dst_low, src_low);
    mov(dst_high, src_high);
  } else if (shift < 32) {
    if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
      srl(dst_high, src_low, 32 - shift);
      Ins(dst_high, src_high, shift, 32 - shift);
      sll(dst_low, src_low, shift);
    } else {
      sll(dst_high, src_high, shift);
      sll(dst_low, src_low, shift);
      srl(scratch, src_low, 32 - shift);
      Or(dst_high, dst_high, scratch);
    }
  } else if (shift == 32) {
    mov(dst_low, zero_reg);
    mov(dst_high, src_low);
  } else {
    shift = shift - 32;
    mov(dst_low, zero_reg);
    sll(dst_high, src_low, shift);
  }
}
void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift, Register scratch1,
                             Register scratch2) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label done;
  Register scratch3 = t8;
  And(scratch3, shift, 0x3F);
  srlv(dst_high, src_high, scratch3);
  Nor(scratch2, zero_reg, scratch3);
  sll(scratch1, src_high, 1);
  sllv(scratch1, scratch1, scratch2);
  srlv(dst_low, src_low, scratch3);
  Or(dst_low, dst_low, scratch1);
  And(scratch1, scratch3, 32);
  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
    Branch(&done, eq, scratch1, Operand(zero_reg));
    mov(dst_low, dst_high);
    mov(dst_high, zero_reg);
  } else {
    movn(dst_low, dst_high, scratch1);
    movn(dst_high, zero_reg, scratch1);
  }
  bind(&done);
}

void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift, Register scratch) {
  shift = shift & 0x3F;
  if (shift == 0) {
    mov(dst_low, src_low);
    mov(dst_high, src_high);
  } else if (shift < 32) {
    if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
      srl(dst_low, src_low, shift);
      Ins(dst_low, src_high, 32 - shift, shift);
      srl(dst_high, src_high, shift);
    } else {
      srl(dst_high, src_high, shift);
      srl(dst_low, src_low, shift);
      shift = 32 - shift;
      sll(scratch, src_high, shift);
      Or(dst_low, dst_low, scratch);
    }
  } else if (shift == 32) {
    mov(dst_high, zero_reg);
    mov(dst_low, src_high);
  } else {
    shift = shift - 32;
    mov(dst_high, zero_reg);
    srl(dst_low, src_high, shift);
  }
}
void TurboAssembler::SarPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift, Register scratch1,
                             Register scratch2) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label done;
  Register scratch3 = t8;
  And(scratch3, shift, 0x3F);
  srav(dst_high, src_high, scratch3);
  Nor(scratch2, zero_reg, scratch3);
  sll(scratch1, src_high, 1);
  sllv(scratch1, scratch1, scratch2);
  srlv(dst_low, src_low, scratch3);
  Or(dst_low, dst_low, scratch1);
  And(scratch1, scratch3, 32);
  Branch(&done, eq, scratch1, Operand(zero_reg));
  mov(dst_low, dst_high);
  sra(dst_high, dst_high, 31);
  bind(&done);
}

void TurboAssembler::SarPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift, Register scratch) {
  shift = shift & 0x3F;
  if (shift == 0) {
    mov(dst_low, src_low);
    mov(dst_high, src_high);
  } else if (shift < 32) {
    if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
      srl(dst_low, src_low, shift);
      Ins(dst_low, src_high, 32 - shift, shift);
      sra(dst_high, src_high, shift);
    } else {
      sra(dst_high, src_high, shift);
      srl(dst_low, src_low, shift);
      shift = 32 - shift;
      sll(scratch, src_high, shift);
      Or(dst_low, dst_low, scratch);
    }
  } else if (shift == 32) {
    sra(dst_high, src_high, 31);
    mov(dst_low, src_high);
  } else {
    shift = shift - 32;
    sra(dst_high, src_high, 31);
    sra(dst_low, src_high, shift);
  }
}
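// Ext and Ins emulate the MIPS32r2 bitfield instructions ext/ins on r1 and
// Loongson cores: Ext shifts the field to the top of the word and back down
// to zero-extend it, while Ins builds a mask for the field, clears the
// destination bits, and ORs in the shifted source bits.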
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
                         uint16_t size) {
  DCHECK_LT(pos, 32);
  DCHECK_LT(pos + size, 33);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    ext_(rt, rs, pos, size);
  } else {
    // Move rs to rt and shift it left then right to get the desired bitfield
    // on the right side and zeroes on the left.
    int shift_left = 32 - (pos + size);
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.

    int shift_right = 32 - size;
    if (shift_right > 0) {
      srl(rt, rt, shift_right);
    }
  }
}

void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
                         uint16_t size) {
  DCHECK_LT(pos, 32);
  DCHECK_LE(pos + size, 32);
  DCHECK_NE(size, 0);

  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    ins_(rt, rs, pos, size);
  } else {
    DCHECK(rt != t8 && rs != t8);
    BlockTrampolinePoolScope block_trampoline_pool(this);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    Subu(scratch, zero_reg, Operand(1));
    srl(scratch, scratch, 32 - size);
    and_(t8, rs, scratch);
    sll(t8, t8, pos);
    sll(scratch, scratch, pos);
    nor(scratch, scratch, zero_reg);
    and_(scratch, rt, scratch);
    or_(rt, t8, scratch);
  }
}
void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
                                 int size, bool sign_extend) {
  srav(dest, source, pos);
  Ext(dest, dest, 0, size);
  if (size == 8) {
    if (sign_extend) {
      Seb(dest, dest);
    }
  } else if (size == 16) {
    if (sign_extend) {
      Seh(dest, dest);
    }
  }
}

void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
                                int size) {
  Ror(dest, dest, pos);
  Ins(dest, source, 0, size);
  {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    Subu(scratch, zero_reg, pos);
    Ror(dest, dest, scratch);
  }
}
void TurboAssembler::Seb(Register rd, Register rt) {
  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    seb(rd, rt);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
    sll(rd, rt, 24);
    sra(rd, rd, 24);
  }
}

void TurboAssembler::Seh(Register rd, Register rt) {
  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    seh(rd, rt);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
    sll(rd, rt, 16);
    sra(rd, rd, 16);
  }
}
void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kMips32r6)) {
    // r6 neg_s changes the sign for NaN-like operands as well.
    neg_s(fd, fs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Label is_nan, done;
    Register scratch1 = t8;
    Register scratch2 = t9;
    CompareIsNanF32(fs, fs);
    BranchTrueShortF(&is_nan);
    Branch(USE_DELAY_SLOT, &done);
    // For NaN input, neg_s will return the same NaN value, while the sign
    // has to be changed separately.
    neg_s(fd, fs);  // In delay slot.

    bind(&is_nan);
    mfc1(scratch1, fs);
    li(scratch2, kBinary32SignMask);
    Xor(scratch1, scratch1, scratch2);
    mtc1(scratch1, fd);
    bind(&done);
  }
}

void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kMips32r6)) {
    // r6 neg_d changes the sign for NaN-like operands as well.
    neg_d(fd, fs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Label is_nan, done;
    Register scratch1 = t8;
    Register scratch2 = t9;
    CompareIsNanF64(fs, fs);
    BranchTrueShortF(&is_nan);
    Branch(USE_DELAY_SLOT, &done);
    // For NaN input, neg_d will return the same NaN value, while the sign
    // has to be changed separately.
    neg_d(fd, fs);  // In delay slot.

    bind(&is_nan);
    Move(fd, fs);
    Mfhc1(scratch1, fd);
    li(scratch2, HeapNumber::kSignMask);
    Xor(scratch1, scratch1, scratch2);
    Mthc1(scratch1, fd);
    bind(&done);
  }
}
void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs,
                              FPURegister scratch) {
  // In FP64Mode we do the conversion from a 64-bit long.
  if (IsFp64Mode()) {
    mtc1(rs, scratch);
    Mthc1(zero_reg, scratch);
    cvt_d_l(fd, scratch);
  } else {
    // Convert rs to an FP value in fd.
    DCHECK(fd != scratch);
    DCHECK(rs != at);

    Label msb_clear, conversion_done;
    // For a value which is < 2^31, regard it as a signed positive word.
    Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
    mtc1(rs, fd);
    {
      UseScratchRegisterScope temps(this);
      Register scratch1 = temps.Acquire();
      li(scratch1, 0x41F00000);  // FP value: 2^32.

      // For unsigned inputs > 2^31, we convert to double as a signed int32,
      // then add 2^32 to move it back to the unsigned value range.
      mtc1(zero_reg, scratch);
      Mthc1(scratch1, scratch);
    }

    cvt_d_w(fd, fd);

    Branch(USE_DELAY_SLOT, &conversion_done);
    add_d(fd, fd, scratch);  // In delay slot.

    bind(&msb_clear);
    cvt_d_w(fd, fd);

    bind(&conversion_done);
  }
}
void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Trunc_uw_d(t8, fs, scratch);
  mtc1(t8, fd);
}

void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Trunc_uw_s(t8, fs, scratch);
  mtc1(t8, fd);
}

void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd == fs) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Mfhc1(t8, fs);
    trunc_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    trunc_w_d(fd, fs);
  }
}

void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd == fs) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Mfhc1(t8, fs);
    round_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    round_w_d(fd, fs);
  }
}

void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd == fs) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Mfhc1(t8, fs);
    floor_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    floor_w_d(fd, fs);
  }
}

void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  if (IsMipsArchVariant(kLoongson) && fd == fs) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Mfhc1(t8, fs);
    ceil_w_d(fd, fs);
    Mthc1(t8, fs);
  } else {
    ceil_w_d(fd, fs);
  }
}
void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs,
                                FPURegister scratch) {
  DCHECK(fs != scratch);
  DCHECK(rd != at);

  {
    // Load 2^31 into scratch as its float representation.
    UseScratchRegisterScope temps(this);
    Register scratch1 = temps.Acquire();
    li(scratch1, 0x41E00000);
    mtc1(zero_reg, scratch);
    Mthc1(scratch1, scratch);
  }
  // Test if scratch > fs; if fs < 2^31 we can convert it normally.
  Label simple_convert;
  CompareF64(OLT, fs, scratch);
  BranchTrueShortF(&simple_convert);

  // First subtract 2^31 from fs, then trunc it to rd and add 2^31 to rd.
  sub_d(scratch, fs, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rd, scratch);
  Or(rd, rd, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fs);
  mfc1(rd, scratch);

  bind(&done);
}

void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs,
                                FPURegister scratch) {
  DCHECK(fs != scratch);
  DCHECK(rd != at);

  {
    // Load 2^31 into scratch as its float representation.
    UseScratchRegisterScope temps(this);
    Register scratch1 = temps.Acquire();
    li(scratch1, 0x4F000000);
    mtc1(scratch1, scratch);
  }
  // Test if scratch > fs; if fs < 2^31 we can convert it normally.
  Label simple_convert;
  CompareF32(OLT, fs, scratch);
  BranchTrueShortF(&simple_convert);

  // First subtract 2^31 from fs, then trunc it to rd and add 2^31 to rd.
  sub_s(scratch, fs, scratch);
  trunc_w_s(scratch, scratch);
  mfc1(rd, scratch);
  Or(rd, rd, 1 << 31);

  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_s(scratch, fs);
  mfc1(rd, scratch);

  bind(&done);
}
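// RoundDouble/RoundFloat implement Floor/Ceil/Trunc/Round. On r6 they set the
// FCSR rounding mode and use rint. Pre-r6 they first inspect the exponent and
// leave values that are already integral (or too large to carry a fraction)
// untouched, then round via the supplied conversion instruction, and finally
// restore the sign of results that rounded to zero so that, e.g., -0.5 rounds
// to -0.0 rather than +0.0.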
template <typename RoundFunc>
void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
                                 FPURoundingMode mode, RoundFunc round) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch = t8;
  Register scratch2 = t9;
  if (IsMipsArchVariant(kMips32r6)) {
    cfc1(scratch, FCSR);
    li(at, Operand(mode));
    ctc1(at, FCSR);
    rint_d(dst, src);
    ctc1(scratch, FCSR);
  } else {
    Label done;
    Mfhc1(scratch, src);
    Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    Branch(USE_DELAY_SLOT, &done, hs, at,
           Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
    mov_d(dst, src);  // In delay slot.

    round(this, dst, src);
    Move(at, scratch2, dst);
    or_(at, at, scratch2);
    Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
    cvt_d_l(dst, dst);  // In delay slot.
    srl(at, scratch, 31);
    sll(at, at, 31);
    Mthc1(at, dst);
    bind(&done);
  }
}

void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_floor,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->floor_l_d(dst, src);
              });
}

void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_ceil,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->ceil_l_d(dst, src);
              });
}

void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_trunc,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->trunc_l_d(dst, src);
              });
}

void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_round,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->round_l_d(dst, src);
              });
}
template <typename RoundFunc>
void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
                                FPURoundingMode mode, RoundFunc round) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch = t8;
  if (IsMipsArchVariant(kMips32r6)) {
    cfc1(scratch, FCSR);
    li(at, Operand(mode));
    ctc1(at, FCSR);
    rint_s(dst, src);
    ctc1(scratch, FCSR);
  } else {
    int32_t kFloat32ExponentBias = 127;
    int32_t kFloat32MantissaBits = 23;
    int32_t kFloat32ExponentBits = 8;
    Label done;
    mfc1(scratch, src);
    Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
    Branch(USE_DELAY_SLOT, &done, hs, at,
           Operand(kFloat32ExponentBias + kFloat32MantissaBits));
    mov_s(dst, src);  // In delay slot.

    round(this, dst, src);
    mfc1(at, dst);
    Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
    cvt_s_w(dst, dst);  // In delay slot.
    srl(at, scratch, 31);
    sll(at, at, 31);
    mtc1(at, dst);
    bind(&done);
  }
}

void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_floor,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->floor_w_s(dst, src);
             });
}

void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_ceil,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->ceil_w_s(dst, src);
             });
}

void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_trunc,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->trunc_w_s(dst, src);
             });
}

void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_round,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->round_w_s(dst, src);
             });
}
void TurboAssembler::Mthc1(Register rt, FPURegister fs) {
  if (IsFp32Mode()) {
    mtc1(rt, fs.high());
  } else {
    DCHECK(IsFp64Mode() || IsFpxxMode());
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
    mthc1(rt, fs);
  }
}

void TurboAssembler::Mfhc1(Register rt, FPURegister fs) {
  if (IsFp32Mode()) {
    mfc1(rt, fs.high());
  } else {
    DCHECK(IsFp64Mode() || IsFpxxMode());
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
    mfhc1(rt, fs);
  }
}
void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  if (IsMipsArchVariant(kMips32r2)) {
    madd_s(fd, fr, fs, ft);
  } else {
    DCHECK(fr != scratch && fs != scratch && ft != scratch);
    mul_s(scratch, fs, ft);
    add_s(fd, fr, scratch);
  }
}

void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  if (IsMipsArchVariant(kMips32r2)) {
    madd_d(fd, fr, fs, ft);
  } else {
    DCHECK(fr != scratch && fs != scratch && ft != scratch);
    mul_d(scratch, fs, ft);
    add_d(fd, fr, scratch);
  }
}

void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  if (IsMipsArchVariant(kMips32r2)) {
    msub_s(fd, fr, fs, ft);
  } else {
    DCHECK(fr != scratch && fs != scratch && ft != scratch);
    mul_s(scratch, fs, ft);
    sub_s(fd, scratch, fr);
  }
}

void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  if (IsMipsArchVariant(kMips32r2)) {
    msub_d(fd, fr, fs, ft);
  } else {
    DCHECK(fr != scratch && fs != scratch && ft != scratch);
    mul_d(scratch, fs, ft);
    sub_d(fd, scratch, fr);
  }
}
void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
                              FPURegister cmp1, FPURegister cmp2) {
  if (IsMipsArchVariant(kMips32r6)) {
    sizeField = sizeField == D ? L : W;
    DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
    cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
  } else {
    c(cc, sizeField, cmp1, cmp2);
  }
}

void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
                                   FPURegister cmp2) {
  CompareF(sizeField, UN, cmp1, cmp2);
}

void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
  if (IsMipsArchVariant(kMips32r6)) {
    bc1nez(target, kDoubleCompareReg);
  } else {
    bc1t(target);
  }
  if (bd == PROTECT) {
    nop();
  }
}

void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
  if (IsMipsArchVariant(kMips32r6)) {
    bc1eqz(target, kDoubleCompareReg);
  } else {
    bc1f(target);
  }
  if (bd == PROTECT) {
    nop();
  }
}

void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
  bool long_branch =
      target->is_bound() ? !is_near(target) : is_trampoline_emitted();
  if (long_branch) {
    Label skip;
    BranchFalseShortF(&skip);
    BranchLong(target, bd);
    bind(&skip);
  } else {
    BranchTrueShortF(target, bd);
  }
}

void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
  bool long_branch =
      target->is_bound() ? !is_near(target) : is_trampoline_emitted();
  if (long_branch) {
    Label skip;
    BranchTrueShortF(&skip);
    BranchLong(target, bd);
    bind(&skip);
  } else {
    BranchFalseShortF(target, bd);
  }
}
void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
                               MSABranchCondition cond, MSARegister wt,
                               BranchDelaySlot bd) {
  {
    BlockTrampolinePoolScope block_trampoline_pool(this);

    if (target) {
      bool long_branch =
          target->is_bound() ? !is_near(target) : is_trampoline_emitted();
      if (long_branch) {
        Label skip;
        MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
        BranchShortMSA(df, &skip, neg_cond, wt, bd);
        BranchLong(target, bd);
        bind(&skip);
      } else {
        BranchShortMSA(df, target, cond, wt, bd);
      }
    }
  }
}

void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
                                    MSABranchCondition cond, MSARegister wt,
                                    BranchDelaySlot bd) {
  if (IsMipsArchVariant(kMips32r6)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (target) {
      switch (cond) {
        case all_not_zero:
          switch (df) {
            case MSA_BRANCH_D:
              bnz_d(wt, target);
              break;
            case MSA_BRANCH_W:
              bnz_w(wt, target);
              break;
            case MSA_BRANCH_H:
              bnz_h(wt, target);
              break;
            case MSA_BRANCH_B:
            default:
              bnz_b(wt, target);
          }
          break;
        case one_elem_not_zero:
          bnz_v(wt, target);
          break;
        case one_elem_zero:
          switch (df) {
            case MSA_BRANCH_D:
              bz_d(wt, target);
              break;
            case MSA_BRANCH_W:
              bz_w(wt, target);
              break;
            case MSA_BRANCH_H:
              bz_h(wt, target);
              break;
            case MSA_BRANCH_B:
            default:
              bz_b(wt, target);
          }
          break;
        case all_zero:
          bz_v(wt, target);
          break;
        default:
          UNREACHABLE();
      }
    }
  }
  if (bd == PROTECT) {
    nop();
  }
}
void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
  if (IsFp32Mode()) {
    mtc1(src_low, dst);
  } else {
    DCHECK(IsFp64Mode() || IsFpxxMode());
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(src_low != scratch);
    mfhc1(scratch, dst);
    mtc1(src_low, dst);
    mthc1(scratch, dst);
  }
}
void TurboAssembler::Move(FPURegister dst, uint32_t src) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  li(scratch, Operand(static_cast<int32_t>(src)));
  mtc1(scratch, dst);
}

void TurboAssembler::Move(FPURegister dst, uint64_t src) {
  // Handle special values first.
  if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
    mov_d(dst, kDoubleRegZero);
  } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
    Neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo = src & 0xFFFFFFFF;
    uint32_t hi = src >> 32;
    // Move the low part of the double into the lower of the corresponding
    // FPU register pair.
    if (lo != 0) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      li(scratch, Operand(lo));
      mtc1(scratch, dst);
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the higher of the corresponding
    // FPU register pair.
    if (hi != 0) {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      li(scratch, Operand(hi));
      Mthc1(scratch, dst);
    } else {
      Mthc1(zero_reg, dst);
    }
    if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
  }
}
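// LoadZeroOnCondition clears rd when "rs cond rt" holds. Comparisons against
// zero are handled with movz/movn (seleqz/selnez on r6) directly; all other
// conditions first materialize a 0/1 flag in t9 using the Slt-family macros
// above and then clear rd if the flag is non-zero.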
void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
                                         const Operand& rt, Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  switch (cond) {
    case cc_always:
      mov(rd, zero_reg);
      break;
    case eq:
      if (rs == zero_reg) {
        if (rt.is_reg()) {
          LoadZeroIfConditionZero(rd, rt.rm());
        } else {
          if (rt.immediate() == 0) {
            mov(rd, zero_reg);
          } else {
            nop();
          }
        }
      } else if (IsZero(rt)) {
        LoadZeroIfConditionZero(rd, rs);
      } else {
        Subu(t9, rs, rt);
        LoadZeroIfConditionZero(rd, t9);
      }
      break;
    case ne:
      if (rs == zero_reg) {
        if (rt.is_reg()) {
          LoadZeroIfConditionNotZero(rd, rt.rm());
        } else {
          if (rt.immediate() != 0) {
            mov(rd, zero_reg);
          } else {
            nop();
          }
        }
      } else if (IsZero(rt)) {
        LoadZeroIfConditionNotZero(rd, rs);
      } else {
        Subu(t9, rs, rt);
        LoadZeroIfConditionNotZero(rd, t9);
      }
      break;
    // Signed comparison.
    case greater:
      Sgt(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    case greater_equal:
      Sge(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    case less:
      Slt(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    case less_equal:
      Sle(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    // Unsigned comparison.
    case Ugreater:
      Sgtu(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    case Ugreater_equal:
      Sgeu(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    case Uless:
      Sltu(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    case Uless_equal:
      Sleu(t9, rs, rt);
      LoadZeroIfConditionNotZero(rd, t9);
      break;
    default:
      UNREACHABLE();
  }
}
void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
                                                Register condition) {
  if (IsMipsArchVariant(kMips32r6)) {
    seleqz(dest, dest, condition);
  } else {
    Movn(dest, zero_reg, condition);
  }
}

void TurboAssembler::LoadZeroIfConditionZero(Register dest,
                                             Register condition) {
  if (IsMipsArchVariant(kMips32r6)) {
    selnez(dest, dest, condition);
  } else {
    Movz(dest, zero_reg, condition);
  }
}

void TurboAssembler::LoadZeroIfFPUCondition(Register dest) {
  if (IsMipsArchVariant(kMips32r6)) {
    mfc1(kScratchReg, kDoubleCompareReg);
    LoadZeroIfConditionNotZero(dest, kScratchReg);
  } else {
    Movt(dest, zero_reg);
  }
}

void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) {
  if (IsMipsArchVariant(kMips32r6)) {
    mfc1(kScratchReg, kDoubleCompareReg);
    LoadZeroIfConditionZero(dest, kScratchReg);
  } else {
    Movf(dest, zero_reg);
  }
}
void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
    Label done;
    Branch(&done, ne, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movz(rd, rs, rt);
  }
}

void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
    Label done;
    Branch(&done, eq, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movn(rd, rs, rt);
  }
}

void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
  if (IsMipsArchVariant(kLoongson)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // Tests an FP condition code and then conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    DCHECK_EQ(cc, 0);
    DCHECK(rs != t8 && rd != t8);
    Label done;
    Register scratch = t8;
    // For testing purposes we need to fetch the content of the FCSR register
    // and then test its cc (floating point condition code) bit (for cc = 0,
    // it is the 23rd bit of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch is
    // UNPREDICTABLE for the instruction immediately following CFC1.
    nop();
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, eq, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movt(rd, rs, cc);
  }
}

void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
  if (IsMipsArchVariant(kLoongson)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // Tests an FP condition code and then conditionally moves rs to rd.
    // We do not currently use any FPU cc bit other than bit 0.
    DCHECK_EQ(cc, 0);
    DCHECK(rs != t8 && rd != t8);
    Label done;
    Register scratch = t8;
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch is
    // UNPREDICTABLE for the instruction immediately following CFC1.
    nop();
    srl(scratch, scratch, 16);
    andi(scratch, scratch, 0x0080);
    Branch(&done, ne, scratch, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movf(rd, rs, cc);
  }
}
void TurboAssembler::Clz(Register rd, Register rs) {
  if (IsMipsArchVariant(kLoongson)) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9);
    Register mask = t8;
    Register scratch = t9;
    Label loop, end;
    {
      UseScratchRegisterScope temps(this);
      Register scratch1 = temps.Acquire();
      mov(scratch1, rs);
      mov(rd, zero_reg);
      lui(mask, 0x8000);
      bind(&loop);
      and_(scratch, scratch1, mask);
    }
    Branch(&end, ne, scratch, Operand(zero_reg));
    addiu(rd, rd, 1);
    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
    srl(mask, mask, 1);
    bind(&end);
  } else {
    clz(rd, rs);
  }
}
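// Count trailing zeros. On r6 the word is bit-reversed (rotate, wsbh,
// bitswap) and counted with clz. Otherwise the classic trick below is used:
// for x = rs, (x - 1) ^ x has ones exactly at and below the lowest set bit,
// and ANDing that with (x - 1) leaves ones exactly in the trailing-zero
// positions, so clz of the result is 32 - ctz and one final subtraction from
// 32 yields the count.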
void TurboAssembler::Ctz(Register rd, Register rs) {
  if (IsMipsArchVariant(kMips32r6)) {
    // We don't have an instruction to count the number of trailing zeros, so
    // flip the bits end-for-end and count the leading zeros instead.
    Ror(rd, rs, 16);
    wsbh(rd, rd);
    bitswap(rd, rd);
    Clz(rd, rd);
  } else {
    // Convert trailing zeros to trailing ones, and bits to their left to
    // zeros.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    Addu(scratch, rs, -1);
    Xor(rd, scratch, rs);
    And(rd, rd, scratch);
    // Count number of leading zeros.
    Clz(rd, rd);
    // Subtract the number of leading zeros from 32 to get the number of
    // trailing ones, which equals the number of former trailing zeros.
    li(scratch, 32);
    Subu(rd, scratch, rd);
  }
}
void TurboAssembler::Popcnt(Register rd, Register rs) {
  // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  //
  // v = v - ((v >> 1) & (T)~(T)0/3);                               // temp
  // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);          // temp
  // v = (v + (v >> 4)) & (T)~(T)0/255*15;                          // temp
  // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; // count
  uint32_t B0 = 0x55555555;     // (T)~(T)0/3
  uint32_t B1 = 0x33333333;     // (T)~(T)0/15*3
  uint32_t B2 = 0x0F0F0F0F;     // (T)~(T)0/255*15
  uint32_t value = 0x01010101;  // (T)~(T)0/255
  uint32_t shift = 24;          // (sizeof(T) - 1) * BITS_PER_BYTE

  BlockTrampolinePoolScope block_trampoline_pool(this);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Register scratch2 = t8;
  srl(scratch, rs, 1);
  li(scratch2, B0);
  And(scratch, scratch, scratch2);
  Subu(scratch, rs, scratch);
  li(scratch2, B1);
  And(rd, scratch, scratch2);
  srl(scratch, scratch, 2);
  And(scratch, scratch, scratch2);
  Addu(scratch, rd, scratch);
  srl(rd, scratch, 4);
  Addu(rd, rd, scratch);
  li(scratch2, B2);
  And(rd, rd, scratch2);
  li(scratch, value);
  Mul(rd, rd, scratch);
  srl(rd, rd, shift);
}
2686 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2687 Register result,
2688 DoubleRegister double_input,
2689 Register scratch,
2690 DoubleRegister double_scratch,
2691 Register except_flag,
2692 CheckForInexactConversion check_inexact) {
2693 DCHECK(result != scratch);
2694 DCHECK(double_input != double_scratch);
2695 DCHECK(except_flag != scratch);
2697 Label done;
2700 mov(except_flag, zero_reg);
2703 cvt_w_d(double_scratch, double_input);
2704 mfc1(result, double_scratch);
2705 cvt_d_w(double_scratch, double_scratch);
2706 CompareF64(EQ, double_input, double_scratch);
2707 BranchTrueShortF(&done);
2709 int32_t except_mask = kFCSRFlagMask;
2711 if (check_inexact == kDontCheckForInexactConversion) {
2713 except_mask &= ~kFCSRInexactFlagMask;
2714 }
2717 cfc1(scratch, FCSR);
2719 ctc1(zero_reg, FCSR);
2722 switch (rounding_mode) {
2723 case kRoundToNearest:
2724 Round_w_d(double_scratch, double_input);
2725 break;
2726 case kRoundToZero:
2727 Trunc_w_d(double_scratch, double_input);
2728 break;
2729 case kRoundToPlusInf:
2730 Ceil_w_d(double_scratch, double_input);
2731 break;
2732 case kRoundToMinusInf:
2733 Floor_w_d(double_scratch, double_input);
2734 break;
2735 }
2738 cfc1(except_flag, FCSR);
2740 ctc1(scratch, FCSR);
2742 mfc1(result, double_scratch);
2745 And(except_flag, except_flag, Operand(except_mask));
2747 bind(&done);
2748 }
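// TryInlineTruncateDoubleToI emits the fast inline path of a double-to-int32
// truncation: trunc_w_d with the FCSR exception flags saved/cleared around
// it, branching to 'done' only when no overflow, underflow or invalid-op
// exception fired. Callers fall through to a stub call otherwise.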
2750 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
2751 DoubleRegister double_input,
2752 Label* done) {
2753 BlockTrampolinePoolScope block_trampoline_pool(this);
2754 DoubleRegister single_scratch = kScratchDoubleReg.low();
2755 UseScratchRegisterScope temps(this);
2756 Register scratch = temps.Acquire();
2757 Register scratch2 = t9;
2760 cfc1(scratch2, FCSR);
2761 ctc1(zero_reg, FCSR);
2763 trunc_w_d(single_scratch, double_input);
2764 mfc1(result, single_scratch);
2766 cfc1(scratch, FCSR);
2767 ctc1(scratch2, FCSR);
2770 And(scratch, scratch,
2771 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2773 Branch(done, eq, scratch, Operand(zero_reg));
2774 }
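// TruncateDoubleToI is the general truncation: try the inline path first,
// then spill the input to the stack and call the DoubleToI builtin (or the
// WebAssembly runtime stub when compiling wasm) to do the slow conversion.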
2776 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
2777 Register result,
2778 DoubleRegister double_input,
2779 StubCallMode stub_mode) {
2781 Label done;
2782 TryInlineTruncateDoubleToI(result, double_input, &done);
2785 push(ra);
2786 Subu(sp, sp, Operand(kDoubleSize));
2787 Sdc1(double_input, MemOperand(sp, 0));
2789 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
2790 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
2791 } else {
2792 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
2793 }
2794 lw(result, MemOperand(sp, 0));
2796 Addu(sp, sp, Operand(kDoubleSize));
2797 pop(ra);
2799 bind(&done);
2800 }
2805 #define BRANCH_ARGS_CHECK(cond, rs, rt) \
2806 DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
2807 (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
2809 void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2810 DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
2811 BranchShort(offset, bdslot);
2814 void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
2815 const Operand& rt, BranchDelaySlot bdslot) {
2816 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2817 DCHECK(is_near);
2818 USE(is_near);
2819 }
2821 void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2822 if (L->is_bound()) {
2823 if (is_near_branch(L)) {
2824 BranchShort(L, bdslot);
2825 } else {
2826 BranchLong(L, bdslot);
2827 }
2828 } else {
2829 if (is_trampoline_emitted()) {
2830 BranchLong(L, bdslot);
2831 } else {
2832 BranchShort(L, bdslot);
2833 }
2834 }
2835 }
2837 void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
2838 const Operand& rt, BranchDelaySlot bdslot) {
2839 if (L->is_bound()) {
2840 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2841 if (cond != cc_always) {
2842 Label skip;
2843 Condition neg_cond = NegateCondition(cond);
2844 BranchShort(&skip, neg_cond, rs, rt);
2845 BranchLong(L, bdslot);
2846 bind(&skip);
2847 } else {
2848 BranchLong(L, bdslot);
2849 }
2850 }
2851 } else {
2852 if (is_trampoline_emitted()) {
2853 if (cond != cc_always) {
2854 Label skip;
2855 Condition neg_cond = NegateCondition(cond);
2856 BranchShort(&skip, neg_cond, rs, rt);
2857 BranchLong(L, bdslot);
2858 bind(&skip);
2859 } else {
2860 BranchLong(L, bdslot);
2861 }
2862 } else {
2863 BranchShort(L, cond, rs, rt, bdslot);
2864 }
2865 }
2866 }
2868 void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
2869 RootIndex index, BranchDelaySlot bdslot) {
2870 UseScratchRegisterScope temps(this);
2871 Register scratch = temps.Acquire();
2872 LoadRoot(scratch, index);
2873 Branch(L, cond, rs, Operand(scratch), bdslot);
2876 void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
2877 BranchDelaySlot bdslot) {
2878 DCHECK(L == nullptr || offset == 0);
2879 offset = GetOffset(offset, L, OffsetSize::kOffset16);
2880 b(offset);
2883 if (bdslot == PROTECT) nop();
2884 }
2887 void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2888 DCHECK(L == nullptr || offset == 0);
2889 offset = GetOffset(offset, L, OffsetSize::kOffset26);
2890 bc(offset);
2891 }
2893 void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2894 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2895 DCHECK(is_int26(offset));
2896 BranchShortHelperR6(offset, nullptr);
2897 } else {
2898 DCHECK(is_int16(offset));
2899 BranchShortHelper(offset, nullptr, bdslot);
2900 }
2901 }
2903 void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2904 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
2905 BranchShortHelperR6(0, L);
2906 } else {
2907 BranchShortHelper(0, L, bdslot);
2908 }
2909 }
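// GetOffset resolves a branch target to an instruction-count offset: bound
// labels go through branch_offset_helper (a byte offset, hence the >> 2),
// while raw offsets are just range-checked against the encoding width.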
2912 int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2913 if (L) {
2914 offset = branch_offset_helper(L, bits) >> 2;
2915 } else {
2916 DCHECK(is_intn(offset, bits));
2917 }
2918 return offset;
2919 }
2921 Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
2922 Register scratch) {
2923 Register r2 = no_reg;
2924 if (rt.is_reg()) {
2925 r2 = rt.rm();
2926 } else {
2927 r2 = scratch;
2928 li(r2, rt);
2929 }
2930 return r2;
2931 }
2934 bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
2935 OffsetSize bits) {
2936 if (!is_near(L, bits)) return false;
2937 offset = GetOffset(offset, L, bits);
2938 return true;
2939 }
2941 bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
2942 Register& scratch, const Operand& rt) {
2943 if (!is_near(L, bits)) return false;
2944 scratch = GetRtAsRegisterHelper(rt, scratch);
2945 offset = GetOffset(offset, L, bits);
2946 return true;
2947 }
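// BranchShortHelperR6 emits r6 compact branches. Each condition picks the
// densest encoding available (bc with a 26-bit offset when the comparison is
// degenerate, beqzc/bnezc with 21 bits against zero, or the two-register
// 16-bit compact forms), returning false when the target is out of range so
// the caller can fall back to a long branch.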
2949 bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2950 Condition cond, Register rs,
2951 const Operand& rt) {
2952 DCHECK(L == nullptr || offset == 0);
2953 UseScratchRegisterScope temps(this);
2954 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
2960 BlockTrampolinePoolScope block_trampoline_pool(this);
2963 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
2967 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2970 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
2972 beq(rs, scratch, offset);
2974 } else if (IsZero(rt)) {
2975 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
2979 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
2981 beqc(rs, scratch, offset);
2985 if (rt.is_reg() && rs.code() == rt.rm().code()) {
2988 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
2990 bne(rs, scratch, offset);
2992 } else if (IsZero(rt)) {
2993 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
2997 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
2999 bnec(rs, scratch, offset);
3006 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3008 } else if (rs == zero_reg) {
3009 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3011 bltzc(scratch, offset);
3012 } else if (IsZero(rt)) {
3013 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3016 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3018 DCHECK(rs != scratch);
3019 bltc(scratch, rs, offset);
3024 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3025 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3027 } else if (rs == zero_reg) {
3028 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3030 blezc(scratch, offset);
3031 } else if (IsZero(rt)) {
3032 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3035 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3037 DCHECK(rs != scratch);
3038 bgec(rs, scratch, offset);
3043 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3045 } else if (rs == zero_reg) {
3046 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3048 bgtzc(scratch, offset);
3049 } else if (IsZero(rt)) {
3050 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3053 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3055 DCHECK(rs != scratch);
3056 bltc(rs, scratch, offset);
3061 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3062 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3064 } else if (rs == zero_reg) {
3065 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3067 bgezc(scratch, offset);
3068 } else if (IsZero(rt)) {
3069 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3072 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3074 DCHECK(rs != scratch);
3075 bgec(scratch, rs, offset);
3082 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3084 } else if (rs == zero_reg) {
3085 if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
3087 bnezc(scratch, offset);
3088 } else if (IsZero(rt)) {
3089 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
3092 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3094 DCHECK(rs != scratch);
3095 bltuc(scratch, rs, offset);
3098 case Ugreater_equal:
3100 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3101 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3103 } else if (rs == zero_reg) {
3104 if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
3106 beqzc(scratch, offset);
3107 } else if (IsZero(rt)) {
3108 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3111 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3113 DCHECK(rs != scratch);
3114 bgeuc(rs, scratch, offset);
3119 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3121 } else if (rs == zero_reg) {
3122 if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
3124 bnezc(scratch, offset);
3125 } else if (IsZero(rt)) {
3128 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3130 DCHECK(rs != scratch);
3131 bltuc(rs, scratch, offset);
3136 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3137 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3139 } else if (rs == zero_reg) {
3140 if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
3143 } else if (IsZero(rt)) {
3144 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
3147 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3149 DCHECK(rs != scratch);
3150 bgeuc(scratch, rs, offset);
3157 CheckTrampolinePoolQuick(1);
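// Pre-r6 BranchShortHelper has only beq/bne and the bgez-family branches, so
// every relational condition is materialized with Slt/Sltu into a scratch
// register and then tested against zero. Offsets are fixed 16-bit, and delay
// slots are filled with a nop under PROTECT.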
3161 bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3162 Register rs, const Operand& rt,
3163 BranchDelaySlot bdslot) {
3164 DCHECK(L == nullptr || offset == 0);
3165 if (!is_near(L, OffsetSize::kOffset16)) return false;
3167 UseScratchRegisterScope temps(this);
3168 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3169 int32_t offset32;
3175 BlockTrampolinePoolScope block_trampoline_pool(this);
3178 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3183 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3184 beq(rs, zero_reg, offset32);
3187 scratch = GetRtAsRegisterHelper(rt, scratch);
3188 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3189 beq(rs, scratch, offset32);
3194 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3195 bne(rs, zero_reg, offset32);
3198 scratch = GetRtAsRegisterHelper(rt, scratch);
3199 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3200 bne(rs, scratch, offset32);
3207 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3210 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3211 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3212 bne(scratch, zero_reg, offset32);
3217 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3220 Slt(scratch, rs, rt);
3221 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3222 beq(scratch, zero_reg, offset32);
3227 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3230 Slt(scratch, rs, rt);
3231 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3232 bne(scratch, zero_reg, offset32);
3237 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3240 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3241 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3242 beq(scratch, zero_reg, offset32);
3249 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3250 bne(rs, zero_reg, offset32);
3252 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3253 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3254 bne(scratch, zero_reg, offset32);
3257 case Ugreater_equal:
3259 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3262 Sltu(scratch, rs, rt);
3263 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3264 beq(scratch, zero_reg, offset32);
3271 Sltu(scratch, rs, rt);
3272 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3273 bne(scratch, zero_reg, offset32);
3278 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3279 beq(rs, zero_reg, offset32);
3281 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3282 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3283 beq(scratch, zero_reg, offset32);
3291 if (bdslot == PROTECT)
3297 bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3298 Register rs, const Operand& rt,
3299 BranchDelaySlot bdslot) {
3300 BRANCH_ARGS_CHECK(cond, rs, rt);
3301 if (!L) {
3302 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3303 DCHECK(is_int26(offset));
3304 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3305 } else {
3306 DCHECK(is_int16(offset));
3307 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3308 }
3309 } else {
3310 DCHECK_EQ(offset, 0);
3311 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3312 return BranchShortHelperR6(0, L, cond, rs, rt);
3313 } else {
3314 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3315 }
3316 }
3317 }
3320 void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3321 const Operand& rt, BranchDelaySlot bdslot) {
3322 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3325 void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
3326 const Operand& rt, BranchDelaySlot bdslot) {
3327 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3330 void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3331 BranchAndLinkShort(offset, bdslot);
3334 void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3335 const Operand& rt, BranchDelaySlot bdslot) {
3336 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3337 DCHECK(is_near);
3338 USE(is_near);
3341 void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3342 if (L->is_bound()) {
3343 if (is_near_branch(L)) {
3344 BranchAndLinkShort(L, bdslot);
3345 } else {
3346 BranchAndLinkLong(L, bdslot);
3347 }
3348 } else {
3349 if (is_trampoline_emitted()) {
3350 BranchAndLinkLong(L, bdslot);
3351 } else {
3352 BranchAndLinkShort(L, bdslot);
3353 }
3354 }
3355 }
3357 void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3358 const Operand& rt, BranchDelaySlot bdslot) {
3359 if (L->is_bound()) {
3360 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3361 Label skip;
3362 Condition neg_cond = NegateCondition(cond);
3363 BranchShort(&skip, neg_cond, rs, rt);
3364 BranchAndLinkLong(L, bdslot);
3365 bind(&skip);
3366 }
3367 } else {
3368 if (is_trampoline_emitted()) {
3369 Label skip;
3370 Condition neg_cond = NegateCondition(cond);
3371 BranchShort(&skip, neg_cond, rs, rt);
3372 BranchAndLinkLong(L, bdslot);
3373 bind(&skip);
3374 } else {
3375 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3376 }
3377 }
3378 }
3380 void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3381 BranchDelaySlot bdslot) {
3382 DCHECK(L == nullptr || offset == 0);
3383 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3384 bal(offset);
3387 if (bdslot == PROTECT) nop();
3388 }
3391 void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3392 DCHECK(L == nullptr || offset == 0);
3393 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3394 balc(offset);
3395 }
3397 void TurboAssembler::BranchAndLinkShort(int32_t offset,
3398 BranchDelaySlot bdslot) {
3399 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3400 DCHECK(is_int26(offset));
3401 BranchAndLinkShortHelperR6(offset, nullptr);
3402 } else {
3403 DCHECK(is_int16(offset));
3404 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3405 }
3406 }
3408 void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3409 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3410 BranchAndLinkShortHelperR6(0, L);
3411 } else {
3412 BranchAndLinkShortHelper(0, L, bdslot);
3413 }
3414 }
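// The r6 link variants mirror BranchShortHelperR6 but use the compact
// branch-and-link encodings (beqzalc, bltzalc, balc, ...); since those only
// test one register against zero, two-register comparisons are reduced with
// Subu/Slt/Sltu into scratch first.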
3416 bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3417 Condition cond, Register rs,
3418 const Operand& rt) {
3419 DCHECK(L == nullptr || offset == 0);
3420 UseScratchRegisterScope temps(this);
3421 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3422 OffsetSize bits = OffsetSize::kOffset16;
3424 BlockTrampolinePoolScope block_trampoline_pool(this);
3425 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3428 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3432 if (!is_near(L, bits)) return false;
3433 Subu(scratch, rs, rt);
3434 offset = GetOffset(offset, L, bits);
3435 beqzalc(scratch, offset);
3438 if (!is_near(L, bits)) return false;
3439 Subu(scratch, rs, rt);
3440 offset = GetOffset(offset, L, bits);
3441 bnezalc(scratch, offset);
3447 if (rs.code() == rt.rm().code()) {
3449 } else if (rs == zero_reg) {
3450 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3452 bltzalc(scratch, offset);
3453 } else if (IsZero(rt)) {
3454 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3455 bgtzalc(rs, offset);
3457 if (!is_near(L, bits)) return false;
3458 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3459 offset = GetOffset(offset, L, bits);
3460 bnezalc(scratch, offset);
3465 if (rs.code() == rt.rm().code()) {
3466 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3468 } else if (rs == zero_reg) {
3469 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3471 blezalc(scratch, offset);
3472 } else if (IsZero(rt)) {
3473 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3474 bgezalc(rs, offset);
3476 if (!is_near(L, bits)) return false;
3477 Slt(scratch, rs, rt);
3478 offset = GetOffset(offset, L, bits);
3479 beqzalc(scratch, offset);
3484 if (rs.code() == rt.rm().code()) {
3486 } else if (rs == zero_reg) {
3487 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3489 bgtzalc(scratch, offset);
3490 } else if (IsZero(rt)) {
3491 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3492 bltzalc(rs, offset);
3494 if (!is_near(L, bits)) return false;
3495 Slt(scratch, rs, rt);
3496 offset = GetOffset(offset, L, bits);
3497 bnezalc(scratch, offset);
3502 if (rs.code() == rt.rm().code()) {
3503 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3505 } else if (rs == zero_reg) {
3506 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3508 bgezalc(scratch, offset);
3509 } else if (IsZero(rt)) {
3510 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3511 blezalc(rs, offset);
3513 if (!is_near(L, bits)) return false;
3514 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3515 offset = GetOffset(offset, L, bits);
3516 beqzalc(scratch, offset);
3524 if (!is_near(L, bits)) return false;
3525 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3526 offset = GetOffset(offset, L, bits);
3527 bnezalc(scratch, offset);
3529 case Ugreater_equal:
3531 if (!is_near(L, bits)) return false;
3532 Sltu(scratch, rs, rt);
3533 offset = GetOffset(offset, L, bits);
3534 beqzalc(scratch, offset);
3538 if (!is_near(L, bits)) return false;
3539 Sltu(scratch, rs, rt);
3540 offset = GetOffset(offset, L, bits);
3541 bnezalc(scratch, offset);
3545 if (!is_near(L, bits)) return false;
3546 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3547 offset = GetOffset(offset, L, bits);
3548 beqzalc(scratch, offset);
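// Pre-r6 only has bltzal/bgezal as conditional link branches. The Slt/Sltu
// result (0 or 1) is decremented so it becomes -1 or 0, letting the sign
// test of bltzal/bgezal stand in for the original comparison.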
3559 bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3560 Condition cond, Register rs,
3561 const Operand& rt,
3562 BranchDelaySlot bdslot) {
3563 DCHECK(L == nullptr || offset == 0);
3564 if (!is_near(L, OffsetSize::kOffset16)) return false;
3566 Register scratch = t8;
3567 BlockTrampolinePoolScope block_trampoline_pool(this);
3571 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3575 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3577 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3581 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3583 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3589 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3590 addiu(scratch, scratch, -1);
3591 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3592 bgezal(scratch, offset);
3595 Slt(scratch, rs, rt);
3596 addiu(scratch, scratch, -1);
3597 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3598 bltzal(scratch, offset);
3601 Slt(scratch, rs, rt);
3602 addiu(scratch, scratch, -1);
3603 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3604 bgezal(scratch, offset);
3607 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3608 addiu(scratch, scratch, -1);
3609 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3610 bltzal(scratch, offset);
3615 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3616 addiu(scratch, scratch, -1);
3617 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3618 bgezal(scratch, offset);
3620 case Ugreater_equal:
3621 Sltu(scratch, rs, rt);
3622 addiu(scratch, scratch, -1);
3623 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3624 bltzal(scratch, offset);
3627 Sltu(scratch, rs, rt);
3628 addiu(scratch, scratch, -1);
3629 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3630 bgezal(scratch, offset);
3633 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3634 addiu(scratch, scratch, -1);
3635 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3636 bltzal(scratch, offset);
3644 if (bdslot == PROTECT)
3650 bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3651 Condition cond, Register rs,
3652 const Operand& rt,
3653 BranchDelaySlot bdslot) {
3654 BRANCH_ARGS_CHECK(cond, rs, rt);
3656 if (!L) {
3657 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3658 DCHECK(is_int26(offset));
3659 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3660 } else {
3661 DCHECK(is_int16(offset));
3662 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3663 }
3664 } else {
3665 DCHECK_EQ(offset, 0);
3666 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
3667 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3668 } else {
3669 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3670 }
3671 }
3672 }
3675 void TurboAssembler::LoadFromConstantsTable(Register destination,
3676 int constant_index) {
3677 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
3678 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
3679 lw(destination,
3680 FieldMemOperand(destination,
3681 FixedArray::kHeaderSize + constant_index * kPointerSize));
3682 }
3684 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
3685 lw(destination, MemOperand(kRootRegister, offset));
3688 void TurboAssembler::LoadRootRegisterOffset(Register destination,
3689 int32_t offset) {
3690 if (offset == 0) {
3691 Move(destination, kRootRegister);
3692 } else {
3693 Addu(destination, kRootRegister, offset);
3694 }
3695 }
3697 void TurboAssembler::Jump(Register target, int16_t offset, Condition cond,
3698 Register rs, const Operand& rt, BranchDelaySlot bd) {
3699 BlockTrampolinePoolScope block_trampoline_pool(this);
3700 DCHECK(is_int16(offset));
3701 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3702 if (cond == cc_always) {
3703 jic(target, offset);
3705 BRANCH_ARGS_CHECK(cond, rs, rt);
3706 Branch(2, NegateCondition(cond), rs, rt);
3707 jic(target, offset);
3711 Addu(target, target, offset);
3713 if (cond == cc_always) {
3716 BRANCH_ARGS_CHECK(cond, rs, rt);
3717 Branch(2, NegateCondition(cond), rs, rt);
3721 if (bd == PROTECT) nop();
3725 void TurboAssembler::Jump(Register target, Register base, int16_t offset,
3726 Condition cond, Register rs, const Operand& rt,
3727 BranchDelaySlot bd) {
3728 DCHECK(is_int16(offset));
3729 BlockTrampolinePoolScope block_trampoline_pool(this);
3730 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3731 if (cond == cc_always) {
3734 BRANCH_ARGS_CHECK(cond, rs, rt);
3735 Branch(2, NegateCondition(cond), rs, rt);
3740 Addu(target, base, offset);
3742 if (target != base) mov(target, base);
3744 if (cond == cc_always) {
3747 BRANCH_ARGS_CHECK(cond, rs, rt);
3748 Branch(2, NegateCondition(cond), rs, rt);
3752 if (bd == PROTECT) nop();
3756 void TurboAssembler::Jump(Register target, const Operand& offset,
3757 Condition cond, Register rs, const Operand& rt,
3758 BranchDelaySlot bd) {
3759 BlockTrampolinePoolScope block_trampoline_pool(this);
3760 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
3761 !is_int16(offset.immediate())) {
3762 uint32_t aui_offset, jic_offset;
3763 Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
3764 jic_offset);
3765 RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
3766 aui(target, target, aui_offset);
3767 if (cond == cc_always) {
3768 jic(target, jic_offset);
3770 BRANCH_ARGS_CHECK(cond, rs, rt);
3771 Branch(2, NegateCondition(cond), rs, rt);
3772 jic(target, jic_offset);
3775 if (offset.immediate() != 0) {
3776 Addu(target, target, offset);
3778 if (cond == cc_always) {
3781 BRANCH_ARGS_CHECK(cond, rs, rt);
3782 Branch(2, NegateCondition(cond), rs, rt);
3786 if (bd == PROTECT) nop();
3790 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
3791 Condition cond, Register rs, const Operand& rt,
3792 BranchDelaySlot bd) {
3793 BlockTrampolinePoolScope block_trampoline_pool(this);
3794 Label skip;
3795 if (cond != cc_always) {
3796 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3797 }
3800 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3801 uint32_t lui_offset, jic_offset;
3802 UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
3803 if (MustUseReg(rmode)) {
3804 RecordRelocInfo(rmode, target);
3806 lui(t9, lui_offset);
3807 Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
3809 li(t9, Operand(target, rmode));
3810 Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd);
3815 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
3816 Register rs, const Operand& rt, BranchDelaySlot bd) {
3817 DCHECK(!RelocInfo::IsCodeTarget(rmode));
3818 Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3821 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
3822 Condition cond, Register rs, const Operand& rt,
3823 BranchDelaySlot bd) {
3824 DCHECK(RelocInfo::IsCodeTarget(rmode));
3825 BlockTrampolinePoolScope block_trampoline_pool(this);
3826 if (FLAG_embedded_builtins) {
3827 int builtin_index = Builtins::kNoBuiltinId;
3828 bool target_is_isolate_independent_builtin =
3829 isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
3830 Builtins::IsIsolateIndependent(builtin_index);
3831 if (target_is_isolate_independent_builtin &&
3832 options().use_pc_relative_calls_and_jumps) {
3833 int32_t code_target_index = AddCodeTarget(code);
3834 Label skip;
3835 BlockTrampolinePoolScope block_trampoline_pool(this);
3836 if (cond != cc_always) {
3839 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3840 }
3841 GenPCRelativeJump(t8, t9, code_target_index,
3842 RelocInfo::RELATIVE_CODE_TARGET, bd);
3843 bind(&skip);
3844 return;
3845 } else if (root_array_available_ && options().isolate_independent_code) {
3846 IndirectLoadConstant(t9, code);
3847 Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
3848 return;
3849 } else if (target_is_isolate_independent_builtin &&
3850 options().inline_offheap_trampolines) {
3852 RecordCommentForOffHeapTrampoline(builtin_index);
3853 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
3854 EmbeddedData d = EmbeddedData::FromBlob();
3855 Address entry = d.InstructionStartOfBuiltin(builtin_index);
3856 li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
3857 Jump(t9, 0, cond, rs, rt, bd);
3861 Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
3865 void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
3866 Register rs, const Operand& rt, BranchDelaySlot bd) {
3867 DCHECK(is_int16(offset));
3868 BlockTrampolinePoolScope block_trampoline_pool(this);
3869 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3870 if (cond == cc_always) {
3871 jialc(target, offset);
3873 BRANCH_ARGS_CHECK(cond, rs, rt);
3874 Branch(2, NegateCondition(cond), rs, rt);
3875 jialc(target, offset);
3879 Addu(target, target, offset);
3881 if (cond == cc_always) {
3884 BRANCH_ARGS_CHECK(cond, rs, rt);
3885 Branch(2, NegateCondition(cond), rs, rt);
3889 if (bd == PROTECT) nop();
3894 void TurboAssembler::Call(Register target, Register base, int16_t offset,
3895 Condition cond, Register rs, const Operand& rt,
3896 BranchDelaySlot bd) {
3897 DCHECK(is_uint16(offset));
3898 BlockTrampolinePoolScope block_trampoline_pool(this);
3899 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
3900 if (cond == cc_always) {
3901 jialc(base, offset);
3903 BRANCH_ARGS_CHECK(cond, rs, rt);
3904 Branch(2, NegateCondition(cond), rs, rt);
3905 jialc(base, offset);
3909 Addu(target, base, offset);
3911 if (target != base) mov(target, base);
3913 if (cond == cc_always) {
3916 BRANCH_ARGS_CHECK(cond, rs, rt);
3917 Branch(2, NegateCondition(cond), rs, rt);
3921 if (bd == PROTECT) nop();
3925 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
3926 Register rs, const Operand& rt, BranchDelaySlot bd) {
3928 BlockTrampolinePoolScope block_trampoline_pool(this);
3929 int32_t target_int = static_cast<int32_t>(target);
3930 if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
3931 uint32_t lui_offset, jialc_offset;
3932 UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
3933 if (MustUseReg(rmode)) {
3934 RecordRelocInfo(rmode, target_int);
3936 lui(t9, lui_offset);
3937 Call(t9, jialc_offset, cond, rs, rt, bd);
3939 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
3940 Call(t9, 0, cond, rs, rt, bd);
3944 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
3945 Condition cond, Register rs, const Operand& rt,
3946 BranchDelaySlot bd) {
3947 BlockTrampolinePoolScope block_trampoline_pool(this);
3948 if (FLAG_embedded_builtins) {
3949 int builtin_index = Builtins::kNoBuiltinId;
3950 bool target_is_isolate_independent_builtin =
3951 isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
3952 Builtins::IsIsolateIndependent(builtin_index);
3953 if (target_is_isolate_independent_builtin &&
3954 options().use_pc_relative_calls_and_jumps) {
3955 int32_t code_target_index = AddCodeTarget(code);
3956 Label skip;
3957 BlockTrampolinePoolScope block_trampoline_pool(this);
3958 if (cond != cc_always) {
3959 Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
3960 }
3961 GenPCRelativeJumpAndLink(t8, code_target_index,
3962 RelocInfo::RELATIVE_CODE_TARGET, bd);
3963 bind(&skip);
3964 return;
3965 } else if (root_array_available_ && options().isolate_independent_code) {
3966 IndirectLoadConstant(t9, code);
3967 Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
3968 return;
3969 } else if (target_is_isolate_independent_builtin &&
3970 options().inline_offheap_trampolines) {
3972 RecordCommentForOffHeapTrampoline(builtin_index);
3973 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
3974 EmbeddedData d = EmbeddedData::FromBlob();
3975 Address entry = d.InstructionStartOfBuiltin(builtin_index);
3976 li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
3977 Call(t9, 0, cond, rs, rt, bd);
3981 DCHECK(RelocInfo::IsCodeTarget(rmode));
3982 AllowDeferredHandleDereference embedding_raw_address;
3983 Call(code.address(), rmode, cond, rs, rt, bd);
3986 void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
3987 BranchDelaySlot bd) {
3988 Jump(ra, 0, cond, rs, rt, bd);
3991 void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3992 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
3993 (!L->is_bound() || is_near_r6(L))) {
3994 BranchShortHelperR6(0, L);
3995 } else {
3997 BlockTrampolinePoolScope block_trampoline_pool(this);
3998 int32_t imm32;
3999 imm32 = branch_long_offset(L);
4000 GenPCRelativeJump(t8, t9, imm32, RelocInfo::NONE, bdslot);
4001 }
4002 }
4004 void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4005 if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT &&
4006 (!L->is_bound() || is_near_r6(L))) {
4007 BranchAndLinkShortHelperR6(0, L);
4008 } else {
4010 BlockTrampolinePoolScope block_trampoline_pool(this);
4011 int32_t imm32;
4012 imm32 = branch_long_offset(L);
4013 GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NONE, bdslot);
4014 }
4015 }
4017 void TurboAssembler::DropAndRet(int drop) {
4018 DCHECK(is_int16(drop * kPointerSize));
4019 Ret(USE_DELAY_SLOT);
4020 addiu(sp, sp, drop * kPointerSize);
4023 void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
4024 const Operand& r2) {
4026 Label skip;
4027 if (cond != cc_always) {
4028 Branch(&skip, NegateCondition(cond), r1, r2);
4029 }
4031 Drop(drop);
4032 Ret();
4034 if (cond != cc_always) {
4035 bind(&skip);
4036 }
4037 }
4039 void TurboAssembler::Drop(int count, Condition cond, Register reg,
4040 const Operand& op) {
4041 if (count <= 0) {
4042 return;
4043 }
4045 Label skip;
4047 if (cond != al) {
4048 Branch(&skip, NegateCondition(cond), reg, op);
4049 }
4051 Addu(sp, sp, Operand(count * kPointerSize));
4053 if (cond != al) {
4054 bind(&skip);
4055 }
4056 }
4060 void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
4063 if (scratch == no_reg) {
4064 Xor(reg1, reg1, Operand(reg2));
4065 Xor(reg2, reg2, Operand(reg1));
4066 Xor(reg1, reg1, Operand(reg2));
4067 } else {
4068 mov(scratch, reg1);
4069 mov(reg1, reg2);
4070 mov(reg2, scratch);
4071 }
4072 }
4074 void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
4076 void TurboAssembler::Push(Handle<HeapObject> handle) {
4077 UseScratchRegisterScope temps(this);
4078 Register scratch = temps.Acquire();
4079 li(scratch, Operand(handle));
4080 push(scratch);
4081 }
4083 void TurboAssembler::Push(Smi smi) {
4084 UseScratchRegisterScope temps(this);
4085 Register scratch = temps.Acquire();
4086 li(scratch, Operand(smi));
4087 push(scratch);
4088 }
4090 void MacroAssembler::MaybeDropFrames() {
4092 li(a1, ExternalReference::debug_restart_fp_address(isolate()));
4093 lw(a1, MemOperand(a1));
4094 Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
4095 ne, a1, Operand(zero_reg));
4101 void MacroAssembler::PushStackHandler() {
4103 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
4104 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4109 li(t2,
4110 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4111 lw(t1, MemOperand(t2));
4115 sw(sp, MemOperand(t2));
4119 void MacroAssembler::PopStackHandler() {
4120 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4122 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
4123 UseScratchRegisterScope temps(this);
4124 Register scratch = temps.Acquire();
4125 li(scratch,
4126 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4127 sw(a1, MemOperand(scratch));
4130 void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4131 const DoubleRegister src) {
4132 sub_d(dst, src, kDoubleRegZero);
4135 void TurboAssembler::MovFromFloatResult(DoubleRegister dst) {
4136 if (IsMipsSoftFloatABI) {
4137 if (kArchEndian == kLittle) {
4147 void TurboAssembler::MovFromFloatParameter(DoubleRegister dst) {
4148 if (IsMipsSoftFloatABI) {
4149 if (kArchEndian == kLittle) {
4159 void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
4160 if (!IsMipsSoftFloatABI) {
4163 if (kArchEndian == kLittle) {
4171 void TurboAssembler::MovToFloatResult(DoubleRegister src) {
4172 if (!IsMipsSoftFloatABI) {
4175 if (kArchEndian == kLittle) {
4183 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
4184 DoubleRegister src2) {
4185 if (!IsMipsSoftFloatABI) {
4187 DCHECK(src1 != f14);
4195 if (kArchEndian == kLittle) {
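// PrepareForTailCall removes the caller's frame and arguments before a tail
// call: dst_reg is pointed past the caller's parameters, src_reg past the
// callee's, ra/fp are restored from the standard frame, and the copy loop
// below slides the new argument block down so sp ends up where the caller's
// arguments used to start.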
4209 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4210 Register caller_args_count_reg,
4211 Register scratch0, Register scratch1) {
4213 if (callee_args_count.is_reg()) {
4214 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4217 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4224 Register dst_reg = scratch0;
4225 Lsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4226 Addu(dst_reg, dst_reg,
4227 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4229 Register src_reg = caller_args_count_reg;
4231 if (callee_args_count.is_reg()) {
4232 Lsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4233 Addu(src_reg, src_reg, Operand(kPointerSize));
4236 Operand((callee_args_count.immediate() + 1) * kPointerSize));
4239 if (FLAG_debug_code) {
4240 Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
4241 Operand(dst_reg));
4246 lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4247 lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4254 Register tmp_reg = scratch1;
4255 Label loop, entry;
4256 Branch(&entry);
4257 bind(&loop);
4258 Subu(src_reg, src_reg, Operand(kPointerSize));
4259 Subu(dst_reg, dst_reg, Operand(kPointerSize));
4260 lw(tmp_reg, MemOperand(src_reg));
4261 sw(tmp_reg, MemOperand(dst_reg));
4262 bind(&entry);
4263 Branch(&loop, ne, sp, Operand(src_reg));
4265 mov(sp, dst_reg);
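// InvokePrologue compares the expected and actual argument counts (a0/a2 by
// convention) and, on a mismatch that is not the kDontAdaptArgumentsSentinel,
// calls or jumps to the ArgumentsAdaptorTrampoline instead of falling through
// to the regular invocation.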
4269 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4270 const ParameterCount& actual, Label* done,
4271 bool* definitely_mismatches,
4272 InvokeFlag flag) {
4273 bool definitely_matches = false;
4274 *definitely_mismatches = false;
4275 Label regular_invoke;
4286 DCHECK(actual.is_immediate() || actual.reg() == a0);
4287 DCHECK(expected.is_immediate() || expected.reg() == a2);
4289 if (expected.is_immediate()) {
4290 DCHECK(actual.is_immediate());
4291 li(a0, Operand(actual.immediate()));
4292 if (expected.immediate() == actual.immediate()) {
4293 definitely_matches = true;
4294 } else {
4295 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4296 if (expected.immediate() == sentinel) {
4301 definitely_matches = true;
4302 } else {
4303 *definitely_mismatches = true;
4304 li(a2, Operand(expected.immediate()));
4305 }
4306 }
4307 } else if (actual.is_immediate()) {
4308 li(a0, Operand(actual.immediate()));
4309 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
4310 } else {
4311 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4312 }
4314 if (!definitely_matches) {
4315 Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
4316 if (flag == CALL_FUNCTION) {
4317 Call(adaptor, RelocInfo::CODE_TARGET);
4318 if (!*definitely_mismatches) {
4319 Branch(done);
4320 }
4321 } else {
4322 Jump(adaptor, RelocInfo::CODE_TARGET);
4323 }
4324 bind(&regular_invoke);
4325 }
4326 }
4328 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
4329 const ParameterCount& expected,
4330 const ParameterCount& actual) {
4331 Label skip_hook;
4332 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
4333 lb(t0, MemOperand(t0));
4334 Branch(&skip_hook, eq, t0, Operand(zero_reg));
4338 if (actual.is_reg()) {
4339 mov(t0, actual.reg());
4341 li(t0, actual.immediate());
4343 Lsa(at, sp, t0, kPointerSizeLog2);
4344 lw(t0, MemOperand(at));
4345 FrameScope frame(this,
4346 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4347 if (expected.is_reg()) {
4348 SmiTag(expected.reg());
4349 Push(expected.reg());
4351 if (actual.is_reg()) {
4352 SmiTag(actual.reg());
4355 if (new_target.is_valid()) {
4361 CallRuntime(Runtime::kDebugOnFunctionCall);
4363 if (new_target.is_valid()) {
4366 if (actual.is_reg()) {
4368 SmiUntag(actual.reg());
4370 if (expected.is_reg()) {
4371 Pop(expected.reg());
4372 SmiUntag(expected.reg());
4378 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4379 const ParameterCount& expected,
4380 const ParameterCount& actual,
4381 InvokeFlag flag) {
4383 DCHECK(flag == JUMP_FUNCTION || has_frame());
4384 DCHECK(function == a1);
4385 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
4388 CheckDebugHook(function, new_target, expected, actual);
4391 if (!new_target.is_valid()) {
4392 LoadRoot(a3, RootIndex::kUndefinedValue);
4396 bool definitely_mismatches = false;
4397 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
4398 if (!definitely_mismatches) {
4402 Register code = kJavaScriptCallCodeStartRegister;
4403 lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
4404 if (flag == CALL_FUNCTION) {
4405 Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
4406 Call(code);
4407 } else {
4408 DCHECK(flag == JUMP_FUNCTION);
4409 Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
4410 Jump(code);
4411 }
4418 void MacroAssembler::InvokeFunction(Register function, Register new_target,
4419 const ParameterCount& actual,
4420 InvokeFlag flag) {
4422 DCHECK(flag == JUMP_FUNCTION || has_frame());
4425 DCHECK(function == a1);
4426 Register expected_reg = a2;
4427 Register temp_reg = t0;
4429 lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4430 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4431 lhu(expected_reg,
4432 FieldMemOperand(temp_reg,
4433 SharedFunctionInfo::kFormalParameterCountOffset));
4435 ParameterCount expected(expected_reg);
4436 InvokeFunctionCode(function, new_target, expected, actual, flag);
4439 void MacroAssembler::InvokeFunction(Register function,
4440 const ParameterCount& expected,
4441 const ParameterCount& actual,
4442 InvokeFlag flag) {
4444 DCHECK(flag == JUMP_FUNCTION || has_frame());
4447 DCHECK(function == a1);
4450 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4452 InvokeFunctionCode(a1, no_reg, expected, actual, flag);
4459 void MacroAssembler::GetObjectType(Register object,
4460 Register map,
4461 Register type_reg) {
4462 lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
4463 lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4464 }
4470 void MacroAssembler::CallStub(CodeStub* stub,
4471 Condition cond,
4472 Register r1,
4473 const Operand& r2,
4474 BranchDelaySlot bd) {
4475 DCHECK(AllowThisStubCall(stub));
4476 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4477 }
4479 void MacroAssembler::TailCallStub(CodeStub* stub,
4480 Condition cond,
4481 Register r1,
4482 const Operand& r2,
4483 BranchDelaySlot bd) {
4484 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4485 }
4487 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
4488 return has_frame() || !stub->SometimesSetsUpAFrame();
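// AddOverflow/SubOverflow compute dst = left op right and set 'overflow'
// negative when signed overflow occurred: for addition the result's sign
// must differ from both operands' signs, which (dst ^ left) & (dst ^ right)
// captures in the sign bit; subtraction uses the analogous test.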
4491 void TurboAssembler::AddOverflow(Register dst, Register left,
4492 const Operand& right, Register overflow) {
4493 BlockTrampolinePoolScope block_trampoline_pool(this);
4494 Register right_reg = no_reg;
4495 Register scratch = t8;
4496 if (!right.is_reg()) {
4497 li(at, Operand(right));
4498 right_reg = at;
4499 } else {
4500 right_reg = right.rm();
4501 }
4503 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4504 overflow != scratch);
4505 DCHECK(overflow != left && overflow != right_reg);
4507 if (dst == left || dst == right_reg) {
4508 addu(scratch, left, right_reg);
4509 xor_(overflow, scratch, left);
4510 xor_(at, scratch, right_reg);
4511 and_(overflow, overflow, at);
4512 mov(dst, scratch);
4513 } else {
4514 addu(dst, left, right_reg);
4515 xor_(overflow, dst, left);
4516 xor_(at, dst, right_reg);
4517 and_(overflow, overflow, at);
4518 }
4519 }
4521 void TurboAssembler::SubOverflow(Register dst, Register left,
4522 const Operand& right, Register overflow) {
4523 BlockTrampolinePoolScope block_trampoline_pool(this);
4524 Register right_reg = no_reg;
4525 Register scratch = t8;
4526 if (!right.is_reg()) {
4527 li(at, Operand(right));
4528 right_reg = at;
4529 } else {
4530 right_reg = right.rm();
4531 }
4533 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4534 overflow != scratch);
4535 DCHECK(overflow != left && overflow != right_reg);
4537 if (dst == left || dst == right_reg) {
4538 subu(scratch, left, right_reg);
4539 xor_(overflow, left, scratch);
4540 xor_(at, left, right_reg);
4541 and_(overflow, overflow, at);
4542 mov(dst, scratch);
4543 } else {
4544 subu(dst, left, right_reg);
4545 xor_(overflow, left, dst);
4546 xor_(at, left, right_reg);
4547 and_(overflow, overflow, at);
4548 }
4549 }
4551 void TurboAssembler::MulOverflow(Register dst, Register left,
4552 const Operand& right, Register overflow) {
4553 BlockTrampolinePoolScope block_trampoline_pool(this);
4554 Register right_reg = no_reg;
4555 Register scratch = t8;
4556 Register scratch2 = t9;
4557 if (!right.is_reg()) {
4558 li(at, Operand(right));
4559 right_reg = at;
4560 } else {
4561 right_reg = right.rm();
4562 }
4564 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4565 overflow != scratch);
4566 DCHECK(overflow != left && overflow != right_reg);
4568 if (dst == left || dst == right_reg) {
4569 Mul(overflow, scratch2, left, right_reg);
4570 sra(scratch, scratch2, 31);
4571 xor_(overflow, overflow, scratch);
4572 mov(dst, scratch2);
4573 } else {
4574 Mul(overflow, dst, left, right_reg);
4575 sra(scratch, dst, 31);
4576 xor_(overflow, overflow, scratch);
4577 }
4578 }
4580 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
4581 Register centry) {
4582 const Runtime::Function* f = Runtime::FunctionForId(fid);
4587 PrepareCEntryArgs(f->nargs);
4588 PrepareCEntryFunction(ExternalReference::Create(f));
4589 DCHECK(!AreAliased(centry, a0, a1));
4590 Call(centry, Code::kHeaderSize - kHeapObjectTag);
4593 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4594 SaveFPRegsMode save_doubles) {
4600 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4606 PrepareCEntryArgs(num_arguments);
4607 PrepareCEntryFunction(ExternalReference::Create(f));
4608 Handle<Code> code =
4609 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
4610 Call(code, RelocInfo::CODE_TARGET);
4611 }
4613 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
4614 const Runtime::Function* function = Runtime::FunctionForId(fid);
4615 DCHECK_EQ(1, function->result_size);
4616 if (function->nargs >= 0) {
4617 PrepareCEntryArgs(function->nargs);
4619 JumpToExternalReference(ExternalReference::Create(fid));
4622 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4623 BranchDelaySlot bd,
4624 bool builtin_exit_frame) {
4625 PrepareCEntryFunction(builtin);
4626 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
4627 kArgvOnStack, builtin_exit_frame);
4628 Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
4631 void MacroAssembler::JumpToInstructionStream(Address entry) {
4632 li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
4633 Jump(kOffHeapTrampolineRegister);
4636 void MacroAssembler::LoadWeakValue(Register out, Register in,
4637 Label* target_if_cleared) {
4638 Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
4640 And(out, in, Operand(~kWeakHeapObjectMask));
4643 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4644 Register scratch1, Register scratch2) {
4645 DCHECK_GT(value, 0);
4646 if (FLAG_native_code_counters && counter->Enabled()) {
4647 li(scratch2, ExternalReference::Create(counter));
4648 lw(scratch1, MemOperand(scratch2));
4649 Addu(scratch1, scratch1, Operand(value));
4650 sw(scratch1, MemOperand(scratch2));
4655 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4656 Register scratch1, Register scratch2) {
4657 DCHECK_GT(value, 0);
4658 if (FLAG_native_code_counters && counter->Enabled()) {
4659 li(scratch2, ExternalReference::Create(counter));
4660 lw(scratch1, MemOperand(scratch2));
4661 Subu(scratch1, scratch1, Operand(value));
4662 sw(scratch1, MemOperand(scratch2));
4670 void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
4671 Operand rt) {
4672 if (emit_debug_code())
4673 Check(cc, reason, rs, rt);
4674 }
4676 void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
4677 Operand rt) {
4678 Label L;
4679 Branch(&L, cc, rs, rt);
4680 Abort(reason);
4682 bind(&L);
4683 }
4685 void TurboAssembler::Abort(AbortReason reason) {
4688 const char* msg = GetAbortReason(reason);
4690 RecordComment("Abort message: ");
4695 if (trap_on_abort()) {
4696 stop(msg);
4697 return;
4698 }
4700 if (should_abort_hard()) {
4702 FrameScope assume_frame(this, StackFrame::NONE);
4703 PrepareCallCFunction(0, a0);
4704 li(a0, Operand(static_cast<int>(reason)));
4705 CallCFunction(ExternalReference::abort_with_reason(), 1);
4709 Move(a0, Smi::FromInt(static_cast<int>(reason)));
4713 if (!has_frame()) {
4715 FrameScope scope(this, StackFrame::NONE);
4716 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
4717 } else {
4718 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
4719 }
4721 if (is_trampoline_pool_blocked()) {
4727 static const int kExpectedAbortInstructions = 10;
4728 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4729 DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
4730 while (abort_instructions++ < kExpectedAbortInstructions) {
4731 nop();
4732 }
4736 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
4737 lw(dst, NativeContextMemOperand());
4738 lw(dst, ContextMemOperand(dst, index));
4741 void TurboAssembler::StubPrologue(StackFrame::Type type) {
4742 UseScratchRegisterScope temps(this);
4743 Register scratch = temps.Acquire();
4744 li(scratch, Operand(StackFrame::TypeToMarker(type)));
4745 PushCommonFrame(scratch);
4748 void TurboAssembler::Prologue() { PushStandardFrame(a1); }
4750 void TurboAssembler::EnterFrame(StackFrame::Type type) {
4751 BlockTrampolinePoolScope block_trampoline_pool(this);
4752 int stack_offset = -3 * kPointerSize;
4753 const int fp_offset = 1 * kPointerSize;
4754 addiu(sp, sp, stack_offset);
4755 stack_offset = -stack_offset - kPointerSize;
4756 sw(ra, MemOperand(sp, stack_offset));
4757 stack_offset -= kPointerSize;
4758 sw(fp, MemOperand(sp, stack_offset));
4759 stack_offset -= kPointerSize;
4760 li(t9, Operand(StackFrame::TypeToMarker(type)));
4761 sw(t9, MemOperand(sp, stack_offset));
4763 DCHECK_EQ(stack_offset, 0);
4764 Addu(fp, sp, Operand(fp_offset));
4767 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
4768 addiu(sp, fp, 2 * kPointerSize);
4769 lw(ra, MemOperand(fp, 1 * kPointerSize));
4770 lw(fp, MemOperand(fp, 0 * kPointerSize));
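// EnterExitFrame builds the fixed ExitFrame layout (saved ra/fp, a frame
// type marker and a slot for the code object), publishes fp/cp to the
// isolate's CEntryFP/Context slots, optionally spills all FPU registers,
// and aligns sp before reserving the C argument slots.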
4773 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
4774 StackFrame::Type frame_type) {
4775 BlockTrampolinePoolScope block_trampoline_pool(this);
4776 DCHECK(frame_type == StackFrame::EXIT ||
4777 frame_type == StackFrame::BUILTIN_EXIT);
4780 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4781 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4782 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4795 addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
4796 sw(ra, MemOperand(sp, 4 * kPointerSize));
4797 sw(fp, MemOperand(sp, 3 * kPointerSize));
4799 UseScratchRegisterScope temps(this);
4800 Register scratch = temps.Acquire();
4801 li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
4802 sw(scratch, MemOperand(sp, 2 * kPointerSize));
4805 addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
4807 if (emit_debug_code()) {
4808 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4812 li(t8, CodeObject(), CONSTANT_SIZE);
4813 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4816 li(t8,
4817 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
4818 sw(fp, MemOperand(t8));
4819 li(t8,
4820 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
4821 sw(cp, MemOperand(t8));
4823 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4826 DCHECK_EQ(kDoubleSize, frame_alignment);
4827 if (frame_alignment > 0) {
4828 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4829 And(sp, sp, Operand(-frame_alignment));
4831 int space = FPURegister::kNumRegisters * kDoubleSize;
4832 Subu(sp, sp, Operand(space));
4834 for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
4835 FPURegister reg = FPURegister::from_code(i);
4836 Sdc1(reg, MemOperand(sp, i * kDoubleSize));
4843 DCHECK_GE(stack_space, 0);
4844 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4845 if (frame_alignment > 0) {
4846 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4847 And(sp, sp, Operand(-frame_alignment));
4852 UseScratchRegisterScope temps(this);
4853 Register scratch = temps.Acquire();
4854 addiu(scratch, sp, kPointerSize);
4855 sw(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
4858 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
4860 bool argument_count_is_length) {
4861 BlockTrampolinePoolScope block_trampoline_pool(this);
4865 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4866 for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
4867 FPURegister reg = FPURegister::from_code(i);
4868 Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4873 li(t8,
4874 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
4875 sw(zero_reg, MemOperand(t8));
4878 li(t8,
4879 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
4880 lw(cp, MemOperand(t8));
4883 li(t8,
4884 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
4885 sw(a3, MemOperand(t8));
4890 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4891 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4893 if (argument_count.is_valid()) {
4894 if (argument_count_is_length) {
4895 addu(sp, sp, argument_count);
4897 Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
4902 Ret(USE_DELAY_SLOT);
4908 int TurboAssembler::ActivationFrameAlignment() {
4909 #if V8_HOST_ARCH_MIPS
4914 return base::OS::ActivationFrameAlignment();
4915 #else // V8_HOST_ARCH_MIPS
4920 return FLAG_sim_stack_alignment;
4921 #endif // V8_HOST_ARCH_MIPS
4925 void MacroAssembler::AssertStackIsAligned() {
4926 if (emit_debug_code()) {
4927 const int frame_alignment = ActivationFrameAlignment();
4928 const int frame_alignment_mask = frame_alignment - 1;
4930 if (frame_alignment > kPointerSize) {
4931 Label alignment_as_expected;
4932 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
4933 UseScratchRegisterScope temps(this);
4934 Register scratch = temps.Acquire();
4935 andi(scratch, sp, frame_alignment_mask);
4936 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
4938 stop("Unexpected stack alignment");
4939 bind(&alignment_as_expected);
4944 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
4946 Label* smi_case) {
4947 UseScratchRegisterScope temps(this);
4948 Register scratch = temps.Acquire();
4949 JumpIfSmi(src, smi_case, scratch, USE_DELAY_SLOT);
4950 SmiUntag(dst, src);
4951 }
4953 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
4954 Register scratch, BranchDelaySlot bd) {
4955 DCHECK_EQ(0, kSmiTag);
4956 andi(scratch, value, kSmiTagMask);
4957 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4960 void MacroAssembler::JumpIfNotSmi(Register value,
4961 Label* not_smi_label,
4962 Register scratch,
4963 BranchDelaySlot bd) {
4964 DCHECK_EQ(0, kSmiTag);
4965 andi(scratch, value, kSmiTagMask);
4966 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4970 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4971 Register reg2,
4972 Label* on_either_smi) {
4973 STATIC_ASSERT(kSmiTag == 0);
4974 DCHECK_EQ(1, kSmiTagMask);
4976 UseScratchRegisterScope temps(this);
4977 Register scratch = temps.Acquire();
4978 and_(scratch, reg1, reg2);
4979 JumpIfSmi(scratch, on_either_smi);
4982 void MacroAssembler::AssertNotSmi(Register object) {
4983 if (emit_debug_code()) {
4984 STATIC_ASSERT(kSmiTag == 0);
4985 UseScratchRegisterScope temps(this);
4986 Register scratch = temps.Acquire();
4987 andi(scratch, object, kSmiTagMask);
4988 Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
4993 void MacroAssembler::AssertSmi(Register object) {
4994 if (emit_debug_code()) {
4995 STATIC_ASSERT(kSmiTag == 0);
4996 UseScratchRegisterScope temps(this);
4997 Register scratch = temps.Acquire();
4998 andi(scratch, object, kSmiTagMask);
4999 Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
5003 void MacroAssembler::AssertConstructor(Register object) {
5004 if (emit_debug_code()) {
5005 BlockTrampolinePoolScope block_trampoline_pool(this);
5006 STATIC_ASSERT(kSmiTag == 0);
5007 SmiTst(object, t8);
5008 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
5009 Operand(zero_reg));
5011 lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
5012 lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
5013 And(t8, t8, Operand(Map::IsConstructorBit::kMask));
5014 Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
5018 void MacroAssembler::AssertFunction(Register object) {
5019 if (emit_debug_code()) {
5020 BlockTrampolinePoolScope block_trampoline_pool(this);
5021 STATIC_ASSERT(kSmiTag == 0);
5022 SmiTst(object, t8);
5023 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
5024 Operand(zero_reg));
5025 GetObjectType(object, t8, t8);
5026 Check(eq, AbortReason::kOperandIsNotAFunction, t8,
5027 Operand(JS_FUNCTION_TYPE));
5032 void MacroAssembler::AssertBoundFunction(Register object) {
5033 if (emit_debug_code()) {
5034 BlockTrampolinePoolScope block_trampoline_pool(this);
5035 STATIC_ASSERT(kSmiTag == 0);
5036 SmiTst(object, t8);
5037 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
5038 Operand(zero_reg));
5039 GetObjectType(object, t8, t8);
5040 Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
5041 Operand(JS_BOUND_FUNCTION_TYPE));
5045 void MacroAssembler::AssertGeneratorObject(Register object) {
5046 if (!emit_debug_code()) return;
5047 BlockTrampolinePoolScope block_trampoline_pool(this);
5048 STATIC_ASSERT(kSmiTag == 0);
5049 SmiTst(object, t8);
5050 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
5051 Operand(zero_reg));
5053 GetObjectType(object, t8, t8);
5055 Label done;
5058 Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
5061 Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
5064 Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
5066 Abort(AbortReason::kOperandIsNotAGeneratorObject);
5071 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
5072 Register scratch) {
5073 if (emit_debug_code()) {
5074 Label done_checking;
5075 AssertNotSmi(object);
5076 LoadRoot(scratch, RootIndex::kUndefinedValue);
5077 Branch(&done_checking, eq, object, Operand(scratch));
5078 GetObjectType(object, scratch, scratch);
5079 Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
5080 Operand(ALLOCATION_SITE_TYPE));
5081 bind(&done_checking);
5082 }
5083 }
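// The Float32/Float64 Min/Max helpers bail out to 'out_of_line' when either
// input is NaN. Off r6 they cannot use min_s/max_s, so ordered compares pick
// the larger/smaller operand, and when the operands compare equal the sign
// bits are inspected so that +-0 resolves correctly (max prefers +0, min
// prefers -0). The out-of-line path adds the operands to propagate the NaN.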
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  if (src1 == src2) {
    Move_s(dst, src1);
    return;
  }

  // Bail out to the caller's out-of-line path if either operand is NaN.
  CompareIsNanF32(src1, src2);
  BranchTrueF(out_of_line);

  if (IsMipsArchVariant(kMips32r6)) {
    max_s(dst, src1, src2);
  } else {
    Label return_left, return_right, done;
    CompareF32(OLT, src1, src2);
    BranchTrueShortF(&return_right);
    CompareF32(OLT, src2, src1);
    BranchTrueShortF(&return_left);
    // Operands compare equal but may differ in sign: max(-0, +0) is +0, so
    // return the left operand iff its bit pattern is that of +0.0f.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      mfc1(t8, src1);
      Branch(&return_left, eq, t8, Operand(zero_reg));
      Branch(&return_right);
    }
    bind(&return_right);
    if (src2 != dst) Move_s(dst, src2);
    Branch(&done);
    bind(&return_left);
    if (src1 != dst) Move_s(dst, src1);
    bind(&done);
  }
}

void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_s(dst, src1, src2);
}

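// The out-of-line path is only entered when CompareIsNanF32/F64 found a NaN
// operand; the add then propagates that NaN into dst (IEEE-754 addition with
// a NaN input produces NaN), which is the required result for the Min and
// Max helpers alike.
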
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  if (src1 == src2) {
    Move_s(dst, src1);
    return;
  }

  // Bail out to the caller's out-of-line path if either operand is NaN.
  CompareIsNanF32(src1, src2);
  BranchTrueF(out_of_line);

  if (IsMipsArchVariant(kMips32r6)) {
    min_s(dst, src1, src2);
  } else {
    Label return_left, return_right, done;
    CompareF32(OLT, src1, src2);
    BranchTrueShortF(&return_left);
    CompareF32(OLT, src2, src1);
    BranchTrueShortF(&return_right);
    // Operands compare equal but may differ in sign: min(-0, +0) is -0, so
    // return the right operand iff the left's bit pattern is that of +0.0f.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      mfc1(t8, src1);
      Branch(&return_right, eq, t8, Operand(zero_reg));
      Branch(&return_left);
    }
    bind(&return_right);
    if (src2 != dst) Move_s(dst, src2);
    Branch(&done);
    bind(&return_left);
    if (src1 != dst) Move_s(dst, src1);
    bind(&done);
  }
}

void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_s(dst, src1, src2);
}

void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
                                DoubleRegister src2, Label* out_of_line) {
  if (src1 == src2) {
    Move_d(dst, src1);
    return;
  }

  // Bail out to the caller's out-of-line path if either operand is NaN.
  CompareIsNanF64(src1, src2);
  BranchTrueF(out_of_line);

  if (IsMipsArchVariant(kMips32r6)) {
    max_d(dst, src1, src2);
  } else {
    Label return_left, return_right, done;
    CompareF64(OLT, src1, src2);
    BranchTrueShortF(&return_right);
    CompareF64(OLT, src2, src1);
    BranchTrueShortF(&return_left);
    // Operands compare equal but may differ in sign: max(-0, +0) is +0.
    // Inspect the sign via the high word of src1.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Mfhc1(t8, src1);
      Branch(&return_left, eq, t8, Operand(zero_reg));
      Branch(&return_right);
    }
    bind(&return_right);
    if (src2 != dst) Move_d(dst, src2);
    Branch(&done);
    bind(&return_left);
    if (src1 != dst) Move_d(dst, src1);
    bind(&done);
  }
}

void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst,
                                         DoubleRegister src1,
                                         DoubleRegister src2) {
  add_d(dst, src1, src2);
}

void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
                                DoubleRegister src2, Label* out_of_line) {
  if (src1 == src2) {
    Move_d(dst, src1);
    return;
  }

  // Bail out to the caller's out-of-line path if either operand is NaN.
  CompareIsNanF64(src1, src2);
  BranchTrueF(out_of_line);

  if (IsMipsArchVariant(kMips32r6)) {
    min_d(dst, src1, src2);
  } else {
    Label return_left, return_right, done;
    CompareF64(OLT, src1, src2);
    BranchTrueShortF(&return_left);
    CompareF64(OLT, src2, src1);
    BranchTrueShortF(&return_right);
    // Operands compare equal but may differ in sign: min(-0, +0) is -0.
    // Inspect the sign via the high word of src1.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Mfhc1(t8, src1);
      Branch(&return_right, eq, t8, Operand(zero_reg));
      Branch(&return_left);
    }
    bind(&return_right);
    if (src2 != dst) Move_d(dst, src2);
    Branch(&done);
    bind(&return_left);
    if (src1 != dst) Move_d(dst, src1);
    bind(&done);
  }
}

void TurboAssembler::Float64MinOutOfLine(DoubleRegister dst,
                                         DoubleRegister src1,
                                         DoubleRegister src2) {
  add_d(dst, src1, src2);
}

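// Worked example for the pre-r6 sign handling in the Min/Max helpers above:
// with src1 = -0.0 and src2 = +0.0, neither OLT compare is true (the two
// zeros compare equal), so control falls through to the integer inspection
// of src1's sign bits. For Float64Min, Mfhc1 then yields 0x80000000, which
// is nonzero, so return_left is taken and dst = -0.0, matching the IEEE-754
// minNum convention.
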
static const int kRegisterPassedArguments = 4;

int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  num_reg_arguments += 2 * num_double_arguments;

  // Up to four simple arguments are passed in registers a0..a3; the rest
  // overflow onto the stack.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}

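// A worked example of the computation above (assuming O32's four reserved
// argument slots, kCArgSlotCount == 4): two integer arguments plus one
// double count as 2 + 2 * 1 = 4 register arguments. That does not exceed
// kRegisterPassedArguments, so no overflow words are needed and
// CalculateStackPassedWords(2, 1) returns just the 4 reserved slots. With
// six integer arguments, the two that overflow a0..a3 add two more words,
// giving 2 + 4 = 6.
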
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Up to four simple arguments are passed in registers a0..a3. Those four
  // arguments must have reserved argument slots on the stack for mips, even
  // though the slots are not normally used. Remaining arguments are pushed
  // on the stack, above (at a higher address than) the argument slots.
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make the stack end at the alignment boundary and make room for the
    // stack arguments plus the original value of sp.
    mov(scratch, sp);
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

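// And(sp, sp, Operand(-frame_alignment)) rounds sp down to the alignment
// boundary: for frame_alignment == 8, -8 is ...11111000 in two's complement,
// so the AND clears the low three bits of sp. The pre-alignment sp is saved
// in the slot just above the argument area and restored after the call by
// CallCFunctionHelper's lw(sp, ...).
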
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  // The Linux/MIPS convention demands that register t9 contains the address
  // of the called function, to support position-independent code.
  BlockTrampolinePoolScope block_trampoline_pool(this);
  li(t9, function);
  CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

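// Typical calling sequence (a sketch; `ref` stands for whichever
// ExternalReference the caller targets and is not defined here):
//
//   PrepareCallCFunction(2, 0, scratch);  // Reserve and align the arg area.
//   // ...move the two integer arguments into a0 and a1...
//   CallCFunction(ref, 2);                // Call, then pop the arg area.
//
// PrepareCallCFunction sets up the outgoing argument area that the helper
// below assumes, and the helper tears it down after the call returns.
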
void TurboAssembler::CallCFunctionHelper(Register function_base,
                                         int16_t function_offset,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check
  // which provides more information. The argument slots are presumed to
  // have been set up by PrepareCallCFunction; the C function must be called
  // via t9, per the MIPS ABI.

#if V8_HOST_ARCH_MIPS
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
      Label alignment_as_expected;
      {
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        And(scratch, sp, Operand(frame_alignment_mask));
        Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
      }
      // Don't use Check here, as it will call Runtime_Abort, possibly
      // re-entering here.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The called function cannot cause a GC or allow
  // preemption, so the return address in the link register stays correct.
  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (function_base != t9) {
      mov(t9, function_base);
      function_base = t9;
    }

    if (function_offset != 0) {
      addiu(t9, t9, function_offset);
      function_offset = 0;
    }

    Call(function_base, function_offset);
  }

  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

#undef BRANCH_ARGS_CHECK

void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met) {
  And(scratch, object, Operand(~kPageAlignmentMask));
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}

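// Because MemoryChunks are allocated on page-aligned boundaries, masking off
// the low kPageAlignmentMask bits of any pointer into an object yields the
// base address of its chunk, where the flags word lives at a fixed offset
// (MemoryChunk::kFlagsOffset). The second And isolates the queried flag bits
// so the branch can test them against zero.
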
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    // Skip any register the caller asked us to avoid.
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
}

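// Usage sketch: GetRegisterThatIsNotOneOf(obj, addr) returns some allocatable
// general register distinct from obj and addr, e.g. for use as an extra
// scratch register; trailing parameters left at their no_reg defaults simply
// exclude nothing.
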
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  // This push on ra and the pop below together ensure that ra is restored
  // immediately after being clobbered by the pc-capturing instruction.
  push(ra);

  // Materialize the current pc in ra: on r6, addiupc computes it directly;
  // otherwise nal (branch-and-link to the next instruction) places the
  // address following its delay slot into ra.
  if (IsMipsArchVariant(kMips32r6)) {
    addiupc(ra, 1);
  } else {
    nal();
    nop();  // Branch delay slot.
  }
  int pc = pc_offset();
  li(dst, pc);
  subu(dst, ra, dst);

  pop(ra);  // Restore ra.
}

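// Worked example of the subtraction above: if the pc-capturing instruction
// executes at absolute address code_start + 0x40, ra receives the address of
// the instruction that follows it, and pc_offset() records that same
// position relative to the buffer start, so dst = ra - pc == code_start.
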
void TurboAssembler::ResetSpeculationPoisonRegister() {
  li(kSpeculationPoisonRegister, -1);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS