#if V8_TARGET_ARCH_MIPS64

#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

#include "src/mips64/macro-assembler-mips64.h"

MacroAssembler::MacroAssembler(Isolate* isolate,
                               const AssemblerOptions& options, void* buffer,
                               int size, CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, options, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}
static inline bool IsZero(const Operand& rt) {
  if (rt.is_reg()) {
    return rt.rm() == zero_reg;
  } else {
    return rt.immediate() == 0;
  }
}
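// Helpers for saving and restoring the JS caller-saved register set around
// calls. Up to three exclusion registers can be kept out of the push/pop
// sequences when the caller has already preserved them elsewhere.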
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
  }

  return bytes;
}
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPush(list);
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    MultiPushFPU(kCallerSavedFPU);
    bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
  }

  return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    MultiPopFPU(kCallerSavedFPU);
    bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPop(list);
  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}

void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                              Condition cond, Register src1,
                              const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Push(ra, fp, marker_reg);
    Daddu(fp, sp, Operand(kPointerSize));
  } else {
    Push(ra, fp);
    mov(fp, sp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  int offset = -StandardFrameConstants::kContextOffset;
  if (function_reg.is_valid()) {
    Push(ra, fp, cp, function_reg);
    offset += kPointerSize;
  } else {
    Push(ra, fp, cp);
  }
  Daddu(fp, sp, Operand(offset));
}
void MacroAssembler::PushSafepointRegisters() {
  // Adjust the stack for unsaved registers so the safepoint sees the full
  // kNumSafepointRegisters block.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  if (num_unsaved > 0) {
    Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  return kSafepointRegisterStackIndexMap[reg_code];
}
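// Write-barrier support: RecordWriteField computes the slot address for a
// tagged field at a fixed offset and then defers to RecordWrite, which skips
// the barrier for Smis and for pages that do not require it.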
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      RAStatus ra_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Daddu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Label ok;
    And(t8, dst, Operand(kPointerSize - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}
void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPush(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPop(regs);
}
void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);
  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  Push(object);
  Push(address);

  Pop(slot_parameter);
  Pop(object_parameter);

  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  if (code_target.is_null()) {
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}
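// RecordWrite emits the generational/incremental-marking write barrier for a
// store of |value| into the slot at |address| inside |object|.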
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, RAStatus ra_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));

  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    Ld(scratch, MemOperand(address));
    Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
           Operand(value));
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }

  bind(&done);

  {
    // Count the number of write barriers in generated code.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    isolate()->counters()->write_barriers_static()->Increment();
    IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
                     scratch, value);
  }

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}
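// The arithmetic macro instructions below accept either a register or an
// immediate Operand. Immediates that do not fit the 16-bit instruction form
// are first materialized into a scratch register via li().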
void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      addiu(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      addu(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    daddu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      daddiu(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      daddu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    DCHECK(is_int32(rt.immediate()));
    if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
      // No subiu instruction; use addiu(x, y, -imm).
      addiu(rd, rs, static_cast<int32_t>(-rt.immediate()));
    } else {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      if (-rt.immediate() >> 16 == 0 && !MustUseReg(rt.rmode())) {
        // Use load -imm and addu when loading -imm generates one instruction.
        li(scratch, -rt.immediate());
        addu(rd, rs, scratch);
      } else {
        // li handles the relocation.
        li(scratch, rt);
        subu(rd, rs, scratch);
      }
    }
  }
}

void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dsubu(rd, rs, rt.rm());
  } else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
    // No dsubiu instruction; use daddiu(x, y, -imm).
    daddiu(rd, rs, static_cast<int32_t>(-rt.immediate()));
  } else {
    int li_count = InstrCountForLi64Bit(rt.immediate());
    int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
    if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
      // Use load -imm and daddu when loading -imm is cheaper.
      DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      li(scratch, Operand(-rt.immediate()));
      Daddu(rd, rs, scratch);
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      li(scratch, rt);
      dsubu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    mul(rd, rs, scratch);
  }
}

void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      mult(rs, scratch);
      mfhi(rd);
    } else {
      muh(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      multu(rs, scratch);
      mfhi(rd);
    } else {
      muhu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mflo(rd);
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, scratch);
    } else {
      dmult(rs, scratch);
      mflo(rd);
    }
  }
}

void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mfhi(rd);
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, scratch);
    } else {
      dmult(rs, scratch);
      mfhi(rd);
    }
  }
}
void TurboAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    mult(rs, scratch);
  }
}

void TurboAssembler::Dmult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmult(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    dmult(rs, scratch);
  }
}

void TurboAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    multu(rs, scratch);
  }
}

void TurboAssembler::Dmultu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmultu(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    dmultu(rs, scratch);
  }
}
void TurboAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    div(rs, scratch);
  }
}

void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, scratch);
      mflo(res);
    } else {
      div(res, rs, scratch);
    }
  }
}
void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, scratch);
      mfhi(rd);
    } else {
      mod(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, scratch);
      mfhi(rd);
    } else {
      modu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Ddiv(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddiv(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    ddiv(rs, scratch);
  }
}

void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mflo(rd);
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      ddiv(rs, scratch);
      mflo(rd);
    }
  } else {
    if (rt.is_reg()) {
      ddiv(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      ddiv(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    divu(rs, scratch);
  }
}

void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, scratch);
      mflo(res);
    } else {
      divu(res, rs, scratch);
    }
  }
}
void TurboAssembler::Ddivu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddivu(rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    ddivu(rs, scratch);
  }
}

void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      ddivu(rs, rt.rm());
      mflo(res);
    } else {
      ddivu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    if (kArchVariant != kMips64r6) {
      ddivu(rs, scratch);
      mflo(res);
    } else {
      ddivu(res, rs, scratch);
    }
  }
}
void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      ddiv(rs, scratch);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmod(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      dmod(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddivu(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      ddivu(rs, scratch);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmodu(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      dmodu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      andi(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      and_(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      ori(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      or_(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      xori(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      DCHECK(rs != scratch);
      li(scratch, rt);
      xor_(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(rs != scratch);
    li(scratch, rt);
    nor(rd, rs, scratch);
  }
}
void TurboAssembler::Neg(Register rs, const Operand& rt) {
  dsubu(rs, zero_reg, rt.rm());
}

void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
      slti(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
      DCHECK(rs != scratch);
      li(scratch, rt);
      slt(rd, rs, scratch);
    }
  }
}

void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    const uint64_t int16_min = std::numeric_limits<int16_t>::min();
    if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
      // Imm range is: [0, 32767].
      sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
    } else if (is_uint15(rt.immediate() - int16_min) &&
               !MustUseReg(rt.rmode())) {
      // Imm range is: [max_unsigned-32767, max_unsigned].
      sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
    } else {
      // li handles the relocation.
      UseScratchRegisterScope temps(this);
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
      DCHECK(rs != scratch);
      li(scratch, rt);
      sltu(rd, rs, scratch);
    }
  }
}
void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(rs != scratch);
    li(scratch, rt);
    slt(rd, scratch, rs);
  }
  xori(rd, rd, 1);
}

void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(rs != scratch);
    li(scratch, rt);
    sltu(rd, scratch, rs);
  }
  xori(rd, rd, 1);
}

void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
  Slt(rd, rs, rt);
  xori(rd, rd, 1);
}

void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
  Sltu(rd, rs, rt);
  xori(rd, rd, 1);
}

void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(rs != scratch);
    li(scratch, rt);
    slt(rd, scratch, rs);
  }
}

void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rt.rm(), rs);
  } else {
    // li handles the relocation.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
    BlockTrampolinePoolScope block_trampoline_pool(this);
    DCHECK(rs != scratch);
    li(scratch, rt);
    sltu(rd, scratch, rs);
  }
}
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    rotrv(rd, rs, rt.rm());
  } else {
    int64_t ror_value = rt.immediate() % 32;
    if (ror_value < 0) {
      ror_value += 32;
    }
    rotr(rd, rs, ror_value);
  }
}

void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    drotrv(rd, rs, rt.rm());
  } else {
    int64_t dror_value = rt.immediate() % 64;
    if (dror_value < 0) dror_value += 64;
    if (dror_value <= 31) {
      drotr(rd, rs, dror_value);
    } else {
      drotr32(rd, rs, dror_value - 32);
    }
  }
}
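// Lsa/Dlsa compute rt + (rs << sa). On r6 the dedicated (d)lsa instruction is
// used for shift amounts up to 4; otherwise the shift and the add are emitted
// separately through a temporary register.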
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  pref(hint, rs);
}

void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                         Register scratch) {
  DCHECK(sa >= 1 && sa <= 31);
  if (kArchVariant == kMips64r6 && sa <= 4) {
    lsa(rd, rt, rs, sa - 1);
  } else {
    Register tmp = rd == rt ? scratch : rd;
    DCHECK(tmp != rt);
    sll(tmp, rs, sa);
    Addu(rd, rt, tmp);
  }
}

void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
                          Register scratch) {
  DCHECK(sa >= 1 && sa <= 31);
  if (kArchVariant == kMips64r6 && sa <= 4) {
    dlsa(rd, rt, rs, sa - 1);
  } else {
    Register tmp = rd == rt ? scratch : rd;
    DCHECK(tmp != rt);
    dsll(tmp, rs, sa);
    Daddu(rd, rt, tmp);
  }
}
void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bnvc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bovc(rs, rt, L);
  }
}

void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bovc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bnvc(rs, rt, L);
  }
}
void TurboAssembler::ByteSwapSigned(Register dest, Register src,
                                    int operand_size) {
  DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
  DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
  if (operand_size == 2) {
    wsbh(dest, src);
    seh(dest, dest);
  } else if (operand_size == 4) {
    wsbh(dest, src);
    rotr(dest, dest, 16);
  } else {
    dsbh(dest, src);
    dshd(dest, dest);
  }
}

void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
                                      int operand_size) {
  DCHECK(operand_size == 2 || operand_size == 4);
  if (operand_size == 2) {
    wsbh(dest, src);
    andi(dest, dest, 0xFFFF);
  } else {
    wsbh(dest, src);
    rotr(dest, dest, 16);
    dinsu_(dest, zero_reg, 32, 32);
  }
}
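// Unaligned memory accesses: r6 cores handle unaligned loads and stores
// directly, so these macros simply forward to the aligned instructions; on r2
// they are synthesized from lwr/lwl, ldr/ldl, swr/swl and sdr/sdl pairs.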
void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
  DCHECK(rs.rm() != at);
  if (kArchVariant == kMips64r6) {
    Lw(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 3 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
    if (rd != source.rm()) {
      lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
      lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
    } else {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
      lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
      mov(rd, scratch);
    }
  }
}
void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    Lwu(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    Ulw(rd, rs);
    Dext(rd, rd, 0, 32);
  }
}

void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
  DCHECK(rs.rm() != at);
  DCHECK(rd != rs.rm());
  if (kArchVariant == kMips64r6) {
    Sw(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 3 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
    swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
    swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
  }
}
void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
  DCHECK(rs.rm() != at);
  if (kArchVariant == kMips64r6) {
    Lh(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 1 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      Lb(rd, MemOperand(source.rm(), source.offset() + 1));
      Lbu(scratch, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
      Lb(rd, source);
      Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
#endif
    } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      Lbu(scratch, source);
      Lb(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
      Lb(rd, source);
#endif
    }
    dsll(rd, rd, 8);
    or_(rd, rd, scratch);
  }
}

void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
  DCHECK(rs.rm() != at);
  if (kArchVariant == kMips64r6) {
    Lhu(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 1 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
      Lbu(scratch, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
      Lbu(rd, source);
      Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
#endif
    } else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      Lbu(scratch, source);
      Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
      Lbu(rd, source);
#endif
    }
    dsll(rd, rd, 8);
    or_(rd, rd, scratch);
  }
}
void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
  DCHECK(rs.rm() != at);
  DCHECK(rs.rm() != scratch);
  DCHECK(scratch != at);
  if (kArchVariant == kMips64r6) {
    Sh(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 1 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);

    if (scratch != rd) {
      mov(scratch, rd);
    }

#if defined(V8_TARGET_LITTLE_ENDIAN)
    Sb(scratch, source);
    srl(scratch, scratch, 8);
    Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
    Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
    srl(scratch, scratch, 8);
    Sb(scratch, source);
#endif
  }
}
void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
  DCHECK(rs.rm() != at);
  if (kArchVariant == kMips64r6) {
    Ld(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 7 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
    if (rd != source.rm()) {
      ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
      ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
    } else {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();
      ldr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
      ldl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
      mov(rd, scratch);
    }
  }
}

// Load a 32-bit word pair into a 64-bit register: first word in the low bits,
// second word in the high bits.
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
                                  Register scratch) {
  Lwu(rd, rs);
  Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
  dsll32(scratch, scratch, 0);
  Daddu(rd, rd, scratch);
}
void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
  DCHECK(rs.rm() != at);
  if (kArchVariant == kMips64r6) {
    Sd(rd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
    MemOperand source = rs;
    // Adjust the offset for the two accesses so offset + 7 fits into int16_t.
    AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
    sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset));
    sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset));
  }
}

// Store a 64-bit register as two consecutive 32-bit words.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
                                   Register scratch) {
  Sw(rd, rs);
  dsrl32(scratch, rd, 0);
  Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (kArchVariant == kMips64r6) {
    Lwc1(fd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    Ulw(scratch, rs);
    mtc1(scratch, fd);
  }
}

void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (kArchVariant == kMips64r6) {
    Swc1(fd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    mfc1(scratch, fd);
    Usw(scratch, rs);
  }
}

void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(scratch != at);
  if (kArchVariant == kMips64r6) {
    Ldc1(fd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    Uld(scratch, rs);
    dmtc1(scratch, fd);
  }
}

void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(scratch != at);
  if (kArchVariant == kMips64r6) {
    Sdc1(fd, rs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    dmfc1(scratch, fd);
    Usd(scratch, rs);
  }
}
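// Aligned load/store wrappers: AdjustBaseAndOffset rewrites the MemOperand
// (possibly through a scratch base register) so that the final offset fits
// into the instruction's signed 16-bit field.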
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  lb(rd, source);
}

void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  lbu(rd, source);
}

void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  sb(rd, source);
}

void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  lh(rd, source);
}

void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  lhu(rd, source);
}

void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  sh(rd, source);
}

void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  lw(rd, source);
}

void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  lwu(rd, source);
}

void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  sw(rd, source);
}

void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  ld(rd, source);
}

void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);
  sd(rd, source);
}

void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(tmp);
  lwc1(fd, tmp);
}

void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(tmp);
  swc1(fs, tmp);
}

void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(tmp);
  ldc1(fd, tmp);
}

void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
  MemOperand tmp = src;
  AdjustBaseAndOffset(tmp);
  sdc1(fs, tmp);
}
void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
  bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
                                                        : is_int16(rs.offset());
  if (is_one_instruction) {
    ll(rd, rs);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    li(scratch, rs.offset());
    daddu(scratch, scratch, rs.rm());
    ll(rd, MemOperand(scratch, 0));
  }
}

void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
  bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
                                                        : is_int16(rs.offset());
  if (is_one_instruction) {
    lld(rd, rs);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    li(scratch, rs.offset());
    daddu(scratch, scratch, rs.rm());
    lld(rd, MemOperand(scratch, 0));
  }
}

void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
  bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
                                                        : is_int16(rs.offset());
  if (is_one_instruction) {
    sc(rd, rs);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    li(scratch, rs.offset());
    daddu(scratch, scratch, rs.rm());
    sc(rd, MemOperand(scratch, 0));
  }
}

void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
  bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
                                                        : is_int16(rs.offset());
  if (is_one_instruction) {
    scd(rd, rs);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    li(scratch, rs.offset());
    daddu(scratch, scratch, rs.rm());
    scd(rd, MemOperand(scratch, 0));
  }
}
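// li() materializes 64-bit immediates. In OPTIMIZE_SIZE mode it picks the
// shortest instruction sequence (see InstrCountForLi64Bit); otherwise it emits
// a fixed-length sequence so that the code can later be patched.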
void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadConstant(dst, value);
      return;
    }
  }
  li(dst, Operand(value), mode);
}

void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
  if (FLAG_embedded_builtins) {
    if (root_array_available_ && options().isolate_independent_code) {
      IndirectLoadExternalReference(dst, value);
      return;
    }
  }
  li(dst, Operand(value), mode);
}

void TurboAssembler::li(Register dst, const StringConstantBase* string,
                        LiFlags mode) {
  li(dst, Operand::EmbeddedStringConstant(string), mode);
}
static inline int InstrCountForLiLower32Bit(int64_t value) {
  if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
      (value & kImm16Mask)) {
    return 2;
  } else {
    return 1;
  }
}

void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
  if (is_int16(static_cast<int32_t>(j.immediate()))) {
    daddiu(rd, zero_reg, (j.immediate() & kImm16Mask));
  } else if (!(j.immediate() & kUpper16MaskOf64)) {
    ori(rd, zero_reg, j.immediate() & kImm16Mask);
  } else {
    lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
    if (j.immediate() & kImm16Mask) {
      ori(rd, rd, j.immediate() & kImm16Mask);
    }
  }
}

static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
  uint32_t x = static_cast<uint32_t>(value);
  uint32_t y = static_cast<uint32_t>(value >> 32);

  if (x == y) {
    return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
  }

  return INT_MAX;
}
int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
  if (is_int32(value)) {
    return InstrCountForLiLower32Bit(value);
  } else {
    int bit31 = value >> 31 & 0x1;
    if ((value & kUpper16MaskOf64) == 0 && is_int16(value >> 32) &&
        kArchVariant == kMips64r6) {
      return 2;
    } else if ((value & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
               kArchVariant == kMips64r6) {
      return 2;
    } else if ((value & kImm16Mask) == 0 && is_int16((value >> 32) + bit31) &&
               kArchVariant == kMips64r6) {
      return 2;
    } else if ((value & kImm16Mask) == 0 &&
               ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
               kArchVariant == kMips64r6) {
      return 2;
    } else if (is_int16(static_cast<int32_t>(value)) &&
               is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
      return 2;
    } else if (is_int16(static_cast<int32_t>(value)) &&
               ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
               kArchVariant == kMips64r6) {
      return 2;
    } else if (base::bits::IsPowerOfTwo(value + 1) ||
               value == std::numeric_limits<int64_t>::max()) {
      return 2;
    } else {
      int shift_cnt = base::bits::CountTrailingZeros64(value);
      int rep32_count = InstrCountForLoadReplicatedConst32(value);
      int64_t tmp = value >> shift_cnt;
      if (is_uint16(tmp)) {
        return 2;
      } else if (is_int16(tmp)) {
        return 2;
      } else if (rep32_count < 3) {
        return 2;
      } else if (is_int32(tmp)) {
        return 3;
      } else {
        shift_cnt = 16 + base::bits::CountTrailingZeros64(value >> 16);
        tmp = value >> shift_cnt;
        if (is_uint16(tmp)) {
          return 3;
        } else if (is_int16(tmp)) {
          return 3;
        } else if (rep32_count < 4) {
          return 3;
        } else if (kArchVariant == kMips64r6) {
          int64_t imm = value;
          int count = InstrCountForLiLower32Bit(imm);
          imm = (imm >> 32) + bit31;
          if (imm & kImm16Mask) {
            count++;
          }
          imm = (imm >> 16) + (imm >> 15 & 0x1);
          if (imm & kImm16Mask) {
            count++;
          }
          return count;
        } else {
          if (is_int48(value)) {
            int64_t k = value >> 16;
            int count = InstrCountForLiLower32Bit(k) + 1;
            if (value & kImm16Mask) {
              count++;
            }
            return count;
          } else {
            int64_t k = value >> 32;
            int count = InstrCountForLiLower32Bit(k);
            if ((value >> 16) & kImm16Mask) {
              count += 3;
              if (value & kImm16Mask) {
                count++;
              }
            } else {
              count += 1;
              if (value & kImm16Mask) {
                count++;
              }
            }
            return count;
          }
        }
      }
    }
  }
}
void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  DCHECK(!MustUseReg(j.rmode()));
  DCHECK(mode == OPTIMIZE_SIZE);
  BlockTrampolinePoolScope block_trampoline_pool(this);
  // Normal load of an immediate value which does not need relocation.
  if (is_int32(j.immediate())) {
    LiLower32BitHelper(rd, j);
  } else {
    int bit31 = j.immediate() >> 31 & 0x1;
    if ((j.immediate() & kUpper16MaskOf64) == 0 &&
        is_int16(j.immediate() >> 32) && kArchVariant == kMips64r6) {
      ori(rd, zero_reg, j.immediate() & kImm16Mask);
      dahi(rd, j.immediate() >> 32 & kImm16Mask);
    } else if ((j.immediate() & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
               kArchVariant == kMips64r6) {
      ori(rd, zero_reg, j.immediate() & kImm16Mask);
      dati(rd, j.immediate() >> 48 & kImm16Mask);
    } else if ((j.immediate() & kImm16Mask) == 0 &&
               is_int16((j.immediate() >> 32) + bit31) &&
               kArchVariant == kMips64r6) {
      lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
      dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
    } else if ((j.immediate() & kImm16Mask) == 0 &&
               ((j.immediate() >> 31) & 0x1FFFF) ==
                   ((0x20000 - bit31) & 0x1FFFF) &&
               kArchVariant == kMips64r6) {
      lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
      dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
    } else if (is_int16(static_cast<int32_t>(j.immediate())) &&
               is_int16((j.immediate() >> 32) + bit31) &&
               kArchVariant == kMips64r6) {
      daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
      dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
    } else if (is_int16(static_cast<int32_t>(j.immediate())) &&
               ((j.immediate() >> 31) & 0x1FFFF) ==
                   ((0x20000 - bit31) & 0x1FFFF) &&
               kArchVariant == kMips64r6) {
      daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
      dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
    } else if (base::bits::IsPowerOfTwo(j.immediate() + 1) ||
               j.immediate() == std::numeric_limits<int64_t>::max()) {
      // The value is a power of two minus one: load all ones and shift right.
      int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.immediate() + 1);
      daddiu(rd, zero_reg, -1);
      if (shift_cnt < 32) {
        dsrl(rd, rd, shift_cnt);
      } else {
        dsrl32(rd, rd, shift_cnt & 31);
      }
    } else {
      int shift_cnt = base::bits::CountTrailingZeros64(j.immediate());
      int rep32_count = InstrCountForLoadReplicatedConst32(j.immediate());
      int64_t tmp = j.immediate() >> shift_cnt;
      if (is_uint16(tmp)) {
        // Load a 16-bit unsigned value and shift left.
        ori(rd, zero_reg, tmp & kImm16Mask);
        if (shift_cnt < 32) {
          dsll(rd, rd, shift_cnt);
        } else {
          dsll32(rd, rd, shift_cnt & 31);
        }
      } else if (is_int16(tmp)) {
        // Load a 16-bit signed value and shift left.
        daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
        if (shift_cnt < 32) {
          dsll(rd, rd, shift_cnt);
        } else {
          dsll32(rd, rd, shift_cnt & 31);
        }
      } else if (rep32_count < 3) {
        // The 32 LSBs equal the 32 MSBs and can be loaded in one instruction.
        LiLower32BitHelper(rd, j);
        Dins(rd, rd, 32, 32);
      } else if (is_int32(tmp)) {
        // Load a 32-bit signed value and shift left.
        lui(rd, tmp >> kLuiShift & kImm16Mask);
        ori(rd, rd, tmp & kImm16Mask);
        if (shift_cnt < 32) {
          dsll(rd, rd, shift_cnt);
        } else {
          dsll32(rd, rd, shift_cnt & 31);
        }
      } else {
        shift_cnt = 16 + base::bits::CountTrailingZeros64(j.immediate() >> 16);
        tmp = j.immediate() >> shift_cnt;
        if (is_uint16(tmp)) {
          ori(rd, zero_reg, tmp & kImm16Mask);
          if (shift_cnt < 32) {
            dsll(rd, rd, shift_cnt);
          } else {
            dsll32(rd, rd, shift_cnt & 31);
          }
          ori(rd, rd, j.immediate() & kImm16Mask);
        } else if (is_int16(tmp)) {
          daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
          if (shift_cnt < 32) {
            dsll(rd, rd, shift_cnt);
          } else {
            dsll32(rd, rd, shift_cnt & 31);
          }
          ori(rd, rd, j.immediate() & kImm16Mask);
        } else if (rep32_count < 4) {
          // The 32 LSBs equal the 32 MSBs and need two instructions to load.
          LiLower32BitHelper(rd, j);
          Dins(rd, rd, 32, 32);
        } else if (kArchVariant == kMips64r6) {
          // Catch-all for 64-bit values not handled by the cases above.
          int64_t imm = j.immediate();
          LiLower32BitHelper(rd, j);
          imm = (imm >> 32) + bit31;
          if (imm & kImm16Mask) {
            dahi(rd, imm & kImm16Mask);
          }
          imm = (imm >> 16) + (imm >> 15 & 0x1);
          if (imm & kImm16Mask) {
            dati(rd, imm & kImm16Mask);
          }
        } else {
          if (is_int48(j.immediate())) {
            Operand k = Operand(j.immediate() >> 16);
            LiLower32BitHelper(rd, k);
            dsll(rd, rd, 16);
            if (j.immediate() & kImm16Mask) {
              ori(rd, rd, j.immediate() & kImm16Mask);
            }
          } else {
            Operand k = Operand(j.immediate() >> 32);
            LiLower32BitHelper(rd, k);
            if ((j.immediate() >> 16) & kImm16Mask) {
              dsll(rd, rd, 16);
              ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
              dsll(rd, rd, 16);
              if (j.immediate() & kImm16Mask) {
                ori(rd, rd, j.immediate() & kImm16Mask);
              }
            } else {
              dsll32(rd, rd, 0);
              if (j.immediate() & kImm16Mask) {
                ori(rd, rd, j.immediate() & kImm16Mask);
              }
            }
          }
        }
      }
    }
  }
}
void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
    int li_count = InstrCountForLi64Bit(j.immediate());
    int li_neg_count = InstrCountForLi64Bit(-j.immediate());
    int li_not_count = InstrCountForLi64Bit(~j.immediate());
    // Prefer loading the negated or inverted value when that takes fewer
    // instructions, followed by one correcting instruction.
    if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) {
      DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
      li_optimized(rd, Operand(-j.immediate()), mode);
      Dsubu(rd, zero_reg, rd);
    } else if (li_neg_count > li_not_count && li_not_count < li_count - 1) {
      DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
      li_optimized(rd, Operand(~j.immediate()), mode);
      nor(rd, rd, rd);
    } else {
      li_optimized(rd, j, mode);
    }
  } else if (MustUseReg(j.rmode())) {
    int64_t immediate;
    if (j.IsHeapObjectRequest()) {
      RequestHeapObject(j.heap_object_request());
      immediate = 0;
    } else {
      immediate = j.immediate();
    }

    RecordRelocInfo(j.rmode(), immediate);
    lui(rd, (immediate >> 32) & kImm16Mask);
    ori(rd, rd, (immediate >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, immediate & kImm16Mask);
  } else if (mode == ADDRESS_LOAD) {
    // Always use the same number of instructions so this load can later be
    // patched with an arbitrary value.
    lui(rd, (j.immediate() >> 32) & kImm16Mask);
    ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.immediate() & kImm16Mask);
  } else {  // mode == CONSTANT_SIZE - always emit the same sequence.
    if (kArchVariant == kMips64r6) {
      int64_t imm = j.immediate();
      lui(rd, imm >> kLuiShift & kImm16Mask);
      ori(rd, rd, (imm & kImm16Mask));
      imm = (imm >> 32) + ((imm >> 31) & 0x1);
      dahi(rd, imm & kImm16Mask);
      imm = (imm >> 16) + ((imm >> 15) & 0x1);
      dati(rd, imm & kImm16Mask);
    } else {
      lui(rd, (j.immediate() >> 48) & kImm16Mask);
      ori(rd, rd, (j.immediate() >> 32) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, j.immediate() & kImm16Mask);
    }
  }
}
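// MultiPush/MultiPop store and load the registers in a RegList relative to sp,
// always in the same order, so the stack layout matches between push and pop.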
void TurboAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      Sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}

void TurboAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      Ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}

void TurboAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}

void TurboAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
                         uint16_t size) {
  DCHECK_LT(pos + size, 33);
  ext_(rt, rs, pos, size);
}

void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
         pos + size <= 64);
  if (size > 32) {
    dextm_(rt, rs, pos, size);
  } else if (pos >= 32) {
    dextu_(rt, rs, pos, size);
  } else {
    dext_(rt, rs, pos, size);
  }
}

void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
                         uint16_t size) {
  DCHECK_LE(pos + size, 32);
  ins_(rt, rs, pos, size);
}

void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
         pos + size <= 64);
  if (pos + size <= 32) {
    dins_(rt, rs, pos, size);
  } else if (pos < 32) {
    dinsm_(rt, rs, pos, size);
  } else {
    dinsu_(rt, rs, pos, size);
  }
}
void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
                                 int size, bool sign_extend) {
  dsrav(dest, source, pos);
  Dext(dest, dest, 0, size);
  if (sign_extend) {
    switch (size) {
      case 8:
        seb(dest, dest);
        break;
      case 16:
        seh(dest, dest);
        break;
      case 32:
        // Sign-extend the 32-bit word.
        dsll32(dest, dest, 0);
        dsra32(dest, dest, 0);
        break;
      default:
        UNREACHABLE();
    }
  }
}

void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
                                int size) {
  Dror(dest, dest, pos);
  Dins(dest, source, 0, size);
  {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    Dsubu(scratch, zero_reg, pos);
    Dror(dest, dest, scratch);
  }
}
void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kMips64r6) {
    // r6 neg_s changes the sign for NaN-like operands as well.
    neg_s(fd, fs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Label is_nan, done;
    Register scratch1 = t8;
    Register scratch2 = t9;
    CompareIsNanF32(fs, fs);
    BranchTrueShortF(&is_nan);
    Branch(USE_DELAY_SLOT, &done);
    // For non-NaN input, neg_s is enough; it executes in the delay slot.
    neg_s(fd, fs);
    bind(&is_nan);
    // For NaN input the sign bit has to be flipped through a GPR.
    mfc1(scratch1, fs);
    li(scratch2, kBinary32SignMask);
    Xor(scratch1, scratch1, scratch2);
    mtc1(scratch1, fd);
    bind(&done);
  }
}

void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kMips64r6) {
    // r6 neg_d changes the sign for NaN-like operands as well.
    neg_d(fd, fs);
  } else {
    DCHECK_EQ(kArchVariant, kMips64r2);
    BlockTrampolinePoolScope block_trampoline_pool(this);
    Label is_nan, done;
    Register scratch1 = t8;
    Register scratch2 = t9;
    CompareIsNanF64(fs, fs);
    BranchTrueShortF(&is_nan);
    Branch(USE_DELAY_SLOT, &done);
    // For non-NaN input, neg_d is enough; it executes in the delay slot.
    neg_d(fd, fs);
    bind(&is_nan);
    // For NaN input the sign bit has to be flipped through a GPR.
    dmfc1(scratch1, fs);
    li(scratch2, Double::kSignMask);
    Xor(scratch1, scratch1, scratch2);
    dmtc1(scratch1, fd);
    bind(&done);
  }
}
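// The Cvt_* helpers convert unsigned 32/64-bit integers to float/double. The
// unsigned 64-bit case halves values with the MSB set before the conversion
// and doubles the result afterwards.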
void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8);
}

void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  // Zero-extend the int32 in rs, then convert.
  Dext(t9, rs, 0, 32);
  dmtc1(t9, fd);
  cvt_d_l(fd, fd);
}

void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  dmfc1(t8, fs);
  Cvt_d_ul(fd, t8);
}

void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label msb_clear, conversion_done;

  Branch(&msb_clear, ge, rs, Operand(zero_reg));

  // Rs >= 2^63: halve (keeping the low bit), convert, then double.
  andi(t9, rs, 1);
  dsrl(rs, rs, 1);
  or_(t9, t9, rs);
  dmtc1(t9, fd);
  cvt_d_l(fd, fd);
  Branch(USE_DELAY_SLOT, &conversion_done);
  add_d(fd, fd, fd);  // In delay slot.

  bind(&msb_clear);
  dmtc1(rs, fd);
  cvt_d_l(fd, fd);

  bind(&conversion_done);
}

void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  mfc1(t8, fs);
  Cvt_s_uw(fd, t8);
}

void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  // Zero-extend the int32 in rs, then convert.
  Dext(t9, rs, 0, 32);
  dmtc1(t9, fd);
  cvt_s_l(fd, fd);
}

void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  dmfc1(t8, fs);
  Cvt_s_ul(fd, t8);
}

void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label positive, conversion_done;

  Branch(&positive, ge, rs, Operand(zero_reg));

  // Rs >= 2^63: halve (keeping the low bit), convert, then double.
  andi(t9, rs, 1);
  dsrl(rs, rs, 1);
  or_(t9, t9, rs);
  dmtc1(t9, fd);
  cvt_s_l(fd, fd);
  Branch(USE_DELAY_SLOT, &conversion_done);
  add_s(fd, fd, fd);  // In delay slot.

  bind(&positive);
  dmtc1(rs, fd);
  cvt_s_l(fd, fd);

  bind(&conversion_done);
}
void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
  round_l_d(fd, fs);
}

void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
  floor_l_d(fd, fs);
}

void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
  ceil_l_d(fd, fs);
}

void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
  trunc_l_d(fd, fs);
}

void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  // Load to GPR and clear the sign bit.
  dmfc1(t8, fs);
  {
    UseScratchRegisterScope temps(this);
    Register scratch1 = temps.Acquire();
    li(scratch1, 0x7FFFFFFFFFFFFFFF);
    and_(t8, t8, scratch1);
  }
  dmtc1(t8, fs);
  trunc_l_d(fd, fs);
}
void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Trunc_uw_d(t8, fs, scratch);
  mtc1(t8, fd);
}

void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Trunc_uw_s(t8, fs, scratch);
  mtc1(t8, fd);
}

void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
                                FPURegister scratch, Register result) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Trunc_ul_d(t8, fs, scratch, result);
  dmtc1(t8, fd);
}

void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch, Register result) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Trunc_ul_s(t8, fs, scratch, result);
  dmtc1(t8, fd);
}

void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  trunc_w_d(fd, fs);
}

void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  round_w_d(fd, fs);
}

void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  floor_w_d(fd, fs);
}

void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  ceil_w_d(fd, fs);
}
void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs,
                                FPURegister scratch) {
  DCHECK(fs != scratch);

  {
    // Load 2^31 into scratch as its double representation.
    UseScratchRegisterScope temps(this);
    Register scratch1 = temps.Acquire();
    li(scratch1, 0x41E00000);
    mtc1(zero_reg, scratch);
    mthc1(scratch1, scratch);
  }
  // If fs < 2^31 we can convert it normally.
  Label simple_convert;
  CompareF64(OLT, fs, scratch);
  BranchTrueShortF(&simple_convert);

  // Otherwise subtract 2^31, truncate, and set the sign bit in the result.
  sub_d(scratch, fs, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rd, scratch);
  Or(rd, rd, 1 << 31);

  Label done;
  Branch(&done);

  bind(&simple_convert);
  trunc_w_d(scratch, fs);
  mfc1(rd, scratch);

  bind(&done);
}

void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs,
                                FPURegister scratch) {
  DCHECK(fs != scratch);

  {
    // Load 2^31 into scratch as its float representation.
    UseScratchRegisterScope temps(this);
    Register scratch1 = temps.Acquire();
    li(scratch1, 0x4F000000);
    mtc1(scratch1, scratch);
  }
  // If fs < 2^31 we can convert it normally.
  Label simple_convert;
  CompareF32(OLT, fs, scratch);
  BranchTrueShortF(&simple_convert);

  // Otherwise subtract 2^31, truncate, and set the sign bit in the result.
  sub_s(scratch, fs, scratch);
  trunc_w_s(scratch, scratch);
  mfc1(rd, scratch);
  Or(rd, rd, 1 << 31);

  Label done;
  Branch(&done);

  bind(&simple_convert);
  trunc_w_s(scratch, fs);
  mfc1(rd, scratch);

  bind(&done);
}
void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs,
                                FPURegister scratch, Register result) {
  DCHECK(fs != scratch);
  DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));

  Label simple_convert, done, fail;
  if (result.is_valid()) {
    mov(result, zero_reg);
    Move(scratch, -1.0);
    // If fs <= -1 or is NaN, the conversion fails.
    CompareF64(OLE, fs, scratch);
    BranchTrueShortF(&fail);
    CompareIsNanF64(fs, scratch);
    BranchTrueShortF(&fail);
  }

  // Load 2^63 into scratch as its double representation.
  li(at, 0x43E0000000000000);
  dmtc1(at, scratch);

  // If fs < 2^63 we can convert it normally.
  CompareF64(OLT, fs, scratch);
  BranchTrueShortF(&simple_convert);

  // Otherwise subtract 2^63, truncate, and set the sign bit in the result.
  sub_d(scratch, fs, scratch);
  trunc_l_d(scratch, scratch);
  dmfc1(rd, scratch);
  Or(rd, rd, Operand(1UL << 63));
  Branch(&done);

  bind(&simple_convert);
  trunc_l_d(scratch, fs);
  dmfc1(rd, scratch);

  bind(&done);
  if (result.is_valid()) {
    // The conversion failed if the result is negative.
    {
      UseScratchRegisterScope temps(this);
      Register scratch1 = temps.Acquire();
      addiu(scratch1, zero_reg, -1);
      dsrl(scratch1, scratch1, 1);  // Load 2^62.
      dmfc1(result, scratch);
      xor_(result, result, scratch1);
    }
    Slt(result, zero_reg, result);
  }

  bind(&fail);
}
void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs,
                                FPURegister scratch, Register result) {
  DCHECK(fs != scratch);
  DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at));

  Label simple_convert, done, fail;
  if (result.is_valid()) {
    mov(result, zero_reg);
    Move(scratch, -1.0f);
    // If fs <= -1 or is NaN, the conversion fails.
    CompareF32(OLE, fs, scratch);
    BranchTrueShortF(&fail);
    CompareIsNanF32(fs, scratch);
    BranchTrueShortF(&fail);
  }

  {
    // Load 2^63 into scratch as its float representation.
    UseScratchRegisterScope temps(this);
    Register scratch1 = temps.Acquire();
    li(scratch1, 0x5F000000);
    mtc1(scratch1, scratch);
  }

  // If fs < 2^63 we can convert it normally.
  CompareF32(OLT, fs, scratch);
  BranchTrueShortF(&simple_convert);

  // Otherwise subtract 2^63, truncate, and set the sign bit in the result.
  sub_s(scratch, fs, scratch);
  trunc_l_s(scratch, scratch);
  dmfc1(rd, scratch);
  Or(rd, rd, Operand(1UL << 63));
  Branch(&done);

  bind(&simple_convert);
  trunc_l_s(scratch, fs);
  dmfc1(rd, scratch);

  bind(&done);
  if (result.is_valid()) {
    // The conversion failed if the result is negative.
    {
      UseScratchRegisterScope temps(this);
      Register scratch1 = temps.Acquire();
      addiu(scratch1, zero_reg, -1);
      dsrl(scratch1, scratch1, 1);  // Load 2^62.
      dmfc1(result, scratch);
      xor_(result, result, scratch1);
    }
    Slt(result, zero_reg, result);
  }

  bind(&fail);
}
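// RoundDouble/RoundFloat implement floor, ceil, trunc and round. On r6 they
// set the FCSR rounding mode and use rint; on pre-r6 cores they round through
// an integer register, skipping inputs whose exponent is too large to carry a
// fraction and preserving the sign of results that round to zero.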
template <typename RoundFunc>
void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
                                 FPURoundingMode mode, RoundFunc round) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch = t8;
  if (kArchVariant == kMips64r6) {
    cfc1(scratch, FCSR);
    li(at, Operand(mode));
    ctc1(at, FCSR);
    rint_d(dst, src);
    ctc1(scratch, FCSR);
  } else {
    Label done;
    mfhc1(scratch, src);
    Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    Branch(USE_DELAY_SLOT, &done, hs, at,
           Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
    mov_d(dst, src);  // In delay slot.
    round(this, dst, src);
    dmfc1(at, dst);
    Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
    cvt_d_l(dst, dst);  // In delay slot.
    srl(at, scratch, 31);
    sll(at, at, 31);
    mthc1(at, dst);
    bind(&done);
  }
}

void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_floor,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->floor_l_d(dst, src);
              });
}

void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_ceil,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->ceil_l_d(dst, src);
              });
}

void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_trunc,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->trunc_l_d(dst, src);
              });
}

void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
  RoundDouble(dst, src, mode_round,
              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
                tasm->round_l_d(dst, src);
              });
}
template <typename RoundFunc>
void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
                                FPURoundingMode mode, RoundFunc round) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Register scratch = t8;
  if (kArchVariant == kMips64r6) {
    cfc1(scratch, FCSR);
    li(at, Operand(mode));
    ctc1(at, FCSR);
    rint_s(dst, src);
    ctc1(scratch, FCSR);
  } else {
    int32_t kFloat32ExponentBias = 127;
    int32_t kFloat32MantissaBits = 23;
    int32_t kFloat32ExponentBits = 8;
    Label done;
    mfc1(scratch, src);
    Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
    Branch(USE_DELAY_SLOT, &done, hs, at,
           Operand(kFloat32ExponentBias + kFloat32MantissaBits));
    mov_s(dst, src);  // In delay slot.
    round(this, dst, src);
    mfc1(at, dst);
    Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
    cvt_s_w(dst, dst);  // In delay slot.
    srl(at, scratch, 31);
    sll(at, at, 31);
    mtc1(at, dst);
    bind(&done);
  }
}

void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_floor,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->floor_w_s(dst, src);
             });
}

void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_ceil,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->ceil_w_s(dst, src);
             });
}

void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_trunc,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->trunc_w_s(dst, src);
             });
}

void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
  RoundFloat(dst, src, mode_round,
             [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
               tasm->round_w_s(dst, src);
             });
}
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  DCHECK(fr != scratch && fs != scratch && ft != scratch);
  mul_s(scratch, fs, ft);
  add_s(fd, fr, scratch);
}

void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  DCHECK(fr != scratch && fs != scratch && ft != scratch);
  mul_d(scratch, fs, ft);
  add_d(fd, fr, scratch);
}

void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  DCHECK(fr != scratch && fs != scratch && ft != scratch);
  mul_s(scratch, fs, ft);
  sub_s(fd, scratch, fr);
}

void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  DCHECK(fr != scratch && fs != scratch && ft != scratch);
  mul_d(scratch, fs, ft);
  sub_d(fd, scratch, fr);
}
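// Floating-point compare and branch helpers. On r6 the result of cmp.cond.fmt
// lands in kDoubleCompareReg and is tested with bc1nez/bc1eqz; on r2 the
// legacy c.cond.fmt condition-code scheme with bc1t/bc1f is used.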
void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
                              FPURegister cmp1, FPURegister cmp2) {
  if (kArchVariant == kMips64r6) {
    sizeField = sizeField == D ? L : W;
    DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
    cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
  } else {
    c(cc, sizeField, cmp1, cmp2);
  }
}

void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
                                   FPURegister cmp2) {
  CompareF(sizeField, UN, cmp1, cmp2);
}

void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
  if (kArchVariant == kMips64r6) {
    bc1nez(target, kDoubleCompareReg);
  } else {
    bc1t(target);
  }
  if (bd == PROTECT) {
    nop();
  }
}

void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
  if (kArchVariant == kMips64r6) {
    bc1eqz(target, kDoubleCompareReg);
  } else {
    bc1f(target);
  }
  if (bd == PROTECT) {
    nop();
  }
}
2717 void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
2719 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2722 BranchFalseShortF(&skip);
2723 BranchLong(target, bd);
2726 BranchTrueShortF(target, bd);
2730 void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
2732 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2735 BranchTrueShortF(&skip);
2736 BranchLong(target, bd);
2739 BranchFalseShortF(target, bd);
2743 void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
2744 MSABranchCondition cond, MSARegister wt,
2745 BranchDelaySlot bd) {
2747 BlockTrampolinePoolScope block_trampoline_pool(this);
2751 target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2754 MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
2755 BranchShortMSA(df, &skip, neg_cond, wt, bd);
2756 BranchLong(target, bd);
2759 BranchShortMSA(df, target, cond, wt, bd);
2765 void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
2766 MSABranchCondition cond, MSARegister wt,
2767 BranchDelaySlot bd) {
2768 if (kArchVariant == kMips64r6) {
2769 BlockTrampolinePoolScope block_trampoline_pool(this);
2788 case one_elem_not_zero:
2815 if (bd == PROTECT) {
2820 void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
2821 UseScratchRegisterScope temps(this);
2822 Register scratch = temps.Acquire();
2823 DCHECK(src_low != scratch);
2824 mfhc1(scratch, dst);
2826 mthc1(scratch, dst);
2829 void TurboAssembler::Move(FPURegister dst, uint32_t src) {
2830 UseScratchRegisterScope temps(this);
2831 Register scratch = temps.Acquire();
2832 li(scratch, Operand(static_cast<int32_t>(src)));
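// Move(FPURegister, uint64_t): +0.0 and -0.0 are derived from kDoubleRegZero once it has
// been initialized; other constants are materialized 32 bits at a time, the low half via
// mtc1 and the high half via mthc1, with zero halves written directly from zero_reg.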
2836 void TurboAssembler::Move(FPURegister dst, uint64_t src) {
2838 if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
2839 mov_d(dst, kDoubleRegZero);
2840 } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
2841 Neg_d(dst, kDoubleRegZero);
2848 UseScratchRegisterScope temps(this);
2849 Register scratch = temps.Acquire();
2850 li(scratch, Operand(lo));
2853 mtc1(zero_reg, dst);
2858 UseScratchRegisterScope temps(this);
2859 Register scratch = temps.Acquire();
2860 li(scratch, Operand(hi));
2861 mthc1(scratch, dst);
2863 mthc1(zero_reg, dst);
2865 if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
2869 void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
2870 if (kArchVariant == kMips64r6) {
2872 Branch(&done, ne, rt, Operand(zero_reg));
2880 void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
2881 if (kArchVariant == kMips64r6) {
2883 Branch(&done, eq, rt, Operand(zero_reg));
2891 void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
2892 const Operand& rt, Condition cond) {
2893 BlockTrampolinePoolScope block_trampoline_pool(this);
2899 if (rs == zero_reg) {
2901 LoadZeroIfConditionZero(rd, rt.rm());
2903 if (rt.immediate() == 0) {
2909 } else if (IsZero(rt)) {
2910 LoadZeroIfConditionZero(rd, rs);
2913 LoadZeroIfConditionZero(rd, t9);
2917 if (rs == zero_reg) {
2919 LoadZeroIfConditionNotZero(rd, rt.rm());
2921 if (rt.immediate() != 0) {
2927 } else if (IsZero(rt)) {
2928 LoadZeroIfConditionNotZero(rd, rs);
2931 LoadZeroIfConditionNotZero(rd, t9);
2938 LoadZeroIfConditionNotZero(rd, t9);
2942 LoadZeroIfConditionNotZero(rd, t9);
2947 LoadZeroIfConditionNotZero(rd, t9);
2952 LoadZeroIfConditionNotZero(rd, t9);
2959 LoadZeroIfConditionNotZero(rd, t9);
2963 case Ugreater_equal:
2965 LoadZeroIfConditionNotZero(rd, t9);
2970 LoadZeroIfConditionNotZero(rd, t9);
2975 LoadZeroIfConditionNotZero(rd, t9);
2983 void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
2984 Register condition) {
2985 if (kArchVariant == kMips64r6) {
2986 seleqz(dest, dest, condition);
2988 Movn(dest, zero_reg, condition);
2992 void TurboAssembler::LoadZeroIfConditionZero(Register dest,
2993 Register condition) {
2994 if (kArchVariant == kMips64r6) {
2995 selnez(dest, dest, condition);
2997 Movz(dest, zero_reg, condition);
3001 void TurboAssembler::LoadZeroIfFPUCondition(Register dest) {
3002 if (kArchVariant == kMips64r6) {
3003 dmfc1(kScratchReg, kDoubleCompareReg);
3004 LoadZeroIfConditionNotZero(dest, kScratchReg);
3006 Movt(dest, zero_reg);
3010 void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) {
3011 if (kArchVariant == kMips64r6) {
3012 dmfc1(kScratchReg, kDoubleCompareReg);
3013 LoadZeroIfConditionZero(dest, kScratchReg);
3015 Movf(dest, zero_reg);
3019 void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
3023 void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
3027 void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
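// Ctz (count trailing zeros) without a native instruction: (rs ^ (rs - 1)) & (rs - 1)
// yields a mask whose set bits are exactly the trailing zeros of rs, and the count is
// then derived from a leading-zero count of that mask.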
3029 void TurboAssembler::Ctz(Register rd, Register rs) {
3030 if (kArchVariant == kMips64r6) {
3041 UseScratchRegisterScope temps(this);
3042 Register scratch = temps.Acquire();
3043 Daddu(scratch, rs, -1);
3044 Xor(rd, scratch, rs);
3045 And(rd, rd, scratch);
3051 Subu(rd, scratch, rd);
3055 void TurboAssembler::Dctz(Register rd, Register rs) {
3056 if (kArchVariant == kMips64r6) {
3067 UseScratchRegisterScope temps(this);
3068 Register scratch = temps.Acquire();
3069 Daddu(scratch, rs, -1);
3070 Xor(rd, scratch, rs);
3071 And(rd, rd, scratch);
3077 Dsubu(rd, scratch, rd);
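// Popcnt/Dpopcnt implement the classic SWAR population count: bits are folded in pairs,
// then nibbles, then bytes, and a final multiply accumulates the total into the top byte,
// which is shifted down into rd.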
3081 void TurboAssembler::Popcnt(Register rd, Register rs) {
3110 UseScratchRegisterScope temps(this);
3111 BlockTrampolinePoolScope block_trampoline_pool(this);
3112 Register scratch = temps.Acquire();
3113 Register scratch2 = t8;
3114 srl(scratch, rs, 1);
3116 And(scratch, scratch, scratch2);
3117 Subu(scratch, rs, scratch);
3119 And(rd, scratch, scratch2);
3120 srl(scratch, scratch, 2);
3121 And(scratch, scratch, scratch2);
3122 Addu(scratch, rd, scratch);
3123 srl(rd, scratch, 4);
3124 Addu(rd, rd, scratch);
3126 And(rd, rd, scratch2);
3128 Mul(rd, rd, scratch);
3132 void TurboAssembler::Dpopcnt(Register rd, Register rs) {
3133 uint64_t B0 = 0x5555555555555555l;
3134 uint64_t B1 = 0x3333333333333333l;
3135 uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl;
3136 uint64_t value = 0x0101010101010101l;
3137 uint64_t shift = 24;
3139 UseScratchRegisterScope temps(this);
3140 BlockTrampolinePoolScope block_trampoline_pool(this);
3141 Register scratch = temps.Acquire();
3142 Register scratch2 = t8;
3143 dsrl(scratch, rs, 1);
3145 And(scratch, scratch, scratch2);
3146 Dsubu(scratch, rs, scratch);
3148 And(rd, scratch, scratch2);
3149 dsrl(scratch, scratch, 2);
3150 And(scratch, scratch, scratch2);
3151 Daddu(scratch, rd, scratch);
3152 dsrl(rd, scratch, 4);
3153 Daddu(rd, rd, scratch);
3155 And(rd, rd, scratch2);
3157 Dmul(rd, rd, scratch);
3158 dsrl32(rd, rd, shift);
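// EmitFPUTruncate: a cvt_w_d round-trip is tried first; if converting back reproduces the
// input, the result is exact and except_flag stays zero. Otherwise the FCSR is cleared,
// the conversion is redone with the requested rounding mode, and the resulting exception
// bits (masked by except_mask) are returned in except_flag.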
3161 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
3163 DoubleRegister double_input,
3165 DoubleRegister double_scratch,
3166 Register except_flag,
3167 CheckForInexactConversion check_inexact) {
3168 DCHECK(result != scratch);
3169 DCHECK(double_input != double_scratch);
3170 DCHECK(except_flag != scratch);
3175 mov(except_flag, zero_reg);
3178 cvt_w_d(double_scratch, double_input);
3179 mfc1(result, double_scratch);
3180 cvt_d_w(double_scratch, double_scratch);
3181 CompareF64(EQ, double_input, double_scratch);
3182 BranchTrueShortF(&done);
3184 int32_t except_mask = kFCSRFlagMask;
3186 if (check_inexact == kDontCheckForInexactConversion) {
3188 except_mask &= ~kFCSRInexactFlagMask;
3192 cfc1(scratch, FCSR);
3194 ctc1(zero_reg, FCSR);
3197 switch (rounding_mode) {
3198 case kRoundToNearest:
3199 Round_w_d(double_scratch, double_input);
3202 Trunc_w_d(double_scratch, double_input);
3204 case kRoundToPlusInf:
3205 Ceil_w_d(double_scratch, double_input);
3207 case kRoundToMinusInf:
3208 Floor_w_d(double_scratch, double_input);
3213 cfc1(except_flag, FCSR);
3215 ctc1(scratch, FCSR);
3217 mfc1(result, double_scratch);
3220 And(except_flag, except_flag, Operand(except_mask));
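// TryInlineTruncateDoubleToI: truncate with trunc_w_d while the FCSR is zeroed, then test
// the overflow/underflow/invalid-operation flags; if none fired, the inline result is
// valid and control branches to 'done', otherwise the caller falls through to a slow path.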
3225 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
3226 DoubleRegister double_input,
3228 DoubleRegister single_scratch = kScratchDoubleReg.low();
3229 UseScratchRegisterScope temps(this);
3230 BlockTrampolinePoolScope block_trampoline_pool(this);
3231 Register scratch = temps.Acquire();
3232 Register scratch2 = t9;
3235 cfc1(scratch2, FCSR);
3236 ctc1(zero_reg, FCSR);
3238 trunc_w_d(single_scratch, double_input);
3239 mfc1(result, single_scratch);
3241 cfc1(scratch, FCSR);
3242 ctc1(scratch2, FCSR);
3246 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
3248 Branch(done, eq, scratch, Operand(zero_reg));
3251 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
3253 DoubleRegister double_input,
3254 StubCallMode stub_mode) {
3257 TryInlineTruncateDoubleToI(result, double_input, &done);
3261 Dsubu(sp, sp, Operand(kDoubleSize));
3262 Sdc1(double_input, MemOperand(sp, 0));
3264 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
3265 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
3267 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
3269 Ld(result, MemOperand(sp, 0));
3271 Daddu(sp, sp, Operand(kDoubleSize));
3280 #define BRANCH_ARGS_CHECK(cond, rs, rt)                                  \
3281   DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
3282          (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
3284 void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
3285 DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
3286 BranchShort(offset, bdslot);
3289 void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
3290 const Operand& rt, BranchDelaySlot bdslot) {
3291 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
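// Branch(Label*): bound labels take a short branch when in range and a long sequence
// otherwise; for unbound labels the choice depends on whether a trampoline pool has
// already been emitted.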
3296 void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
3297 if (L->is_bound()) {
3298 if (is_near_branch(L)) {
3299 BranchShort(L, bdslot);
3301 BranchLong(L, bdslot);
3304 if (is_trampoline_emitted()) {
3305 BranchLong(L, bdslot);
3307 BranchShort(L, bdslot);
3312 void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
3313 const Operand& rt, BranchDelaySlot bdslot) {
3314 if (L->is_bound()) {
3315 if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
3316 if (cond != cc_always) {
3318 Condition neg_cond = NegateCondition(cond);
3319 BranchShort(&skip, neg_cond, rs, rt);
3320 BranchLong(L, bdslot);
3323 BranchLong(L, bdslot);
3327 if (is_trampoline_emitted()) {
3328 if (cond != cc_always) {
3330 Condition neg_cond = NegateCondition(cond);
3331 BranchShort(&skip, neg_cond, rs, rt);
3332 BranchLong(L, bdslot);
3335 BranchLong(L, bdslot);
3338 BranchShort(L, cond, rs, rt, bdslot);
3343 void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
3344 RootIndex index, BranchDelaySlot bdslot) {
3345 UseScratchRegisterScope temps(this);
3346 Register scratch = temps.Acquire();
3347 LoadRoot(scratch, index);
3348 Branch(L, cond, rs, Operand(scratch), bdslot);
3351 void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
3352 BranchDelaySlot bdslot) {
3353 DCHECK(L == nullptr || offset == 0);
3354 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3358 if (bdslot == PROTECT)
3362 void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
3363 DCHECK(L == nullptr || offset == 0);
3364 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3368 void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
3369 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3370 DCHECK(is_int26(offset));
3371 BranchShortHelperR6(offset, nullptr);
3373 DCHECK(is_int16(offset));
3374 BranchShortHelper(offset, nullptr, bdslot);
3378 void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
3379 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3380 BranchShortHelperR6(0, L);
3382 BranchShortHelper(0, L, bdslot);
3387 int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
3389 offset = branch_offset_helper(L, bits) >> 2;
3391 DCHECK(is_intn(offset, bits));
3396 Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
3398 Register r2 = no_reg;
3409 bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
3411 if (!is_near(L, bits)) return false;
3412 offset = GetOffset(offset, L, bits);
3416 bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
3417 Register& scratch, const Operand& rt) {
3418 if (!is_near(L, bits)) return false;
3419 scratch = GetRtAsRegisterHelper(rt, scratch);
3420 offset = GetOffset(offset, L, bits);
3424 bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
3425 Condition cond, Register rs,
3426 const Operand& rt) {
3427 DCHECK(L == nullptr || offset == 0);
3428 UseScratchRegisterScope temps(this);
3429 BlockTrampolinePoolScope block_trampoline_pool(this);
3430 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3436 BlockTrampolinePoolScope block_trampoline_pool(this);
3439 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3443 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3446 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3448 beq(rs, scratch, offset);
3450 } else if (IsZero(rt)) {
3451 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
3455 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3457 beqc(rs, scratch, offset);
3461 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3464 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3466 bne(rs, scratch, offset);
3468 } else if (IsZero(rt)) {
3469 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
3473 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3475 bnec(rs, scratch, offset);
3482 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3484 } else if (rs == zero_reg) {
3485 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3487 bltzc(scratch, offset);
3488 } else if (IsZero(rt)) {
3489 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3492 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3494 DCHECK(rs != scratch);
3495 bltc(scratch, rs, offset);
3500 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3501 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3503 } else if (rs == zero_reg) {
3504 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3506 blezc(scratch, offset);
3507 } else if (IsZero(rt)) {
3508 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3511 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3513 DCHECK(rs != scratch);
3514 bgec(rs, scratch, offset);
3519 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3521 } else if (rs == zero_reg) {
3522 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3524 bgtzc(scratch, offset);
3525 } else if (IsZero(rt)) {
3526 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3529 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3531 DCHECK(rs != scratch);
3532 bltc(rs, scratch, offset);
3537 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3538 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3540 } else if (rs == zero_reg) {
3541 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3543 bgezc(scratch, offset);
3544 } else if (IsZero(rt)) {
3545 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3548 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3550 DCHECK(rs != scratch);
3551 bgec(scratch, rs, offset);
3558 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3560 } else if (rs == zero_reg) {
3561 if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
3563 bnezc(scratch, offset);
3564 } else if (IsZero(rt)) {
3565 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
3568 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3570 DCHECK(rs != scratch);
3571 bltuc(scratch, rs, offset);
3574 case Ugreater_equal:
3576 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3577 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3579 } else if (rs == zero_reg) {
3580 if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
3582 beqzc(scratch, offset);
3583 } else if (IsZero(rt)) {
3584 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3587 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3589 DCHECK(rs != scratch);
3590 bgeuc(rs, scratch, offset);
3595 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3597 } else if (rs == zero_reg) {
3598 if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
3600 bnezc(scratch, offset);
3601 } else if (IsZero(rt)) {
3604 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3606 DCHECK(rs != scratch);
3607 bltuc(rs, scratch, offset);
3612 if (rt.is_reg() && rs.code() == rt.rm().code()) {
3613 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3615 } else if (rs == zero_reg) {
3616 if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
3619 } else if (IsZero(rt)) {
3620 if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
3623 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3625 DCHECK(rs != scratch);
3626 bgeuc(scratch, rs, offset);
3633 CheckTrampolinePoolQuick(1);
3637 bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3638 Register rs, const Operand& rt,
3639 BranchDelaySlot bdslot) {
3640 DCHECK(L == nullptr || offset == 0);
3641 if (!is_near(L, OffsetSize::kOffset16)) return false;
3643 UseScratchRegisterScope temps(this);
3644 BlockTrampolinePoolScope block_trampoline_pool(this);
3645 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3652 BlockTrampolinePoolScope block_trampoline_pool(this);
3655 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3660 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3661 beq(rs, zero_reg, offset32);
3664 scratch = GetRtAsRegisterHelper(rt, scratch);
3665 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3666 beq(rs, scratch, offset32);
3671 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3672 bne(rs, zero_reg, offset32);
3675 scratch = GetRtAsRegisterHelper(rt, scratch);
3676 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3677 bne(rs, scratch, offset32);
3684 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3687 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3688 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3689 bne(scratch, zero_reg, offset32);
3694 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3697 Slt(scratch, rs, rt);
3698 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3699 beq(scratch, zero_reg, offset32);
3704 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3707 Slt(scratch, rs, rt);
3708 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3709 bne(scratch, zero_reg, offset32);
3714 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3717 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3718 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3719 beq(scratch, zero_reg, offset32);
3726 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3727 bne(rs, zero_reg, offset32);
3729 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3730 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3731 bne(scratch, zero_reg, offset32);
3734 case Ugreater_equal:
3736 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3739 Sltu(scratch, rs, rt);
3740 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3741 beq(scratch, zero_reg, offset32);
3748 Sltu(scratch, rs, rt);
3749 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3750 bne(scratch, zero_reg, offset32);
3755 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3756 beq(rs, zero_reg, offset32);
3758 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3759 offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3760 beq(scratch, zero_reg, offset32);
3769 if (bdslot == PROTECT)
3775 bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3776 Register rs, const Operand& rt,
3777 BranchDelaySlot bdslot) {
3778 BRANCH_ARGS_CHECK(cond, rs, rt);
3781 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3782 DCHECK(is_int26(offset));
3783 return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3785 DCHECK(is_int16(offset));
3786 return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3789 DCHECK_EQ(offset, 0);
3790 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3791 return BranchShortHelperR6(0, L, cond, rs, rt);
3793 return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3799 void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3800 const Operand& rt, BranchDelaySlot bdslot) {
3801 BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3804 void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
3805 const Operand& rt, BranchDelaySlot bdslot) {
3806 BranchShortCheck(0, L, cond, rs, rt, bdslot);
3809 void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3810 BranchAndLinkShort(offset, bdslot);
3813 void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3814 const Operand& rt, BranchDelaySlot bdslot) {
3815 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3820 void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3821 if (L->is_bound()) {
3822 if (is_near_branch(L)) {
3823 BranchAndLinkShort(L, bdslot);
3825 BranchAndLinkLong(L, bdslot);
3828 if (is_trampoline_emitted()) {
3829 BranchAndLinkLong(L, bdslot);
3831 BranchAndLinkShort(L, bdslot);
3836 void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3837 const Operand& rt, BranchDelaySlot bdslot) {
3838 if (L->is_bound()) {
3839 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3841 Condition neg_cond = NegateCondition(cond);
3842 BranchShort(&skip, neg_cond, rs, rt);
3843 BranchAndLinkLong(L, bdslot);
3847 if (is_trampoline_emitted()) {
3849 Condition neg_cond = NegateCondition(cond);
3850 BranchShort(&skip, neg_cond, rs, rt);
3851 BranchAndLinkLong(L, bdslot);
3854 BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3859 void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3860 BranchDelaySlot bdslot) {
3861 DCHECK(L == nullptr || offset == 0);
3862 offset = GetOffset(offset, L, OffsetSize::kOffset16);
3866 if (bdslot == PROTECT)
3870 void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3871 DCHECK(L == nullptr || offset == 0);
3872 offset = GetOffset(offset, L, OffsetSize::kOffset26);
3876 void TurboAssembler::BranchAndLinkShort(int32_t offset,
3877 BranchDelaySlot bdslot) {
3878 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3879 DCHECK(is_int26(offset));
3880 BranchAndLinkShortHelperR6(offset, nullptr);
3882 DCHECK(is_int16(offset));
3883 BranchAndLinkShortHelper(offset, nullptr, bdslot);
3887 void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3888 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3889 BranchAndLinkShortHelperR6(0, L);
3891 BranchAndLinkShortHelper(0, L, bdslot);
3895 bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3896 Condition cond, Register rs,
3897 const Operand& rt) {
3898 DCHECK(L == nullptr || offset == 0);
3899 UseScratchRegisterScope temps(this);
3900 Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
3901 OffsetSize bits = OffsetSize::kOffset16;
3903 BlockTrampolinePoolScope block_trampoline_pool(this);
3904 DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3907 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3911 if (!is_near(L, bits)) return false;
3912 Subu(scratch, rs, rt);
3913 offset = GetOffset(offset, L, bits);
3914 beqzalc(scratch, offset);
3917 if (!is_near(L, bits)) return false;
3918 Subu(scratch, rs, rt);
3919 offset = GetOffset(offset, L, bits);
3920 bnezalc(scratch, offset);
3926 if (rs.code() == rt.rm().code()) {
3928 } else if (rs == zero_reg) {
3929 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3931 bltzalc(scratch, offset);
3932 } else if (IsZero(rt)) {
3933 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3934 bgtzalc(rs, offset);
3936 if (!is_near(L, bits)) return false;
3937 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3938 offset = GetOffset(offset, L, bits);
3939 bnezalc(scratch, offset);
3944 if (rs.code() == rt.rm().code()) {
3945 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3947 } else if (rs == zero_reg) {
3948 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3950 blezalc(scratch, offset);
3951 } else if (IsZero(rt)) {
3952 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3953 bgezalc(rs, offset);
3955 if (!is_near(L, bits)) return false;
3956 Slt(scratch, rs, rt);
3957 offset = GetOffset(offset, L, bits);
3958 beqzalc(scratch, offset);
3963 if (rs.code() == rt.rm().code()) {
3965 } else if (rs == zero_reg) {
3966 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3968 bgtzalc(scratch, offset);
3969 } else if (IsZero(rt)) {
3970 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3971 bltzalc(rs, offset);
3973 if (!is_near(L, bits)) return false;
3974 Slt(scratch, rs, rt);
3975 offset = GetOffset(offset, L, bits);
3976 bnezalc(scratch, offset);
3981 if (rs.code() == rt.rm().code()) {
3982 if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
3984 } else if (rs == zero_reg) {
3985 if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
3987 bgezalc(scratch, offset);
3988 } else if (IsZero(rt)) {
3989 if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
3990 blezalc(rs, offset);
3992 if (!is_near(L, bits)) return false;
3993 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3994 offset = GetOffset(offset, L, bits);
3995 beqzalc(scratch, offset);
4003 if (!is_near(L, bits)) return false;
4004 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4005 offset = GetOffset(offset, L, bits);
4006 bnezalc(scratch, offset);
4008 case Ugreater_equal:
4010 if (!is_near(L, bits)) return false;
4011 Sltu(scratch, rs, rt);
4012 offset = GetOffset(offset, L, bits);
4013 beqzalc(scratch, offset);
4017 if (!is_near(L, bits)) return false;
4018 Sltu(scratch, rs, rt);
4019 offset = GetOffset(offset, L, bits);
4020 bnezalc(scratch, offset);
4024 if (!is_near(L, bits)) return false;
4025 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4026 offset = GetOffset(offset, L, bits);
4027 beqzalc(scratch, offset);
4038 bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
4039 Condition cond, Register rs,
4041 BranchDelaySlot bdslot) {
4042 DCHECK(L == nullptr || offset == 0);
4043 if (!is_near(L, OffsetSize::kOffset16)) return false;
4045 Register scratch = t8;
4046 BlockTrampolinePoolScope block_trampoline_pool(this);
4050 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4054 bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
4056 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4060 beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
4062 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4068 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4069 addiu(scratch, scratch, -1);
4070 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4071 bgezal(scratch, offset);
4074 Slt(scratch, rs, rt);
4075 addiu(scratch, scratch, -1);
4076 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4077 bltzal(scratch, offset);
4080 Slt(scratch, rs, rt);
4081 addiu(scratch, scratch, -1);
4082 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4083 bgezal(scratch, offset);
4086 Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4087 addiu(scratch, scratch, -1);
4088 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4089 bltzal(scratch, offset);
4094 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4095 addiu(scratch, scratch, -1);
4096 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4097 bgezal(scratch, offset);
4099 case Ugreater_equal:
4100 Sltu(scratch, rs, rt);
4101 addiu(scratch, scratch, -1);
4102 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4103 bltzal(scratch, offset);
4106 Sltu(scratch, rs, rt);
4107 addiu(scratch, scratch, -1);
4108 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4109 bgezal(scratch, offset);
4112 Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
4113 addiu(scratch, scratch, -1);
4114 offset = GetOffset(offset, L, OffsetSize::kOffset16);
4115 bltzal(scratch, offset);
4123 if (bdslot == PROTECT)
4129 bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
4130 Condition cond, Register rs,
4132 BranchDelaySlot bdslot) {
4133 BRANCH_ARGS_CHECK(cond, rs, rt);
4136 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
4137 DCHECK(is_int26(offset));
4138 return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
4140 DCHECK(is_int16(offset));
4141 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
4144 DCHECK_EQ(offset, 0);
4145 if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
4146 return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
4148 return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
4154 void TurboAssembler::LoadFromConstantsTable(Register destination,
4155 int constant_index) {
4156 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
4157 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
4159 FieldMemOperand(destination,
4160 FixedArray::kHeaderSize + constant_index * kPointerSize));
4163 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
4164 Ld(destination, MemOperand(kRootRegister, offset));
4167 void TurboAssembler::LoadRootRegisterOffset(Register destination,
4170 Move(destination, kRootRegister);
4172 Daddu(destination, kRootRegister, Operand(offset));
4176 void TurboAssembler::Jump(Register target, Condition cond, Register rs,
4177 const Operand& rt, BranchDelaySlot bd) {
4178 BlockTrampolinePoolScope block_trampoline_pool(this);
4179 if (kArchVariant == kMips64r6 && bd == PROTECT) {
4180 if (cond == cc_always) {
4183 BRANCH_ARGS_CHECK(cond, rs, rt);
4184 Branch(2, NegateCondition(cond), rs, rt);
4188 if (cond == cc_always) {
4191 BRANCH_ARGS_CHECK(cond, rs, rt);
4192 Branch(2, NegateCondition(cond), rs, rt);
4196 if (bd == PROTECT) nop();
4200 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
4201 Condition cond, Register rs, const Operand& rt,
4202 BranchDelaySlot bd) {
4204 if (cond != cc_always) {
4205 Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
4210 BlockTrampolinePoolScope block_trampoline_pool(this);
4211 li(t9, Operand(target, rmode));
4212 Jump(t9, al, zero_reg, Operand(zero_reg), bd);
4217 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
4218 Register rs, const Operand& rt, BranchDelaySlot bd) {
4219 DCHECK(!RelocInfo::IsCodeTarget(rmode));
4220 Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
4223 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
4224 Condition cond, Register rs, const Operand& rt,
4225 BranchDelaySlot bd) {
4226 DCHECK(RelocInfo::IsCodeTarget(rmode));
4227 if (FLAG_embedded_builtins) {
4228 BlockTrampolinePoolScope block_trampoline_pool(this);
4229 if (root_array_available_ && options().isolate_independent_code) {
4230 IndirectLoadConstant(t9, code);
4231 Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4232 Jump(t9, cond, rs, rt, bd);
4234 } else if (options().inline_offheap_trampolines) {
4235 int builtin_index = Builtins::kNoBuiltinId;
4236 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
4237 Builtins::IsIsolateIndependent(builtin_index)) {
4239 RecordCommentForOffHeapTrampoline(builtin_index);
4240 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
4241 EmbeddedData d = EmbeddedData::FromBlob();
4242 Address entry = d.InstructionStartOfBuiltin(builtin_index);
4243 li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
4244 Jump(t9, cond, rs, rt, bd);
4249 Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
4253 void TurboAssembler::Call(Register target, Condition cond, Register rs,
4254 const Operand& rt, BranchDelaySlot bd) {
4255 BlockTrampolinePoolScope block_trampoline_pool(this);
4256 if (kArchVariant == kMips64r6 && bd == PROTECT) {
4257 if (cond == cc_always) {
4260 BRANCH_ARGS_CHECK(cond, rs, rt);
4261 Branch(2, NegateCondition(cond), rs, rt);
4265 if (cond == cc_always) {
4268 BRANCH_ARGS_CHECK(cond, rs, rt);
4269 Branch(2, NegateCondition(cond), rs, rt);
4273 if (bd == PROTECT) nop();
4277 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
4278 Register rs, const Operand& rt, BranchDelaySlot bd) {
4279 BlockTrampolinePoolScope block_trampoline_pool(this);
4280 li(t9, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
4281 Call(t9, cond, rs, rt, bd);
4284 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
4285 Condition cond, Register rs, const Operand& rt,
4286 BranchDelaySlot bd) {
4287 BlockTrampolinePoolScope block_trampoline_pool(this);
4288 if (FLAG_embedded_builtins) {
4289 if (root_array_available_ && options().isolate_independent_code) {
4290 IndirectLoadConstant(t9, code);
4291 Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4292 Call(t9, cond, rs, rt, bd);
4294 } else if (options().inline_offheap_trampolines) {
4295 int builtin_index = Builtins::kNoBuiltinId;
4296 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
4297 Builtins::IsIsolateIndependent(builtin_index)) {
4299 RecordCommentForOffHeapTrampoline(builtin_index);
4300 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
4301 EmbeddedData d = EmbeddedData::FromBlob();
4302 Address entry = d.InstructionStartOfBuiltin(builtin_index);
4303 li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
4304 Call(t9, cond, rs, rt, bd);
4309 DCHECK(RelocInfo::IsCodeTarget(rmode));
4310 Call(code.address(), rmode, cond, rs, rt, bd);
4313 void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
4314 BranchDelaySlot bd) {
4315 Jump(ra, cond, rs, rt, bd);
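// BranchLong: when a compact R6 branch cannot be used, ra is parked in t8, the 32-bit
// branch offset is built in t9 with lui/ori, and ra is restored around the jump so the
// link register is preserved.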
4318 void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
4319 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4320 (!L->is_bound() || is_near_r6(L))) {
4321 BranchShortHelperR6(0, L);
4324 BlockTrampolinePoolScope block_trampoline_pool(this);
4326 imm64 = branch_long_offset(L);
4327 DCHECK(is_int32(imm64));
4328 or_(t8, ra, zero_reg);
4330 lui(t9, (imm64 & kHiMaskOf32) >> kLuiShift);
4331 ori(t9, t9, (imm64 & kImm16Mask));
4333 if (bdslot == USE_DELAY_SLOT) {
4334 or_(ra, t8, zero_reg);
4338 if (bdslot == PROTECT) or_(ra, t8, zero_reg);
4342 void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
4343 if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
4344 (!L->is_bound() || is_near_r6(L))) {
4345 BranchAndLinkShortHelperR6(0, L);
4348 BlockTrampolinePoolScope block_trampoline_pool(this);
4350 imm64 = branch_long_offset(L);
4351 DCHECK(is_int32(imm64));
4352 lui(t8, (imm64 & kHiMaskOf32) >> kLuiShift);
4354 ori(t8, t8, (imm64 & kImm16Mask));
4358 if (bdslot == PROTECT) nop();
4362 void TurboAssembler::DropAndRet(int drop) {
4363 DCHECK(is_int16(drop * kPointerSize));
4364 Ret(USE_DELAY_SLOT);
4365 daddiu(sp, sp, drop * kPointerSize);
4368 void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
4369 const Operand& r2) {
4372 if (cond != cc_always) {
4373 Branch(&skip, NegateCondition(cond), r1, r2);
4379 if (cond != cc_always) {
4384 void TurboAssembler::Drop(int count, Condition cond, Register reg,
4385 const Operand& op) {
4393 Branch(&skip, NegateCondition(cond), reg, op);
4396 Daddu(sp, sp, Operand(count * kPointerSize));
4405 void MacroAssembler::Swap(Register reg1,
4408 if (scratch == no_reg) {
4409 Xor(reg1, reg1, Operand(reg2));
4410 Xor(reg2, reg2, Operand(reg1));
4411 Xor(reg1, reg1, Operand(reg2));
4419 void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
4421 void TurboAssembler::Push(Smi smi) {
4422 UseScratchRegisterScope temps(this);
4423 Register scratch = temps.Acquire();
4424 li(scratch, Operand(smi));
4428 void TurboAssembler::Push(Handle<HeapObject> handle) {
4429 UseScratchRegisterScope temps(this);
4430 Register scratch = temps.Acquire();
4431 li(scratch, Operand(handle));
4435 void MacroAssembler::MaybeDropFrames() {
4437 li(a1, ExternalReference::debug_restart_fp_address(isolate()));
4438 Ld(a1, MemOperand(a1));
4439 Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
4440 ne, a1, Operand(zero_reg));
4446 void MacroAssembler::PushStackHandler() {
4448 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
4449 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4455 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4456 Ld(a5, MemOperand(a6));
4460 Sd(sp, MemOperand(a6));
4464 void MacroAssembler::PopStackHandler() {
4465 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4467 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
4469 UseScratchRegisterScope temps(this);
4470 Register scratch = temps.Acquire();
4472 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
4473 Sd(a1, MemOperand(scratch));
4476 void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4477 const DoubleRegister src) {
4478 sub_d(dst, src, kDoubleRegZero);
4481 void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
4482 if (IsMipsSoftFloatABI) {
4483 if (kArchEndian == kLittle) {
4493 void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4494 if (IsMipsSoftFloatABI) {
4495 if (kArchEndian == kLittle) {
4505 void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
4506 if (!IsMipsSoftFloatABI) {
4509 if (kArchEndian == kLittle) {
4517 void TurboAssembler::MovToFloatResult(DoubleRegister src) {
4518 if (!IsMipsSoftFloatABI) {
4521 if (kArchEndian == kLittle) {
4529 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
4530 DoubleRegister src2) {
4531 if (!IsMipsSoftFloatABI) {
4532 const DoubleRegister fparg2 = f13;
4534 DCHECK(src1 != fparg2);
4542 if (kArchEndian == kLittle) {
4556 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4557 Register caller_args_count_reg,
4558 Register scratch0, Register scratch1) {
4560 if (callee_args_count.is_reg()) {
4561 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4564 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4571 Register dst_reg = scratch0;
4572 Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4573 Daddu(dst_reg, dst_reg,
4574 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4576 Register src_reg = caller_args_count_reg;
4578 if (callee_args_count.is_reg()) {
4579 Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4580 Daddu(src_reg, src_reg, Operand(kPointerSize));
4583 Operand((callee_args_count.immediate() + 1) * kPointerSize));
4586 if (FLAG_debug_code) {
4587 Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
4593 Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4594 Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
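// The loop below copies the outgoing (callee) arguments over the caller's argument area,
// one pointer at a time, stopping once sp reaches src_reg, so the tail-called function
// reuses the caller's frame.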
4601 Register tmp_reg = scratch1;
4605 Dsubu(src_reg, src_reg, Operand(kPointerSize));
4606 Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
4607 Ld(tmp_reg, MemOperand(src_reg));
4608 Sd(tmp_reg, MemOperand(dst_reg));
4610 Branch(&loop, ne, sp, Operand(src_reg));
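// InvokePrologue reconciles expected and actual argument counts: a0 carries the actual
// count and a2 the expected count, and any mismatch is routed through the
// ArgumentsAdaptorTrampoline (called or tail-jumped depending on 'flag').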
4616 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4617 const ParameterCount& actual, Label* done,
4618 bool* definitely_mismatches,
4620 bool definitely_matches = false;
4621 *definitely_mismatches = false;
4622 Label regular_invoke;
4633 DCHECK(actual.is_immediate() || actual.reg() == a0);
4634 DCHECK(expected.is_immediate() || expected.reg() == a2);
4636 if (expected.is_immediate()) {
4637 DCHECK(actual.is_immediate());
4638 li(a0, Operand(actual.immediate()));
4639 if (expected.immediate() == actual.immediate()) {
4640 definitely_matches = true;
4642 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4643 if (expected.immediate() == sentinel) {
4648 definitely_matches = true;
4650 *definitely_mismatches = true;
4651 li(a2, Operand(expected.immediate()));
4654 } else if (actual.is_immediate()) {
4655 li(a0, Operand(actual.immediate()));
4656 Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
4658 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4661 if (!definitely_matches) {
4662 Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
4663 if (flag == CALL_FUNCTION) {
4665 if (!*definitely_mismatches) {
4669 Jump(adaptor, RelocInfo::CODE_TARGET);
4671 bind(&regular_invoke);
4675 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
4676 const ParameterCount& expected,
4677 const ParameterCount& actual) {
4680 li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
4681 Lb(t0, MemOperand(t0));
4682 Branch(&skip_hook, eq, t0, Operand(zero_reg));
4686 if (actual.is_reg()) {
4687 mov(t0, actual.reg());
4689 li(t0, actual.immediate());
4691 Dlsa(t0, sp, t0, kPointerSizeLog2);
4692 Ld(t0, MemOperand(t0));
4693 FrameScope frame(this,
4694 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4695 if (expected.is_reg()) {
4696 SmiTag(expected.reg());
4697 Push(expected.reg());
4699 if (actual.is_reg()) {
4700 SmiTag(actual.reg());
4703 if (new_target.is_valid()) {
4709 CallRuntime(Runtime::kDebugOnFunctionCall);
4711 if (new_target.is_valid()) {
4714 if (actual.is_reg()) {
4716 SmiUntag(actual.reg());
4718 if (expected.is_reg()) {
4719 Pop(expected.reg());
4720 SmiUntag(expected.reg());
4726 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4727 const ParameterCount& expected,
4728 const ParameterCount& actual,
4731 DCHECK(flag == JUMP_FUNCTION || has_frame());
4732 DCHECK(function == a1);
4733 DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
4736 CheckDebugHook(function, new_target, expected, actual);
4739 if (!new_target.is_valid()) {
4740 LoadRoot(a3, RootIndex::kUndefinedValue);
4744 bool definitely_mismatches = false;
4745 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
4746 if (!definitely_mismatches) {
4750 Register code = kJavaScriptCallCodeStartRegister;
4751 Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
4752 if (flag == CALL_FUNCTION) {
4753 Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
4756 DCHECK(flag == JUMP_FUNCTION);
4757 Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
4766 void MacroAssembler::InvokeFunction(Register function, Register new_target,
4767 const ParameterCount& actual,
4770 DCHECK(flag == JUMP_FUNCTION || has_frame());
4773 DCHECK(function == a1);
4774 Register expected_reg = a2;
4775 Register temp_reg = t0;
4776 Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4777 Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4780 FieldMemOperand(temp_reg,
4781 SharedFunctionInfo::kFormalParameterCountOffset));
4782 ParameterCount expected(expected_reg);
4783 InvokeFunctionCode(a1, new_target, expected, actual, flag);
4786 void MacroAssembler::InvokeFunction(Register function,
4787 const ParameterCount& expected,
4788 const ParameterCount& actual,
4791 DCHECK(flag == JUMP_FUNCTION || has_frame());
4794 DCHECK(function == a1);
4797 Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4799 InvokeFunctionCode(a1, no_reg, expected, actual, flag);
4806 void MacroAssembler::GetObjectType(Register object,
4808 Register type_reg) {
4809 Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4810 Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4817 void MacroAssembler::CallStub(CodeStub* stub,
4821 BranchDelaySlot bd) {
4822 DCHECK(AllowThisStubCall(stub));
4823 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4826 void MacroAssembler::TailCallStub(CodeStub* stub,
4830 BranchDelaySlot bd) {
4831 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4834 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
4835 return has_frame() || !stub->SometimesSetsUpAFrame();
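// DaddOverflow/DsubOverflow detect signed overflow branch-free: for addition,
// (result ^ left) & (result ^ right) has its sign bit set exactly when both operands
// share a sign and the result's sign differs; subtraction uses the mirrored form.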
4838 void TurboAssembler::DaddOverflow(Register dst, Register left,
4839 const Operand& right, Register overflow) {
4840 BlockTrampolinePoolScope block_trampoline_pool(this);
4841 Register right_reg = no_reg;
4842 Register scratch = t8;
4843 if (!right.is_reg()) {
4844 li(at, Operand(right));
4847 right_reg = right.rm();
4850 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4851 overflow != scratch);
4852 DCHECK(overflow != left && overflow != right_reg);
4854 if (dst == left || dst == right_reg) {
4855 daddu(scratch, left, right_reg);
4856 xor_(overflow, scratch, left);
4857 xor_(at, scratch, right_reg);
4858 and_(overflow, overflow, at);
4861 daddu(dst, left, right_reg);
4862 xor_(overflow, dst, left);
4863 xor_(at, dst, right_reg);
4864 and_(overflow, overflow, at);
4868 void TurboAssembler::DsubOverflow(Register dst, Register left,
4869 const Operand& right, Register overflow) {
4870 BlockTrampolinePoolScope block_trampoline_pool(this);
4871 Register right_reg = no_reg;
4872 Register scratch = t8;
4873 if (!right.is_reg()) {
4874 li(at, Operand(right));
4877 right_reg = right.rm();
4880 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4881 overflow != scratch);
4882 DCHECK(overflow != left && overflow != right_reg);
4884 if (dst == left || dst == right_reg) {
4885 dsubu(scratch, left, right_reg);
4886 xor_(overflow, left, scratch);
4887 xor_(at, left, right_reg);
4888 and_(overflow, overflow, at);
4891 dsubu(dst, left, right_reg);
4892 xor_(overflow, left, dst);
4893 xor_(at, left, right_reg);
4894 and_(overflow, overflow, at);
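// MulOverflow: Mul produces the low 32 bits and Mulh the high 32 bits of the product; the
// multiplication overflows int32 exactly when the high word differs from the sign
// extension of the low word, which the trailing dsra32/xor_ pair computes into 'overflow'.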
4898 void TurboAssembler::MulOverflow(Register dst, Register left,
4899 const Operand& right, Register overflow) {
4900 BlockTrampolinePoolScope block_trampoline_pool(this);
4901 Register right_reg = no_reg;
4902 Register scratch = t8;
4903 if (!right.is_reg()) {
4904 li(at, Operand(right));
4907 right_reg = right.rm();
4910 DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
4911 overflow != scratch);
4912 DCHECK(overflow != left && overflow != right_reg);
4914 if (dst == left || dst == right_reg) {
4915 Mul(scratch, left, right_reg);
4916 Mulh(overflow, left, right_reg);
4919 Mul(dst, left, right_reg);
4920 Mulh(overflow, left, right_reg);
4923 dsra32(scratch, dst, 0);
4924 xor_(overflow, overflow, scratch);
4927 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
4929 const Runtime::Function* f = Runtime::FunctionForId(fid);
4934 PrepareCEntryArgs(f->nargs);
4935 PrepareCEntryFunction(ExternalReference::Create(f));
4936 DCHECK(!AreAliased(centry, a0, a1));
4937 Daddu(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
4941 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
4942 SaveFPRegsMode save_doubles) {
4948 CHECK(f->nargs < 0 || f->nargs == num_arguments);
4954 PrepareCEntryArgs(num_arguments);
4955 PrepareCEntryFunction(ExternalReference::Create(f));
4957 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
4958 Call(code, RelocInfo::CODE_TARGET);
4961 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
4962 const Runtime::Function* function = Runtime::FunctionForId(fid);
4963 DCHECK_EQ(1, function->result_size);
4964 if (function->nargs >= 0) {
4965 PrepareCEntryArgs(function->nargs);
4967 JumpToExternalReference(ExternalReference::Create(fid));
4970 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4972 bool builtin_exit_frame) {
4973 PrepareCEntryFunction(builtin);
4974 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
4975 kArgvOnStack, builtin_exit_frame);
4976 Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
4979 void MacroAssembler::JumpToInstructionStream(Address entry) {
4980 li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
4981 Jump(kOffHeapTrampolineRegister);
4984 void MacroAssembler::LoadWeakValue(Register out, Register in,
4985 Label* target_if_cleared) {
4986 Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
4988 And(out, in, Operand(~kWeakHeapObjectMask));
4991 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4992 Register scratch1, Register scratch2) {
4993 DCHECK_GT(value, 0);
4994 if (FLAG_native_code_counters && counter->Enabled()) {
4995 li(scratch2, ExternalReference::Create(counter));
4996 Lw(scratch1, MemOperand(scratch2));
4997 Addu(scratch1, scratch1, Operand(value));
4998 Sw(scratch1, MemOperand(scratch2));
5003 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5004 Register scratch1, Register scratch2) {
5005 DCHECK_GT(value, 0);
5006 if (FLAG_native_code_counters && counter->Enabled()) {
5007 li(scratch2, ExternalReference::Create(counter));
5008 Lw(scratch1, MemOperand(scratch2));
5009 Subu(scratch1, scratch1, Operand(value));
5010 Sw(scratch1, MemOperand(scratch2));
5018 void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
5020 if (emit_debug_code())
5021 Check(cc, reason, rs, rt);
5024 void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
5027 Branch(&L, cc, rs, rt);
5033 void TurboAssembler::Abort(AbortReason reason) {
5036 const char* msg = GetAbortReason(reason);
5038 RecordComment("Abort message: ");
5043 if (trap_on_abort()) {
5048 if (should_abort_hard()) {
5050 FrameScope assume_frame(this, StackFrame::NONE);
5051 PrepareCallCFunction(0, a0);
5052 li(a0, Operand(static_cast<int>(reason)));
5053 CallCFunction(ExternalReference::abort_with_reason(), 1);
5057 Move(a0, Smi::FromInt(static_cast<int>(reason)));
5063 FrameScope scope(this, StackFrame::NONE);
5064 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
5066 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
5069 if (is_trampoline_pool_blocked()) {
5075 static const int kExpectedAbortInstructions = 10;
5076 int abort_instructions = InstructionsGeneratedSince(&abort_start);
5077 DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
5078 while (abort_instructions++ < kExpectedAbortInstructions) {
5084 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5085 Ld(dst, NativeContextMemOperand());
5086 Ld(dst, ContextMemOperand(dst, index));
5090 void TurboAssembler::StubPrologue(StackFrame::Type type) {
5091 UseScratchRegisterScope temps(this);
5092 Register scratch = temps.Acquire();
5093 li(scratch, Operand(StackFrame::TypeToMarker(type)));
5094 PushCommonFrame(scratch);
5097 void TurboAssembler::Prologue() { PushStandardFrame(a1); }
5099 void TurboAssembler::EnterFrame(StackFrame::Type type) {
5100 BlockTrampolinePoolScope block_trampoline_pool(this);
5101 int stack_offset = -3 * kPointerSize;
5102 const int fp_offset = 1 * kPointerSize;
5103 daddiu(sp, sp, stack_offset);
5104 stack_offset = -stack_offset - kPointerSize;
5105 Sd(ra, MemOperand(sp, stack_offset));
5106 stack_offset -= kPointerSize;
5107 Sd(fp, MemOperand(sp, stack_offset));
5108 stack_offset -= kPointerSize;
5109 li(t9, Operand(StackFrame::TypeToMarker(type)));
5110 Sd(t9, MemOperand(sp, stack_offset));
5112 DCHECK_EQ(stack_offset, 0);
5113 Daddu(fp, sp, Operand(fp_offset));
5116 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
5117 daddiu(sp, fp, 2 * kPointerSize);
5118 Ld(ra, MemOperand(fp, 1 * kPointerSize));
5119 Ld(fp, MemOperand(fp, 0 * kPointerSize));
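// EnterExitFrame builds an exit (or builtin-exit) frame: ra, fp and the frame-type marker
// are stored, fp is re-pointed at the fixed part, the code object, C-entry fp and context
// are recorded for the stack walker, caller-saved FPU registers are optionally spilled,
// sp is aligned, and the final sp is written back into the frame's kSPOffset slot.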
5122 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
5123 StackFrame::Type frame_type) {
5124 DCHECK(frame_type == StackFrame::EXIT ||
5125 frame_type == StackFrame::BUILTIN_EXIT);
5128 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5129 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5130 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5143 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
5144 Sd(ra, MemOperand(sp, 4 * kPointerSize));
5145 Sd(fp, MemOperand(sp, 3 * kPointerSize));
5147 UseScratchRegisterScope temps(this);
5148 Register scratch = temps.Acquire();
5149 li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
5150 Sd(scratch, MemOperand(sp, 2 * kPointerSize));
5153 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
5155 if (emit_debug_code()) {
5156 Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5160 BlockTrampolinePoolScope block_trampoline_pool(this);
5162 li(t8, CodeObject(), CONSTANT_SIZE);
5163 Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5166 li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
5168 Sd(fp, MemOperand(t8));
5170 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
5171 Sd(cp, MemOperand(t8));
5174 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5177 int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
5178 int space = kNumOfSavedRegisters * kDoubleSize;
5179 Dsubu(sp, sp, Operand(space));
5181 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5182 FPURegister reg = FPURegister::from_code(2 * i);
5183 Sdc1(reg, MemOperand(sp, i * kDoubleSize));
5190 DCHECK_GE(stack_space, 0);
5191 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5192 if (frame_alignment > 0) {
5193 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
5194 And(sp, sp, Operand(-frame_alignment));
5199 UseScratchRegisterScope temps(this);
5200 Register scratch = temps.Acquire();
5201 daddiu(scratch, sp, kPointerSize);
5202 Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
5205 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5207 bool argument_count_is_length) {
5208 BlockTrampolinePoolScope block_trampoline_pool(this);
5212 int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
5213 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
5214 kNumOfSavedRegisters * kDoubleSize));
5215 for (int i = 0; i < kNumOfSavedRegisters; i++) {
5216 FPURegister reg = FPURegister::from_code(2 * i);
5217 Ldc1(reg, MemOperand(t8, i * kDoubleSize));
5223 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
5224 Sd(zero_reg, MemOperand(t8));
5228 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
5229 Ld(cp, MemOperand(t8));
5233 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
5234 Sd(a3, MemOperand(t8));
5239 Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5240 Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5242 if (argument_count.is_valid()) {
5243 if (argument_count_is_length) {
5244 daddu(sp, sp, argument_count);
5246 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
5251 Ret(USE_DELAY_SLOT);
5254 daddiu(sp, sp, 2 * kPointerSize);
5257 int TurboAssembler::ActivationFrameAlignment() {
5258 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5263 return base::OS::ActivationFrameAlignment();
5264 #else // V8_HOST_ARCH_MIPS
5269 return FLAG_sim_stack_alignment;
5270 #endif // V8_HOST_ARCH_MIPS
5274 void MacroAssembler::AssertStackIsAligned() {
5275 if (emit_debug_code()) {
5276 const int frame_alignment = ActivationFrameAlignment();
5277 const int frame_alignment_mask = frame_alignment - 1;
5279 if (frame_alignment > kPointerSize) {
5280 Label alignment_as_expected;
5281 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
5283 UseScratchRegisterScope temps(this);
5284 Register scratch = temps.Acquire();
5285 andi(scratch, sp, frame_alignment_mask);
5286 Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
5289 stop("Unexpected stack alignment");
5290 bind(&alignment_as_expected);
5295 void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
5296 if (SmiValuesAre32Bits()) {
5297 Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset())));
5299 DCHECK(SmiValuesAre31Bits());
5305 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5309 UseScratchRegisterScope temps(this);
5310 Register scratch = temps.Acquire();
5311 JumpIfSmi(src, smi_case, scratch, USE_DELAY_SLOT);
5315 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
5316 Register scratch, BranchDelaySlot bd) {
5317 DCHECK_EQ(0, kSmiTag);
5318 andi(scratch, value, kSmiTagMask);
5319 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
                                  Register scratch, BranchDelaySlot bd) {
  DCHECK_EQ(0, kSmiTag);
  andi(scratch, value, kSmiTagMask);
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
#if defined(__APPLE__)
  DCHECK_EQ(1, kSmiTagMask);
#else
  DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  and_(scratch, reg1, reg2);
  JumpIfSmi(scratch, on_either_smi);
}
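// Rationale for the and_ above: a Smi has tag bit 0, so if either reg1 or
// reg2 is a Smi the AND of the two values also has a clear low bit and
// JumpIfSmi branches; only when both low bits are set (both heap objects)
// does execution fall through.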
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    andi(scratch, object, kSmiTagMask);
    Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
  }
}
void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    andi(scratch, object, kSmiTagMask);
    Check(eq, AbortReason::kOperandIsNotASmi, scratch, Operand(zero_reg));
  }
}
void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
          Operand(zero_reg));

    ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
    Lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
    And(t8, t8, Operand(Map::IsConstructorBit::kMask));
    Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
  }
}
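// The constructor check above loads the object's Map, reads its bit-field
// byte, and masks it with Map::IsConstructorBit::kMask; a zero result means
// the map is not marked as a constructor and the Check aborts.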
void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
          Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(eq, AbortReason::kOperandIsNotAFunction, t8,
          Operand(JS_FUNCTION_TYPE));
  }
}
void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    STATIC_ASSERT(kSmiTag == 0);
    SmiTst(object, t8);
    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
          Operand(zero_reg));
    GetObjectType(object, t8, t8);
    Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
          Operand(JS_BOUND_FUNCTION_TYPE));
  }
}
void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!emit_debug_code()) return;
  BlockTrampolinePoolScope block_trampoline_pool(this);
  STATIC_ASSERT(kSmiTag == 0);
  SmiTst(object, t8);
  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
        Operand(zero_reg));

  GetObjectType(object, t8, t8);

  Label done;

  // Check if JSGeneratorObject.
  Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));

  // Check if JSAsyncFunctionObject.
  Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));

  // Check if JSAsyncGeneratorObject.
  Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));

  Abort(AbortReason::kOperandIsNotAGeneratorObject);

  bind(&done);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    LoadRoot(scratch, RootIndex::kUndefinedValue);
    Branch(&done_checking, eq, object, Operand(scratch));
    GetObjectType(object, scratch, scratch);
    Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
           Operand(ALLOCATION_SITE_TYPE));
    bind(&done_checking);
  }
}
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  // Check if one of the operands is NaN.
  CompareIsNanF32(src1, src2);
  BranchTrueF(out_of_line);

  if (kArchVariant >= kMips64r6) {
    max_s(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    CompareF32(OLT, src1, src2);
    BranchTrueShortF(&return_right);
    CompareF32(OLT, src2, src1);
    BranchTrueShortF(&return_left);

    // Operands compare equal, but signed zero may be involved: inspect the
    // bit pattern of src1 to pick the +0 operand.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      mfc1(t8, src1);
      Branch(&return_left, eq, t8, Operand(zero_reg));
      Branch(&return_right);
    }

    bind(&return_right);
    if (src2 != dst) Move_s(dst, src2);
    Branch(&done);

    bind(&return_left);
    if (src1 != dst) Move_s(dst, src1);

    bind(&done);
  }
}
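// Why the bit-pattern tie-break above: IEEE max(+0, -0) should be +0, but the
// OLT compares treat the two zeros as equal. Reading src1's raw bits lets the
// code return src1 only when its bits are all zero (i.e. src1 is +0);
// otherwise src2 must be the +0 operand. The *OutOfLine helpers below handle
// the NaN case by adding the operands, which propagates a quiet NaN.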
void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_s(dst, src1, src2);
}
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  // Check if one of the operands is NaN.
  CompareIsNanF32(src1, src2);
  BranchTrueF(out_of_line);

  if (kArchVariant >= kMips64r6) {
    min_s(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    CompareF32(OLT, src1, src2);
    BranchTrueShortF(&return_left);
    CompareF32(OLT, src2, src1);
    BranchTrueShortF(&return_right);

    // Operands compare equal; min(+0, -0) is -0, so return src1 only when its
    // bit pattern is non-zero (i.e. src1 is -0).
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      mfc1(t8, src1);
      Branch(&return_right, eq, t8, Operand(zero_reg));
      Branch(&return_left);
    }

    bind(&return_right);
    if (src2 != dst) Move_s(dst, src2);
    Branch(&done);

    bind(&return_left);
    if (src1 != dst) Move_s(dst, src1);

    bind(&done);
  }
}
void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_s(dst, src1, src2);
}
void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  // Check if one of the operands is NaN.
  CompareIsNanF64(src1, src2);
  BranchTrueF(out_of_line);

  if (kArchVariant >= kMips64r6) {
    max_d(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    CompareF64(OLT, src1, src2);
    BranchTrueShortF(&return_right);
    CompareF64(OLT, src2, src1);
    BranchTrueShortF(&return_left);

    // Operands compare equal, but signed zero may be involved: inspect the
    // full 64-bit pattern of src1 to pick the +0 operand.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      dmfc1(t8, src1);
      Branch(&return_left, eq, t8, Operand(zero_reg));
      Branch(&return_right);
    }

    bind(&return_right);
    if (src2 != dst) Move_d(dst, src2);
    Branch(&done);

    bind(&return_left);
    if (src1 != dst) Move_d(dst, src1);

    bind(&done);
  }
}
void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_d(dst, src1, src2);
}
void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
                                FPURegister src2, Label* out_of_line) {
  // Check if one of the operands is NaN.
  CompareIsNanF64(src1, src2);
  BranchTrueF(out_of_line);

  if (kArchVariant >= kMips64r6) {
    min_d(dst, src1, src2);
  } else {
    Label return_left, return_right, done;

    CompareF64(OLT, src1, src2);
    BranchTrueShortF(&return_left);
    CompareF64(OLT, src2, src1);
    BranchTrueShortF(&return_right);

    // Operands compare equal; min(+0, -0) is -0, so return src1 only when its
    // 64-bit pattern is non-zero (i.e. src1 is -0).
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      dmfc1(t8, src1);
      Branch(&return_right, eq, t8, Operand(zero_reg));
      Branch(&return_left);
    }

    bind(&return_right);
    if (src2 != dst) Move_d(dst, src2);
    Branch(&done);

    bind(&return_left);
    if (src1 != dst) Move_d(dst, src1);

    bind(&done);
  }
}
void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
                                         FPURegister src2) {
  add_d(dst, src1, src2);
}
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  // Count each double argument as two register arguments.
  num_reg_arguments += 2 * num_double_arguments;

  // Up to eight simple arguments are passed in registers a0..a7; anything
  // beyond that spills to the stack.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  stack_passed_words += kCArgSlotCount;
  return stack_passed_words;
}
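// Illustrative arithmetic (values assumed, not from the original source):
// with 3 integer arguments and 4 double arguments the count becomes
// 3 + 2 * 4 = 11, so 11 - 8 = 3 words spill to the stack, plus the
// kCArgSlotCount reserved argument slots (0 under the n64 ABI).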
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();

  // Remaining arguments are pushed on the stack; reserve space for them and,
  // if required, realign sp while remembering its original value.
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make the stack end at the alignment and make room for the stack
    // arguments plus the original value of sp.
    mov(scratch, sp);
    Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    And(sp, sp, Operand(-frame_alignment));
    Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}
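// The original sp saved above is the value reloaded by CallCFunctionHelper
// after the call (the Ld from sp + stack_passed_arguments * kPointerSize), so
// the aligned and unaligned frame setups tear down symmetrically.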
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  li(t9, function);
  CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}
void TurboAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator, which has its own alignment check.
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
  if (emit_debug_code()) {
    int frame_alignment = base::OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
      Label alignment_as_expected;
      {
        UseScratchRegisterScope temps(this);
        Register scratch = temps.Acquire();
        And(scratch, sp, Operand(frame_alignment_mask));
        Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
      }
      // Don't use Check here, as it will call Runtime_Abort and re-enter.
      stop("Unexpected alignment in CallCFunction");
      bind(&alignment_as_expected);
    }
  }
#endif  // V8_HOST_ARCH_MIPS

  // Just call directly. The called function cannot cause a GC or allow
  // preemption, so the return address in the link register stays correct.
  {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (function != t9) {
      // The MIPS ABI expects the target of an indirect call in t9, so route
      // the call through t9.
      mov(t9, function);
      function = t9;
    }

    Call(function);
  }

  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);

  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
    Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

#undef BRANCH_ARGS_CHECK

void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met) {
  And(scratch, object, Operand(~kPageAlignmentMask));
  Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  And(scratch, scratch, Operand(mask));
  Branch(condition_met, cc, scratch, Operand(zero_reg));
}
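// Masking with ~kPageAlignmentMask rounds the object address down to the
// start of its page (the MemoryChunk header), from which the chunk's flag
// word is loaded; the final And/Branch then tests the requested flag bits
// under the given condition.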
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  // Preserve ra, which is clobbered while computing the code start address.
  push(ra);

  // Put the address of the current instruction into ra.
  if (kArchVariant == kMips64r6) {
    addiupc(ra, 1);
  } else {
    nal();
    nop();
  }
  int pc = pc_offset();
  li(dst, Operand(pc));
  Dsubu(dst, ra, dst);

  pop(ra);  // Restore ra.
}
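// How the subtraction works: after the nal/addiupc sequence ra holds the
// absolute address of a known instruction, while pc_offset() is that same
// instruction's offset from the start of the generated code, so
// ra - pc_offset() yields the code start address in dst.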
void TurboAssembler::ResetSpeculationPoisonRegister() {
  li(kSpeculationPoisonRegister, -1);
}

#endif  // V8_TARGET_ARCH_MIPS64