#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"

#define __ masm()->

void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();
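
  // Unlike on ARM, we don't save all the registers, just the useful ones.
  // For the rest, there are gaps on the stack, so the offsets remain the same.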
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit() | ra.bit();

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
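
  // Save all double FPU registers before messing with them.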
  __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ Sdc1(fpu_reg, MemOperand(sp, offset));
  }
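
  // Save all float FPU registers before messing with them.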
  __ Dsubu(sp, sp, Operand(kFloatRegsSize));
  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
    int code = config->GetAllocatableFloatCode(i);
    const FloatRegister fpu_reg = FloatRegister::from_code(code);
    int offset = code * kFloatSize;
    __ Swc1(fpu_reg, MemOperand(sp, offset));
  }
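
  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps on the stack for the other registers.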
  __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }

  __ li(a2, Operand(ExternalReference::Create(
                IsolateAddressId::kCEntryFPAddress, isolate())));
  __ Sd(fp, MemOperand(a2));

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
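
  // Get the bailout id from the stack.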
  __ Ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
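
  // Get the address of the location in the code object (a3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register a4.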
  __ mov(a3, ra);
  __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  __ Dsubu(a4, fp, a4);
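
  // Allocate a new deoptimizer object.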
  __ PrepareCallCFunction(6, a5);
  // Pass six arguments, according to the n64 ABI.
  __ mov(a0, zero_reg);
  Label context_check;
  __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(a1, &context_check);
  __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ li(a1, Operand(static_cast<int>(deopt_kind())));
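  // a2: bailout id already loaded.
  // a3: code address or 0 already loaded.
  // a4: already has fp-to-sp delta.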
  __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }

  // Preserve the "deoptimizer" object in register v0 and get the input
  // frame descriptor pointer into a1 (deoptimizer->input_). Move the
  // deoptimizer object to a0 for the ComputeOutputFrames() call below.
  __ mov(a0, v0);
  __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
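
  // Copy core registers into FrameDescription::registers_[kNumRegisters].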
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs & (1 << i)) != 0) {
      __ Ld(a2, MemOperand(sp, i * kPointerSize));
      __ Sd(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      __ li(a2, kDebugZapValue);
      __ Sd(a2, MemOperand(a1, offset));
    }
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
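  // Copy double FPU registers into
  // double_registers_[DoubleRegister::kNumAllocatableRegisters].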
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset =
        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
    __ Ldc1(f0, MemOperand(sp, src_offset));
    __ Sdc1(f0, MemOperand(a1, dst_offset));
  }

  int float_regs_offset = FrameDescription::float_registers_offset();
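  // Copy float FPU registers into
  // float_registers_[FloatRegister::kNumAllocatableRegisters].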
  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
    int code = config->GetAllocatableFloatCode(i);
    int dst_offset = code * kFloatSize + float_regs_offset;
    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
    __ Lwc1(f0, MemOperand(sp, src_offset));
    __ Swc1(f0, MemOperand(a1, dst_offset));
  }
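
  // Remove the bailout id and the saved registers from the stack.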
  __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
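
  // Compute a pointer to the unwinding limit in register a2; that is
  // the first stack slot not part of the input frame.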
  __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
  __ Daddu(a2, a2, sp);
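
  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.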
  __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ BranchShort(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(a4);
  __ Sd(a4, MemOperand(a3, 0));
  __ daddiu(a3, a3, sizeof(uint64_t));
  __ bind(&pop_loop_header);
  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
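
  // Compute the output frame in the deoptimizer.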
  __ push(a0);  // Preserve deoptimizer object across call.
  // a0: deoptimizer object; a1: scratch.
  __ PrepareCallCFunction(1, a1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(a0);  // Restore deoptimizer object.

  __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
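
  // Replace the current (input) frame with the output frames.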
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
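  // Outer loop state: a4 = current "FrameDescription** output_",
  // a1 = one past the last FrameDescription**.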
  __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
  __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
  __ Dlsa(a1, a4, a1, kPointerSizeLog2);
  __ BranchShort(&outer_loop_header);
  __ bind(&outer_push_loop);
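  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.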
  __ Ld(a2, MemOperand(a4, 0));  // output_[ix]
  __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ BranchShort(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
  __ Daddu(a6, a2, Operand(a3));
  __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
  __ push(a7);
  __ bind(&inner_loop_header);
  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));

  __ Daddu(a4, a4, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
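
  // Restore the double FPU registers from the input frame description.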
  __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
  }
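
  // Push the pc and the continuation from the last output frame.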
  __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
  __ push(a6);
  __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
  __ push(a6);
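
  // Technically restoring 'at' should work unless zero_reg is also restored,
  // but it's safer to check for this.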
  DCHECK(!(at.bit() & restored_regs));
  // Restore the registers from the last output frame.
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ Ld(ToRegister(i), MemOperand(at, offset));
    }
  }

  __ pop(at);  // Get continuation, leave pc on stack.
  __ pop(ra);
  __ Jump(at);
  __ stop("Unreachable.");
}
#ifdef _MIPS_ARCH_MIPS64R6
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif

void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
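
  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.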
  Label table_start, done, trampoline_jump;
  __ bind(&table_start);
#ifdef _MIPS_ARCH_MIPS64R6
  int kMaxEntriesBranchReach =
      (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
  int kMaxEntriesBranchReach =
      (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif

  if (count() <= kMaxEntriesBranchReach) {
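    // Common case: every entry can reach the end of the table with a
    // single short branch.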
    for (int i = 0; i < count(); i++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      if (kArchVariant == kMips64r6) {
        __ li(kScratchReg, i);
        __ BranchShort(PROTECT, &done);
      } else {
        __ BranchShort(USE_DELAY_SLOT, &done);  // Expose delay slot.
        __ li(kScratchReg, i);                  // In the delay slot.
        __ nop();
      }
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }

    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&done);
    __ Push(kScratchReg);
  } else {
    DCHECK_NE(kArchVariant, kMips64r6);
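    // Uncommon case: a short branch cannot reach across the whole table,
    // so create mini trampolines along the way to reach the end of it.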
    for (int i = 0, j = 0; i < count(); i++, j++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      if (j >= kMaxEntriesBranchReach) {
        j = 0;
        __ li(kScratchReg, i);
        __ bind(&trampoline_jump);
        trampoline_jump = Label();  // Start a new trampoline range.
        __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
        __ nop();
      } else {
        __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);  // Expose delay slot.
        __ li(kScratchReg, i);                             // In the delay slot.
        __ nop();
      }
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }

    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&trampoline_jump);
    __ Push(kScratchReg);
  }
}

bool Deoptimizer::PadTopOfStackRegister() { return false; }

void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}

void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}

void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}