#include "src/assembler-inl.h"
#include "src/deoptimizer.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"

namespace v8 {
namespace internal {

#define __ masm()->

void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit() | ra.bit();

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
  const int kFloatRegsSize = kFloatSize * FloatRegister::kNumRegisters;
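
  // Save all FPU registers before messing with them.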
  __ Subu(sp, sp, Operand(kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ Sdc1(fpu_reg, MemOperand(sp, offset));
  }
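
  // Likewise save the allocatable single-precision registers below the
  // doubles.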
  __ Subu(sp, sp, Operand(kFloatRegsSize));
  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
    int code = config->GetAllocatableFloatCode(i);
    const FloatRegister fpu_reg = FloatRegister::from_code(code);
    int offset = code * kFloatSize;
    __ swc1(fpu_reg, MemOperand(sp, offset));
  }
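
  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps on the stack for the registers that are not saved, so the
  // offsets remain uniform.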
  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }
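
  // Record this frame's fp in the isolate's c_entry_fp slot so the stack
  // can still be iterated from here.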
  __ li(a2, Operand(ExternalReference::Create(
                IsolateAddressId::kCEntryFPAddress, isolate())));
  __ sw(fp, MemOperand(a2));
  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
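
  // Get the bailout id from the stack; it sits just above the saved
  // registers area.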
  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
  // Get the return address (for lazy deoptimization) in a3 and compute the
  // fp-to-sp delta in register t0.
  __ mov(a3, ra);
  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  __ Subu(t0, fp, t0);
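
  // Allocate a new deoptimizer object.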
  __ PrepareCallCFunction(6, t1);
  // Pass six arguments: the first four in a0..a3, the last two on the stack.
  __ mov(a0, zero_reg);
  Label context_check;
  __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(a1, &context_check);
  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ li(a1, Operand(static_cast<int>(deopt_kind())));
  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
  __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }
  // Preserve the "deoptimizer" object in v0 and get the input frame
  // descriptor pointer into a1 (deoptimizer->input_). Move the deoptimizer
  // object to a0 for the ComputeOutputFrames() call below.
  __ mov(a0, v0);
  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
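
  // Copy core registers into FrameDescription::registers_[kNumRegisters].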
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs & (1 << i)) != 0) {
      __ lw(a2, MemOperand(sp, i * kPointerSize));
      __ sw(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      __ li(a2, kDebugZapValue);
      __ sw(a2, MemOperand(a1, offset));
    }
  }
  int double_regs_offset = FrameDescription::double_registers_offset();
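  // Copy FPU double registers into
  // double_registers_[DoubleRegister::kNumAllocatableRegisters].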
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset =
        code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
    __ Ldc1(f0, MemOperand(sp, src_offset));
    __ Sdc1(f0, MemOperand(a1, dst_offset));
  }
  int float_regs_offset = FrameDescription::float_registers_offset();
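  // Copy FPU single registers into
  // float_registers_[FloatRegister::kNumAllocatableRegisters].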
  for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
    int code = config->GetAllocatableFloatCode(i);
    int dst_offset = code * kFloatSize + float_regs_offset;
    int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
    __ lwc1(f0, MemOperand(sp, src_offset));
    __ swc1(f0, MemOperand(a1, dst_offset));
  }
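
  // Remove the bailout id and the saved registers from the stack.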
  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  // Compute a pointer to the unwinding limit in register a2; that is
  // the first stack slot not part of the input frame.
  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
  __ Addu(a2, a2, sp);
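
  // Unwind the stack down to - but not including - the unwinding limit
  // and copy the contents of the activation frame to the input frame
  // description.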
  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ BranchShort(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(t0);
  __ sw(t0, MemOperand(a3, 0));
  __ addiu(a3, a3, sizeof(uint32_t));
  __ bind(&pop_loop_header);
  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
  // Compute the output frames in the deoptimizer.
  __ push(a0);  // Preserve deoptimizer object across call.
  __ PrepareCallCFunction(1, a1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(a0);  // Restore deoptimizer object.
  __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
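
  // Replace the current (input) frame with the output frames.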
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
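  // Outer loop state: t0 = current FrameDescription**, a1 = one past the
  // last FrameDescription**.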
  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));
  __ Lsa(a1, t0, a1, kPointerSizeLog2);
  __ BranchShort(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
  __ lw(a2, MemOperand(t0, 0));
  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ BranchShort(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
  __ Addu(t2, a2, Operand(a3));
  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
  __ push(t3);
  __ bind(&inner_loop_header);
  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
  __ Addu(t0, t0, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
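
  // Reload the double registers from the input frame description.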
  __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
  }
  // Push pc and continuation from the last output frame.
  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
  __ push(t2);
  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
  __ push(t2);
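
  // Technically restoring 'at' should work unless zero_reg is also restored,
  // but it's safer to check for this.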
  DCHECK(!(at.bit() & restored_regs));
  // Restore the registers from the last output frame.
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ lw(ToRegister(i), MemOperand(at, offset));
    }
  }
  __ pop(at);  // Get continuation, leave pc on stack.
  __ pop(ra);
  __ Jump(at);
  __ stop("Unreachable.");
}
#ifdef _MIPS_ARCH_MIPS32R6
const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
#else
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
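
  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.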
  Label table_start, done, trampoline_jump;
  __ bind(&table_start);
#ifdef _MIPS_ARCH_MIPS32R6
  int kMaxEntriesBranchReach =
      (1 << (kImm26Bits - 2)) / (table_entry_size_ / kInstrSize);
#else
  int kMaxEntriesBranchReach =
      (1 << (kImm16Bits - 2)) / (table_entry_size_ / kInstrSize);
#endif
  if (count() <= kMaxEntriesBranchReach) {
    // Common case.
    for (int i = 0; i < count(); i++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      if (IsMipsArchVariant(kMips32r6)) {
        __ li(kScratchReg, i);
        __ BranchShort(PROTECT, &done);
      } else {
        __ BranchShort(USE_DELAY_SLOT, &done);  // Expose delay slot.
        __ li(kScratchReg, i);                  // In the delay slot.
        __ nop();
      }
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }
    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&done);
    __ Push(kScratchReg);
  } else {
    DCHECK(!IsMipsArchVariant(kMips32r6));
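    // Uncommon case: the branch cannot reach all entries, so chain short
    // branches through mini trampolines placed within reach.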
    for (int i = 0, j = 0; i < count(); i++, j++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      if (j >= kMaxEntriesBranchReach) {
        j = 0;
        __ li(kScratchReg, i);
        __ bind(&trampoline_jump);
        trampoline_jump = Label();
        __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
        __ nop();
      } else {
        __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);  // Expose delay slot.
        __ li(kScratchReg, i);  // In the delay slot.
        __ nop();
      }
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }
    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&trampoline_jump);
    __ Push(kScratchReg);
  }
}
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}

#undef __

}  // namespace internal
}  // namespace v8