V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
macro-assembler-mips.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
6 #error This header must be included via macro-assembler.h
7 #endif
8 
9 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
10 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
11 
12 #include "src/assembler.h"
13 #include "src/contexts.h"
14 #include "src/globals.h"
15 #include "src/mips/assembler-mips.h"
16 
17 namespace v8 {
18 namespace internal {
19 
20 // Give alias names to registers for calling conventions.
21 constexpr Register kReturnRegister0 = v0;
22 constexpr Register kReturnRegister1 = v1;
23 constexpr Register kReturnRegister2 = a0;
24 constexpr Register kJSFunctionRegister = a1;
25 constexpr Register kContextRegister = s7;
26 constexpr Register kAllocateSizeRegister = a0;
27 constexpr Register kSpeculationPoisonRegister = t3;
28 constexpr Register kInterpreterAccumulatorRegister = v0;
29 constexpr Register kInterpreterBytecodeOffsetRegister = t4;
30 constexpr Register kInterpreterBytecodeArrayRegister = t5;
31 constexpr Register kInterpreterDispatchTableRegister = t6;
32 
33 constexpr Register kJavaScriptCallArgCountRegister = a0;
34 constexpr Register kJavaScriptCallCodeStartRegister = a2;
35 constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
36 constexpr Register kJavaScriptCallNewTargetRegister = a3;
37 constexpr Register kJavaScriptCallExtraArg1Register = a2;
38 
39 constexpr Register kOffHeapTrampolineRegister = at;
40 constexpr Register kRuntimeCallFunctionRegister = a1;
41 constexpr Register kRuntimeCallArgCountRegister = a0;
42 constexpr Register kRuntimeCallArgvRegister = a2;
43 constexpr Register kWasmInstanceRegister = a0;
44 constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
45 
46 // Forward declarations
47 enum class AbortReason : uint8_t;
48 
49 // Reserved Register Usage Summary.
50 //
51 // Registers t8, t9, and at are reserved for use by the MacroAssembler.
52 //
53 // The programmer should know that the MacroAssembler may clobber these three,
54 // but won't touch other registers except in special cases.
55 //
56 // Per the MIPS ABI, register t9 must be used for indirect function calls
57 // via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
58 // trying to update the gp register for position-independent code. Whenever
59 // MIPS generated code calls C code, it must do so via the t9 register.
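// [Illustrative sketch, not part of the original header] Hand-written calls
// into C are expected to go through t9, e.g. (using the usual '__'
// ACCESS_MASM shorthand defined at the end of this file; the target address
// is a placeholder):
//
//   __ li(t9, Operand(c_function_address));
//   __ Call(t9);
//
// In practice the CallCFunction() helpers declared below handle this
// convention automatically.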
60 
61 
62 // Flags used for LeaveExitFrame function.
63 enum LeaveExitFrameMode {
64  EMIT_RETURN = true,
65  NO_EMIT_RETURN = false
66 };
67 
68 // Flags used for the li macro-assembler function.
69 enum LiFlags {
70  // If the constant value can be represented in just 16 bits, then
71  // optimize the li to use a single instruction, rather than a lui/ori pair.
72  OPTIMIZE_SIZE = 0,
73  // Always use 2 instructions (lui/ori pair), even if the constant could
74  // be loaded with just one, so that this value is patchable later.
75  CONSTANT_SIZE = 1
76 };
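// [Illustrative sketch, not part of the original header] Typical use of the
// two LiFlags modes with the li() macro declared in TurboAssembler below:
//
//   __ li(t0, Operand(0x12345678), CONSTANT_SIZE);  // always lui/ori, patchable
//   __ li(t1, Operand(42));                         // OPTIMIZE_SIZE (default), may be one instruction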
77 
78 
79 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
80 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
81 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
82 
83 Register GetRegisterThatIsNotOneOf(Register reg1,
84  Register reg2 = no_reg,
85  Register reg3 = no_reg,
86  Register reg4 = no_reg,
87  Register reg5 = no_reg,
88  Register reg6 = no_reg);
89 
90 // -----------------------------------------------------------------------------
91 // Static helper functions.
92 
93 inline MemOperand ContextMemOperand(Register context, int index) {
94  return MemOperand(context, Context::SlotOffset(index));
95 }
96 
97 
98 inline MemOperand NativeContextMemOperand() {
99  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
100 }
101 
102 
103 // Generate a MemOperand for loading a field from an object.
104 inline MemOperand FieldMemOperand(Register object, int offset) {
105  return MemOperand(object, offset - kHeapObjectTag);
106 }
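// [Illustrative sketch, not part of the original header] Loading a tagged
// object's map field through FieldMemOperand ('object' and 'scratch' are
// placeholder registers):
//
//   __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));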
107 
108 
109 // Generate a MemOperand for storing arguments 5..N on the stack
110 // when calling CallCFunction().
111 inline MemOperand CFunctionArgumentOperand(int index) {
112  DCHECK_GT(index, kCArgSlotCount);
113  // Argument 5 takes the slot just past the four Arg-slots.
114  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
115  return MemOperand(sp, offset);
116 }
117 
118 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
119  public:
120  TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
121  : TurboAssemblerBase(options, buffer, buffer_size) {}
122 
123  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
124  void* buffer, int buffer_size,
125  CodeObjectRequired create_code_object)
126  : TurboAssemblerBase(isolate, options, buffer, buffer_size,
127  create_code_object) {}
128 
129  // Activation support.
130  void EnterFrame(StackFrame::Type type);
131  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
132  // Out-of-line constant pool not implemented on mips.
133  UNREACHABLE();
134  }
135  void LeaveFrame(StackFrame::Type type);
136 
137  // Generates function and stub prologue code.
138  void StubPrologue(StackFrame::Type type);
139  void Prologue();
140 
141  void InitializeRootRegister() {
142  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
143  li(kRootRegister, Operand(isolate_root));
144  }
145 
146  // Jump unconditionally to the given label.
147  // We NEED a nop in the branch delay slot, as it is used by v8, for example
148  // in CodeGenerator::ProcessDeferred().
149  // Currently the branch delay slot is filled by the MacroAssembler.
150  // Prefer b(Label) for code generation.
151  void jmp(Label* L) { Branch(L); }
152 
153  // -------------------------------------------------------------------------
154  // Debugging.
155 
156  // Calls Abort(msg) if the condition cc is not satisfied.
157  // Use --debug-code to enable.
158  void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
159 
160  // Like Assert(), but always enabled.
161  void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
162 
163  // Print a message to stdout and abort execution.
164  void Abort(AbortReason msg);
165 
166  inline bool AllowThisStubCall(CodeStub* stub);
167 
168  // Arguments macros.
169 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
170 #define COND_ARGS cond, r1, r2
171 
172  // Cases when relocation is not needed.
173 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
174  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
175  inline void Name(BranchDelaySlot bd, target_type target) { \
176  Name(target, bd); \
177  } \
178  void Name(target_type target, \
179  COND_TYPED_ARGS, \
180  BranchDelaySlot bd = PROTECT); \
181  inline void Name(BranchDelaySlot bd, \
182  target_type target, \
183  COND_TYPED_ARGS) { \
184  Name(target, COND_ARGS, bd); \
185  }
186 
187 #define DECLARE_BRANCH_PROTOTYPES(Name) \
188  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
189  DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
190 
191  DECLARE_BRANCH_PROTOTYPES(Branch)
192  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
193  DECLARE_BRANCH_PROTOTYPES(BranchShort)
194 
195 #undef DECLARE_BRANCH_PROTOTYPES
196 #undef COND_TYPED_ARGS
197 #undef COND_ARGS
198 
199  // Floating point branches
200  void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
201  CompareF(S, cc, cmp1, cmp2);
202  }
203 
204  void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
205  CompareIsNanF(S, cmp1, cmp2);
206  }
207 
208  void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
209  CompareF(D, cc, cmp1, cmp2);
210  }
211 
212  void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
213  CompareIsNanF(D, cmp1, cmp2);
214  }
215 
216  void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT);
217  void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT);
218 
219  void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT);
220  void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT);
221 
222  // MSA Branches
223  void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
224  MSARegister wt, BranchDelaySlot bd = PROTECT);
225 
226  void Branch(Label* L, Condition cond, Register rs, RootIndex index,
227  BranchDelaySlot bdslot = PROTECT);
228 
229  // Load an int32 into the rd register.
230  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
231  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
232  li(rd, Operand(j), mode);
233  }
234  void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
235  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
236  void li(Register dst, const StringConstantBase* string,
237  LiFlags mode = OPTIMIZE_SIZE);
238 
239  void LoadFromConstantsTable(Register destination,
240  int constant_index) override;
241  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
242  void LoadRootRelative(Register destination, int32_t offset) override;
243 
244 // Jump, Call, and Ret pseudo instructions implementing inter-working.
245 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
246  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
247 
248  void Jump(Register target, int16_t offset = 0, COND_ARGS);
249  void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
250  void Jump(Register target, const Operand& offset, COND_ARGS);
251  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
252  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
253  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
254  void Call(Register target, int16_t offset = 0, COND_ARGS);
255  void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
256  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
257  void Call(Handle<Code> code,
258  RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
259  COND_ARGS);
260  void Call(Label* target);
261 
262  void CallForDeoptimization(Address target, int deopt_id,
263  RelocInfo::Mode rmode) {
264  USE(deopt_id);
265  Call(target, rmode);
266  }
267 
268  void Ret(COND_ARGS);
269  inline void Ret(BranchDelaySlot bd, Condition cond = al,
270  Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
271  Ret(cond, rs, rt, bd);
272  }
273 
274  // Emit code to discard a non-negative number of pointer-sized elements
275  // from the stack, clobbering only the sp register.
276  void Drop(int count,
277  Condition cond = cc_always,
278  Register reg = no_reg,
279  const Operand& op = Operand(no_reg));
280 
281  // Trivial case of DropAndRet that utilizes the delay slot and only emits
282  // 2 instructions.
283  void DropAndRet(int drop);
284 
285  void DropAndRet(int drop,
286  Condition cond,
287  Register reg,
288  const Operand& op);
289 
290  void push(Register src) {
291  Addu(sp, sp, Operand(-kPointerSize));
292  sw(src, MemOperand(sp, 0));
293  }
294 
295  void Push(Register src) { push(src); }
296  void Push(Handle<HeapObject> handle);
297  void Push(Smi smi);
298 
299  // Push two registers. Pushes leftmost register first (to highest address).
300  void Push(Register src1, Register src2) {
301  Subu(sp, sp, Operand(2 * kPointerSize));
302  sw(src1, MemOperand(sp, 1 * kPointerSize));
303  sw(src2, MemOperand(sp, 0 * kPointerSize));
304  }
305 
306  // Push three registers. Pushes leftmost register first (to highest address).
307  void Push(Register src1, Register src2, Register src3) {
308  Subu(sp, sp, Operand(3 * kPointerSize));
309  sw(src1, MemOperand(sp, 2 * kPointerSize));
310  sw(src2, MemOperand(sp, 1 * kPointerSize));
311  sw(src3, MemOperand(sp, 0 * kPointerSize));
312  }
313 
314  // Push four registers. Pushes leftmost register first (to highest address).
315  void Push(Register src1, Register src2, Register src3, Register src4) {
316  Subu(sp, sp, Operand(4 * kPointerSize));
317  sw(src1, MemOperand(sp, 3 * kPointerSize));
318  sw(src2, MemOperand(sp, 2 * kPointerSize));
319  sw(src3, MemOperand(sp, 1 * kPointerSize));
320  sw(src4, MemOperand(sp, 0 * kPointerSize));
321  }
322 
323  // Push five registers. Pushes leftmost register first (to highest address).
324  void Push(Register src1, Register src2, Register src3, Register src4,
325  Register src5) {
326  Subu(sp, sp, Operand(5 * kPointerSize));
327  sw(src1, MemOperand(sp, 4 * kPointerSize));
328  sw(src2, MemOperand(sp, 3 * kPointerSize));
329  sw(src3, MemOperand(sp, 2 * kPointerSize));
330  sw(src4, MemOperand(sp, 1 * kPointerSize));
331  sw(src5, MemOperand(sp, 0 * kPointerSize));
332  }
333 
334  void Push(Register src, Condition cond, Register tst1, Register tst2) {
335  // Since we don't have conditional execution we use a Branch.
336  Branch(3, cond, tst1, Operand(tst2));
337  Subu(sp, sp, Operand(kPointerSize));
338  sw(src, MemOperand(sp, 0));
339  }
340 
341  void SaveRegisters(RegList registers);
342  void RestoreRegisters(RegList registers);
343 
344  void CallRecordWriteStub(Register object, Register address,
345  RememberedSetAction remembered_set_action,
346  SaveFPRegsMode fp_mode);
347  void CallRecordWriteStub(Register object, Register address,
348  RememberedSetAction remembered_set_action,
349  SaveFPRegsMode fp_mode, Address wasm_target);
350 
351  // Push multiple registers on the stack.
352  // Registers are saved in numerical order, with higher numbered registers
353  // saved in higher memory addresses.
354  void MultiPush(RegList regs);
355  void MultiPushFPU(RegList regs);
356 
357  // Calculate how much stack space (in bytes) is required to store caller
358  // registers excluding those specified in the arguments.
359  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
360  Register exclusion1 = no_reg,
361  Register exclusion2 = no_reg,
362  Register exclusion3 = no_reg) const;
363 
364  // Push caller saved registers on the stack, and return the number of bytes
365  // the stack pointer is adjusted by.
366  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
367  Register exclusion2 = no_reg,
368  Register exclusion3 = no_reg);
369  // Restore caller saved registers from the stack, and return the number of
370  // bytes the stack pointer is adjusted by.
371  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
372  Register exclusion2 = no_reg,
373  Register exclusion3 = no_reg);
374 
375  void pop(Register dst) {
376  lw(dst, MemOperand(sp, 0));
377  Addu(sp, sp, Operand(kPointerSize));
378  }
379 
380  void Pop(Register dst) { pop(dst); }
381 
382  // Pop two registers. Pops rightmost register first (from lower address).
383  void Pop(Register src1, Register src2) {
384  DCHECK(src1 != src2);
385  lw(src2, MemOperand(sp, 0 * kPointerSize));
386  lw(src1, MemOperand(sp, 1 * kPointerSize));
387  Addu(sp, sp, 2 * kPointerSize);
388  }
389 
390  // Pop three registers. Pops rightmost register first (from lower address).
391  void Pop(Register src1, Register src2, Register src3) {
392  lw(src3, MemOperand(sp, 0 * kPointerSize));
393  lw(src2, MemOperand(sp, 1 * kPointerSize));
394  lw(src1, MemOperand(sp, 2 * kPointerSize));
395  Addu(sp, sp, 3 * kPointerSize);
396  }
397 
398  void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); }
399 
400  // Pops multiple values from the stack and loads them into the
401  // registers specified in regs. The pop order is the opposite of MultiPush.
402  void MultiPop(RegList regs);
403  void MultiPopFPU(RegList regs);
404 
405  // Load Scaled Address instructions. Parameter sa (shift argument) must be
406  // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
407  // may be clobbered.
408  void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
409  Register scratch = at);
410 
411 #define DEFINE_INSTRUCTION(instr) \
412  void instr(Register rd, Register rs, const Operand& rt); \
413  void instr(Register rd, Register rs, Register rt) { \
414  instr(rd, rs, Operand(rt)); \
415  } \
416  void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
417 
418 #define DEFINE_INSTRUCTION2(instr) \
419  void instr(Register rs, const Operand& rt); \
420  void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
421  void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
422 
423 #define DEFINE_INSTRUCTION3(instr) \
424  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
425  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
426  instr(rd_hi, rd_lo, rs, Operand(rt)); \
427  } \
428  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
429  instr(rd_hi, rd_lo, rs, Operand(j)); \
430  }
431 
432  DEFINE_INSTRUCTION(Addu);
433  DEFINE_INSTRUCTION(Subu);
434  DEFINE_INSTRUCTION(Mul);
435  DEFINE_INSTRUCTION(Div);
436  DEFINE_INSTRUCTION(Divu);
437  DEFINE_INSTRUCTION(Mod);
438  DEFINE_INSTRUCTION(Modu);
439  DEFINE_INSTRUCTION(Mulh);
440  DEFINE_INSTRUCTION2(Mult);
441  DEFINE_INSTRUCTION(Mulhu);
442  DEFINE_INSTRUCTION2(Multu);
443  DEFINE_INSTRUCTION2(Div);
444  DEFINE_INSTRUCTION2(Divu);
445 
446  DEFINE_INSTRUCTION3(Div);
447  DEFINE_INSTRUCTION3(Mul);
448  DEFINE_INSTRUCTION3(Mulu);
449 
450  DEFINE_INSTRUCTION(And);
451  DEFINE_INSTRUCTION(Or);
452  DEFINE_INSTRUCTION(Xor);
453  DEFINE_INSTRUCTION(Nor);
454  DEFINE_INSTRUCTION2(Neg);
455 
456  DEFINE_INSTRUCTION(Slt);
457  DEFINE_INSTRUCTION(Sltu);
458  DEFINE_INSTRUCTION(Sle);
459  DEFINE_INSTRUCTION(Sleu);
460  DEFINE_INSTRUCTION(Sgt);
461  DEFINE_INSTRUCTION(Sgtu);
462  DEFINE_INSTRUCTION(Sge);
463  DEFINE_INSTRUCTION(Sgeu);
464 
465  // MIPS32 R2 instruction macro.
466  DEFINE_INSTRUCTION(Ror);
467 
468 #undef DEFINE_INSTRUCTION
469 #undef DEFINE_INSTRUCTION2
470 #undef DEFINE_INSTRUCTION3
471 
472  void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }
473 
474  void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
475 
476  // Removes current frame and its arguments from the stack preserving
477  // the arguments and a return address pushed to the stack for the next call.
478  // Both |callee_args_count| and |caller_args_count_reg| do not include
479  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
480  // is trashed.
481  void PrepareForTailCall(const ParameterCount& callee_args_count,
482  Register caller_args_count_reg, Register scratch0,
483  Register scratch1);
484 
485  int CalculateStackPassedWords(int num_reg_arguments,
486  int num_double_arguments);
487 
488  // Before calling a C-function from generated code, align arguments on stack
489  // and add space for the four mips argument slots.
490  // After aligning the frame, non-register arguments must be stored on the
491  // stack, after the argument slots, using the helper CFunctionArgumentOperand().
492  // The argument count assumes all arguments are word sized.
493  // Some compilers/platforms require the stack to be aligned when calling
494  // C++ code.
495  // Needs a scratch register to do some arithmetic. This register will be
496  // trashed.
497  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
498  Register scratch);
499  void PrepareCallCFunction(int num_reg_arguments, Register scratch);
500 
501  // Arguments 1-4 are placed in registers a0 through a3 respectively.
502  // Arguments 5..n are stored to the stack using the following:
503  // sw(t0, CFunctionArgumentOperand(5));
504 
505  // Calls a C function and cleans up the space for arguments allocated
506  // by PrepareCallCFunction. The called function is not allowed to trigger a
507  // garbage collection, since that might move the code and invalidate the
508  // return address (unless this is somehow accounted for by the called
509  // function).
510  void CallCFunction(ExternalReference function, int num_arguments);
511  void CallCFunction(Register function, int num_arguments);
512  void CallCFunction(ExternalReference function, int num_reg_arguments,
513  int num_double_arguments);
514  void CallCFunction(Register function, int num_reg_arguments,
515  int num_double_arguments);
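// [Illustrative sketch, not part of the original header] A five-argument C
// call: a0-a3 carry arguments 1-4 and argument 5 is stored to the stack via
// CFunctionArgumentOperand(). 'ref' is a placeholder ExternalReference and
// 'scratch' is any free register:
//
//   __ PrepareCallCFunction(5, scratch);
//   // ... set up a0-a3 and t0 with the five argument values ...
//   __ sw(t0, CFunctionArgumentOperand(5));
//   __ CallCFunction(ref, 5);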
516  void MovFromFloatResult(DoubleRegister dst);
517  void MovFromFloatParameter(DoubleRegister dst);
518 
519  // There are two ways of passing double arguments on MIPS, depending on
520  // whether soft or hard floating point ABI is used. These functions
521  // abstract parameter passing for the three different ways we call
522  // C functions from generated code.
523  void MovToFloatParameter(DoubleRegister src);
524  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
525  void MovToFloatResult(DoubleRegister src);
526 
527  // See comments at the beginning of Builtins::Generate_CEntry.
528  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
529  inline void PrepareCEntryFunction(const ExternalReference& ref) {
530  li(a1, ref);
531  }
532 
533  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
534  Label* condition_met);
535 #undef COND_ARGS
536 
537  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
538  // builtin for the target runtime function and uses an indirect call.
539  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
540 
541  // Performs a truncating conversion of a floating point number as used by
542  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
543  // succeeds, otherwise falls through if result is saturated. On return
544  // 'result' either holds the answer or is clobbered on fall through.
545  //
546  // Only public for the test code in test-code-stubs-arm.cc.
547  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
548  Label* done);
549 
550  // Performs a truncating conversion of a floating point number as used by
551  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
552  // Exits with 'result' holding the answer.
553  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
554  DoubleRegister double_input, StubCallMode stub_mode);
555 
556  // Conditional move.
557  void Movz(Register rd, Register rs, Register rt);
558  void Movn(Register rd, Register rs, Register rt);
559  void Movt(Register rd, Register rs, uint16_t cc = 0);
560  void Movf(Register rd, Register rs, uint16_t cc = 0);
561 
562  void LoadZeroIfFPUCondition(Register dest);
563  void LoadZeroIfNotFPUCondition(Register dest);
564 
565  void LoadZeroIfConditionNotZero(Register dest, Register condition);
566  void LoadZeroIfConditionZero(Register dest, Register condition);
567  void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt,
568  Condition cond);
569 
570  void Clz(Register rd, Register rs);
571  void Ctz(Register rd, Register rs);
572  void Popcnt(Register rd, Register rs);
573 
574  // Int64Lowering instructions
575  void AddPair(Register dst_low, Register dst_high, Register left_low,
576  Register left_high, Register right_low, Register right_high,
577  Register scratch1, Register scratch2);
578 
579  void SubPair(Register dst_low, Register dst_high, Register left_low,
580  Register left_high, Register right_low, Register right_high,
581  Register scratch1, Register scratch2);
582 
583  void AndPair(Register dst_low, Register dst_high, Register left_low,
584  Register left_high, Register right_low, Register right_high);
585 
586  void OrPair(Register dst_low, Register dst_high, Register left_low,
587  Register left_high, Register right_low, Register right_high);
588 
589  void XorPair(Register dst_low, Register dst_high, Register left_low,
590  Register left_high, Register right_low, Register right_high);
591 
592  void MulPair(Register dst_low, Register dst_high, Register left_low,
593  Register left_high, Register right_low, Register right_high,
594  Register scratch1, Register scratch2);
595 
596  void ShlPair(Register dst_low, Register dst_high, Register src_low,
597  Register src_high, Register shift, Register scratch1,
598  Register scratch2);
599 
600  void ShlPair(Register dst_low, Register dst_high, Register src_low,
601  Register src_high, uint32_t shift, Register scratch);
602 
603  void ShrPair(Register dst_low, Register dst_high, Register src_low,
604  Register src_high, Register shift, Register scratch1,
605  Register scratch2);
606 
607  void ShrPair(Register dst_low, Register dst_high, Register src_low,
608  Register src_high, uint32_t shift, Register scratch);
609 
610  void SarPair(Register dst_low, Register dst_high, Register src_low,
611  Register src_high, Register shift, Register scratch1,
612  Register scratch2);
613 
614  void SarPair(Register dst_low, Register dst_high, Register src_low,
615  Register src_high, uint32_t shift, Register scratch);
616 
617  // MIPS32 R2 instruction macro.
618  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
619  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
620  void ExtractBits(Register dest, Register source, Register pos, int size,
621  bool sign_extend = false);
622  void InsertBits(Register dest, Register source, Register pos, int size);
623 
624  void Seb(Register rd, Register rt);
625  void Seh(Register rd, Register rt);
626  void Neg_s(FPURegister fd, FPURegister fs);
627  void Neg_d(FPURegister fd, FPURegister fs);
628 
629  // MIPS32 R6 instruction macros.
630  void Bovc(Register rt, Register rs, Label* L);
631  void Bnvc(Register rt, Register rs, Label* L);
632 
633  // Convert single to unsigned word.
634  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
635  void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch);
636 
637  void Trunc_w_d(FPURegister fd, FPURegister fs);
638  void Round_w_d(FPURegister fd, FPURegister fs);
639  void Floor_w_d(FPURegister fd, FPURegister fs);
640  void Ceil_w_d(FPURegister fd, FPURegister fs);
641 
642  // Round double functions
643  void Trunc_d_d(FPURegister fd, FPURegister fs);
644  void Round_d_d(FPURegister fd, FPURegister fs);
645  void Floor_d_d(FPURegister fd, FPURegister fs);
646  void Ceil_d_d(FPURegister fd, FPURegister fs);
647 
648  // Round float functions
649  void Trunc_s_s(FPURegister fd, FPURegister fs);
650  void Round_s_s(FPURegister fd, FPURegister fs);
651  void Floor_s_s(FPURegister fd, FPURegister fs);
652  void Ceil_s_s(FPURegister fd, FPURegister fs);
653 
654  // FP32 mode: Move the general purpose register into
655  // the high part of the double-register pair.
656  // FP64 mode: Move the general-purpose register into
657  // the higher 32 bits of the 64-bit coprocessor register,
658  // while leaving the low bits unchanged.
659  void Mthc1(Register rt, FPURegister fs);
660 
661  // FP32 mode: move the high part of the double-register pair into
662  // general purpose register.
663  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
664  // general-purpose register.
665  void Mfhc1(Register rt, FPURegister fs);
666 
667  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
668  FPURegister scratch);
669  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
670  FPURegister scratch);
671  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
672  FPURegister scratch);
673  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
674  FPURegister scratch);
675 
676  // Change endianness
677  void ByteSwapSigned(Register dest, Register src, int operand_size);
678  void ByteSwapUnsigned(Register dest, Register src, int operand_size);
679 
680  void Ulh(Register rd, const MemOperand& rs);
681  void Ulhu(Register rd, const MemOperand& rs);
682  void Ush(Register rd, const MemOperand& rs, Register scratch);
683 
684  void Ulw(Register rd, const MemOperand& rs);
685  void Usw(Register rd, const MemOperand& rs);
686 
687  void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
688  void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
689 
690  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
691  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
692 
693  void Ldc1(FPURegister fd, const MemOperand& src);
694  void Sdc1(FPURegister fs, const MemOperand& dst);
695 
696  void Ll(Register rd, const MemOperand& rs);
697  void Sc(Register rd, const MemOperand& rs);
698 
699  // Perform a floating-point min or max operation with the
700  // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
701  // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
702  // handled in out-of-line code. The specific behaviour depends on supported
703  // instructions.
704  //
705  // These functions assume (and assert) that src1!=src2. It is permitted
706  // for the result to alias either input register.
707  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
708  Label* out_of_line);
709  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
710  Label* out_of_line);
711  void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
712  Label* out_of_line);
713  void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
714  Label* out_of_line);
715 
716  // Generate out-of-line cases for the macros above.
717  void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
718  void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
719  void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
720  DoubleRegister src2);
721  void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
722  DoubleRegister src2);
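// [Illustrative sketch, not part of the original header] The usual pairing of
// the inline min/max macros with their out-of-line slow paths ('dst', 'lhs'
// and 'rhs' are placeholder double registers):
//
//   Label done, out_of_line;
//   __ Float64Max(dst, lhs, rhs, &out_of_line);
//   __ Branch(&done);
//   __ bind(&out_of_line);
//   __ Float64MaxOutOfLine(dst, lhs, rhs);
//   __ bind(&done);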
723 
724  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
725 
726  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
727 
728  inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
729  inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
730 
731  inline void Move(Register dst, Register src) {
732  if (dst != src) {
733  mov(dst, src);
734  }
735  }
736 
737  inline void Move_d(FPURegister dst, FPURegister src) {
738  if (dst != src) {
739  mov_d(dst, src);
740  }
741  }
742 
743  inline void Move_s(FPURegister dst, FPURegister src) {
744  if (dst != src) {
745  mov_s(dst, src);
746  }
747  }
748 
749  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
750 
751  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
752  mfc1(dst_low, src);
753  Mfhc1(dst_high, src);
754  }
755 
756  inline void FmoveHigh(Register dst_high, FPURegister src) {
757  Mfhc1(dst_high, src);
758  }
759 
760  inline void FmoveHigh(FPURegister dst, Register src_high) {
761  Mthc1(src_high, dst);
762  }
763 
764  inline void FmoveLow(Register dst_low, FPURegister src) {
765  mfc1(dst_low, src);
766  }
767 
768  void FmoveLow(FPURegister dst, Register src_low);
769 
770  inline void Move(FPURegister dst, Register src_low, Register src_high) {
771  mtc1(src_low, dst);
772  Mthc1(src_high, dst);
773  }
774 
775  void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
776  void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
777  void Move(FPURegister dst, uint32_t src);
778  void Move(FPURegister dst, uint64_t src);
779 
780  // -------------------------------------------------------------------------
781  // Overflow operations.
782 
783  // AddOverflow sets the overflow register to a negative value if
784  // overflow occurred; otherwise it is zero or positive.
785  void AddOverflow(Register dst, Register left, const Operand& right,
786  Register overflow);
787  // SubOverflow sets the overflow register to a negative value if
788  // overflow occurred; otherwise it is zero or positive.
789  void SubOverflow(Register dst, Register left, const Operand& right,
790  Register overflow);
791  // MulOverflow sets the overflow register to zero if no overflow occurred.
792  void MulOverflow(Register dst, Register left, const Operand& right,
793  Register overflow);
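// [Illustrative sketch, not part of the original header] Branching on the
// overflow register written by AddOverflow; a negative value signals
// overflow ('overflow_reg' and the label are placeholders):
//
//   __ AddOverflow(dst, left, Operand(right), overflow_reg);
//   __ Branch(&on_overflow, lt, overflow_reg, Operand(zero_reg));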
794 
795 // Number of instructions needed for calculation of switch table entry address
796 #ifdef _MIPS_ARCH_MIPS32R6
797  static constexpr int kSwitchTablePrologueSize = 5;
798 #else
799  static constexpr int kSwitchTablePrologueSize = 10;
800 #endif
801  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
802  // functor/function with 'Label *func(size_t index)' declaration.
803  template <typename Func>
804  void GenerateSwitchTable(Register index, size_t case_count,
805  Func GetLabelFunction);
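// [Illustrative sketch, not part of the original header] Passing a lambda as
// GetLabelFunction; 'labels' is a placeholder array with one Label per case:
//
//   __ GenerateSwitchTable(index_reg, case_count,
//                          [&labels](size_t i) { return &labels[i]; });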
806 
807  // Load an object from the root table.
808  void LoadRoot(Register destination, RootIndex index) override;
809  void LoadRoot(Register destination, RootIndex index, Condition cond,
810  Register src1, const Operand& src2);
811 
812  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
813  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
814 
815  // ---------------------------------------------------------------------------
816  // FPU macros. These do not handle special cases like NaN or +- inf.
817 
818  // Convert unsigned word to double.
819  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
820 
821  // Convert double to unsigned word.
822  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
823  void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch);
824 
825  // Jump if the register contains a smi.
826  void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
827  BranchDelaySlot bd = PROTECT);
828 
829  void JumpIfEqual(Register a, int32_t b, Label* dest) {
830  li(kScratchReg, Operand(b));
831  Branch(dest, eq, a, Operand(kScratchReg));
832  }
833 
834  void JumpIfLessThan(Register a, int32_t b, Label* dest) {
835  li(kScratchReg, Operand(b));
836  Branch(dest, lt, a, Operand(kScratchReg));
837  }
838 
839  // Push a standard frame, consisting of ra, fp, context and JS function.
840  void PushStandardFrame(Register function_reg);
841 
842  // Get the actual activation frame alignment for target environment.
843  static int ActivationFrameAlignment();
844 
845  // Compute the start of the generated instruction stream from the current PC.
846  // This is an alternative to embedding the {CodeObject} handle as a reference.
847  void ComputeCodeStartAddress(Register dst);
848 
849  void ResetSpeculationPoisonRegister();
850 
851  protected:
852  void BranchLong(Label* L, BranchDelaySlot bdslot);
853 
854  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
855 
856  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
857 
858  private:
859  bool has_double_zero_reg_set_ = false;
860 
861  void CallCFunctionHelper(Register function_base, int16_t function_offset,
862  int num_reg_arguments, int num_double_arguments);
863 
864  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
865  FPURegister cmp2);
866 
867  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
868  FPURegister cmp2);
869 
870  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
871  MSARegister wt, BranchDelaySlot bd = PROTECT);
872 
873  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
874  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
875  Register& scratch, const Operand& rt);
876 
877  void BranchShortHelperR6(int32_t offset, Label* L);
878  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
879  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
880  Register rs, const Operand& rt);
881  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
882  const Operand& rt, BranchDelaySlot bdslot);
883  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
884  const Operand& rt, BranchDelaySlot bdslot);
885 
886  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
887  void BranchAndLinkShortHelper(int16_t offset, Label* L,
888  BranchDelaySlot bdslot);
889  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
890  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
891  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
892  Register rs, const Operand& rt);
893  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
894  Register rs, const Operand& rt,
895  BranchDelaySlot bdslot);
896  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
897  Register rs, const Operand& rt,
898  BranchDelaySlot bdslot);
899  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
900 
901  template <typename RoundFunc>
902  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
903  RoundFunc round);
904 
905  template <typename RoundFunc>
906  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
907  RoundFunc round);
908 
909  // Push a fixed frame, consisting of ra, fp.
910  void PushCommonFrame(Register marker_reg = no_reg);
911 
912  void CallRecordWriteStub(Register object, Register address,
913  RememberedSetAction remembered_set_action,
914  SaveFPRegsMode fp_mode, Handle<Code> code_target,
915  Address wasm_target);
916 };
917 
918 // MacroAssembler implements a collection of frequently used macros.
919 class MacroAssembler : public TurboAssembler {
920  public:
921  MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
922  : TurboAssembler(options, buffer, size) {}
923 
924  MacroAssembler(Isolate* isolate, void* buffer, int size,
925  CodeObjectRequired create_code_object)
926  : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
927  size, create_code_object) {}
928 
929  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
930  void* buffer, int size, CodeObjectRequired create_code_object);
931 
932  // Swap two registers. If the scratch register is omitted then a slightly
933  // less efficient form using xor instead of mov is emitted.
934  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
935 
936  void PushRoot(RootIndex index) {
937  UseScratchRegisterScope temps(this);
938  Register scratch = temps.Acquire();
939  LoadRoot(scratch, index);
940  Push(scratch);
941  }
942 
943  // Compare the object in a register to a value and jump if they are equal.
944  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
945  UseScratchRegisterScope temps(this);
946  Register scratch = temps.Acquire();
947  LoadRoot(scratch, index);
948  Branch(if_equal, eq, with, Operand(scratch));
949  }
950 
951  // Compare the object in a register to a value and jump if they are not equal.
952  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
953  UseScratchRegisterScope temps(this);
954  Register scratch = temps.Acquire();
955  LoadRoot(scratch, index);
956  Branch(if_not_equal, ne, with, Operand(scratch));
957  }
958 
959  // ---------------------------------------------------------------------------
960  // GC Support
961 
962  // Notify the garbage collector that we wrote a pointer into an object.
963  // |object| is the object being stored into, |value| is the object being
964  // stored. The value and scratch registers are clobbered by the operation.
965  // The offset is the offset from the start of the object, not the offset from
966  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
967  void RecordWriteField(
968  Register object, int offset, Register value, Register scratch,
969  RAStatus ra_status, SaveFPRegsMode save_fp,
970  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
971  SmiCheck smi_check = INLINE_SMI_CHECK);
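// [Illustrative sketch, not part of the original header] A field store
// followed by its write barrier ('object', 'value', 'scratch' and 'offset'
// are placeholders):
//
//   __ sw(value, FieldMemOperand(object, offset));
//   __ RecordWriteField(object, offset, value, scratch,
//                       kRAHasNotBeenSaved, kDontSaveFPRegs);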
972 
973  // For a given |object| notify the garbage collector that the slot |address|
974  // has been written. |value| is the object being stored. The value and
975  // address registers are clobbered by the operation.
976  void RecordWrite(
977  Register object, Register address, Register value, RAStatus ra_status,
978  SaveFPRegsMode save_fp,
979  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
980  SmiCheck smi_check = INLINE_SMI_CHECK);
981 
982  void Pref(int32_t hint, const MemOperand& rs);
983 
984  // Push and pop the registers that can hold pointers, as defined by the
985  // RegList constant kSafepointSavedRegisters.
986  void PushSafepointRegisters();
987  void PopSafepointRegisters();
988 
989  // Truncates a double using a specific rounding mode, and writes the value
990  // to the result register.
991  // The except_flag will contain any exceptions caused by the instruction.
992  // If check_inexact is kDontCheckForInexactConversion, then the inexact
993  // exception is masked.
994  void EmitFPUTruncate(
995  FPURoundingMode rounding_mode, Register result,
996  DoubleRegister double_input, Register scratch,
997  DoubleRegister double_scratch, Register except_flag,
998  CheckForInexactConversion check_inexact = kDontCheckForInexactConversion);
999 
1000  // Enter exit frame.
1001  // argc - argument count to be dropped by LeaveExitFrame.
1002  // save_doubles - saves FPU registers on stack, currently disabled.
1003  // stack_space - extra stack space.
1004  void EnterExitFrame(bool save_doubles, int stack_space = 0,
1005  StackFrame::Type frame_type = StackFrame::EXIT);
1006 
1007  // Leave the current exit frame.
1008  void LeaveExitFrame(bool save_doubles, Register arg_count,
1009  bool do_return = NO_EMIT_RETURN,
1010  bool argument_count_is_length = false);
1011 
1012  // Make sure the stack is aligned. Only emits code in debug mode.
1013  void AssertStackIsAligned();
1014 
1015  // Load the global proxy from the current context.
1016  void LoadGlobalProxy(Register dst) {
1017  LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
1018  }
1019 
1020  void LoadNativeContextSlot(int index, Register dst);
1021 
1022  // -------------------------------------------------------------------------
1023  // JavaScript invokes.
1024 
1025  // Invoke the JavaScript function code by either calling or jumping.
1026  void InvokeFunctionCode(Register function, Register new_target,
1027  const ParameterCount& expected,
1028  const ParameterCount& actual, InvokeFlag flag);
1029 
1030  // On function call, call into the debugger if necessary.
1031  void CheckDebugHook(Register fun, Register new_target,
1032  const ParameterCount& expected,
1033  const ParameterCount& actual);
1034 
1035  // Invoke the JavaScript function in the given register. Changes the
1036  // current context to the context in the function before invoking.
1037  void InvokeFunction(Register function, Register new_target,
1038  const ParameterCount& actual, InvokeFlag flag);
1039 
1040  void InvokeFunction(Register function, const ParameterCount& expected,
1041  const ParameterCount& actual, InvokeFlag flag);
1042 
1043  // Frame restart support.
1044  void MaybeDropFrames();
1045 
1046  // Exception handling.
1047 
1048  // Push a new stack handler and link into stack handler chain.
1049  void PushStackHandler();
1050 
1051  // Unlink the stack handler on top of the stack from the stack handler chain.
1052  // Must preserve the result register.
1053  void PopStackHandler();
1054 
1055  // -------------------------------------------------------------------------
1056  // Support functions.
1057 
1058  void GetObjectType(Register function,
1059  Register map,
1060  Register type_reg);
1061 
1062  // -------------------------------------------------------------------------
1063  // Runtime calls.
1064 
1065 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
1066 const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
1067 
1068  // Call a code stub.
1069  void CallStub(CodeStub* stub,
1070  COND_ARGS);
1071 
1072  // Tail call a code stub (jump).
1073  void TailCallStub(CodeStub* stub, COND_ARGS);
1074 
1075 #undef COND_ARGS
1076 
1077  // Call a runtime routine.
1078  void CallRuntime(const Runtime::Function* f, int num_arguments,
1079  SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1080 
1081  // Convenience function: Same as above, but takes the fid instead.
1082  void CallRuntime(Runtime::FunctionId fid,
1083  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1084  const Runtime::Function* function = Runtime::FunctionForId(fid);
1085  CallRuntime(function, function->nargs, save_doubles);
1086  }
1087 
1088  // Convenience function: Same as above, but takes the fid instead.
1089  void CallRuntime(Runtime::FunctionId id, int num_arguments,
1090  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
1091  CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
1092  }
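// [Illustrative sketch, not part of the original header] Calling a runtime
// function by id; the argument count is taken from the Runtime::Function:
//
//   __ CallRuntime(Runtime::kStackGuard);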
1093 
1094  // Convenience function: tail call a runtime routine (jump).
1095  void TailCallRuntime(Runtime::FunctionId fid);
1096 
1097  // Jump to the builtin routine.
1098  void JumpToExternalReference(const ExternalReference& builtin,
1099  BranchDelaySlot bd = PROTECT,
1100  bool builtin_exit_frame = false);
1101 
1102  // Generates a trampoline to jump to the off-heap instruction stream.
1103  void JumpToInstructionStream(Address entry);
1104 
1105  // ---------------------------------------------------------------------------
1106  // In-place weak references.
1107  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1108 
1109  // -------------------------------------------------------------------------
1110  // StatsCounter support.
1111 
1112  void IncrementCounter(StatsCounter* counter, int value,
1113  Register scratch1, Register scratch2);
1114  void DecrementCounter(StatsCounter* counter, int value,
1115  Register scratch1, Register scratch2);
1116 
1117  // -------------------------------------------------------------------------
1118  // Smi utilities.
1119 
1120  void SmiTag(Register reg) {
1121  Addu(reg, reg, reg);
1122  }
1123 
1124  void SmiTag(Register dst, Register src) { Addu(dst, src, src); }
1125 
1126  // Test if the register contains a smi.
1127  inline void SmiTst(Register value, Register scratch) {
1128  And(scratch, value, Operand(kSmiTagMask));
1129  }
1130 
1131  // Untag the source value into the destination and jump if the source is a smi.
1132  // Source and destination can be the same register.
1133  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1134 
1135  // Jump if the register contains a non-smi.
1136  void JumpIfNotSmi(Register value,
1137  Label* not_smi_label,
1138  Register scratch = at,
1139  BranchDelaySlot bd = PROTECT);
1140 
1141  // Jump if either of the registers contains a smi.
1142  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1143 
1144  // Abort execution if argument is a smi, enabled via --debug-code.
1145  void AssertNotSmi(Register object);
1146  void AssertSmi(Register object);
1147 
1148  // Abort execution if argument is not a Constructor, enabled via --debug-code.
1149  void AssertConstructor(Register object);
1150 
1151  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1152  void AssertFunction(Register object);
1153 
1154  // Abort execution if argument is not a JSBoundFunction,
1155  // enabled via --debug-code.
1156  void AssertBoundFunction(Register object);
1157 
1158  // Abort execution if argument is not a JSGeneratorObject (or subclass),
1159  // enabled via --debug-code.
1160  void AssertGeneratorObject(Register object);
1161 
1162  // Abort execution if argument is not undefined or an AllocationSite, enabled
1163  // via --debug-code.
1164  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1165 
1166  template<typename Field>
1167  void DecodeField(Register dst, Register src) {
1168  Ext(dst, src, Field::kShift, Field::kSize);
1169  }
1170 
1171  template<typename Field>
1172  void DecodeField(Register reg) {
1173  DecodeField<Field>(reg, reg);
1174  }
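// [Illustrative sketch, not part of the original header] Decoding a BitField
// encoded in a register, e.g. a map's elements kind:
//
//   __ DecodeField<Map::ElementsKindBits>(t0);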
1175 
1176  private:
1177  // Helper functions for generating invokes.
1178  void InvokePrologue(const ParameterCount& expected,
1179  const ParameterCount& actual, Label* done,
1180  bool* definitely_mismatches, InvokeFlag flag);
1181 
1182  // Compute memory operands for safepoint stack slots.
1183  static int SafepointRegisterStackIndex(int reg_code);
1184 
1185  // Needs access to SafepointRegisterStackIndex for compiled frame
1186  // traversal.
1187  friend class StandardFrame;
1188 };
1189 
1190 template <typename Func>
1191 void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
1192  Func GetLabelFunction) {
1193  Label here;
1194  BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
1195  UseScratchRegisterScope temps(this);
1196  Register scratch = temps.Acquire();
1197  if (kArchVariant >= kMips32r6) {
1198  addiupc(scratch, 5);
1199  Lsa(scratch, scratch, index, kPointerSizeLog2);
1200  lw(scratch, MemOperand(scratch));
1201  } else {
1202  push(ra);
1203  bal(&here);
1204  sll(scratch, index, kPointerSizeLog2); // Branch delay slot.
1205  bind(&here);
1206  addu(scratch, scratch, ra);
1207  pop(ra);
1208  lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
1209  }
1210  jr(scratch);
1211  nop(); // Branch delay slot nop.
1212  for (size_t index = 0; index < case_count; ++index) {
1213  dd(GetLabelFunction(index));
1214  }
1215 }
1216 
1217 #define ACCESS_MASM(masm) masm->
1218 
1219 } // namespace internal
1220 } // namespace v8
1221 
1222 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_