// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// macro-assembler-ppc.h
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
6 #error This header must be included via macro-assembler.h
7 #endif
8 
9 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
10 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
11 
12 #include "src/bailout-reason.h"
13 #include "src/contexts.h"
14 #include "src/double.h"
15 #include "src/globals.h"
16 #include "src/ppc/assembler-ppc.h"
17 
18 namespace v8 {
19 namespace internal {
20 
// Give alias names to registers for calling conventions.

// C/JS return values (r3 is also the first integer argument register).
constexpr Register kReturnRegister0 = r3;
constexpr Register kReturnRegister1 = r4;
constexpr Register kReturnRegister2 = r5;
// JS calling convention: closure, context and allocation helpers.
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
// Masking register used for speculation-poison mitigation.
constexpr Register kSpeculationPoisonRegister = r14;
// Interpreter (Ignition) dispatch state.
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
constexpr Register kInterpreterDispatchTableRegister = r17;

// JS call: argument count, code start, target and new.target.
constexpr Register kJavaScriptCallArgCountRegister = r3;
constexpr Register kJavaScriptCallCodeStartRegister = r5;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r6;
constexpr Register kJavaScriptCallExtraArg1Register = r5;

// Runtime-call and WebAssembly conventions.
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r4;
constexpr Register kRuntimeCallArgCountRegister = r3;
constexpr Register kRuntimeCallArgvRegister = r5;
constexpr Register kWasmInstanceRegister = r10;
constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
46 
47 // ----------------------------------------------------------------------------
48 // Static helper functions
49 
50 // Generate a MemOperand for loading a field from an object.
51 inline MemOperand FieldMemOperand(Register object, int offset) {
52  return MemOperand(object, offset - kHeapObjectTag);
53 }
54 
// Whether a write barrier should also update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a write barrier must first check the stored value for smi-ness.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Tracks whether lr has already been saved by the surrounding code.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


// Returns an allocatable register that is not any of the (up to six)
// registers passed in; unused slots default to no_reg.
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);
65 
// These exist to provide portability between 32 and 64bit.
// Each alias maps a generic pointer-sized operation onto the matching
// doubleword (64-bit) or word (32-bit) PPC instruction mnemonic.
#if V8_TARGET_ARCH_PPC64
#define LoadPX ldx
#define LoadPUX ldux
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#else
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#endif
94 
// Platform-independent assembler helpers for the PPC port. Inline members
// emit fixed short instruction sequences; the out-of-line declarations are
// implemented in macro-assembler-ppc.cc.
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
      : TurboAssemblerBase(options, buffer, buffer_size) {}

  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result to |dst|
  void ConvertIntToDouble(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result to |dst|
  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertIntToFloat(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);

#if V8_TARGET_ARCH_PPC64
  void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif

  // Converts the double_input to an integer. Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                            const Register dst_hi,
#endif
                            const Register dst, const DoubleRegister double_dst,
                            FPRoundingMode rounding_mode = kRoundToZero);

#if V8_TARGET_ARCH_PPC64
  // Converts the double_input to an unsigned integer. Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToUnsignedInt64(
      const DoubleRegister double_input, const Register dst,
      const DoubleRegister double_dst,
      FPRoundingMode rounding_mode = kRoundToZero);
#endif

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);

  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  // Push a fixed frame, consisting of lr, fp, constant pool.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  // Push a standard frame, consisting of lr, fp, constant pool,
  // context and JS function
  void PushStandardFrame(Register function_reg);

  // Restore caller's frame pointer and return address prior to being
  // overwritten by tail call stack preparation.
  void RestoreFrameStateForTailCall();

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Point kRootRegister at the isolate's root-table base address.
  void InitializeRootRegister() {
    ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
    mov(kRootRegister, Operand(isolate_root));
  }

  // These exist to provide portability between 32 and 64bit
  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadWordArith(Register dst, const MemOperand& mem,
                     Register scratch = no_reg);
  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
  void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);

  void LoadDouble(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);

  // load a literal signed int value <value> to GPR <dst>
  void LoadIntLiteral(Register dst, int value);
  // load an SMI value <value> to GPR <dst>
  void LoadSmiLiteral(Register dst, Smi smi);

  void LoadSingle(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);
  void LoadPC(Register dst);
  void ComputeCodeStartAddress(Register dst);

  void StoreDouble(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void StoreSingle(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreSingleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  // Compare helpers that materialize large immediates via |scratch|.
  void Cmpi(Register src1, const Operand& src2, Register scratch,
            CRegister cr = cr7);
  void Cmpli(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmpwi(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  // Set new rounding mode RN to FPSCR
  void SetRoundingMode(FPRoundingMode RN);

  // reset rounding mode to default (kRoundToNearest)
  void ResetRoundingMode();
  void Add(Register dst, Register src, intptr_t value, Register scratch);

  void Push(Register src) { push(src); }
  // Push a handle.
  void Push(Handle<HeapObject> handle);
  void Push(Smi smi);

  // Push two registers. Pushes leftmost register first (to highest address).
  // StorePU both allocates the stack space and stores the last register;
  // the earlier registers are then stored at positive offsets from sp.
  void Push(Register src1, Register src2) {
    StorePU(src2, MemOperand(sp, -2 * kPointerSize));
    StoreP(src1, MemOperand(sp, kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    StorePU(src3, MemOperand(sp, -3 * kPointerSize));
    StoreP(src2, MemOperand(sp, kPointerSize));
    StoreP(src1, MemOperand(sp, 2 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    StorePU(src4, MemOperand(sp, -4 * kPointerSize));
    StoreP(src3, MemOperand(sp, kPointerSize));
    StoreP(src2, MemOperand(sp, 2 * kPointerSize));
    StoreP(src1, MemOperand(sp, 3 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    StorePU(src5, MemOperand(sp, -5 * kPointerSize));
    StoreP(src4, MemOperand(sp, kPointerSize));
    StoreP(src3, MemOperand(sp, 2 * kPointerSize));
    StoreP(src2, MemOperand(sp, 3 * kPointerSize));
    StoreP(src1, MemOperand(sp, 4 * kPointerSize));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    LoadP(src2, MemOperand(sp, 0));
    LoadP(src1, MemOperand(sp, kPointerSize));
    addi(sp, sp, Operand(2 * kPointerSize));
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    LoadP(src3, MemOperand(sp, 0));
    LoadP(src2, MemOperand(sp, kPointerSize));
    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    addi(sp, sp, Operand(3 * kPointerSize));
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4) {
    LoadP(src4, MemOperand(sp, 0));
    LoadP(src3, MemOperand(sp, kPointerSize));
    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    addi(sp, sp, Operand(4 * kPointerSize));
  }

  // Pop five registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Register src5) {
    LoadP(src5, MemOperand(sp, 0));
    LoadP(src4, MemOperand(sp, kPointerSize));
    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    addi(sp, sp, Operand(5 * kPointerSize));
  }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Address wasm_target);

  void MultiPush(RegList regs, Register location = sp);
  void MultiPop(RegList regs, Register location = sp);

  void MultiPushDoubles(RegList dregs, Register location = sp);
  void MultiPopDoubles(RegList dregs, Register location = sp);

  // Calculate how much stack space (in bytes) are required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) override {
    LoadRoot(destination, index, al);  // unconditional load
  }
  void LoadRoot(Register destination, RootIndex index, Condition cond);

  // Exchange the contents of the given register/memory pairs using the
  // supplied scratch register(s).
  void SwapP(Register src, Register dst, Register scratch);
  void SwapP(Register src, MemOperand dst, Register scratch);
  void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
             Register scratch_1);
  void SwapFloat32(DoubleRegister src, DoubleRegister dst,
                   DoubleRegister scratch);
  void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                   DoubleRegister scratch_1);
  void SwapDouble(DoubleRegister src, DoubleRegister dst,
                  DoubleRegister scratch);
  void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                  DoubleRegister scratch_1);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason reason);

  inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
  // 64-bit shifts on a register pair (32-bit targets only).
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, Register scratch, Register shift);
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, uint32_t shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, Register scratch, Register shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, uint32_t shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, Register scratch, Register shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, uint32_t shift);
#endif

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al);
  void Call(Label* target);

  // On PPC a deoptimization call is a plain call; the deopt id is unused.
  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode) {
    USE(deopt_id);
    Call(target, rmode);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count);
  void Drop(Register count, Register scratch = r0);

  void Ret() { blr(); }
  void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
  // Drop |drop| stack slots, then return.
  void Ret(int drop) {
    Drop(drop);
    blr();
  }

  // If the value is a NaN, canonicalize the value else, do nothing.
  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
  void CanonicalizeNaN(const DoubleRegister value) {
    CanonicalizeNaN(value, value);  // in-place canonicalization
  }
  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  // Move values between integer and floating point registers.
  void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
  void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                              Register scratch);
  void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                        Register src_hi,
#endif
                        Register src);
#if V8_TARGET_ARCH_PPC64
  void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
                                  Register src_lo, Register scratch);
#endif
  void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
  void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
  void MovDoubleLowToInt(Register dst, DoubleRegister src);
  void MovDoubleHighToInt(Register dst, DoubleRegister src);
  void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      Register dst_hi,
#endif
      Register dst, DoubleRegister src);
  void MovIntToFloat(DoubleRegister dst, Register src);
  void MovFloatToInt(Register dst, DoubleRegister src);
  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DoubleRegister dst, DoubleRegister src);

  // In-place untag; see the two-register overload below.
  void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
    SmiUntag(reg, reg, rc, scale);
  }

  // Untag the smi in |src| into |dst|, additionally scaling the result by
  // 2^scale. The shift direction depends on whether the requested scale
  // exceeds the smi shift.
  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
    if (scale > kSmiShift) {
      ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
    } else if (scale < kSmiShift) {
      ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
    } else {
      // do nothing
    }
  }
  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
  // and, if !test, shift them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd, RCBit rc = LeaveRC,
                              bool test = false) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
    int rotate = (rangeEnd == 0) ? 0 : kBitsPerSystemPointer - rangeEnd;
    int width = rangeStart - rangeEnd + 1;
    if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
      // Prefer faster andi when applicable.
      andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
    } else {
#if V8_TARGET_ARCH_PPC64
      rldicl(dst, src, rotate, kBitsPerSystemPointer - width, rc);
#else
      rlwinm(dst, src, rotate, kBitsPerSystemPointer - width,
             kBitsPerSystemPointer - 1, rc);
#endif
    }
  }

  // Extract a single bit into the least significant bit of dst.
  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
                         RCBit rc = LeaveRC, bool test = false) {
    ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC, bool test = false) {
    int start = kBitsPerSystemPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    // Scan down from the top for the first set bit of the mask.
    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    // Continue through the run of set bits to find its low end.
    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end, rc, test);
  }

  // Test single bit in value.
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
  }

  // Test consecutive bit range in value. Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC, true);
  }
  // Test consecutive bit range in value. Range is defined by
  // rangeStart - rangeEnd.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
  }

  // Test the smi tag bits of value; condition flags end up in cr0.
  inline void TestIfSmi(Register value, Register scratch) {
    TestBitRange(value, kSmiTagSize - 1, 0, scratch);
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value, r0);
    beq(smi_label, cr0);  // branch if SMI
  }
  void JumpIfEqual(Register x, int32_t y, Label* dest);
  void JumpIfLessThan(Register x, int32_t y, Label* dest);

#if V8_TARGET_ARCH_PPC64
  inline void TestIfInt32(Register value, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    extsw(scratch, value);
    cmp(scratch, value, cr);
  }
#else
  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    srawi(scratch, lo_word, 31);
    cmp(scratch, hi_word, cr);
  }
#endif

  // Overflow handling functions.
  // Usage: call the appropriate arithmetic function and then call one of the
  // flow control functions with the corresponding label.

  // Compute dst = left + right, setting condition codes. dst may be same as
  // either left or right (or a unique register). left and right must not be
  // the same register.
  void AddAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);
  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
                              Register overflow_dst, Register scratch = r0);

  // Compute dst = left - right, setting condition codes. dst may be same as
  // either left or right (or a unique register). left and right must not be
  // the same register.
  void SubAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  void LoadConstantPoolPointerRegister();

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void AbortConstantPoolBuilding() {
#ifdef DEBUG
    // Avoid DCHECK(!is_linked()) failure in ~Label()
    bind(ConstantPoolPosition());
#endif
  }

  void ResetSpeculationPoisonRegister();

 private:
  // Total shift applied when tagging a smi.
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);
  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Handle<Code> code_target,
                           Address wasm_target);
};
669 
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
      : TurboAssembler(options, buffer, size) {}

  // Delegates to the main constructor with default assembler options.
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}

  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);
683 
684  // ---------------------------------------------------------------------------
685  // GC Support
686 
687  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
688  Register address);
689 
690  void JumpToJSEntry(Register target);
691  // Check if object is in new space. Jumps if the object is not in new space.
692  // The register scratch can be object itself, but scratch will be clobbered.
693  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
694  InNewSpace(object, scratch, eq, branch);
695  }
696 
697  // Check if object is in new space. Jumps if the object is in new space.
698  // The register scratch can be object itself, but it will be clobbered.
699  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
700  InNewSpace(object, scratch, ne, branch);
701  }
702 
703  // Check if an object has a given incremental marking color.
704  void HasColor(Register object, Register scratch0, Register scratch1,
705  Label* has_color, int first_bit, int second_bit);
706 
707  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
708  Label* on_black);
709 
710  // Checks the color of an object. If the object is white we jump to the
711  // incremental marker.
712  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
713  Register scratch3, Label* value_is_white);
714 
715  // Notify the garbage collector that we wrote a pointer into an object.
716  // |object| is the object being stored into, |value| is the object being
717  // stored. value and scratch registers are clobbered by the operation.
718  // The offset is the offset from the start of the object, not the offset from
719  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
720  void RecordWriteField(
721  Register object, int offset, Register value, Register scratch,
722  LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
723  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
724  SmiCheck smi_check = INLINE_SMI_CHECK);
725 
726  // For a given |object| notify the garbage collector that the slot |address|
727  // has been written. |value| is the object being stored. The value and
728  // address registers are clobbered by the operation.
729  void RecordWrite(
730  Register object, Register address, Register value,
731  LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
732  RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
733  SmiCheck smi_check = INLINE_SMI_CHECK);
734 
  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  // Enter an exit frame.
  // stack_space - extra stack space, used for parameters before the call
  // to C.  At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame.
  // NOTE(review): the original comment said "Expects the return value in
  // r0", but kReturnRegister0 is r3 on PPC (the r0 wording looks copied
  // from the ARM port) -- confirm against the definition.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);
751 
  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  // Load slot |index| of the native context into |dst|.
  void LoadNativeContextSlot(int index, Register dst);
758 
  // ----------------------------------------------------------------
  // new PPC macro-assembler interfaces that are slightly higher level
  // than assembler-ppc and may generate variable length sequences

  // Pointer-size word load/store.  |scratch| is presumably used to
  // materialize offsets the addressing mode cannot encode -- confirm.
  // NOTE(review): the old comment here ("load a literal double value
  // <value> to FPR <result>") was stale and matched none of the
  // declarations that follow; it has been replaced.
  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
  void StoreWord(Register src, const MemOperand& mem, Register scratch);

  // 16-bit load/store.  The "Arith" variant presumably sign-extends
  // (PPC "algebraic" halfword load) -- confirm against the definition.
  void LoadHalfWord(Register dst, const MemOperand& mem,
                    Register scratch = no_reg);
  void LoadHalfWordArith(Register dst, const MemOperand& mem,
                         Register scratch = no_reg);
  void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);

  // 8-bit load/store.
  void LoadByte(Register dst, const MemOperand& mem, Register scratch);
  void StoreByte(Register src, const MemOperand& mem, Register scratch);

  // Load/store with the access width chosen from |r| (see Representation).
  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);
  // Double-precision load; the "U" suffix presumably denotes the
  // unaligned-capable form -- confirm against the definition.
  void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  // Compare-logical (unsigned) against an immediate operand; |scratch| may
  // be needed to materialize it.  Condition register defaults to cr7.
  void Cmplwi(Register src1, const Operand& src2, Register scratch,
              CRegister cr = cr7);
  // Bitwise operations taking an Operand (immediate) right-hand side.
  void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);

  // Arithmetic and comparisons against Smi literals; |scratch| is used to
  // materialize the Smi constant.
  void AddSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
  void SubSmiLiteral(Register dst, Register src, Smi smi, Register scratch);
  void CmpSmiLiteral(Register src1, Smi smi, Register scratch,
                     CRegister cr = cr7);
  void CmplSmiLiteral(Register src1, Smi smi, Register scratch,
                      CRegister cr = cr7);
  void AndSmiLiteral(Register dst, Register src, Smi smi, Register scratch,
                     RCBit rc = LeaveRC);
797 
  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // NOTE(review): the paragraph below documents a frame-dropping helper
  // (it refers to |callee_args_count| and |caller_args_count_reg|), but no
  // matching declaration follows -- the comment appears orphaned after the
  // function was removed; confirm and delete if so.
  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  // Overload taking an explicit expected-parameter count instead of a
  // new.target register.
  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  // Emit a debug break stop.
  void DebugBreak();
  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler
  // chain.  Must preserve the result register.
  void PopStackHandler();
837 
  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map
  // register are the same register).  It leaves the heap object in the
  // heap_object register unless the heap_object register is the same
  // register as one of the other registers.
  // Type_reg can be no_reg.  In that case ip is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, RootIndex index);
  // Load the root value at |index| and push it onto the stack.
  // r0 is used as the staging register and is clobbered.
  void PushRoot(RootIndex index) {
    LoadRoot(r0, index);
    Push(r0);
  }

  // Compare the object in a register to a root-list value and jump if they
  // are equal.  (CompareRoot clobbers ip, per its comment above.)
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
    CompareRoot(with, index);
    beq(if_equal);
  }

  // Compare the object in a register to a root-list value and jump if they
  // are not equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
    CompareRoot(with, index);
    bne(if_not_equal);
  }

  // Try to convert a double to a signed 32-bit integer.
  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
                             Register scratch, DoubleRegister double_scratch);
881 
  // ---------------------------------------------------------------------------
  // Runtime calls

  // Size of the call sequence emitted for a call to |target| when the code
  // size is not predictable.  NOTE(review): units (bytes vs instructions)
  // not visible here -- confirm against the definition.
  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);
  // Call the JS entry point held in |target|.
  void CallJSEntry(Register target);

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = al);
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
897  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
898  const Runtime::Function* function = Runtime::FunctionForId(fid);
899  CallRuntime(function, function->nargs, kSaveFPRegs);
900  }
901 
902  // Convenience function: Same as above, but takes the fid instead.
903  void CallRuntime(Runtime::FunctionId fid,
904  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
905  const Runtime::Function* function = Runtime::FunctionForId(fid);
906  CallRuntime(function, function->nargs, save_doubles);
907  }
908 
909  // Convenience function: Same as above, but takes the fid instead.
910  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
911  SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
912  CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
913  }
914 
  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);
926 
  // ---------------------------------------------------------------------------
  // In-place weak references.

  // Load the value behind the weak reference in |in| into |out|, jumping to
  // |target_if_cleared| if the reference has been cleared.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  // Add/subtract |value| to/from the counter, when counters are enabled.
  // Both scratch registers are clobbered.
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
938 
  // ---------------------------------------------------------------------------
  // Smi utilities

  // Tag an integer as a Smi by shifting left by kSmiShift.
  // In-place variant:
  void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
  void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
  }
947 
  // Convert a Smi in |src| to a byte offset into an array of pointer-size
  // elements: logically (smi >> kSmiShift) << kPointerSizeLog2, folded into
  // a single shift whose direction depends on whether kSmiShift exceeds
  // kPointerSizeLog2 (PPC64) or not (PPC32).
  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }
957 
  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.  (Fixed typo: "Souce".)
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Jump if the register contains a non-smi.  r0 and cr0 are clobbered by
  // the smi test.  NOTE(review): the old comment said "either of the
  // registers", but this overload tests a single register.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value, r0);
    bne(not_smi_label, cr0);
  }
  // Jump if either of the registers contain a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);
973 
974 
975 
#if V8_TARGET_ARCH_PPC64
  // Ensure it is permissible to read/write int value directly from
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
// On 64-bit little-endian the 32-bit payload occupies the upper half of the
// Smi word, so bias field offsets by half a pointer to address it directly.
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1005 
1006  // ---------------------------------------------------------------------------
1007  // Patching helpers.
1008 
1009  template <typename Field>
1010  void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
1011  ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
1012  rc);
1013  }
1014 
1015  template <typename Field>
1016  void DecodeField(Register reg, RCBit rc = LeaveRC) {
1017  DecodeField<Field>(reg, reg, rc);
1018  }
1019 
 private:
  // Left-shift distance used when tagging/untagging Smis.
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
1039 };
1040 
1041 // -----------------------------------------------------------------------------
1042 // Static helper functions.
1043 
1044 inline MemOperand ContextMemOperand(Register context, int index = 0) {
1045  return MemOperand(context, Context::SlotOffset(index));
1046 }
1047 
1048 
// MemOperand for the NATIVE_CONTEXT_INDEX slot of the current context (cp).
inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
1052 
// Lets codegen written against the "__ op(...)" convention dispatch through a
// MacroAssembler pointer.
#define ACCESS_MASM(masm) masm->
1054 
1055 } // namespace internal
1056 } // namespace v8
1057 
1058 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
// NOTE(review): extraction artifact from the documentation page
// ('Definition: libplatform.h:13'); not part of this header.