V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
assembler-ppc.h
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36 
37 // A light-weight PPC Assembler
38 // Generates user mode instructions for the PPC architecture up
39 
40 #ifndef V8_PPC_ASSEMBLER_PPC_H_
41 #define V8_PPC_ASSEMBLER_PPC_H_
42 
43 #include <stdio.h>
44 #include <vector>
45 
46 #include "src/assembler.h"
47 #include "src/constant-pool.h"
48 #include "src/double.h"
49 #include "src/external-reference.h"
50 #include "src/label.h"
51 #include "src/objects/smi.h"
52 #include "src/ppc/constants-ppc.h"
53 
54 #if V8_HOST_ARCH_PPC && \
55  (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
56 #define ABI_USES_FUNCTION_DESCRIPTORS 1
57 #else
58 #define ABI_USES_FUNCTION_DESCRIPTORS 0
59 #endif
60 
61 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
62 #define ABI_PASSES_HANDLES_IN_REGS 1
63 #else
64 #define ABI_PASSES_HANDLES_IN_REGS 0
65 #endif
66 
67 #if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
68 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
69 #else
70 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
71 #endif
72 
73 #if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
74 #define ABI_CALL_VIA_IP 1
75 #else
76 #define ABI_CALL_VIA_IP 0
77 #endif
78 
79 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
80 #define ABI_TOC_REGISTER 2
81 #else
82 #define ABI_TOC_REGISTER 13
83 #endif
84 
85 #define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
86 
87 namespace v8 {
88 namespace internal {
89 
90 // clang-format off
91 #define GENERAL_REGISTERS(V) \
92  V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
93  V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
94  V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
95  V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
96 
97 #if V8_EMBEDDED_CONSTANT_POOL
98 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
99  V(r3) V(r4) V(r5) V(r6) V(r7) \
100  V(r8) V(r9) V(r10) V(r14) V(r15) \
101  V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
102  V(r24) V(r25) V(r26) V(r27) V(r30)
103 #else
104 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
105  V(r3) V(r4) V(r5) V(r6) V(r7) \
106  V(r8) V(r9) V(r10) V(r14) V(r15) \
107  V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
108  V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
109 #endif
110 
111 #define LOW_DOUBLE_REGISTERS(V) \
112  V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
113  V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
114 
115 #define NON_LOW_DOUBLE_REGISTERS(V) \
116  V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
117  V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
118 
119 #define DOUBLE_REGISTERS(V) \
120  LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
121 
122 #define FLOAT_REGISTERS DOUBLE_REGISTERS
123 #define SIMD128_REGISTERS DOUBLE_REGISTERS
124 
125 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
126  V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
127  V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
128  V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
129  V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
130 
131 #define C_REGISTERS(V) \
132  V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
133  V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
134 // clang-format on
135 
136 // Register list in load/store instructions
137 // Note that the bit values must match those used in actual instruction encoding
138 const int kNumRegs = 32;
139 
140 // Caller-saved/arguments registers
141 const RegList kJSCallerSaved = 1 << 3 | // r3 a1
142  1 << 4 | // r4 a2
143  1 << 5 | // r5 a3
144  1 << 6 | // r6 a4
145  1 << 7 | // r7 a5
146  1 << 8 | // r8 a6
147  1 << 9 | // r9 a7
148  1 << 10 | // r10 a8
149  1 << 11;
150 
151 const int kNumJSCallerSaved = 9;
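// Taken together, the nine bits above (r3 through r11) form the mask 0xFF8;
// kNumJSCallerSaved counts those nine registers.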
152 
153 // Return the code of the n-th caller-saved register available to JavaScript
154 // e.g. JSCallerSavedCode(0) returns the code of the first such register (r3)
155 int JSCallerSavedCode(int n);
156 
157 // Callee-saved registers preserved when switching from C to JavaScript
158 const RegList kCalleeSaved = 1 << 14 | // r14
159  1 << 15 | // r15
160  1 << 16 | // r16
161  1 << 17 | // r17
162  1 << 18 | // r18
163  1 << 19 | // r19
164  1 << 20 | // r20
165  1 << 21 | // r21
166  1 << 22 | // r22
167  1 << 23 | // r23
168  1 << 24 | // r24
169  1 << 25 | // r25
170  1 << 26 | // r26
171  1 << 27 | // r27
172  1 << 28 | // r28
173  1 << 29 | // r29
174  1 << 30 | // r30
175  1 << 31; // r31
176 
177 const int kNumCalleeSaved = 18;
178 
179 const RegList kCallerSavedDoubles = 1 << 0 | // d0
180  1 << 1 | // d1
181  1 << 2 | // d2
182  1 << 3 | // d3
183  1 << 4 | // d4
184  1 << 5 | // d5
185  1 << 6 | // d6
186  1 << 7 | // d7
187  1 << 8 | // d8
188  1 << 9 | // d9
189  1 << 10 | // d10
190  1 << 11 | // d11
191  1 << 12 | // d12
192  1 << 13; // d13
193 
194 const int kNumCallerSavedDoubles = 14;
195 
196 const RegList kCalleeSavedDoubles = 1 << 14 | // d14
197  1 << 15 | // d15
198  1 << 16 | // d16
199  1 << 17 | // d17
200  1 << 18 | // d18
201  1 << 19 | // d19
202  1 << 20 | // d20
203  1 << 21 | // d21
204  1 << 22 | // d22
205  1 << 23 | // d23
206  1 << 24 | // d24
207  1 << 25 | // d25
208  1 << 26 | // d26
209  1 << 27 | // d27
210  1 << 28 | // d28
211  1 << 29 | // d29
212  1 << 30 | // d30
213  1 << 31; // d31
214 
215 const int kNumCalleeSavedDoubles = 18;
216 
217 // Number of registers for which space is reserved in safepoints. Must be a
218 // multiple of 8.
219 const int kNumSafepointRegisters = 32;
220 
221 // The following constants describe the stack frame linkage area as
222 // defined by the ABI. Note that kNumRequiredStackFrameSlots must
223 // satisfy alignment requirements (rounding up if required).
224 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN // ppc64le linux
225 // [0] back chain
226 // [1] condition register save area
227 // [2] link register save area
228 // [3] TOC save area
229 // [4] Parameter1 save area
230 // ...
231 // [11] Parameter8 save area
232 // [12] Parameter9 slot (if necessary)
233 // ...
234 const int kNumRequiredStackFrameSlots = 12;
235 const int kStackFrameLRSlot = 2;
236 const int kStackFrameExtraParamSlot = 12;
237 #else // AIX
238 // [0] back chain
239 // [1] condition register save area
240 // [2] link register save area
241 // [3] reserved for compiler
242 // [4] reserved by binder
243 // [5] TOC save area
244 // [6] Parameter1 save area
245 // ...
246 // [13] Parameter8 save area
247 // [14] Parameter9 slot (if necessary)
248 // ...
249 const int kNumRequiredStackFrameSlots = 14;
250 const int kStackFrameLRSlot = 2;
251 const int kStackFrameExtraParamSlot = 14;
252 #endif
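// Slot indices above translate to byte offsets by multiplying with the 8-byte
// pointer size of PPC64; for example, on ppc64le the link register save area
// at kStackFrameLRSlot == 2 sits at byte offset 16 from the frame's back chain.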
253 
254 // Define the list of registers actually saved at safepoints.
255 // Note that the number of saved registers may be smaller than the reserved
256 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
257 const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
258 const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
259 
260 enum RegisterCode {
261 #define REGISTER_CODE(R) kRegCode_##R,
262  GENERAL_REGISTERS(REGISTER_CODE)
263 #undef REGISTER_CODE
264  kRegAfterLast
265 };
266 
267 class Register : public RegisterBase<Register, kRegAfterLast> {
268  public:
269 #if V8_TARGET_LITTLE_ENDIAN
270  static constexpr int kMantissaOffset = 0;
271  static constexpr int kExponentOffset = 4;
272 #else
273  static constexpr int kMantissaOffset = 4;
274  static constexpr int kExponentOffset = 0;
275 #endif
276 
277  private:
278  friend class RegisterBase;
279  explicit constexpr Register(int code) : RegisterBase(code) {}
280 };
281 
282 ASSERT_TRIVIALLY_COPYABLE(Register);
283 static_assert(sizeof(Register) == sizeof(int),
284  "Register can efficiently be passed by value");
285 
286 #define DEFINE_REGISTER(R) \
287  constexpr Register R = Register::from_code<kRegCode_##R>();
288 GENERAL_REGISTERS(DEFINE_REGISTER)
289 #undef DEFINE_REGISTER
290 constexpr Register no_reg = Register::no_reg();
291 
292 // Aliases
293 constexpr Register kConstantPoolRegister = r28; // Constant pool.
294 constexpr Register kRootRegister = r29; // Roots array pointer.
295 constexpr Register cp = r30; // JavaScript context pointer.
296 
297 constexpr bool kPadArguments = false;
298 constexpr bool kSimpleFPAliasing = true;
299 constexpr bool kSimdMaskRegisters = false;
300 
301 enum DoubleRegisterCode {
302 #define REGISTER_CODE(R) kDoubleCode_##R,
303  DOUBLE_REGISTERS(REGISTER_CODE)
304 #undef REGISTER_CODE
305  kDoubleAfterLast
306 };
307 
308 // Double word FP register.
309 class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
310  public:
311  // A few double registers are reserved: one as a scratch register and one to
312  // hold 0.0, that does not fit in the immediate field of vmov instructions.
313  // d14: 0.0
314  // d15: scratch register.
315  static constexpr int kSizeInBytes = 8;
316  inline static int NumRegisters();
317 
318  private:
319  friend class RegisterBase;
320  explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
321 };
322 
323 ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
324 static_assert(sizeof(DoubleRegister) == sizeof(int),
325  "DoubleRegister can efficiently be passed by value");
326 
327 typedef DoubleRegister FloatRegister;
328 
329 // TODO(ppc) Define SIMD registers.
330 typedef DoubleRegister Simd128Register;
331 
332 #define DEFINE_REGISTER(R) \
333  constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
334 DOUBLE_REGISTERS(DEFINE_REGISTER)
335 #undef DEFINE_REGISTER
336 constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
337 
338 constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
339 constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
340 constexpr DoubleRegister kDoubleRegZero = d14;
341 constexpr DoubleRegister kScratchDoubleReg = d13;
342 
343 Register ToRegister(int num);
344 
345 enum CRegisterCode {
346 #define REGISTER_CODE(R) kCCode_##R,
347  C_REGISTERS(REGISTER_CODE)
348 #undef REGISTER_CODE
349  kCAfterLast
350 };
351 
352 // Condition register (CR) fields
353 class CRegister : public RegisterBase<CRegister, kCAfterLast> {
354  friend class RegisterBase;
355  explicit constexpr CRegister(int code) : RegisterBase(code) {}
356 };
357 
358 constexpr CRegister no_creg = CRegister::no_reg();
359 #define DECLARE_C_REGISTER(R) \
360  constexpr CRegister R = CRegister::from_code<kCCode_##R>();
361 C_REGISTERS(DECLARE_C_REGISTER)
362 #undef DECLARE_C_REGISTER
363 
364 // -----------------------------------------------------------------------------
365 // Machine instruction Operands
366 
367 // Class Operand represents a shifter operand in data processing instructions
368 class Operand {
369  public:
370  // immediate
371  V8_INLINE explicit Operand(intptr_t immediate,
372  RelocInfo::Mode rmode = RelocInfo::NONE)
373  : rmode_(rmode) {
374  value_.immediate = immediate;
375  }
376  V8_INLINE static Operand Zero() { return Operand(static_cast<intptr_t>(0)); }
377  V8_INLINE explicit Operand(const ExternalReference& f)
378  : rmode_(RelocInfo::EXTERNAL_REFERENCE) {
379  value_.immediate = static_cast<intptr_t>(f.address());
380  }
381  explicit Operand(Handle<HeapObject> handle);
382  V8_INLINE explicit Operand(Smi value) : rmode_(RelocInfo::NONE) {
383  value_.immediate = static_cast<intptr_t>(value.ptr());
384  }
385  // rm
386  V8_INLINE explicit Operand(Register rm);
387 
388  static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
389  static Operand EmbeddedStringConstant(const StringConstantBase* str);
390  static Operand EmbeddedCode(CodeStub* stub);
391 
392  // Return true if this is a register operand.
393  V8_INLINE bool is_reg() const { return rm_.is_valid(); }
394 
395  bool must_output_reloc_info(const Assembler* assembler) const;
396 
397  inline intptr_t immediate() const {
398  DCHECK(IsImmediate());
399  DCHECK(!IsHeapObjectRequest());
400  return value_.immediate;
401  }
402  bool IsImmediate() const { return !rm_.is_valid(); }
403 
404  HeapObjectRequest heap_object_request() const {
405  DCHECK(IsHeapObjectRequest());
406  return value_.heap_object_request;
407  }
408 
409  Register rm() const { return rm_; }
410 
411  bool IsHeapObjectRequest() const {
412  DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
413  DCHECK_IMPLIES(is_heap_object_request_,
414  rmode_ == RelocInfo::EMBEDDED_OBJECT ||
415  rmode_ == RelocInfo::CODE_TARGET);
416  return is_heap_object_request_;
417  }
418 
419  private:
420  Register rm_ = no_reg;
421  union Value {
422  Value() {}
423  HeapObjectRequest heap_object_request; // if is_heap_object_request_
424  intptr_t immediate; // otherwise
425  } value_; // valid if rm_ == no_reg
426  bool is_heap_object_request_ = false;
427 
428  RelocInfo::Mode rmode_;
429 
430  friend class Assembler;
431  friend class MacroAssembler;
432 };
433 
434 
435 // Class MemOperand represents a memory operand in load and store instructions
436 // On PowerPC we have base register + 16bit signed value
437 // Alternatively we can have a 16bit signed value immediate
438 class MemOperand {
439  public:
440  explicit MemOperand(Register rn, int32_t offset = 0);
441 
442  explicit MemOperand(Register ra, Register rb);
443 
444  int32_t offset() const {
445  return offset_;
446  }
447 
448  // PowerPC - base register
449  Register ra() const {
450  return ra_;
451  }
452 
453  Register rb() const {
454  return rb_;
455  }
456 
457  private:
458  Register ra_; // base
459  int32_t offset_; // offset
460  Register rb_; // index
461 
462  friend class Assembler;
463 };
464 
465 
466 class DeferredRelocInfo {
467  public:
468  DeferredRelocInfo() {}
469  DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
470  : position_(position), rmode_(rmode), data_(data) {}
471 
472  int position() const { return position_; }
473  RelocInfo::Mode rmode() const { return rmode_; }
474  intptr_t data() const { return data_; }
475 
476  private:
477  int position_;
478  RelocInfo::Mode rmode_;
479  intptr_t data_;
480 };
481 
482 
483 class Assembler : public AssemblerBase {
484  public:
485  // Create an assembler. Instructions and relocation information are emitted
486  // into a buffer, with the instructions starting from the beginning and the
487  // relocation information starting from the end of the buffer. See CodeDesc
488  // for a detailed comment on the layout (globals.h).
489  //
490  // If the provided buffer is nullptr, the assembler allocates and grows its
491  // own buffer, and buffer_size determines the initial buffer size. The buffer
492  // is owned by the assembler and deallocated upon destruction of the
493  // assembler.
494  //
495  // If the provided buffer is not nullptr, the assembler uses the provided
496  // buffer for code generation and assumes its size to be buffer_size. If the
497  // buffer is too small, a fatal error occurs. No deallocation of the buffer is
498  // done upon destruction of the assembler.
499  Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
500  virtual ~Assembler() {}
501 
502  // GetCode emits any pending (non-emitted) code and fills the descriptor
503  // desc. GetCode() is idempotent; it returns the same result if no other
504  // Assembler functions are invoked in between GetCode() calls.
505  void GetCode(Isolate* isolate, CodeDesc* desc);
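// Usage sketch (illustrative only; assumes an initialized Isolate* isolate and
// default-constructed AssemblerOptions). Passing nullptr lets the assembler
// own and grow its buffer:
//
//   Assembler masm(AssemblerOptions{}, nullptr, 4096);
//   masm.add(r3, r3, r4);          // r3 = r3 + r4
//   masm.blr();                    // return via the link register
//   CodeDesc desc;
//   masm.GetCode(isolate, &desc);  // finalize code and fill the descriptor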
506 
507  // Label operations & relative jumps (PPUM Appendix D)
508  //
509  // Takes a branch opcode (cc) and a label (L) and generates
510  // either a backward branch or a forward branch and links it
511  // to the label fixup chain. Usage:
512  //
513  // Label L; // unbound label
514  // j(cc, &L); // forward branch to unbound label
515  // bind(&L); // bind label to the current pc
516  // j(cc, &L); // backward branch to bound label
517  // bind(&L); // illegal: a label may be bound only once
518  //
519  // Note: The same Label can be used for forward and backward branches
520  // but it may be bound only once.
521 
522  void bind(Label* L); // binds an unbound label L to the current code position
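// Usage sketch (illustrative only), using the PPC convenience branches
// declared further below instead of the generic j(cc, &L) shown above:
//
//   Label done;
//   masm.cmpi(r3, Operand::Zero());
//   masm.beq(&done);               // forward branch links the unbound label
//   masm.addi(r3, r3, Operand(1));
//   masm.bind(&done);              // bind the label to the current pc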
523 
524  // Links a label at the current pc_offset(). If already bound, returns the
525  // bound position. If already linked, returns the position of the prior link.
526  // Otherwise, returns the current pc_offset().
527  int link(Label* L);
528 
529  // Determines if Label is bound and near enough so that a single
530  // branch instruction can be used to reach it.
531  bool is_near(Label* L, Condition cond);
532 
533  // Returns the branch offset to the given label from the current code position
534  // Links the label to the current position if it is still unbound
535  int branch_offset(Label* L) {
536  if (L->is_unused() && !trampoline_emitted_) {
537  TrackBranch();
538  }
539  return link(L) - pc_offset();
540  }
541 
542  // Puts a label's target address at the given position.
543  // The high 8 bits are set to zero.
544  void label_at_put(Label* L, int at_offset);
545 
546  V8_INLINE static bool IsConstantPoolLoadStart(
547  Address pc, ConstantPoolEntry::Access* access = nullptr);
548  V8_INLINE static bool IsConstantPoolLoadEnd(
549  Address pc, ConstantPoolEntry::Access* access = nullptr);
550  V8_INLINE static int GetConstantPoolOffset(Address pc,
551  ConstantPoolEntry::Access access,
552  ConstantPoolEntry::Type type);
553  V8_INLINE void PatchConstantPoolAccessInstruction(
554  int pc_offset, int offset, ConstantPoolEntry::Access access,
555  ConstantPoolEntry::Type type);
556 
557  // Return the address in the constant pool of the code target address used by
558  // the branch/call instruction at pc, or the object in a mov.
559  V8_INLINE static Address target_constant_pool_address_at(
560  Address pc, Address constant_pool, ConstantPoolEntry::Access access,
561  ConstantPoolEntry::Type type);
562 
563  // Read/Modify the code target address in the branch/call instruction at pc.
564  // The isolate argument is unused (and may be nullptr) when skipping flushing.
565  V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
566  V8_INLINE static void set_target_address_at(
567  Address pc, Address constant_pool, Address target,
568  ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
569 
570  // Return the code target address at a call site from the return address
571  // of that call in the instruction stream.
572  inline static Address target_address_from_return_address(Address pc);
573 
574  // Given the address of the beginning of a call, return the address
575  // in the instruction stream that the call will return to.
576  V8_INLINE static Address return_address_from_call_start(Address pc);
577 
578  // This sets the branch destination.
579  // This is for calls and branches within generated code.
580  inline static void deserialization_set_special_target_at(
581  Address instruction_payload, Code code, Address target);
582 
583  // Get the size of the special target encoded at 'instruction_payload'.
584  inline static int deserialization_special_target_size(
585  Address instruction_payload);
586 
587  // This sets the internal reference at the pc.
588  inline static void deserialization_set_target_internal_reference_at(
589  Address pc, Address target,
590  RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
591 
592  // Here we are patching the address in the LUI/ORI instruction pair.
593  // These values are used in the serialization process and must be zero for
594  // PPC platform, as Code, Embedded Object or External-reference pointers
595  // are split across two consecutive instructions and don't exist separately
596  // in the code, so the serializer should not step forwards in memory after
597  // a target is resolved and written.
598  static constexpr int kSpecialTargetSize = 0;
599 
600 // Number of instructions to load an address via a mov sequence.
601 #if V8_TARGET_ARCH_PPC64
602  static constexpr int kMovInstructionsConstantPool = 1;
603  static constexpr int kMovInstructionsNoConstantPool = 5;
604 #if defined(V8_PPC_TAGGING_OPT)
605  static constexpr int kTaggedLoadInstructions = 1;
606 #else
607  static constexpr int kTaggedLoadInstructions = 2;
608 #endif
609 #else
610  static constexpr int kMovInstructionsConstantPool = 1;
611  static constexpr int kMovInstructionsNoConstantPool = 2;
612  static constexpr int kTaggedLoadInstructions = 1;
613 #endif
614  static constexpr int kMovInstructions = FLAG_enable_embedded_constant_pool
615  ? kMovInstructionsConstantPool
616  : kMovInstructionsNoConstantPool;
617 
618  // Distance between the instruction referring to the address of the call
619  // target and the return address.
620 
621  // Call sequence is a FIXED_SEQUENCE:
622  // mov r8, @ call address
623  // mtlr r8
624  // blrl
625  // @ return address
626  static constexpr int kCallTargetAddressOffset =
627  (kMovInstructions + 2) * kInstrSize;
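// For example, on PPC64 without an embedded constant pool kMovInstructions is
// kMovInstructionsNoConstantPool == 5, giving a kCallTargetAddressOffset of
// (5 + 2) * kInstrSize = 28 bytes (seven instructions).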
628 
629  static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
630  return ((cr.code() * CRWIDTH) + crbit);
631  }
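// For example, encode_crbit(cr7, CR_EQ) yields 7 * CRWIDTH + CR_EQ, i.e.
// 7 * 4 + 2 == 30, assuming CRWIDTH == 4 and CR_EQ == 2 from constants-ppc.h.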
632 
633 #define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
634  inline void name(const Register rt, const Register ra, \
635  const Register rb, const RCBit rc = LeaveRC) { \
636  x_form(instr_name, rt, ra, rb, rc); \
637  }
638 
639 #define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
640  inline void name(const Register ra, const Register rs, \
641  const Register rb, const RCBit rc = LeaveRC) { \
642  x_form(instr_name, rs, ra, rb, rc); \
643  }
644 
645 #define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
646  inline void name(const Register dst, const Register src, \
647  const RCBit rc = LeaveRC) { \
648  x_form(instr_name, src, dst, r0, rc); \
649  }
650 
651 #define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
652  template <class R> \
653  inline void name(const R rt, const Register ra, const Register rb, \
654  const RCBit rc = LeaveRC) { \
655  x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
656  } \
657  template <class R> \
658  inline void name(const R dst, const MemOperand& src) { \
659  name(dst, src.ra(), src.rb()); \
660  }
661 
662 #define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
663  inline void name(const Register dst, const Register src, \
664  const int sh, const RCBit rc = LeaveRC) { \
665  x_form(instr_name, src.code(), dst.code(), sh, rc); \
666  }
667 
668 #define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
669  inline void name(const Register src1, const Register src2, \
670  const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
671  x_form(instr_name, cr, src1, src2, rc); \
672  } \
673  inline void name##w(const Register src1, const Register src2, \
674  const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
675  x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
676  }
677 
678 #define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
679  inline void name(const Register dst, const MemOperand& src) { \
680  x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
681  }
682 #define DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM(name, instr_name, instr_value) \
683  inline void name(const Register dst, const MemOperand& src) { \
684  DCHECK(src.ra_ != r0); \
685  x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
686  }
687 
688  inline void x_form(Instr instr, int f1, int f2, int f3, int rc) {
689  emit(instr | f1 * B21 | f2 * B16 | f3 * B11 | rc);
690  }
691  inline void x_form(Instr instr, Register rs, Register ra, Register rb,
692  RCBit rc) {
693  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | rc);
694  }
695  inline void x_form(Instr instr, Register ra, Register rs, Register rb,
696  EHBit eh = SetEH) {
697  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | eh);
698  }
699  inline void x_form(Instr instr, CRegister cr, Register s1, Register s2,
700  RCBit rc) {
701 #if V8_TARGET_ARCH_PPC64
702  int L = 1;
703 #else
704  int L = 0;
705 #endif
706  emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 |
707  s2.code() * B11 | rc);
708  }
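// In the emit() expressions above, multiplying by B21, B16 and B11 (1 << 21,
// 1 << 16, 1 << 11) places the register codes into the standard X-form fields,
// bits 21-25, 16-20 and 11-15 of the 32-bit instruction word, while the RC/EH
// bit occupies bit 0.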
709 
710  PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
711  PPC_X_OPCODE_B_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_B_FORM)
712  PPC_X_OPCODE_C_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_C_FORM)
713  PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
714  PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
715  PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
716  PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
717  PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
718 
719  inline void notx(Register dst, Register src, RCBit rc = LeaveRC) {
720  nor(dst, src, src, rc);
721  }
722  inline void lwax(Register rt, const MemOperand& src) {
723 #if V8_TARGET_ARCH_PPC64
724  Register ra = src.ra();
725  Register rb = src.rb();
726  DCHECK(ra != r0);
727  x_form(LWAX, rt, ra, rb, LeaveRC);
728 #else
729  lwzx(rt, src);
730 #endif
731  }
732  inline void extsw(Register rs, Register ra, RCBit rc = LeaveRC) {
733 #if V8_TARGET_ARCH_PPC64
734  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
735 #else
736  // nop on 32-bit
737  DCHECK(rs == ra && rc == LeaveRC);
738 #endif
739  }
740 
741 #undef DECLARE_PPC_X_INSTRUCTIONS_A_FORM
742 #undef DECLARE_PPC_X_INSTRUCTIONS_B_FORM
743 #undef DECLARE_PPC_X_INSTRUCTIONS_C_FORM
744 #undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
745 #undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
746 #undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
747 #undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
748 #undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
749 
750 #define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
751  inline void name(const DoubleRegister rt, const DoubleRegister ra, \
752  const DoubleRegister rb) { \
753  xx3_form(instr_name, rt, ra, rb); \
754  }
755 
756  inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
757  DoubleRegister b) {
758  int AX = ((a.code() & 0x20) >> 5) & 0x1;
759  int BX = ((b.code() & 0x20) >> 5) & 0x1;
760  int TX = ((t.code() & 0x20) >> 5) & 0x1;
761 
762  emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
763  (b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
764  }
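// Note on the XX3 (VSX) encoding above: the low five bits of each register
// code go into the T/A/B fields, and bit 5, extracted as TX/AX/BX, selects the
// upper half of the 64-entry VSX register file. The d0-d31 DoubleRegisters
// used here have codes below 32, so those extension bits stay 0.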
765 
766  PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
767 #undef DECLARE_PPC_XX3_INSTRUCTIONS
768 
769  // ---------------------------------------------------------------------------
770  // Code generation
771 
772  // Insert the smallest number of nop instructions
773  // possible to align the pc offset to a multiple
774  // of m. m must be a power of 2 (>= 4).
775  void Align(int m);
776  // Insert the smallest number of zero bytes possible to align the pc offset
777  // to a multiple of m. m must be a power of 2 (>= 2).
778  void DataAlign(int m);
779  // Aligns code to something that's optimal for a jump target for the platform.
780  void CodeTargetAlign();
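// For example, if pc_offset() is currently 20, Align(8) emits a single nop
// (one 4-byte instruction) so that the next instruction starts at offset 24.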
781 
782  // Branch instructions
783  void bclr(BOfield bo, int condition_bit, LKBit lk);
784  void blr();
785  void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
786  void b(int branch_offset, LKBit lk);
787 
788  void bcctr(BOfield bo, int condition_bit, LKBit lk);
789  void bctr();
790  void bctrl();
791 
792  // Convenience branch instructions using labels
793  void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }
794 
795  inline CRegister cmpi_optimization(CRegister cr) {
796  // Check whether the branch is preceded by an optimizable cmpi against 0.
797  // The cmpi can be deleted if it is also preceded by an instruction that
798  // sets the register used by the compare and supports a dot form.
799  unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
800  unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;
801  int pos = pc_offset();
802  int cmpi_pos = pc_offset() - kInstrSize;
803 
804  if (cmpi_pos > 0 && optimizable_cmpi_pos_ == cmpi_pos &&
805  cmpi_cr_.code() == cr.code() && last_bound_pos_ != pos) {
806  int xpos = cmpi_pos - kInstrSize;
807  int xinstr = instr_at(xpos);
808  int cmpi_ra = (instr_at(cmpi_pos) & 0x1f0000) >> 16;
809  // ra is at the same bit position for the three cases below.
810  int ra = (xinstr & 0x1f0000) >> 16;
811  if (cmpi_ra == ra) {
812  if ((xinstr & sradi_mask) == (EXT2 | SRADIX)) {
813  cr = cr0;
814  instr_at_put(xpos, xinstr | SetRC);
815  pc_ -= kInstrSize;
816  } else if ((xinstr & srawi_mask) == (EXT2 | SRAWIX)) {
817  cr = cr0;
818  instr_at_put(xpos, xinstr | SetRC);
819  pc_ -= kInstrSize;
820  } else if ((xinstr & kOpcodeMask) == ANDIx) {
821  cr = cr0;
822  pc_ -= kInstrSize;
823  // nothing to do here since andi. records.
824  }
825  // didn't match one of the above, must keep cmpwi.
826  }
827  }
828  return cr;
829  }
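// Illustrative example of the peephole above: for the emitted sequence
//
//   srawi(r3, r4, 1);              // shift that supports a record (dot) form
//   cmpwi(r3, Operand::Zero());    // compare against zero on the same register
//   beq(&L);
//
// the cmpwi is dropped, the srawi is rewritten to its record form (srawi.),
// and the branch is redirected to test cr0 instead of the requested cr.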
830 
831  void bc_short(Condition cond, Label* L, CRegister cr = cr7,
832  LKBit lk = LeaveLK) {
833  DCHECK(cond != al);
834  DCHECK(cr.code() >= 0 && cr.code() <= 7);
835 
836  cr = cmpi_optimization(cr);
837 
838  int b_offset = branch_offset(L);
839 
840  switch (cond) {
841  case eq:
842  bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk);
843  break;
844  case ne:
845  bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk);
846  break;
847  case gt:
848  bc(b_offset, BT, encode_crbit(cr, CR_GT), lk);
849  break;
850  case le:
851  bc(b_offset, BF, encode_crbit(cr, CR_GT), lk);
852  break;
853  case lt:
854  bc(b_offset, BT, encode_crbit(cr, CR_LT), lk);
855  break;
856  case ge:
857  bc(b_offset, BF, encode_crbit(cr, CR_LT), lk);
858  break;
859  case unordered:
860  bc(b_offset, BT, encode_crbit(cr, CR_FU), lk);
861  break;
862  case ordered:
863  bc(b_offset, BF, encode_crbit(cr, CR_FU), lk);
864  break;
865  case overflow:
866  bc(b_offset, BT, encode_crbit(cr, CR_SO), lk);
867  break;
868  case nooverflow:
869  bc(b_offset, BF, encode_crbit(cr, CR_SO), lk);
870  break;
871  default:
872  UNIMPLEMENTED();
873  }
874  }
875 
876  void bclr(Condition cond, CRegister cr = cr7, LKBit lk = LeaveLK) {
877  DCHECK(cond != al);
878  DCHECK(cr.code() >= 0 && cr.code() <= 7);
879 
880  cr = cmpi_optimization(cr);
881 
882  switch (cond) {
883  case eq:
884  bclr(BT, encode_crbit(cr, CR_EQ), lk);
885  break;
886  case ne:
887  bclr(BF, encode_crbit(cr, CR_EQ), lk);
888  break;
889  case gt:
890  bclr(BT, encode_crbit(cr, CR_GT), lk);
891  break;
892  case le:
893  bclr(BF, encode_crbit(cr, CR_GT), lk);
894  break;
895  case lt:
896  bclr(BT, encode_crbit(cr, CR_LT), lk);
897  break;
898  case ge:
899  bclr(BF, encode_crbit(cr, CR_LT), lk);
900  break;
901  case unordered:
902  bclr(BT, encode_crbit(cr, CR_FU), lk);
903  break;
904  case ordered:
905  bclr(BF, encode_crbit(cr, CR_FU), lk);
906  break;
907  case overflow:
908  bclr(BT, encode_crbit(cr, CR_SO), lk);
909  break;
910  case nooverflow:
911  bclr(BF, encode_crbit(cr, CR_SO), lk);
912  break;
913  default:
914  UNIMPLEMENTED();
915  }
916  }
917 
918  void isel(Register rt, Register ra, Register rb, int cb);
919  void isel(Condition cond, Register rt, Register ra, Register rb,
920  CRegister cr = cr7) {
921  DCHECK(cond != al);
922  DCHECK(cr.code() >= 0 && cr.code() <= 7);
923 
924  cr = cmpi_optimization(cr);
925 
926  switch (cond) {
927  case eq:
928  isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
929  break;
930  case ne:
931  isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
932  break;
933  case gt:
934  isel(rt, ra, rb, encode_crbit(cr, CR_GT));
935  break;
936  case le:
937  isel(rt, rb, ra, encode_crbit(cr, CR_GT));
938  break;
939  case lt:
940  isel(rt, ra, rb, encode_crbit(cr, CR_LT));
941  break;
942  case ge:
943  isel(rt, rb, ra, encode_crbit(cr, CR_LT));
944  break;
945  case unordered:
946  isel(rt, ra, rb, encode_crbit(cr, CR_FU));
947  break;
948  case ordered:
949  isel(rt, rb, ra, encode_crbit(cr, CR_FU));
950  break;
951  case overflow:
952  isel(rt, ra, rb, encode_crbit(cr, CR_SO));
953  break;
954  case nooverflow:
955  isel(rt, rb, ra, encode_crbit(cr, CR_SO));
956  break;
957  default:
958  UNIMPLEMENTED();
959  }
960  }
961 
962  void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
963  if (cond == al) {
964  b(L, lk);
965  return;
966  }
967 
968  if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) {
969  bc_short(cond, L, cr, lk);
970  return;
971  }
972 
973  Label skip;
974  Condition neg_cond = NegateCondition(cond);
975  bc_short(neg_cond, &skip, cr);
976  b(L, lk);
977  bind(&skip);
978  }
979 
980  void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
981  b(ne, L, cr, lk);
982  }
983  void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
984  b(eq, L, cr, lk);
985  }
986  void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
987  b(lt, L, cr, lk);
988  }
989  void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
990  b(ge, L, cr, lk);
991  }
992  void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
993  b(le, L, cr, lk);
994  }
995  void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
996  b(gt, L, cr, lk);
997  }
998  void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
999  b(unordered, L, cr, lk);
1000  }
1001  void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
1002  b(ordered, L, cr, lk);
1003  }
1004  void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
1005  b(overflow, L, cr, lk);
1006  }
1007  void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
1008  b(nooverflow, L, cr, lk);
1009  }
1010 
1011  // Decrement CTR; branch if CTR != 0
1012  void bdnz(Label* L, LKBit lk = LeaveLK) {
1013  bc(branch_offset(L), DCBNZ, 0, lk);
1014  }
1015 
1016  // Data-processing instructions
1017 
1018  void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1019  RCBit r = LeaveRC);
1020 
1021  void subc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1022  RCBit r = LeaveRC);
1023  void sube(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1024  RCBit r = LeaveRC);
1025 
1026  void subfic(Register dst, Register src, const Operand& imm);
1027 
1028  void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1029  RCBit r = LeaveRC);
1030 
1031  void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1032  RCBit r = LeaveRC);
1033  void adde(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1034  RCBit r = LeaveRC);
1035  void addze(Register dst, Register src1, OEBit o = LeaveOE, RCBit r = LeaveRC);
1036 
1037  void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1038  RCBit r = LeaveRC);
1039 
1040  void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
1041  void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
1042 
1043  void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1044  RCBit r = LeaveRC);
1045  void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1046  RCBit r = LeaveRC);
1047 
1048  void addi(Register dst, Register src, const Operand& imm);
1049  void addis(Register dst, Register src, const Operand& imm);
1050  void addic(Register dst, Register src, const Operand& imm);
1051 
1052  void andi(Register ra, Register rs, const Operand& imm);
1053  void andis(Register ra, Register rs, const Operand& imm);
1054  void ori(Register dst, Register src, const Operand& imm);
1055  void oris(Register dst, Register src, const Operand& imm);
1056  void xori(Register dst, Register src, const Operand& imm);
1057  void xoris(Register ra, Register rs, const Operand& imm);
1058  void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
1059  void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
1060  void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
1061  void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7);
1062  void li(Register dst, const Operand& src);
1063  void lis(Register dst, const Operand& imm);
1064  void mr(Register dst, Register src);
1065 
1066  void lbz(Register dst, const MemOperand& src);
1067  void lhz(Register dst, const MemOperand& src);
1068  void lha(Register dst, const MemOperand& src);
1069  void lwz(Register dst, const MemOperand& src);
1070  void lwzu(Register dst, const MemOperand& src);
1071  void lwa(Register dst, const MemOperand& src);
1072  void stb(Register dst, const MemOperand& src);
1073  void sth(Register dst, const MemOperand& src);
1074  void stw(Register dst, const MemOperand& src);
1075  void stwu(Register dst, const MemOperand& src);
1076  void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
1077 
1078 #if V8_TARGET_ARCH_PPC64
1079  void ld(Register rd, const MemOperand& src);
1080  void ldu(Register rd, const MemOperand& src);
1081  void std(Register rs, const MemOperand& src);
1082  void stdu(Register rs, const MemOperand& src);
1083  void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
1084  void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
1085  void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
1086  void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC);
1087  void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
1088  void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1089  void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1090  void clrrdi(Register dst, Register src, const Operand& val,
1091  RCBit rc = LeaveRC);
1092  void clrldi(Register dst, Register src, const Operand& val,
1093  RCBit rc = LeaveRC);
1094  void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1095  void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
1096  void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1097  void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1098  void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1099  RCBit r = LeaveRC);
1100  void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1101  RCBit r = LeaveRC);
1102  void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1103  RCBit r = LeaveRC);
1104 #endif
1105 
1106  void rlwinm(Register ra, Register rs, int sh, int mb, int me,
1107  RCBit rc = LeaveRC);
1108  void rlwimi(Register ra, Register rs, int sh, int mb, int me,
1109  RCBit rc = LeaveRC);
1110  void rlwnm(Register ra, Register rs, Register rb, int mb, int me,
1111  RCBit rc = LeaveRC);
1112  void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1113  void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1114  void clrrwi(Register dst, Register src, const Operand& val,
1115  RCBit rc = LeaveRC);
1116  void clrlwi(Register dst, Register src, const Operand& val,
1117  RCBit rc = LeaveRC);
1118  void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
1119  void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1120  void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1121 
1122  void subi(Register dst, Register src1, const Operand& src2);
1123 
1124  void mov(Register dst, const Operand& src);
1125  void bitwise_mov(Register dst, intptr_t value);
1126  void bitwise_mov32(Register dst, int32_t value);
1127  void bitwise_add32(Register dst, Register src, int32_t value);
1128 
1129  // Load the position of the label relative to the generated code object
1130  // pointer in a register.
1131  void mov_label_offset(Register dst, Label* label);
1132 
1133  // dst = base + label position + delta
1134  void add_label_offset(Register dst, Register base, Label* label,
1135  int delta = 0);
1136 
1137  // Load the address of the label in a register and associate with an
1138  // internal reference relocation.
1139  void mov_label_addr(Register dst, Label* label);
1140 
1141  // Emit the address of the label (i.e. a jump table entry) and associate with
1142  // an internal reference relocation.
1143  void emit_label_addr(Label* label);
1144 
1145  // Multiply instructions
1146  void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1147  RCBit r = LeaveRC);
1148 
1149  // Miscellaneous arithmetic instructions
1150 
1151  // Special register access
1152  void crxor(int bt, int ba, int bb);
1153  void crclr(int bt) { crxor(bt, bt, bt); }
1154  void creqv(int bt, int ba, int bb);
1155  void crset(int bt) { creqv(bt, bt, bt); }
1156  void mflr(Register dst);
1157  void mtlr(Register src);
1158  void mtctr(Register src);
1159  void mtxer(Register src);
1160  void mcrfs(CRegister cr, FPSCRBit bit);
1161  void mfcr(Register dst);
1162 #if V8_TARGET_ARCH_PPC64
1163  void mffprd(Register dst, DoubleRegister src);
1164  void mffprwz(Register dst, DoubleRegister src);
1165  void mtfprd(DoubleRegister dst, Register src);
1166  void mtfprwz(DoubleRegister dst, Register src);
1167  void mtfprwa(DoubleRegister dst, Register src);
1168 #endif
1169 
1170  void function_descriptor();
1171 
1172  // Exception-generating instructions and debugging support
1173  void stop(const char* msg, Condition cond = al,
1174  int32_t code = kDefaultStopCode, CRegister cr = cr7);
1175 
1176  void bkpt(uint32_t imm16); // v5 and above
1177 
1178  void dcbf(Register ra, Register rb);
1179  void sync();
1180  void lwsync();
1181  void icbi(Register ra, Register rb);
1182  void isync();
1183 
1184  // Support for floating point
1185  void lfd(const DoubleRegister frt, const MemOperand& src);
1186  void lfdu(const DoubleRegister frt, const MemOperand& src);
1187  void lfs(const DoubleRegister frt, const MemOperand& src);
1188  void lfsu(const DoubleRegister frt, const MemOperand& src);
1189  void stfd(const DoubleRegister frs, const MemOperand& src);
1190  void stfdu(const DoubleRegister frs, const MemOperand& src);
1191  void stfs(const DoubleRegister frs, const MemOperand& src);
1192  void stfsu(const DoubleRegister frs, const MemOperand& src);
1193 
1194  void fadd(const DoubleRegister frt, const DoubleRegister fra,
1195  const DoubleRegister frb, RCBit rc = LeaveRC);
1196  void fsub(const DoubleRegister frt, const DoubleRegister fra,
1197  const DoubleRegister frb, RCBit rc = LeaveRC);
1198  void fdiv(const DoubleRegister frt, const DoubleRegister fra,
1199  const DoubleRegister frb, RCBit rc = LeaveRC);
1200  void fmul(const DoubleRegister frt, const DoubleRegister fra,
1201  const DoubleRegister frc, RCBit rc = LeaveRC);
1202  void fcmpu(const DoubleRegister fra, const DoubleRegister frb,
1203  CRegister cr = cr7);
1204  void fmr(const DoubleRegister frt, const DoubleRegister frb,
1205  RCBit rc = LeaveRC);
1206  void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
1207  void fctiw(const DoubleRegister frt, const DoubleRegister frb);
1208  void frin(const DoubleRegister frt, const DoubleRegister frb,
1209  RCBit rc = LeaveRC);
1210  void friz(const DoubleRegister frt, const DoubleRegister frb,
1211  RCBit rc = LeaveRC);
1212  void frip(const DoubleRegister frt, const DoubleRegister frb,
1213  RCBit rc = LeaveRC);
1214  void frim(const DoubleRegister frt, const DoubleRegister frb,
1215  RCBit rc = LeaveRC);
1216  void frsp(const DoubleRegister frt, const DoubleRegister frb,
1217  RCBit rc = LeaveRC);
1218  void fcfid(const DoubleRegister frt, const DoubleRegister frb,
1219  RCBit rc = LeaveRC);
1220  void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
1221  RCBit rc = LeaveRC);
1222  void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
1223  RCBit rc = LeaveRC);
1224  void fcfids(const DoubleRegister frt, const DoubleRegister frb,
1225  RCBit rc = LeaveRC);
1226  void fctid(const DoubleRegister frt, const DoubleRegister frb,
1227  RCBit rc = LeaveRC);
1228  void fctidz(const DoubleRegister frt, const DoubleRegister frb,
1229  RCBit rc = LeaveRC);
1230  void fctidu(const DoubleRegister frt, const DoubleRegister frb,
1231  RCBit rc = LeaveRC);
1232  void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
1233  RCBit rc = LeaveRC);
1234  void fsel(const DoubleRegister frt, const DoubleRegister fra,
1235  const DoubleRegister frc, const DoubleRegister frb,
1236  RCBit rc = LeaveRC);
1237  void fneg(const DoubleRegister frt, const DoubleRegister frb,
1238  RCBit rc = LeaveRC);
1239  void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
1240  void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
1241  void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
1242  void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
1243  void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
1244  RCBit rc = LeaveRC);
1245  void fsqrt(const DoubleRegister frt, const DoubleRegister frb,
1246  RCBit rc = LeaveRC);
1247  void fabs(const DoubleRegister frt, const DoubleRegister frb,
1248  RCBit rc = LeaveRC);
1249  void fmadd(const DoubleRegister frt, const DoubleRegister fra,
1250  const DoubleRegister frc, const DoubleRegister frb,
1251  RCBit rc = LeaveRC);
1252  void fmsub(const DoubleRegister frt, const DoubleRegister fra,
1253  const DoubleRegister frc, const DoubleRegister frb,
1254  RCBit rc = LeaveRC);
1255 
1256  // Pseudo instructions
1257 
1258  // Different nop operations are used by the code generator to detect certain
1259  // states of the generated code.
1260  enum NopMarkerTypes {
1261  NON_MARKING_NOP = 0,
1262  GROUP_ENDING_NOP,
1263  DEBUG_BREAK_NOP,
1264  // IC markers.
1265  PROPERTY_ACCESS_INLINED,
1266  PROPERTY_ACCESS_INLINED_CONTEXT,
1267  PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
1268  // Helper values.
1269  LAST_CODE_MARKER,
1270  FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
1271  };
1272 
1273  void nop(int type = 0); // 0 is the default non-marking type.
1274 
1275  void push(Register src) {
1276 #if V8_TARGET_ARCH_PPC64
1277  stdu(src, MemOperand(sp, -kPointerSize));
1278 #else
1279  stwu(src, MemOperand(sp, -kPointerSize));
1280 #endif
1281  }
1282 
1283  void pop(Register dst) {
1284 #if V8_TARGET_ARCH_PPC64
1285  ld(dst, MemOperand(sp));
1286 #else
1287  lwz(dst, MemOperand(sp));
1288 #endif
1289  addi(sp, sp, Operand(kPointerSize));
1290  }
1291 
1292  void pop() { addi(sp, sp, Operand(kPointerSize)); }
1293 
1294  // Jump unconditionally to given label.
1295  void jmp(Label* L) { b(L); }
1296 
1297  // Check the code size generated from label to here.
1298  int SizeOfCodeGeneratedSince(Label* label) {
1299  return pc_offset() - label->pos();
1300  }
1301 
1302  // Check the number of instructions generated from label to here.
1303  int InstructionsGeneratedSince(Label* label) {
1304  return SizeOfCodeGeneratedSince(label) / kInstrSize;
1305  }
1306 
1307  // Class for scoping postponing the trampoline pool generation.
1308  class BlockTrampolinePoolScope {
1309  public:
1310  explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
1311  assem_->StartBlockTrampolinePool();
1312  }
1313  ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
1314 
1315  private:
1316  Assembler* assem_;
1317 
1318  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
1319  };
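// Usage sketch (illustrative only): keep a fixed-length sequence contiguous by
// blocking trampoline pool emission while it is being generated;
// target_address is a stand-in for a real call target.
//
//   {
//     Assembler::BlockTrampolinePoolScope block_trampoline_pool(&masm);
//     masm.mov(ip, Operand(target_address));   // FIXED_SEQUENCE mov
//     masm.mtctr(ip);
//     masm.bctrl();                            // call through CTR
//   }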
1320 
1321  // Class for scoping disabling constant pool entry merging
1322  class BlockConstantPoolEntrySharingScope {
1323  public:
1324  explicit BlockConstantPoolEntrySharingScope(Assembler* assem)
1325  : assem_(assem) {
1326  assem_->StartBlockConstantPoolEntrySharing();
1327  }
1328  ~BlockConstantPoolEntrySharingScope() {
1329  assem_->EndBlockConstantPoolEntrySharing();
1330  }
1331 
1332  private:
1333  Assembler* assem_;
1334 
1335  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
1336  };
1337 
1338  // Record a comment relocation entry that can be used by a disassembler.
1339  // Use --code-comments to enable.
1340  void RecordComment(const char* msg);
1341 
1342  // Record a deoptimization reason that can be used by a log or cpu profiler.
1343  // Use --trace-deopt to enable.
1344  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1345  int id);
1346 
1347  // Writes a single byte or word of data in the code stream. Used
1348  // for inline tables, e.g., jump-tables.
1349  void db(uint8_t data);
1350  void dd(uint32_t data);
1351  void dq(uint64_t data);
1352  void dp(uintptr_t data);
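// Usage sketch (illustrative only): emitting an inline jump table of label
// addresses (case_0 and case_1 are stand-in labels); emit_label_addr is
// declared earlier in this class.
//
//   masm.emit_label_addr(&case_0);
//   masm.emit_label_addr(&case_1);
//
// Raw constants can be interleaved the same way, e.g. masm.dd(0x11223344).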
1353 
1354  // Read/patch instructions
1355  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
1356  void instr_at_put(int pos, Instr instr) {
1357  *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
1358  }
1359  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
1360  static void instr_at_put(Address pc, Instr instr) {
1361  *reinterpret_cast<Instr*>(pc) = instr;
1362  }
1363  static Condition GetCondition(Instr instr);
1364 
1365  static bool IsLis(Instr instr);
1366  static bool IsLi(Instr instr);
1367  static bool IsAddic(Instr instr);
1368  static bool IsOri(Instr instr);
1369 
1370  static bool IsBranch(Instr instr);
1371  static Register GetRA(Instr instr);
1372  static Register GetRB(Instr instr);
1373 #if V8_TARGET_ARCH_PPC64
1374  static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
1375  Instr instr4, Instr instr5);
1376 #else
1377  static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2);
1378 #endif
1379 
1380  static bool IsCmpRegister(Instr instr);
1381  static bool IsCmpImmediate(Instr instr);
1382  static bool IsRlwinm(Instr instr);
1383  static bool IsAndi(Instr instr);
1384 #if V8_TARGET_ARCH_PPC64
1385  static bool IsRldicl(Instr instr);
1386 #endif
1387  static bool IsCrSet(Instr instr);
1388  static Register GetCmpImmediateRegister(Instr instr);
1389  static int GetCmpImmediateRawImmediate(Instr instr);
1390  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
1391 
1392  // Postpone the generation of the trampoline pool for the specified number of
1393  // instructions.
1394  void BlockTrampolinePoolFor(int instructions);
1395  void CheckTrampolinePool();
1396 
1397  // For mov. Return the number of actual instructions required to
1398  // load the operand into a register. This can be anywhere from
1399  // one (constant pool small section) to five instructions (full
1400  // 64-bit sequence).
1401  //
1402  // The value returned is only valid as long as no entries are added to the
1403  // constant pool between this call and the actual instruction being emitted.
1404  int instructions_required_for_mov(Register dst, const Operand& src) const;
1405 
1406  // Decide between using the constant pool vs. a mov immediate sequence.
1407  bool use_constant_pool_for_mov(Register dst, const Operand& src,
1408  bool canOptimize) const;
1409 
1410  // The code currently calls CheckBuffer() too often. This has the side
1411  // effect of randomly growing the buffer in the middle of multi-instruction
1412  // sequences.
1413  //
1414  // This function allows outside callers to check and grow the buffer
1415  void EnsureSpaceFor(int space_needed);
1416 
1417  int EmitConstantPool() { return constant_pool_builder_.Emit(this); }
1418 
1419  bool ConstantPoolAccessIsInOverflow() const {
1420  return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
1421  ConstantPoolEntry::OVERFLOWED;
1422  }
1423 
1424  Label* ConstantPoolPosition() {
1425  return constant_pool_builder_.EmittedPosition();
1426  }
1427 
1428  void EmitRelocations();
1429 
1430  protected:
1431  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
1432 
1433  // Decode instruction(s) at pos and return backchain to previous
1434  // label reference or kEndOfChain.
1435  int target_at(int pos);
1436 
1437  // Patch instruction(s) at pos to target target_pos (e.g. branch)
1438  void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);
1439 
1440  // Record reloc info for current pc_
1441  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1442  ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
1443  intptr_t value) {
1444  bool sharing_ok =
1445  RelocInfo::IsNone(rmode) ||
1446  (!options().record_reloc_info_for_serialization &&
1447  RelocInfo::IsShareableRelocMode(rmode) &&
1448  !is_constant_pool_entry_sharing_blocked() &&
1449  // TODO(johnyan): make the following rmode shareable
1450  !RelocInfo::IsWasmCall(rmode) && !RelocInfo::IsWasmStubCall(rmode));
1451  return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
1452  }
1453  ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
1454  return constant_pool_builder_.AddEntry(pc_offset(), value);
1455  }
1456 
1457  // Block the emission of the trampoline pool before pc_offset.
1458  void BlockTrampolinePoolBefore(int pc_offset) {
1459  if (no_trampoline_pool_before_ < pc_offset)
1460  no_trampoline_pool_before_ = pc_offset;
1461  }
1462 
1463  void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
1464  void EndBlockTrampolinePool() {
1465  int count = --trampoline_pool_blocked_nesting_;
1466  if (count == 0) CheckTrampolinePoolQuick();
1467  }
1468  bool is_trampoline_pool_blocked() const {
1469  return trampoline_pool_blocked_nesting_ > 0;
1470  }
1471 
1472  void StartBlockConstantPoolEntrySharing() {
1473  constant_pool_entry_sharing_blocked_nesting_++;
1474  }
1475  void EndBlockConstantPoolEntrySharing() {
1476  constant_pool_entry_sharing_blocked_nesting_--;
1477  }
1478  bool is_constant_pool_entry_sharing_blocked() const {
1479  return constant_pool_entry_sharing_blocked_nesting_ > 0;
1480  }
1481 
1482  bool has_exception() const { return internal_trampoline_exception_; }
1483 
1484  bool is_trampoline_emitted() const { return trampoline_emitted_; }
1485 
1486  // Code generation
1487  // The relocation writer's position is at least kGap bytes below the end of
1488  // the generated instructions. This is so that multi-instruction sequences do
1489  // not have to check for overflow. The same is true for writes of large
1490  // relocation info entries.
1491  static constexpr int kGap = 32;
1492 
1493  RelocInfoWriter reloc_info_writer;
1494 
1495  private:
1496  // Avoid overflows for displacements etc.
1497  static const int kMaximalBufferSize = 512 * MB;
1498 
1499  // Repeated checking whether the trampoline pool should be emitted is rather
1500  // expensive. By default we only check again once a number of instructions
1501  // has been generated.
1502  int next_trampoline_check_; // pc offset of next buffer check.
1503 
1504  // Emission of the trampoline pool may be blocked in some code sequences.
1505  int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
1506  int no_trampoline_pool_before_; // Block emission before this pc offset.
1507 
1508  // Do not share constant pool entries.
1509  int constant_pool_entry_sharing_blocked_nesting_;
1510 
1511  // Relocation info generation
1512  // Each relocation is encoded as a variable size value
1513  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1514  std::vector<DeferredRelocInfo> relocations_;
1515 
1516  // The bound position, before this we cannot do instruction elimination.
1517  int last_bound_pos_;
1518  // Optimizable cmpi information.
1519  int optimizable_cmpi_pos_;
1520  CRegister cmpi_cr_ = CRegister::no_reg();
1521 
1522  ConstantPoolBuilder constant_pool_builder_;
1523 
1524  void CheckBuffer() {
1525  if (buffer_space() <= kGap) {
1526  GrowBuffer();
1527  }
1528  }
1529 
1530  void GrowBuffer(int needed = 0);
1531  // Code emission
1532  void emit(Instr x) {
1533  CheckBuffer();
1534  *reinterpret_cast<Instr*>(pc_) = x;
1535  pc_ += kInstrSize;
1536  CheckTrampolinePoolQuick();
1537  }
1538  void TrackBranch() {
1539  DCHECK(!trampoline_emitted_);
1540  int count = tracked_branch_count_++;
1541  if (count == 0) {
1542  // We leave space (kMaxBlockTrampolineSectionSize)
1543  // for BlockTrampolinePoolScope buffer.
1544  next_trampoline_check_ =
1545  pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
1546  } else {
1547  next_trampoline_check_ -= kTrampolineSlotsSize;
1548  }
1549  }
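// With the constants defined below (kMaxCondBranchReach == 32767,
// kMaxBlockTrampolineSectionSize == 64 * kInstrSize == 256), the first tracked
// branch schedules the next trampoline check 32511 bytes ahead of the current
// pc_offset(), and every additional tracked branch pulls that check point in
// by one trampoline slot (kTrampolineSlotsSize).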
1550 
1551  inline void UntrackBranch();
1552  void CheckTrampolinePoolQuick() {
1553  if (pc_offset() >= next_trampoline_check_) {
1554  CheckTrampolinePool();
1555  }
1556  }
1557 
1558  // Instruction generation
1559  void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
1560  DoubleRegister frb, RCBit r);
1561  void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
1562  bool signed_disp);
1563  void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
1564  RCBit r);
1565  void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
1566  RCBit r);
1567  void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
1568  RCBit r);
1569 
1570  // Labels
1571  void print(Label* L);
1572  int max_reach_from(int pos);
1573  void bind_to(Label* L, int pos);
1574  void next(Label* L);
1575 
1576  class Trampoline {
1577  public:
1578  Trampoline() {
1579  next_slot_ = 0;
1580  free_slot_count_ = 0;
1581  }
1582  Trampoline(int start, int slot_count) {
1583  next_slot_ = start;
1584  free_slot_count_ = slot_count;
1585  }
1586  int take_slot() {
1587  int trampoline_slot = kInvalidSlotPos;
1588  if (free_slot_count_ <= 0) {
1589  // We have run out of space on trampolines.
1590  // Make sure we fail in debug mode, so we become aware of each case
1591  // when this happens.
1592  DCHECK(0);
1593  // Internal exception will be caught.
1594  } else {
1595  trampoline_slot = next_slot_;
1596  free_slot_count_--;
1597  next_slot_ += kTrampolineSlotsSize;
1598  }
1599  return trampoline_slot;
1600  }
1601 
1602  private:
1603  int next_slot_;
1604  int free_slot_count_;
1605  };
1606 
1607  int32_t get_trampoline_entry();
1608  int tracked_branch_count_;
1609  // If the trampoline is emitted, the generated code is becoming large. As
1610  // this is already a slow case that can break our code generation in the
1611  // extreme case, we use this information to switch to a different mode of
1612  // branch instruction generation, in which we no longer rely on a single
1613  // branch instruction.
1614  bool trampoline_emitted_;
1615  static constexpr int kTrampolineSlotsSize = kInstrSize;
1616  static constexpr int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
1617  static constexpr int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
1618  static constexpr int kInvalidSlotPos = -1;
1619 
1620  Trampoline trampoline_;
1621  bool internal_trampoline_exception_;
1622 
1623  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
1624 
1625  friend class RegExpMacroAssemblerPPC;
1626  friend class RelocInfo;
1627  friend class BlockTrampolinePoolScope;
1628  friend class EnsureSpace;
1629 };
1630 
1631 class EnsureSpace {
1632  public:
1633  explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
1634 };
1635 
1636 class PatchingAssembler : public Assembler {
1637  public:
1638  PatchingAssembler(const AssemblerOptions& options, byte* address,
1639  int instructions);
1640  ~PatchingAssembler();
1641 };
1642 
1643 // Define {RegisterName} methods for the register types.
1644 DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS);
1645 DEFINE_REGISTER_NAMES(DoubleRegister, DOUBLE_REGISTERS);
1646 
1647 
1648 } // namespace internal
1649 } // namespace v8
1650 
1651 #endif // V8_PPC_ASSEMBLER_PPC_H_