V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
assembler-ppc.cc
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36 
37 #include "src/ppc/assembler-ppc.h"
38 
39 #if V8_TARGET_ARCH_PPC
40 
41 #include "src/base/bits.h"
42 #include "src/base/cpu.h"
43 #include "src/code-stubs.h"
44 #include "src/deoptimizer.h"
45 #include "src/macro-assembler.h"
46 #include "src/ppc/assembler-ppc-inl.h"
47 #include "src/string-constants.h"
48 
49 namespace v8 {
50 namespace internal {
51 
52 // Get the CPU features enabled by the build.
53 static unsigned CpuFeaturesImpliedByCompiler() {
54  unsigned answer = 0;
55  return answer;
56 }
57 
58 
59 void CpuFeatures::ProbeImpl(bool cross_compile) {
60  supported_ |= CpuFeaturesImpliedByCompiler();
61  icache_line_size_ = 128;
62 
63  // Only use statically determined features for cross compile (snapshot).
64  if (cross_compile) return;
65 
66 // Detect whether the frim instruction is supported (POWER5+).
67 // For now we just check for processors that we know do not
68 // support it.
69 #ifndef USE_SIMULATOR
70  // Probe for additional features at runtime.
71  base::CPU cpu;
72  if (cpu.part() == base::CPU::PPC_POWER9) {
73  supported_ |= (1u << MODULO);
74  }
75 #if V8_TARGET_ARCH_PPC64
76  if (cpu.part() == base::CPU::PPC_POWER8) {
77  supported_ |= (1u << FPR_GPR_MOV);
78  }
79 #endif
80  if (cpu.part() == base::CPU::PPC_POWER6 ||
81  cpu.part() == base::CPU::PPC_POWER7 ||
82  cpu.part() == base::CPU::PPC_POWER8) {
83  supported_ |= (1u << LWSYNC);
84  }
85  if (cpu.part() == base::CPU::PPC_POWER7 ||
86  cpu.part() == base::CPU::PPC_POWER8) {
87  supported_ |= (1u << ISELECT);
88  supported_ |= (1u << VSX);
89  }
90 #if V8_OS_LINUX
91  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
92  // Assume support
93  supported_ |= (1u << FPU);
94  }
95  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
96  icache_line_size_ = cpu.icache_line_size();
97  }
98 #elif V8_OS_AIX
99  // Assume FP support and default cache line size
100  supported_ |= (1u << FPU);
101 #endif
102 #else // Simulator
103  supported_ |= (1u << FPU);
104  supported_ |= (1u << LWSYNC);
105  supported_ |= (1u << ISELECT);
106  supported_ |= (1u << VSX);
107  supported_ |= (1u << MODULO);
108 #if V8_TARGET_ARCH_PPC64
109  supported_ |= (1u << FPR_GPR_MOV);
110 #endif
111 #endif
112 }
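
// A usage sketch: generated code can guard optional instructions on the
// feature bits probed above. CpuFeatureScope is assumed to behave as in
// the other V8 back ends.
//
//   if (CpuFeatures::IsSupported(ISELECT)) {
//     CpuFeatureScope scope(&masm, ISELECT);
//     masm.isel(r3, r4, r5, 0);  // single-instruction conditional select
//   } else {
//     // fall back to a compare-and-branch sequence
//   }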
113 
114 
115 void CpuFeatures::PrintTarget() {
116  const char* ppc_arch = nullptr;
117 
118 #if V8_TARGET_ARCH_PPC64
119  ppc_arch = "ppc64";
120 #else
121  ppc_arch = "ppc";
122 #endif
123 
124  printf("target %s\n", ppc_arch);
125 }
126 
127 
128 void CpuFeatures::PrintFeatures() {
129  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
130 }
131 
132 
133 Register ToRegister(int num) {
134  DCHECK(num >= 0 && num < kNumRegisters);
135  const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
136  r8, r9, r10, r11, ip, r13, r14, r15,
137  r16, r17, r18, r19, r20, r21, r22, r23,
138  r24, r25, r26, r27, r28, r29, r30, fp};
139  return kRegisters[num];
140 }
141 
142 
143 // -----------------------------------------------------------------------------
144 // Implementation of RelocInfo
145 
146 const int RelocInfo::kApplyMask =
147  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
148  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
149 
150 bool RelocInfo::IsCodedSpecially() {
151  // The deserializer needs to know whether a pointer is specially
152  // coded. Being specially coded on PPC means that it is a lis/ori
153  // instruction sequence or a constant pool entry, which is always
154  // the case inside code objects.
155  return true;
156 }
157 
158 
159 bool RelocInfo::IsInConstantPool() {
160  if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
161  return Assembler::IsConstantPoolLoadStart(pc_);
162  }
163  return false;
164 }
165 
166 int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
167  DCHECK(IsRuntimeEntry(rmode_));
168  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
169 }
170 
171 uint32_t RelocInfo::wasm_call_tag() const {
172  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
173  return static_cast<uint32_t>(
174  Assembler::target_address_at(pc_, constant_pool_));
175 }
176 
177 // -----------------------------------------------------------------------------
178 // Implementation of Operand and MemOperand
179 // See assembler-ppc-inl.h for inlined constructors
180 
181 Operand::Operand(Handle<HeapObject> handle) {
182  rm_ = no_reg;
183  value_.immediate = static_cast<intptr_t>(handle.address());
184  rmode_ = RelocInfo::EMBEDDED_OBJECT;
185 }
186 
187 Operand Operand::EmbeddedNumber(double value) {
188  int32_t smi;
189  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
190  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
191  result.is_heap_object_request_ = true;
192  result.value_.heap_object_request = HeapObjectRequest(value);
193  return result;
194 }
195 
196 Operand Operand::EmbeddedCode(CodeStub* stub) {
197  Operand result(0, RelocInfo::CODE_TARGET);
198  result.is_heap_object_request_ = true;
199  result.value_.heap_object_request = HeapObjectRequest(stub);
200  return result;
201 }
202 
203 Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
204  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
205  result.is_heap_object_request_ = true;
206  result.value_.heap_object_request = HeapObjectRequest(str);
207  return result;
208 }
209 
210 MemOperand::MemOperand(Register rn, int32_t offset)
211  : ra_(rn), offset_(offset), rb_(no_reg) {}
212 
213 MemOperand::MemOperand(Register ra, Register rb)
214  : ra_(ra), offset_(0), rb_(rb) {}
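
// A brief illustration, assuming r3 holds a base pointer and r4 an index:
//
//   MemOperand(r3, 8);   // D-form: base register plus signed 16-bit offset
//   MemOperand(r3, r4);  // X-form: base register plus index register
//
// D-form operands feed instructions such as lwz/std; X-form operands are
// consumed by the indexed variants (lwzx, stdx, ...).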
215 
216 void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
217  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
218  for (auto& request : heap_object_requests_) {
219  Handle<HeapObject> object;
220  switch (request.kind()) {
221  case HeapObjectRequest::kHeapNumber: {
222  object =
223  isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
224  break;
225  }
226  case HeapObjectRequest::kCodeStub: {
227  request.code_stub()->set_isolate(isolate);
228  object = request.code_stub()->GetCode();
229  break;
230  }
231  case HeapObjectRequest::kStringConstant: {
232  const StringConstantBase* str = request.string();
233  CHECK_NOT_NULL(str);
234  object = str->AllocateStringConstant(isolate);
235  break;
236  }
237  }
238  Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
239  Address constant_pool = kNullAddress;
240  set_target_address_at(pc, constant_pool, object.address(),
241  SKIP_ICACHE_FLUSH);
242  }
243 }
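
// How the pieces fit together, as a sketch: Operand::EmbeddedNumber(3.5)
// above records a HeapObjectRequest, and the mov() that consumes it emits
// a placeholder load; when GetCode() runs, the loop above allocates the
// actual HeapNumber and set_target_address_at() patches the placeholder
// with its real address.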
244 
245 // -----------------------------------------------------------------------------
246 // Specific instructions, constants, and masks.
247 
248 Assembler::Assembler(const AssemblerOptions& options, void* buffer,
249  int buffer_size)
250  : AssemblerBase(options, buffer, buffer_size),
251  constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
252  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
253 
254  no_trampoline_pool_before_ = 0;
255  trampoline_pool_blocked_nesting_ = 0;
256  constant_pool_entry_sharing_blocked_nesting_ = 0;
257  next_trampoline_check_ = kMaxInt;
258  internal_trampoline_exception_ = false;
259  last_bound_pos_ = 0;
260  optimizable_cmpi_pos_ = -1;
261  trampoline_emitted_ = FLAG_force_long_branches;
262  tracked_branch_count_ = 0;
263  relocations_.reserve(128);
264 }
265 
266 void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
267  // Emit constant pool if necessary.
268  int constant_pool_offset = EmitConstantPool();
269 
270  EmitRelocations();
271  AllocateAndInstallRequestedHeapObjects(isolate);
272 
273  // Set up code descriptor.
274  desc->buffer = buffer_;
275  desc->buffer_size = buffer_size_;
276  desc->instr_size = pc_offset();
277  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
278  desc->constant_pool_size =
279  (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
280  desc->origin = this;
281  desc->unwinding_info_size = 0;
282  desc->unwinding_info = nullptr;
283 }
284 
285 
286 void Assembler::Align(int m) {
287  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
288  DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
289  while ((pc_offset() & (m - 1)) != 0) {
290  nop();
291  }
292 }
293 
294 
295 void Assembler::CodeTargetAlign() { Align(8); }
296 
297 
298 Condition Assembler::GetCondition(Instr instr) {
299  switch (instr & kCondMask) {
300  case BT:
301  return eq;
302  case BF:
303  return ne;
304  default:
305  UNIMPLEMENTED();
306  }
307  return al;
308 }
309 
310 
311 bool Assembler::IsLis(Instr instr) {
312  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
313 }
314 
315 
316 bool Assembler::IsLi(Instr instr) {
317  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
318 }
319 
320 
321 bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
322 
323 
324 bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
325 
326 
327 bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
328 
329 
330 Register Assembler::GetRA(Instr instr) {
331  return Register::from_code(Instruction::RAValue(instr));
332 }
333 
334 
335 Register Assembler::GetRB(Instr instr) {
336  return Register::from_code(Instruction::RBValue(instr));
337 }
338 
339 
340 #if V8_TARGET_ARCH_PPC64
341 // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
342 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
343  Instr instr4, Instr instr5) {
344  // Check the instructions are indeed a five part load (into r12)
345  // 3d800000 lis r12, 0
346  // 618c0000 ori r12, r12, 0
347  // 798c07c6 rldicr r12, r12, 32, 31
348  // 658c00c3 oris r12, r12, 195
349  // 618ccd40 ori r12, r12, 52544
350  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
351  (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
352  ((instr5 >> 16) == 0x618C));
353 }
354 #else
355 // This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
356 bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
357  // Check the instruction is indeed a two part load (into r12)
358  // 3d802553 lis r12, 9555
359  // 618c5000 ori r12, r12, 20480
360  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
361 }
362 #endif
363 
364 
365 bool Assembler::IsCmpRegister(Instr instr) {
366  return (((instr & kOpcodeMask) == EXT2) &&
367  ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
368 }
369 
370 
371 bool Assembler::IsRlwinm(Instr instr) {
372  return ((instr & kOpcodeMask) == RLWINMX);
373 }
374 
375 
376 bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
377 
378 
379 #if V8_TARGET_ARCH_PPC64
380 bool Assembler::IsRldicl(Instr instr) {
381  return (((instr & kOpcodeMask) == EXT5) &&
382  ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
383 }
384 #endif
385 
386 
387 bool Assembler::IsCmpImmediate(Instr instr) {
388  return ((instr & kOpcodeMask) == CMPI);
389 }
390 
391 
392 bool Assembler::IsCrSet(Instr instr) {
393  return (((instr & kOpcodeMask) == EXT1) &&
394  ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
395 }
396 
397 
398 Register Assembler::GetCmpImmediateRegister(Instr instr) {
399  DCHECK(IsCmpImmediate(instr));
400  return GetRA(instr);
401 }
402 
403 
404 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
405  DCHECK(IsCmpImmediate(instr));
406  return instr & kOff16Mask;
407 }
408 
409 
410 // Labels refer to positions in the (to be) generated code.
411 // There are bound, linked, and unused labels.
412 //
413 // Bound labels refer to known positions in the already
414 // generated code. pos() is the position the label refers to.
415 //
416 // Linked labels refer to unknown positions in the code
417 // to be generated; pos() is the position of the last
418 // instruction using the label.
419 
420 
421 // The link chain is terminated by a negative code position (must be aligned)
422 const int kEndOfChain = -4;
423 
424 
425 // Dummy opcodes for unbound label mov instructions or jump table entries.
426 enum {
427  kUnboundMovLabelOffsetOpcode = 0 << 26,
428  kUnboundAddLabelOffsetOpcode = 1 << 26,
429  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
430  kUnboundMovLabelAddrOpcode = 3 << 26,
431  kUnboundJumpTableEntryOpcode = 4 << 26
432 };
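
// For these dummy opcodes, the low 26 bits carry a link to the previous
// reference of the same unbound label, stored as a signed delta in
// instruction words. A sketch of what mov_label_offset() below emits:
//
//   int link = (position - pc_offset()) >> 2;  // delta in instruction words
//   emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
//
// target_at() reverses this: it sign-extends the 26-bit field and shifts
// it left by 2 to recover the byte offset.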
433 
434 int Assembler::target_at(int pos) {
435  Instr instr = instr_at(pos);
436  // Check which type of branch this is: 16- or 26-bit offset.
437  uint32_t opcode = instr & kOpcodeMask;
438  int link;
439  switch (opcode) {
440  case BX:
441  link = SIGN_EXT_IMM26(instr & kImm26Mask);
442  link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
443  break;
444  case BCX:
445  link = SIGN_EXT_IMM16((instr & kImm16Mask));
446  link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
447  break;
448  case kUnboundMovLabelOffsetOpcode:
449  case kUnboundAddLabelOffsetOpcode:
450  case kUnboundAddLabelLongOffsetOpcode:
451  case kUnboundMovLabelAddrOpcode:
452  case kUnboundJumpTableEntryOpcode:
453  link = SIGN_EXT_IMM26(instr & kImm26Mask);
454  link <<= 2;
455  break;
456  default:
457  DCHECK(false);
458  return -1;
459  }
460 
461  if (link == 0) return kEndOfChain;
462  return pos + link;
463 }
464 
465 
466 void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
467  Instr instr = instr_at(pos);
468  uint32_t opcode = instr & kOpcodeMask;
469 
470  if (is_branch != nullptr) {
471  *is_branch = (opcode == BX || opcode == BCX);
472  }
473 
474  switch (opcode) {
475  case BX: {
476  int imm26 = target_pos - pos;
477  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
478  if (imm26 == kInstrSize && !(instr & kLKMask)) {
479  // Branch to next instr without link.
480  instr = ORI; // nop: ori r0, r0, 0
481  } else {
482  instr &= ((~kImm26Mask) | kAAMask | kLKMask);
483  instr |= (imm26 & kImm26Mask);
484  }
485  instr_at_put(pos, instr);
486  break;
487  }
488  case BCX: {
489  int imm16 = target_pos - pos;
490  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
491  if (imm16 == kInstrSize && !(instr & kLKMask)) {
492  // Branch to next instr without link.
493  instr = ORI; // nop: ori r0, r0, 0
494  } else {
495  instr &= ((~kImm16Mask) | kAAMask | kLKMask);
496  instr |= (imm16 & kImm16Mask);
497  }
498  instr_at_put(pos, instr);
499  break;
500  }
501  case kUnboundMovLabelOffsetOpcode: {
502  // Load, into a register, the position of the label relative to the
503  // generated code object pointer.
504  Register dst = Register::from_code(instr_at(pos + kInstrSize));
505  int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
506  PatchingAssembler patcher(options(),
507  reinterpret_cast<byte*>(buffer_ + pos), 2);
508  patcher.bitwise_mov32(dst, offset);
509  break;
510  }
511  case kUnboundAddLabelLongOffsetOpcode:
512  case kUnboundAddLabelOffsetOpcode: {
513  // dst = base + position + immediate
514  Instr operands = instr_at(pos + kInstrSize);
515  Register dst = Register::from_code((operands >> 27) & 0x1F);
516  Register base = Register::from_code((operands >> 22) & 0x1F);
517  int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
518  ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
519  : (SIGN_EXT_IMM22(operands & kImm22Mask));
520  int32_t offset = target_pos + delta;
521  PatchingAssembler patcher(
522  options(), reinterpret_cast<byte*>(buffer_ + pos),
523  2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
524  patcher.bitwise_add32(dst, base, offset);
525  if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
526  break;
527  }
528  case kUnboundMovLabelAddrOpcode: {
529  // Load the address of the label in a register.
530  Register dst = Register::from_code(instr_at(pos + kInstrSize));
531  PatchingAssembler patcher(options(),
532  reinterpret_cast<byte*>(buffer_ + pos),
533  kMovInstructionsNoConstantPool);
534  // Keep internal references relative until EmitRelocations.
535  patcher.bitwise_mov(dst, target_pos);
536  break;
537  }
538  case kUnboundJumpTableEntryOpcode: {
539  PatchingAssembler patcher(options(),
540  reinterpret_cast<byte*>(buffer_ + pos),
541  kPointerSize / kInstrSize);
542  // Keep internal references relative until EmitRelocations.
543  patcher.dp(target_pos);
544  break;
545  }
546  default:
547  DCHECK(false);
548  break;
549  }
550 }
551 
552 
553 int Assembler::max_reach_from(int pos) {
554  Instr instr = instr_at(pos);
555  uint32_t opcode = instr & kOpcodeMask;
556 
557  // Check which type of branch this is: 16- or 26-bit offset.
558  switch (opcode) {
559  case BX:
560  return 26;
561  case BCX:
562  return 16;
563  case kUnboundMovLabelOffsetOpcode:
564  case kUnboundAddLabelOffsetOpcode:
565  case kUnboundMovLabelAddrOpcode:
566  case kUnboundJumpTableEntryOpcode:
567  return 0; // no limit on reach
568  }
569 
570  DCHECK(false);
571  return 0;
572 }
573 
574 
575 void Assembler::bind_to(Label* L, int pos) {
576  DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
577  int32_t trampoline_pos = kInvalidSlotPos;
578  bool is_branch = false;
579  while (L->is_linked()) {
580  int fixup_pos = L->pos();
581  int32_t offset = pos - fixup_pos;
582  int maxReach = max_reach_from(fixup_pos);
583  next(L); // call next before overwriting link with target at fixup_pos
584  if (maxReach && !is_intn(offset, maxReach)) {
585  if (trampoline_pos == kInvalidSlotPos) {
586  trampoline_pos = get_trampoline_entry();
587  CHECK_NE(trampoline_pos, kInvalidSlotPos);
588  target_at_put(trampoline_pos, pos);
589  }
590  target_at_put(fixup_pos, trampoline_pos);
591  } else {
592  target_at_put(fixup_pos, pos, &is_branch);
593  }
594  }
595  L->bind_to(pos);
596 
597  if (!trampoline_emitted_ && is_branch) {
598  UntrackBranch();
599  }
600 
601  // Keep track of the last bound label so we don't eliminate any instructions
602  // before a bound label.
603  if (pos > last_bound_pos_) last_bound_pos_ = pos;
604 }
605 
606 
607 void Assembler::bind(Label* L) {
608  DCHECK(!L->is_bound()); // label can only be bound once
609  bind_to(L, pc_offset());
610 }
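
// Typical label life cycle, as a sketch (beq(Label*) is assumed from
// assembler-ppc.h; it lowers to bc() with a linked offset):
//
//   Label done;
//   cmpi(r3, Operand::Zero());
//   beq(&done);                 // links "done" into the label's chain
//   addi(r3, r3, Operand(-1));
//   bind(&done);                // patches every linked reference above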
611 
612 
613 void Assembler::next(Label* L) {
614  DCHECK(L->is_linked());
615  int link = target_at(L->pos());
616  if (link == kEndOfChain) {
617  L->Unuse();
618  } else {
619  DCHECK_GE(link, 0);
620  L->link_to(link);
621  }
622 }
623 
624 
625 bool Assembler::is_near(Label* L, Condition cond) {
626  DCHECK(L->is_bound());
627  if (!L->is_bound()) return false;
628 
629  int maxReach = ((cond == al) ? 26 : 16);
630  int offset = L->pos() - pc_offset();
631 
632  return is_intn(offset, maxReach);
633 }
634 
635 
636 void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
637  DoubleRegister frb, RCBit r) {
638  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
639 }
640 
641 
642 void Assembler::d_form(Instr instr, Register rt, Register ra,
643  const intptr_t val, bool signed_disp) {
644  if (signed_disp) {
645  if (!is_int16(val)) {
646  PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
647  }
648  CHECK(is_int16(val));
649  } else {
650  if (!is_uint16(val)) {
651  PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
652  ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
653  val, val, is_uint16(val), kImm16Mask);
654  }
655  CHECK(is_uint16(val));
656  }
657  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
658 }
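
// D-form layout (Power ISA): the primary opcode occupies the top six bits,
// RT/RS the next five, RA the five after that, and the 16-bit displacement
// or immediate the low half. Hence the multipliers above: rt.code() * B21
// places the register field at bits 21-25 (counting from the LSB), and
// (kImm16Mask & val) fills bits 0-15.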
659 
660 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
661  OEBit o, RCBit r) {
662  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
663 }
664 
665 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
666  int maskbit, RCBit r) {
667  int sh0_4 = shift & 0x1F;
668  int sh5 = (shift >> 5) & 0x1;
669  int m0_4 = maskbit & 0x1F;
670  int m5 = (maskbit >> 5) & 0x1;
671 
672  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
673  m5 * B5 | sh5 * B1 | r);
674 }
675 
676 
677 void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
678  int maskbit, RCBit r) {
679  int m0_4 = maskbit & 0x1F;
680  int m5 = (maskbit >> 5) & 0x1;
681 
682  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
683  m5 * B5 | r);
684 }
685 
686 
687 // Returns the next free trampoline entry.
688 int32_t Assembler::get_trampoline_entry() {
689  int32_t trampoline_entry = kInvalidSlotPos;
690 
691  if (!internal_trampoline_exception_) {
692  trampoline_entry = trampoline_.take_slot();
693 
694  if (kInvalidSlotPos == trampoline_entry) {
695  internal_trampoline_exception_ = true;
696  }
697  }
698  return trampoline_entry;
699 }
700 
701 
702 int Assembler::link(Label* L) {
703  int position;
704  if (L->is_bound()) {
705  position = L->pos();
706  } else {
707  if (L->is_linked()) {
708  position = L->pos(); // L's link
709  } else {
710  // was: target_pos = kEndOfChain;
711  // However, using the position itself to mark the first reference
712  // should avoid most instances of branch offset overflow. See
713  // target_at() for where this is converted back to kEndOfChain.
714  position = pc_offset();
715  }
716  L->link_to(pc_offset());
717  }
718 
719  return position;
720 }
721 
722 
723 // Branch instructions.
724 
725 
726 void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
727  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
728 }
729 
730 
731 void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
732  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
733 }
734 
735 
736 // Pseudo op - branch to link register
737 void Assembler::blr() { bclr(BA, 0, LeaveLK); }
738 
739 
740 // Pseudo op - branch to count register -- used for "jump"
741 void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }
742 
743 
744 void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
745 
746 
747 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
748  int imm16 = branch_offset;
749  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
750  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
751 }
752 
753 
754 void Assembler::b(int branch_offset, LKBit lk) {
755  int imm26 = branch_offset;
756  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
757  emit(BX | (imm26 & kImm26Mask) | lk);
758 }
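
// branch_offset is a byte offset from the branch instruction itself; it
// must fit in a signed 26-bit field and be 4-byte aligned, since the low
// two bits of the instruction word hold the AA and LK flags. For example,
// b(8) skips exactly one instruction, and b(0) would spin on itself.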
759 
760 
761 void Assembler::xori(Register dst, Register src, const Operand& imm) {
762  d_form(XORI, src, dst, imm.immediate(), false);
763 }
764 
765 
766 void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
767  d_form(XORIS, rs, ra, imm.immediate(), false);
768 }
769 
770 
771 void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
772  RCBit rc) {
773  sh &= 0x1F;
774  mb &= 0x1F;
775  me &= 0x1F;
776  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
777  me << 1 | rc);
778 }
779 
780 
781 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
782  RCBit rc) {
783  mb &= 0x1F;
784  me &= 0x1F;
785  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
786  me << 1 | rc);
787 }
788 
789 
790 void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
791  RCBit rc) {
792  sh &= 0x1F;
793  mb &= 0x1F;
794  me &= 0x1F;
795  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
796  me << 1 | rc);
797 }
798 
799 
800 void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
801  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
802  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
803 }
804 
805 
806 void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
807  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
808  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
809 }
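
// A worked example of the rlwinm mapping above: srwi(r3, r4, Operand(8))
// becomes rlwinm(r3, r4, 24, 8, 31). Rotating left by 24 equals rotating
// right by 8, and the mask keeping bits 8..31 clears the top eight bits;
// together that is a logical right shift by 8.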
810 
811 
812 void Assembler::clrrwi(Register dst, Register src, const Operand& val,
813  RCBit rc) {
814  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
815  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
816 }
817 
818 
819 void Assembler::clrlwi(Register dst, Register src, const Operand& val,
820  RCBit rc) {
821  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
822  rlwinm(dst, src, 0, val.immediate(), 31, rc);
823 }
824 
825 
826 void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
827  rlwnm(ra, rs, rb, 0, 31, r);
828 }
829 
830 
831 void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
832  rlwinm(ra, rs, sh, 0, 31, r);
833 }
834 
835 
836 void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
837  rlwinm(ra, rs, 32 - sh, 0, 31, r);
838 }
839 
840 
841 void Assembler::subi(Register dst, Register src, const Operand& imm) {
842  addi(dst, src, Operand(-(imm.immediate())));
843 }
844 
845 void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
846  RCBit r) {
847  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
848 }
849 
850 void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
851  RCBit r) {
852  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
853 }
854 
855 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
856  // a special xo_form
857  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
858 }
859 
860 
861 void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
862  RCBit r) {
863  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
864 }
865 
866 void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
867  RCBit r) {
868  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
869 }
870 
871 void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
872  RCBit r) {
873  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
874 }
875 
876 void Assembler::subfic(Register dst, Register src, const Operand& imm) {
877  d_form(SUBFIC, dst, src, imm.immediate(), true);
878 }
879 
880 
881 void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
882  RCBit r) {
883  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
884 }
885 
886 
887 // Multiply low word
888 void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
889  RCBit r) {
890  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
891 }
892 
893 
894 // Multiply hi word
895 void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
896  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
897 }
898 
899 
900 // Multiply hi word unsigned
901 void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
902  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
903 }
904 
905 
906 // Divide word
907 void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
908  RCBit r) {
909  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
910 }
911 
912 
913 // Divide word unsigned
914 void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
915  RCBit r) {
916  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
917 }
918 
919 
920 void Assembler::addi(Register dst, Register src, const Operand& imm) {
921  DCHECK(src != r0); // use li instead to show intent
922  d_form(ADDI, dst, src, imm.immediate(), true);
923 }
924 
925 
926 void Assembler::addis(Register dst, Register src, const Operand& imm) {
927  DCHECK(src != r0); // use lis instead to show intent
928  d_form(ADDIS, dst, src, imm.immediate(), true);
929 }
930 
931 
932 void Assembler::addic(Register dst, Register src, const Operand& imm) {
933  d_form(ADDIC, dst, src, imm.immediate(), true);
934 }
935 
936 
937 void Assembler::andi(Register ra, Register rs, const Operand& imm) {
938  d_form(ANDIx, rs, ra, imm.immediate(), false);
939 }
940 
941 
942 void Assembler::andis(Register ra, Register rs, const Operand& imm) {
943  d_form(ANDISx, rs, ra, imm.immediate(), false);
944 }
945 
946 
947 void Assembler::ori(Register ra, Register rs, const Operand& imm) {
948  d_form(ORI, rs, ra, imm.immediate(), false);
949 }
950 
951 
952 void Assembler::oris(Register dst, Register src, const Operand& imm) {
953  d_form(ORIS, src, dst, imm.immediate(), false);
954 }
955 
956 
957 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
958  intptr_t imm16 = src2.immediate();
959 #if V8_TARGET_ARCH_PPC64
960  int L = 1;
961 #else
962  int L = 0;
963 #endif
964  DCHECK(is_int16(imm16));
965  DCHECK(cr.code() >= 0 && cr.code() <= 7);
966  imm16 &= kImm16Mask;
967  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
968 }
969 
970 
971 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
972  uintptr_t uimm16 = src2.immediate();
973 #if V8_TARGET_ARCH_PPC64
974  int L = 1;
975 #else
976  int L = 0;
977 #endif
978  DCHECK(is_uint16(uimm16));
979  DCHECK(cr.code() >= 0 && cr.code() <= 7);
980  uimm16 &= kImm16Mask;
981  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
982 }
983 
984 
985 void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
986  intptr_t imm16 = src2.immediate();
987  int L = 0;
988  int pos = pc_offset();
989  DCHECK(is_int16(imm16));
990  DCHECK(cr.code() >= 0 && cr.code() <= 7);
991  imm16 &= kImm16Mask;
992 
993  // For cmpwi against 0, save the position and cr for later examination
994  // of a potential optimization.
995  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
996  optimizable_cmpi_pos_ = pos;
997  cmpi_cr_ = cr;
998  }
999  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
1000 }
1001 
1002 
1003 void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
1004  uintptr_t uimm16 = src2.immediate();
1005  int L = 0;
1006  DCHECK(is_uint16(uimm16));
1007  DCHECK(cr.code() >= 0 && cr.code() <= 7);
1008  uimm16 &= kImm16Mask;
1009  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
1010 }
1011 
1012 
1013 void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
1014  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1015  cb * B6);
1016 }
1017 
1018 
1019 // Pseudo op - load immediate
1020 void Assembler::li(Register dst, const Operand& imm) {
1021  d_form(ADDI, dst, r0, imm.immediate(), true);
1022 }
1023 
1024 
1025 void Assembler::lis(Register dst, const Operand& imm) {
1026  d_form(ADDIS, dst, r0, imm.immediate(), true);
1027 }
1028 
1029 
1030 // Pseudo op - move register
1031 void Assembler::mr(Register dst, Register src) {
1032  // actually or(dst, src, src)
1033  orx(dst, src, src);
1034 }
1035 
1036 
1037 void Assembler::lbz(Register dst, const MemOperand& src) {
1038  DCHECK(src.ra_ != r0);
1039  d_form(LBZ, dst, src.ra(), src.offset(), true);
1040 }
1041 
1042 
1043 void Assembler::lhz(Register dst, const MemOperand& src) {
1044  DCHECK(src.ra_ != r0);
1045  d_form(LHZ, dst, src.ra(), src.offset(), true);
1046 }
1047 
1048 
1049 void Assembler::lwz(Register dst, const MemOperand& src) {
1050  DCHECK(src.ra_ != r0);
1051  d_form(LWZ, dst, src.ra(), src.offset(), true);
1052 }
1053 
1054 
1055 void Assembler::lwzu(Register dst, const MemOperand& src) {
1056  DCHECK(src.ra_ != r0);
1057  d_form(LWZU, dst, src.ra(), src.offset(), true);
1058 }
1059 
1060 
1061 void Assembler::lha(Register dst, const MemOperand& src) {
1062  DCHECK(src.ra_ != r0);
1063  d_form(LHA, dst, src.ra(), src.offset(), true);
1064 }
1065 
1066 
1067 void Assembler::lwa(Register dst, const MemOperand& src) {
1068 #if V8_TARGET_ARCH_PPC64
1069  int offset = src.offset();
1070  DCHECK(src.ra_ != r0);
1071  CHECK(!(offset & 3) && is_int16(offset));
1072  offset = kImm16Mask & offset;
1073  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
1074 #else
1075  lwz(dst, src);
1076 #endif
1077 }
1078 
1079 void Assembler::stb(Register dst, const MemOperand& src) {
1080  DCHECK(src.ra_ != r0);
1081  d_form(STB, dst, src.ra(), src.offset(), true);
1082 }
1083 
1084 
1085 void Assembler::sth(Register dst, const MemOperand& src) {
1086  DCHECK(src.ra_ != r0);
1087  d_form(STH, dst, src.ra(), src.offset(), true);
1088 }
1089 
1090 
1091 void Assembler::stw(Register dst, const MemOperand& src) {
1092  DCHECK(src.ra_ != r0);
1093  d_form(STW, dst, src.ra(), src.offset(), true);
1094 }
1095 
1096 
1097 void Assembler::stwu(Register dst, const MemOperand& src) {
1098  DCHECK(src.ra_ != r0);
1099  d_form(STWU, dst, src.ra(), src.offset(), true);
1100 }
1101 
1102 
1103 void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
1104  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
1105 }
1106 
1107 
1108 #if V8_TARGET_ARCH_PPC64
1109 // 64bit specific instructions
1110 void Assembler::ld(Register rd, const MemOperand& src) {
1111  int offset = src.offset();
1112  DCHECK(src.ra_ != r0);
1113  CHECK(!(offset & 3) && is_int16(offset));
1114  offset = kImm16Mask & offset;
1115  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
1116 }
1117 
1118 
1119 void Assembler::ldu(Register rd, const MemOperand& src) {
1120  int offset = src.offset();
1121  DCHECK(src.ra_ != r0);
1122  CHECK(!(offset & 3) && is_int16(offset));
1123  offset = kImm16Mask & offset;
1124  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
1125 }
1126 
1127 
1128 void Assembler::std(Register rs, const MemOperand& src) {
1129  int offset = src.offset();
1130  DCHECK(src.ra_ != r0);
1131  CHECK(!(offset & 3) && is_int16(offset));
1132  offset = kImm16Mask & offset;
1133  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
1134 }
1135 
1136 
1137 void Assembler::stdu(Register rs, const MemOperand& src) {
1138  int offset = src.offset();
1139  DCHECK(src.ra_ != r0);
1140  CHECK(!(offset & 3) && is_int16(offset));
1141  offset = kImm16Mask & offset;
1142  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
1143 }
1144 
1145 
1146 void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
1147  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
1148 }
1149 
1150 
1151 void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
1152  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
1153 }
1154 
1155 
1156 void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
1157  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
1158 }
1159 
1160 
1161 void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
1162  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
1163 }
1164 
1165 
1166 void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
1167  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
1168  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
1169 }
1170 
1171 
1172 void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
1173  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
1174  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
1175 }
1176 
1177 
1178 void Assembler::clrrdi(Register dst, Register src, const Operand& val,
1179  RCBit rc) {
1180  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
1181  rldicr(dst, src, 0, 63 - val.immediate(), rc);
1182 }
1183 
1184 
1185 void Assembler::clrldi(Register dst, Register src, const Operand& val,
1186  RCBit rc) {
1187  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
1188  rldicl(dst, src, 0, val.immediate(), rc);
1189 }
1190 
1191 
1192 void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
1193  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
1194 }
1195 
1196 
1197 void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
1198  int sh0_4 = sh & 0x1F;
1199  int sh5 = (sh >> 5) & 0x1;
1200 
1201  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
1202  sh5 * B1 | r);
1203 }
1204 
1205 
1206 void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
1207  rldcl(ra, rs, rb, 0, r);
1208 }
1209 
1210 
1211 void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
1212  rldicl(ra, rs, sh, 0, r);
1213 }
1214 
1215 
1216 void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
1217  rldicl(ra, rs, 64 - sh, 0, r);
1218 }
1219 
1220 
1221 void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
1222  RCBit r) {
1223  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
1224 }
1225 
1226 
1227 void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
1228  RCBit r) {
1229  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
1230 }
1231 
1232 
1233 void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
1234  RCBit r) {
1235  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
1236 }
1237 #endif
1238 
1239 
1240 // Function descriptor for AIX.
1241 // Code address skips the function descriptor "header".
1242 // TOC and static chain are ignored and set to 0.
1243 void Assembler::function_descriptor() {
1244  if (ABI_USES_FUNCTION_DESCRIPTORS) {
1245  Label instructions;
1246  DCHECK_EQ(pc_offset(), 0);
1247  emit_label_addr(&instructions);
1248  dp(0);
1249  dp(0);
1250  bind(&instructions);
1251  }
1252 }
1253 
1254 
1255 int Assembler::instructions_required_for_mov(Register dst,
1256  const Operand& src) const {
1257  bool canOptimize =
1258  !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
1259  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
1260  if (ConstantPoolAccessIsInOverflow()) {
1261  return kMovInstructionsConstantPool + 1;
1262  }
1263  return kMovInstructionsConstantPool;
1264  }
1265  DCHECK(!canOptimize);
1266  return kMovInstructionsNoConstantPool;
1267 }
1268 
1269 
1270 bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
1271  bool canOptimize) const {
1272  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
1273  // If there is no constant pool available, we must use a mov
1274  // immediate sequence.
1275  return false;
1276  }
1277  intptr_t value = src.immediate();
1278 #if V8_TARGET_ARCH_PPC64
1279  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
1280 #else
1281  bool allowOverflow = !(canOptimize || dst == r0);
1282 #endif
1283  if (canOptimize && is_int16(value)) {
1284  // Prefer a single-instruction load-immediate.
1285  return false;
1286  }
1287  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
1288  // Prefer non-relocatable two-instruction bitwise-mov32 over
1289  // overflow sequence.
1290  return false;
1291  }
1292 
1293  return true;
1294 }
1295 
1296 
1297 void Assembler::EnsureSpaceFor(int space_needed) {
1298  if (buffer_space() <= (kGap + space_needed)) {
1299  GrowBuffer(space_needed);
1300  }
1301 }
1302 
1303 
1304 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1305  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1306  if (assembler != nullptr && assembler->predictable_code_size()) return true;
1307  return assembler->options().record_reloc_info_for_serialization;
1308  } else if (RelocInfo::IsNone(rmode_)) {
1309  return false;
1310  }
1311  return true;
1312 }
1313 
1314 
1315 // Primarily used for loading constants.
1316 // This should really move to macro-assembler, as it is really a
1317 // pseudo-instruction.
1318 // Some usages of this intend for a FIXED_SEQUENCE to be used.
1319 // TODO: break this dependency so we can optimize mov() in general
1320 // and only use the generic version when we require a fixed sequence.
1321 void Assembler::mov(Register dst, const Operand& src) {
1322  intptr_t value;
1323  if (src.IsHeapObjectRequest()) {
1324  RequestHeapObject(src.heap_object_request());
1325  value = 0;
1326  } else {
1327  value = src.immediate();
1328  }
1329  bool relocatable = src.must_output_reloc_info(this);
1330  bool canOptimize;
1331 
1332  canOptimize =
1333  !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
1334 
1335  if (!src.IsHeapObjectRequest() &&
1336  use_constant_pool_for_mov(dst, src, canOptimize)) {
1337  DCHECK(is_constant_pool_available());
1338  if (relocatable) {
1339  RecordRelocInfo(src.rmode_);
1340  }
1341  ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
1342 #if V8_TARGET_ARCH_PPC64
1343  if (access == ConstantPoolEntry::OVERFLOWED) {
1344  addis(dst, kConstantPoolRegister, Operand::Zero());
1345  ld(dst, MemOperand(dst, 0));
1346  } else {
1347  ld(dst, MemOperand(kConstantPoolRegister, 0));
1348  }
1349 #else
1350  if (access == ConstantPoolEntry::OVERFLOWED) {
1351  addis(dst, kConstantPoolRegister, Operand::Zero());
1352  lwz(dst, MemOperand(dst, 0));
1353  } else {
1354  lwz(dst, MemOperand(kConstantPoolRegister, 0));
1355  }
1356 #endif
1357  return;
1358  }
1359 
1360  if (canOptimize) {
1361  if (is_int16(value)) {
1362  li(dst, Operand(value));
1363  } else {
1364  uint16_t u16;
1365 #if V8_TARGET_ARCH_PPC64
1366  if (is_int32(value)) {
1367 #endif
1368  lis(dst, Operand(value >> 16));
1369 #if V8_TARGET_ARCH_PPC64
1370  } else {
1371  if (is_int48(value)) {
1372  li(dst, Operand(value >> 32));
1373  } else {
1374  lis(dst, Operand(value >> 48));
1375  u16 = ((value >> 32) & 0xFFFF);
1376  if (u16) {
1377  ori(dst, dst, Operand(u16));
1378  }
1379  }
1380  sldi(dst, dst, Operand(32));
1381  u16 = ((value >> 16) & 0xFFFF);
1382  if (u16) {
1383  oris(dst, dst, Operand(u16));
1384  }
1385  }
1386 #endif
1387  u16 = (value & 0xFFFF);
1388  if (u16) {
1389  ori(dst, dst, Operand(u16));
1390  }
1391  }
1392  return;
1393  }
1394 
1395  DCHECK(!canOptimize);
1396  if (relocatable) {
1397  RecordRelocInfo(src.rmode_);
1398  }
1399  bitwise_mov(dst, value);
1400 }
1401 
1402 
1403 void Assembler::bitwise_mov(Register dst, intptr_t value) {
1404  BlockTrampolinePoolScope block_trampoline_pool(this);
1405 #if V8_TARGET_ARCH_PPC64
1406  int32_t hi_32 = static_cast<int32_t>(value >> 32);
1407  int32_t lo_32 = static_cast<int32_t>(value);
1408  int hi_word = static_cast<int>(hi_32 >> 16);
1409  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
1410  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1411  ori(dst, dst, Operand(lo_word));
1412  sldi(dst, dst, Operand(32));
1413  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
1414  lo_word = static_cast<int>(lo_32 & 0xFFFF);
1415  oris(dst, dst, Operand(hi_word));
1416  ori(dst, dst, Operand(lo_word));
1417 #else
1418  int hi_word = static_cast<int>(value >> 16);
1419  int lo_word = static_cast<int>(value & 0xFFFF);
1420  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1421  ori(dst, dst, Operand(lo_word));
1422 #endif
1423 }
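
// A worked example on PPC64, assuming value == 0x123456789ABCDEF0:
//
//   lis  dst, 0x1234        // dst = 0x0000000012340000
//   ori  dst, dst, 0x5678   // dst = 0x0000000012345678
//   sldi dst, dst, 32       // dst = 0x1234567800000000
//   oris dst, dst, 0x9ABC   // dst = 0x123456789ABC0000
//   ori  dst, dst, 0xDEF0   // dst = 0x123456789ABCDEF0
//
// Always five instructions: a fixed sequence that later patching
// (e.g. set_target_address_at) relies on.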
1424 
1425 
1426 void Assembler::bitwise_mov32(Register dst, int32_t value) {
1427  BlockTrampolinePoolScope block_trampoline_pool(this);
1428  int hi_word = static_cast<int>(value >> 16);
1429  int lo_word = static_cast<int>(value & 0xFFFF);
1430  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1431  ori(dst, dst, Operand(lo_word));
1432 }
1433 
1434 
1435 void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
1436  BlockTrampolinePoolScope block_trampoline_pool(this);
1437  if (is_int16(value)) {
1438  addi(dst, src, Operand(value));
1439  nop();
1440  } else {
1441  int hi_word = static_cast<int>(value >> 16);
1442  int lo_word = static_cast<int>(value & 0xFFFF);
1443  if (lo_word & 0x8000) hi_word++;
1444  addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
1445  addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
1446  }
1447 }
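
// Example of the sign fixup above, assuming value == 0x00018000: hi_word
// is 0x0001 and lo_word is 0x8000. addic sign-extends its immediate, so
// adding 0x8000 actually subtracts 0x8000; bumping hi_word to 0x0002
// compensates: 0x00020000 - 0x00008000 == 0x00018000.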
1448 
1449 
1450 void Assembler::mov_label_offset(Register dst, Label* label) {
1451  int position = link(label);
1452  if (label->is_bound()) {
1453  // Load the position of the label relative to the generated code object.
1454  mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
1455  } else {
1456  // Encode internal reference to unbound label. We use a dummy opcode
1457  // such that it won't collide with any opcode that might appear in the
1458  // label's chain. Encode the destination register in the 2nd instruction.
1459  int link = position - pc_offset();
1460  DCHECK_EQ(0, link & 3);
1461  link >>= 2;
1462  DCHECK(is_int26(link));
1463 
1464  // When the label is bound, these instructions will be patched
1465  // with a 2 instruction mov sequence that will load the
1466  // destination register with the position of the label from the
1467  // beginning of the code.
1468  //
1469  // target_at extracts the link and target_at_put patches the instructions.
1470  BlockTrampolinePoolScope block_trampoline_pool(this);
1471  emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
1472  emit(dst.code());
1473  }
1474 }
1475 
1476 
1477 void Assembler::add_label_offset(Register dst, Register base, Label* label,
1478  int delta) {
1479  int position = link(label);
1480  if (label->is_bound()) {
1481  // dst = base + position + delta
1482  position += delta;
1483  bitwise_add32(dst, base, position);
1484  } else {
1485  // Encode internal reference to unbound label. We use a dummy opcode
1486  // such that it won't collide with any opcode that might appear in the
1487  // label's chain. Encode the operands in the 2nd instruction.
1488  int link = position - pc_offset();
1489  DCHECK_EQ(0, link & 3);
1490  link >>= 2;
1491  DCHECK(is_int26(link));
1492  BlockTrampolinePoolScope block_trampoline_pool(this);
1493 
1494  emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
1495  : kUnboundAddLabelLongOffsetOpcode) |
1496  (link & kImm26Mask));
1497  emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));
1498 
1499  if (!is_int22(delta)) {
1500  emit(delta);
1501  }
1502  }
1503 }
1504 
1505 
1506 void Assembler::mov_label_addr(Register dst, Label* label) {
1507  CheckBuffer();
1508  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
1509  int position = link(label);
1510  if (label->is_bound()) {
1511  // Keep internal references relative until EmitRelocations.
1512  bitwise_mov(dst, position);
1513  } else {
1514  // Encode internal reference to unbound label. We use a dummy opcode
1515  // such that it won't collide with any opcode that might appear in the
1516  // label's chain. Encode the destination register in the 2nd instruction.
1517  int link = position - pc_offset();
1518  DCHECK_EQ(0, link & 3);
1519  link >>= 2;
1520  DCHECK(is_int26(link));
1521 
1522  // When the label is bound, these instructions will be patched
1523  // with a multi-instruction mov sequence that will load the
1524  // destination register with the address of the label.
1525  //
1526  // target_at extracts the link and target_at_put patches the instructions.
1527  BlockTrampolinePoolScope block_trampoline_pool(this);
1528  emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
1529  emit(dst.code());
1530  DCHECK_GE(kMovInstructionsNoConstantPool, 2);
1531  for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
1532  }
1533 }
1534 
1535 
1536 void Assembler::emit_label_addr(Label* label) {
1537  CheckBuffer();
1538  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
1539  int position = link(label);
1540  if (label->is_bound()) {
1541  // Keep internal references relative until EmitRelocations.
1542  dp(position);
1543  } else {
1544  // Encode internal reference to unbound label. We use a dummy opcode
1545  // such that it won't collide with any opcode that might appear in the
1546  // label's chain.
1547  int link = position - pc_offset();
1548  DCHECK_EQ(0, link & 3);
1549  link >>= 2;
1550  DCHECK(is_int26(link));
1551 
1552  // When the label is bound, the instruction(s) will be patched
1553  // as a jump table entry containing the label address. target_at extracts
1554  // the link and target_at_put patches the instruction(s).
1555  BlockTrampolinePoolScope block_trampoline_pool(this);
1556  emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
1557 #if V8_TARGET_ARCH_PPC64
1558  nop();
1559 #endif
1560  }
1561 }
1562 
1563 
1564 // Special register instructions
1565 void Assembler::crxor(int bt, int ba, int bb) {
1566  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
1567 }
1568 
1569 
1570 void Assembler::creqv(int bt, int ba, int bb) {
1571  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
1572 }
1573 
1574 
1575 void Assembler::mflr(Register dst) {
1576  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit
1577 }
1578 
1579 
1580 void Assembler::mtlr(Register src) {
1581  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit
1582 }
1583 
1584 
1585 void Assembler::mtctr(Register src) {
1586  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit
1587 }
1588 
1589 
1590 void Assembler::mtxer(Register src) {
1591  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
1592 }
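
// The SPR field of mfspr/mtspr is encoded with its two 5-bit halves
// swapped: SPR n is emitted as (((n & 0x1F) << 5) | (n >> 5)) << 11.
// That is why LR (SPR 8) appears above as 256 << 11, CTR (SPR 9) as
// 288 << 11, and XER (SPR 1) as 32 << 11.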
1593 
1594 
1595 void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
1596  DCHECK_LT(static_cast<int>(bit), 32);
1597  int bf = cr.code();
1598  int bfa = bit / CRWIDTH;
1599  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
1600 }
1601 
1602 
1603 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
1604 
1605 
1606 #if V8_TARGET_ARCH_PPC64
1607 void Assembler::mffprd(Register dst, DoubleRegister src) {
1608  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
1609 }
1610 
1611 
1612 void Assembler::mffprwz(Register dst, DoubleRegister src) {
1613  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
1614 }
1615 
1616 
1617 void Assembler::mtfprd(DoubleRegister dst, Register src) {
1618  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
1619 }
1620 
1621 
1622 void Assembler::mtfprwz(DoubleRegister dst, Register src) {
1623  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
1624 }
1625 
1626 
1627 void Assembler::mtfprwa(DoubleRegister dst, Register src) {
1628  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
1629 }
1630 #endif
1631 
1632 
1633 // Exception-generating instructions and debugging support.
1634 // Stops with a non-negative code less than kNumOfWatchedStops support
1635 // enabling/disabling and a counter feature. See simulator-ppc.h.
1636 void Assembler::stop(const char* msg, Condition cond, int32_t code,
1637  CRegister cr) {
1638  if (cond != al) {
1639  Label skip;
1640  b(NegateCondition(cond), &skip, cr);
1641  bkpt(0);
1642  bind(&skip);
1643  } else {
1644  bkpt(0);
1645  }
1646 }
1647 
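// 0x7D821008 decodes as "tw 12, r2, r2" (twge r2, r2); since r2 >= r2
// always holds, it traps unconditionally and serves as a breakpoint.
// The imm16 argument is currently ignored.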
1648 void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
1649 
1650 void Assembler::dcbf(Register ra, Register rb) {
1651  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
1652 }
1653 
1654 
1655 void Assembler::sync() { emit(EXT2 | SYNC); }
1656 
1657 
1658 void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
1659 
1660 
1661 void Assembler::icbi(Register ra, Register rb) {
1662  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
1663 }
1664 
1665 
1666 void Assembler::isync() { emit(EXT1 | ISYNC); }
1667 
1668 
1669 // Floating point support
1670 
1671 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
1672  int offset = src.offset();
1673  Register ra = src.ra();
1674  DCHECK(ra != r0);
1675  CHECK(is_int16(offset));
1676  int imm16 = offset & kImm16Mask;
1677  // could be x_form instruction with some casting magic
1678  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
1679 }
1680 
1681 
1682 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
1683  int offset = src.offset();
1684  Register ra = src.ra();
1685  DCHECK(ra != r0);
1686  CHECK(is_int16(offset));
1687  int imm16 = offset & kImm16Mask;
1688  // could be x_form instruction with some casting magic
1689  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
1690 }
1691 
1692 
1693 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
1694  int offset = src.offset();
1695  Register ra = src.ra();
1696  CHECK(is_int16(offset));
1697  DCHECK(ra != r0);
1698  int imm16 = offset & kImm16Mask;
1699  // could be x_form instruction with some casting magic
1700  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
1701 }
1702 
1703 
1704 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
1705  int offset = src.offset();
1706  Register ra = src.ra();
1707  CHECK(is_int16(offset));
1708  DCHECK(ra != r0);
1709  int imm16 = offset & kImm16Mask;
1710  // could be x_form instruction with some casting magic
1711  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
1712 }
1713 
1714 
1715 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
1716  int offset = src.offset();
1717  Register ra = src.ra();
1718  CHECK(is_int16(offset));
1719  DCHECK(ra != r0);
1720  int imm16 = offset & kImm16Mask;
1721  // could be x_form instruction with some casting magic
1722  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
1723 }
1724 
1725 
1726 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
1727  int offset = src.offset();
1728  Register ra = src.ra();
1729  CHECK(is_int16(offset));
1730  DCHECK(ra != r0);
1731  int imm16 = offset & kImm16Mask;
1732  // could be x_form instruction with some casting magic
1733  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
1734 }
1735 
1736 
1737 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
1738  int offset = src.offset();
1739  Register ra = src.ra();
1740  CHECK(is_int16(offset));
1741  DCHECK(ra != r0);
1742  int imm16 = offset & kImm16Mask;
1743  // could be x_form instruction with some casting magic
1744  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
1745 }
1746 
1747 
1748 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
1749  int offset = src.offset();
1750  Register ra = src.ra();
1751  CHECK(is_int16(offset));
1752  DCHECK(ra != r0);
1753  int imm16 = offset & kImm16Mask;
1754  // could be x_form instruction with some casting magic
1755  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
1756 }
1757 
1758 
1759 void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
1760  const DoubleRegister frb, RCBit rc) {
1761  a_form(EXT4 | FSUB, frt, fra, frb, rc);
1762 }
1763 
1764 
1765 void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
1766  const DoubleRegister frb, RCBit rc) {
1767  a_form(EXT4 | FADD, frt, fra, frb, rc);
1768 }
1769 
1770 
1771 void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
1772  const DoubleRegister frc, RCBit rc) {
1773  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
1774  rc);
1775 }
1776 
1777 
1778 void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
1779  const DoubleRegister frb, RCBit rc) {
1780  a_form(EXT4 | FDIV, frt, fra, frb, rc);
1781 }
1782 
1783 
1784 void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
1785  CRegister cr) {
1786  DCHECK(cr.code() >= 0 && cr.code() <= 7);
1787  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
1788 }
1789 
1790 
1791 void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
1792  RCBit rc) {
1793  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
1794 }
1795 
1796 
1797 void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
1798  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
1799 }
1800 
1801 
1802 void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
1803  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
1804 }
1805 
1806 
1807 void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
1808  RCBit rc) {
1809  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
1810 }
1811 
1812 
1813 void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
1814  RCBit rc) {
1815  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
1816 }
1817 
1818 
1819 void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
1820  RCBit rc) {
1821  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
1822 }
1823 
1824 
1825 void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
1826  RCBit rc) {
1827  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
1828 }
1829 
1830 
1831 void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
1832  RCBit rc) {
1833  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
1834 }
1835 
1836 
1837 void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
1838  RCBit rc) {
1839  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
1840 }
1841 
1842 
1843 void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
1844  RCBit rc) {
1845  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
1846 }
1847 
1848 
1849 void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
1850  RCBit rc) {
1851  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
1852 }
1853 
1854 
1855 void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
1856  RCBit rc) {
1857  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
1858 }
1859 
1860 
1861 void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
1862  RCBit rc) {
1863  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
1864 }
1865 
1866 
1867 void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
1868  RCBit rc) {
1869  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
1870 }
1871 
1872 
1873 void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
1874  RCBit rc) {
1875  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
1876 }
1877 
1878 
1879 void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
1880  RCBit rc) {
1881  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
1882 }
1883 
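// Note (editorial): the fcti* instructions above convert a double to a
// 32/64-bit integer (the "z" variants round toward zero, the "u" variants
// produce unsigned results), while fcfid/fcfidu convert the other way; the
// "s" forms (fcfids/fcfidus, encoded under EXT3 rather than EXT4) yield a
// single-precision result.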
1884 
1885 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
1886  const DoubleRegister frc, const DoubleRegister frb,
1887  RCBit rc) {
1888  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
1889  frc.code() * B6 | rc);
1890 }
1891 
1892 
1893 void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
1894  RCBit rc) {
1895  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
1896 }
1897 
1898 
1899 void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
1900  DCHECK_LT(static_cast<int>(bit), 32);
1901  int bt = bit;
1902  emit(EXT4 | MTFSB0 | bt * B21 | rc);
1903 }
1904 
1905 
1906 void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
1907  DCHECK_LT(static_cast<int>(bit), 32);
1908  int bt = bit;
1909  emit(EXT4 | MTFSB1 | bt * B21 | rc);
1910 }
1911 
1912 
1913 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
1914  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
1915 }
1916 
1917 
1918 void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
1919  emit(EXT4 | MFFS | frt.code() * B21 | rc);
1920 }
1921 
1922 
1923 void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
1924  RCBit rc) {
1925  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
1926 }
1927 
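// Note (editorial): mtfsb0/mtfsb1 clear/set a single FPSCR bit, mtfsfi moves
// a 4-bit immediate into the FPSCR field selected by bf, mffs copies the
// FPSCR into frt, and mtfsf updates the FPSCR fields selected by the FLM
// mask from frb.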
1928 
1929 void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
1930  RCBit rc) {
1931  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
1932 }
1933 
1934 
1935 void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
1936  RCBit rc) {
1937  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
1938 }
1939 
1940 
1941 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
1942  const DoubleRegister frc, const DoubleRegister frb,
1943  RCBit rc) {
1944  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
1945  frc.code() * B6 | rc);
1946 }
1947 
1948 
1949 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
1950  const DoubleRegister frc, const DoubleRegister frb,
1951  RCBit rc) {
1952  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
1953  frc.code() * B6 | rc);
1954 }
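
// Note (editorial): fmadd/fmsub are A-form fused multiply-add/subtract
// operations computing frt = (fra * frc) +/- frb; as with fsel above, the
// multiplier frc occupies the low field (B6) while the addend frb sits at
// B11.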
1955 
1956 // Pseudo instructions.
1957 void Assembler::nop(int type) {
1958  Register reg = r0;
1959  switch (type) {
1960  case NON_MARKING_NOP:
1961  reg = r0;
1962  break;
1963  case GROUP_ENDING_NOP:
1964  reg = r2;
1965  break;
1966  case DEBUG_BREAK_NOP:
1967  reg = r3;
1968  break;
1969  default:
1970  UNIMPLEMENTED();
1971  }
1972 
1973  ori(reg, reg, Operand::Zero());
1974 }
1975 
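// Note (editorial, illustrative): "ori rX, rX, 0" is the conventional PPC
// no-op, so NON_MARKING_NOP assembles to 0x60000000 (ori r0, r0, 0); the
// marking variants differ only in the register used (e.g. GROUP_ENDING_NOP
// is 0x60420000), which is what lets IsNop below tell them apart.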
1976 
1977 bool Assembler::IsNop(Instr instr, int type) {
1978  int reg = 0;
1979  switch (type) {
1980  case NON_MARKING_NOP:
1981  reg = 0;
1982  break;
1983  case GROUP_ENDING_NOP:
1984  reg = 2;
1985  break;
1986  case DEBUG_BREAK_NOP:
1987  reg = 3;
1988  break;
1989  default:
1990  UNIMPLEMENTED();
1991  }
1992  return instr == (ORI | reg * B21 | reg * B16);
1993 }
1994 
1995 
1996 void Assembler::GrowBuffer(int needed) {
1997  if (!own_buffer_) FATAL("external code buffer is too small");
1998 
1999  // Compute new buffer size.
2000  CodeDesc desc; // the new buffer
2001  if (buffer_size_ < 4 * KB) {
2002  desc.buffer_size = 4 * KB;
2003  } else if (buffer_size_ < 1 * MB) {
2004  desc.buffer_size = 2 * buffer_size_;
2005  } else {
2006  desc.buffer_size = buffer_size_ + 1 * MB;
2007  }
2008  int space = buffer_space() + (desc.buffer_size - buffer_size_);
2009  if (space < needed) {
2010  desc.buffer_size += needed - space;
2011  }
2012 
2013  // Some internal data structures overflow for very large buffers, so
2014  // kMaximalBufferSize must be kept small enough to avoid overflow.
2015  if (desc.buffer_size > kMaximalBufferSize) {
2016  V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
2017  }
2018 
2019  // Set up new buffer.
2020  desc.buffer = NewArray<byte>(desc.buffer_size);
2021  desc.origin = this;
2022 
2023  desc.instr_size = pc_offset();
2024  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2025 
2026  // Copy the data.
2027  intptr_t pc_delta = desc.buffer - buffer_;
2028  intptr_t rc_delta =
2029  (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2030  memmove(desc.buffer, buffer_, desc.instr_size);
2031  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
2032  desc.reloc_size);
2033 
2034  // Switch buffers.
2035  DeleteArray(buffer_);
2036  buffer_ = desc.buffer;
2037  buffer_size_ = desc.buffer_size;
2038  pc_ += pc_delta;
2039  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2040  reloc_info_writer.last_pc() + pc_delta);
2041 
2042  // Nothing else to do here since we keep all internal references and
2043  // deferred relocation entries relative to the buffer (until
2044  // EmitRelocations).
2045 }
2046 
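// Note (editorial, illustrative): the growth policy above starts at 4 KB,
// doubles while the buffer is under 1 MB, and grows linearly by 1 MB per
// step afterwards -- e.g. a full 256 KB buffer is reallocated to 512 KB and
// a full 3 MB buffer to 4 MB, plus whatever "needed" still requires.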
2047 
2048 void Assembler::db(uint8_t data) {
2049  CheckBuffer();
2050  *reinterpret_cast<uint8_t*>(pc_) = data;
2051  pc_ += sizeof(uint8_t);
2052 }
2053 
2054 
2055 void Assembler::dd(uint32_t data) {
2056  CheckBuffer();
2057  *reinterpret_cast<uint32_t*>(pc_) = data;
2058  pc_ += sizeof(uint32_t);
2059 }
2060 
2061 
2062 void Assembler::dq(uint64_t value) {
2063  CheckBuffer();
2064  *reinterpret_cast<uint64_t*>(pc_) = value;
2065  pc_ += sizeof(uint64_t);
2066 }
2067 
2068 
2069 void Assembler::dp(uintptr_t data) {
2070  CheckBuffer();
2071  *reinterpret_cast<uintptr_t*>(pc_) = data;
2072  pc_ += sizeof(uintptr_t);
2073 }
2074 
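// Note (editorial): db/dd/dq/dp above emit raw 8-, 32-, 64-bit and
// pointer-sized literals directly into the instruction stream (e.g. for
// jump tables); each calls CheckBuffer() first so the write cannot run past
// the end of the buffer.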
2075 
2076 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2077  if (!ShouldRecordRelocInfo(rmode)) return;
2078  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
2079  relocations_.push_back(rinfo);
2080 }
2081 
2082 
2083 void Assembler::EmitRelocations() {
2084  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
2085 
2086  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
2087  it != relocations_.end(); it++) {
2088  RelocInfo::Mode rmode = it->rmode();
2089  Address pc = reinterpret_cast<Address>(buffer_) + it->position();
2090  RelocInfo rinfo(pc, rmode, it->data(), Code());
2091 
2092  // Fix up internal references now that they are guaranteed to be bound.
2093  if (RelocInfo::IsInternalReference(rmode)) {
2094  // Jump table entry
2095  intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
2096  Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
2097  } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
2098  // mov sequence
2099  intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
2100  set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
2101  SKIP_ICACHE_FLUSH);
2102  }
2103 
2104  reloc_info_writer.Write(&rinfo);
2105  }
2106 }
2107 
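// Note (editorial): relocations are recorded as buffer-relative
// DeferredRelocInfo entries (see RecordRelocInfo above) because GrowBuffer
// may still move the code; only here, once the buffer address is final, are
// they written out and internal references rewritten as absolute addresses.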
2108 
2109 void Assembler::BlockTrampolinePoolFor(int instructions) {
2110  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2111 }
2112 
2113 
2114 void Assembler::CheckTrampolinePool() {
2115  // Some small sequences of instructions must not be broken up by the
2116  // insertion of a trampoline pool; such sequences are protected by setting
2117  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2118  // which are both checked here. Also, recursive calls to CheckTrampolinePool
2119  // are blocked by trampoline_pool_blocked_nesting_.
2120  if (trampoline_pool_blocked_nesting_ > 0) return;
2121  if (pc_offset() < no_trampoline_pool_before_) {
2122  next_trampoline_check_ = no_trampoline_pool_before_;
2123  return;
2124  }
2125 
2126  DCHECK(!trampoline_emitted_);
2127  if (tracked_branch_count_ > 0) {
2128  int size = tracked_branch_count_ * kInstrSize;
2129 
2130  // As we are only going to emit the trampoline pool once, prevent any
2131  // further emission.
2132  trampoline_emitted_ = true;
2133  next_trampoline_check_ = kMaxInt;
2134 
2135  // First we emit a jump over the pool, then the trampoline pool itself.
2136  b(size + kInstrSize, LeaveLK);
2137  for (int i = size; i > 0; i -= kInstrSize) {
2138  b(i, LeaveLK);
2139  }
2140 
2141  trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
2142  }
2143 }
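
// Note (editorial, illustrative): the emitted pool is one guard branch that
// jumps over the pool followed by one unconditional-branch slot per tracked
// branch; every slot initially targets the end of the pool and is retargeted
// when its out-of-range conditional branch is bound. The slots help because
// a PPC conditional branch reaches only +/-32 KB while an unconditional b
// reaches +/-32 MB.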
2144 
2145 PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
2146  byte* address, int instructions)
2147  : Assembler(options, address, instructions * kInstrSize + kGap) {
2148  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
2149 }
2150 
2151 PatchingAssembler::~PatchingAssembler() {
2152  // Check that the code was patched as expected.
2153  DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
2154  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
2155 }
2156 
2157 } // namespace internal
2158 } // namespace v8
2159 
2160 #endif // V8_TARGET_ARCH_PPC