37 #include "src/ppc/assembler-ppc.h" 39 #if V8_TARGET_ARCH_PPC 41 #include "src/base/bits.h" 42 #include "src/base/cpu.h" 43 #include "src/code-stubs.h" 44 #include "src/deoptimizer.h" 45 #include "src/macro-assembler.h" 46 #include "src/ppc/assembler-ppc-inl.h" 47 #include "src/string-constants.h" 53 static unsigned CpuFeaturesImpliedByCompiler() {
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Probe for additional features at runtime.
#ifndef USE_SIMULATOR
  base::CPU cpu;
  if (cpu.part() == base::CPU::PPC_POWER9) {
    supported_ |= (1u << MODULO);
  }
#if V8_TARGET_ARCH_PPC64
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
    supported_ |= (1u << VSX);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support.
    supported_ |= (1u << FPU);
  }
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume FP support and the default cache line size.
  supported_ |= (1u << FPU);
#endif
#else  // USE_SIMULATOR
  // The simulator implements all of these features.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
  supported_ |= (1u << VSX);
  supported_ |= (1u << MODULO);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
void CpuFeatures::PrintTarget() {
  const char* ppc_arch = nullptr;

#if V8_TARGET_ARCH_PPC64
  ppc_arch = "ppc64";
#else
  ppc_arch = "ppc";
#endif

  printf("target %s\n", ppc_arch);
}

void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
                                 r8,  r9,  r10, r11, ip,  r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
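// Internal references are kept relative to the code buffer until
// EmitRelocations() runs, so they are the only relocation modes that must be
// re-applied when a code object moves.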
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.
  // Being specially coded on PPC means that it is a lis/ori instruction
  // sequence or is a constant pool entry, and these are always the case
  // inside code objects.
  return true;
}

bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
    return Assembler::IsConstantPoolLoadStart(pc_);
  }
  return false;
}

int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
Operand::Operand(Handle<HeapObject> handle) {
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::EMBEDDED_OBJECT;
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedCode(CodeStub* stub) {
  Operand result(0, RelocInfo::CODE_TARGET);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(stub);
  return result;
}

Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}
MemOperand::MemOperand(Register rn, int32_t offset)
    : ra_(rn), offset_(offset), rb_(no_reg) {}

MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}
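// The first constructor above builds a D-form operand (base register plus a
// 16-bit displacement); the second builds an X-form operand (base register
// plus index register).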
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        break;
      }
      case HeapObjectRequest::kCodeStub: {
        request.code_stub()->set_isolate(isolate);
        object = request.code_stub()->GetCode();
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
      }
    }
    Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
    Address constant_pool = kNullAddress;
    set_target_address_at(pc, constant_pool, object.address(),
                          SKIP_ICACHE_FLUSH);
  }
}
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  // Emit the constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();
  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up the code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}

void Assembler::CodeTargetAlign() { Align(8); }
Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;
}
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}

bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}

bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }

bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }

bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }

Register Assembler::GetRA(Instr instr) {
  return Register::from_code(Instruction::RAValue(instr));
}

Register Assembler::GetRB(Instr instr) {
  return Register::from_code(Instruction::RBValue(instr));
}
#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori).
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five-part load into r12:
  //   lis r12, <hi>; ori r12, r12, ...; rldicr r12, r12, 32, 31;
  //   oris r12, r12, ...; ori r12, r12, <lo>.
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
          (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
          ((instr5 >> 16) == 0x618C));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori).
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instructions are indeed a two-part load into r12:
  //   lis r12, <hi>; ori r12, r12, <lo>.
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
#endif
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}

bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}

bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }

#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
#endif

bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}

bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}

Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}

int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
// Labels refer to positions in the (to be) generated code.  Linked labels
// chain together all forward references that must be patched when the label
// is bound; the chain is terminated by kEndOfChain.
const int kEndOfChain = -4;

// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
};
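// These pseudo-opcodes occupy the primary opcode field (bits 26..31) using
// values the assembler itself never emits, so a placeholder word cannot be
// confused with a real instruction.  The low 26 bits hold the label link as
// a signed instruction-word offset (byte offset >> 2), which target_at() and
// target_at_put() use to walk and patch the chain.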
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // Check which type of branch this is: 16 or 26 bit offset.
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;  // stored as an instruction-word count
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to the next instruction without link: replace with a nop.
        instr = ORI;  // nop: ori 0,0,0
      } else {
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to the next instruction without link: replace with a nop.
        instr = ORI;  // nop: ori 0,0,0
      } else {
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_ + pos), 2);
      patcher.bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 27) & 0x1F);
      Register base = Register::from_code((operands >> 22) & 0x1F);
      int32_t delta =
          (opcode == kUnboundAddLabelLongOffsetOpcode)
              ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
              : (SIGN_EXT_IMM22(operands & kImm22Mask));
      int32_t offset = target_pos + delta;
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_ + pos),
          2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
      patcher.bitwise_add32(dst, base, offset);
      if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_ + pos),
                                kMovInstructionsNoConstantPool);
      // Keep internal references relative until EmitRelocations.
      patcher.bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_ + pos),
                                kPointerSize / kInstrSize);
      // Keep internal references relative until EmitRelocations.
      patcher.dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}
int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  // Check which type of instruction this is: 16 or 26 bit offset.
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 26;
  }

  DCHECK(false);
  return 0;
}
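// Conditional branches can only encode a signed 16-bit displacement, so a
// fixup whose target lies farther away than max_reach_from() allows is
// redirected through a trampoline slot in bind_to(): the short branch jumps
// to the slot, and the slot's unconditional branch (26-bit reach) jumps to
// the real target.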
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}

void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // a label can only be bound once
  bind_to(L, pc_offset());
}
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}
bool Assembler::is_near(Label* L, Condition cond) {
  DCHECK(L->is_bound());
  if (L->is_bound() == false) return false;

  int maxReach = ((cond == al) ? 26 : 16);
  int offset = L->pos() - pc_offset();

  return is_intn(offset, maxReach);
}
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
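// D-form layout: opcode (bits 26..31) | RT (21..25) | RA (16..20) | 16-bit
// immediate (0..15).  The B21/B16 multipliers used throughout this file are
// simply shifts into those fields.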
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}

void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1F;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}

void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}
// Returns the next free trampoline entry from the next trampoline pool.
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // Use the current position to mark the first reference; target_at()
      // converts a zero link back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
// Branch instructions.

void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}

void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}

// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }

// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }

void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}

void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
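// Reach: b() encodes a signed 26-bit byte displacement (about +/-32 MB),
// while bc() encodes a signed 16-bit one (about +/-32 KB).  Both targets
// must be word-aligned, which is why the low AA/LK bits of the offset are
// required to be clear.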
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}

void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}

void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}

void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}

void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}

void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}

void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);
}
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}

void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}

void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}

void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}

void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}

void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}

void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}

void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}

void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}

void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}

void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}

// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}

// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}

// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}

// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}

// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use li instead to show intent
  d_form(ADDI, dst, src, imm.immediate(), true);
}

void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.immediate(), true);
}

void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}

void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}

void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}

void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}

void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}

void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save the position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}

void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.immediate(), true);
}

void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}

// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}

void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}

void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHA, dst, src.ra(), src.offset(), true);
}

void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STB, dst, src.ra(), src.offset(), true);
}

void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STH, dst, src.ra(), src.offset(), true);
}

void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STW, dst, src.ra(), src.offset(), true);
}

void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STWU, dst, src.ra(), src.offset(), true);
}

void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}

void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}

void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}

void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}

void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}

void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}

void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}

void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}

void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}

void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);
}

void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);
}

void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}

void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}

void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}

void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}

void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}

void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}

void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}

void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif  // V8_TARGET_ARCH_PPC64
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK_EQ(pc_offset(), 0);
    emit_label_addr(&instructions);
    dp(0);
    dp(0);
    bind(&instructions);
  }
}
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }
  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
#else
  bool allowOverflow = !(canOptimize || dst == r0);
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer a shorter mov-immediate sequence over the longer overflow
    // access sequence.
    return false;
  }

  return true;
}
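// In short: a constant-pool load is chosen only when the value needs
// relocation or is too wide for a cheap immediate sequence, and the pool is
// reachable without the overflow sequence (or overflow is allowed for this
// destination).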
void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer(space_needed);
  }
}
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value;
  if (src.IsHeapObjectRequest()) {
    RequestHeapObject(src.heap_object_request());
    value = 0;
  } else {
    value = src.immediate();
  }
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (!src.IsHeapObjectRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xFFFF);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
  int32_t hi_32 = static_cast<int32_t>(value >> 32);
  int32_t lo_32 = static_cast<int32_t>(value);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
  lo_word = static_cast<int>(lo_32 & 0xFFFF);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
#else
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
#endif
}
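// On 64-bit targets this is a fixed five-instruction sequence
// (lis/ori/sldi/oris/ori) regardless of the value, so patching code such as
// Is64BitLoadIntoR12() can rely on its shape and length.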
void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();  // keep the sequence a fixed two instructions long
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
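// The hi_word++ compensates for addic sign-extending the low half: e.g. for
// value 0x12348000 the low 16 bits become -0x8000, so the high half must be
// 0x1235 to make 0x12350000 + (-0x8000) = 0x12348000.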
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode an internal reference to the unbound label.  We use a dummy
    // opcode such that it won't collide with any opcode used in the
    // assembler.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched with a
    // 2-instruction mov sequence that loads the destination register with
    // the position of the label from the position of the instructions.
    //
    // Target will be patched at the bind below.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode an internal reference to the unbound label.  We use a dummy
    // opcode such that it won't collide with any opcode used in the
    // assembler.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    BlockTrampolinePoolScope block_trampoline_pool(this);

    emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
                          : kUnboundAddLabelLongOffsetOpcode) |
         (link & kImm26Mask));
    emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));

    if (!is_int22(delta)) {
      emit(delta);
    }
  }
}
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode an internal reference to the unbound label.  We use a dummy
    // opcode such that it won't collide with any opcode used in the
    // assembler.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched with a
    // multi-instruction mov sequence that loads the destination register
    // with the address of the label.
    //
    // Target will be patched at the bind below.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK_GE(kMovInstructionsNoConstantPool, 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode an internal reference to the unbound label.  We use a dummy
    // opcode such that it won't collide with any opcode used in the
    // assembler.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched as a jump
    // table entry containing the label address.  Target will be patched at
    // the bind below.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    nop();
#endif
  }
}

// Special register instructions

void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}

void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);
}

void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);
}

void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);
}

void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}

void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}

void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
#if V8_TARGET_ARCH_PPC64
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}

void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}

void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}

void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}

void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif
// Exception-generating instructions and debugging support.

void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    emit(kBreakpoint);
    bind(&skip);
  } else {
    emit(kBreakpoint);
  }
}

void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}

void Assembler::sync() { emit(EXT2 | SYNC); }

void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }

void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}

void Assembler::isync() { emit(EXT1 | ISYNC); }
// Floating point support

void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}

void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}

void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}

void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}

void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}

void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}

void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}

void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}

void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}

void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}

void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}

void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}

void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
// Pseudo functions.
void Assembler::nop(int type) {
  Register reg = r0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = r0;
      break;
    case GROUP_ENDING_NOP:
      reg = r2;
      break;
    case DEBUG_BREAK_NOP:
      reg = r3;
      break;
    default:
      UNIMPLEMENTED();
  }

  ori(reg, reg, Operand::Zero());
}

bool Assembler::IsNop(Instr instr, int type) {
  int reg = 0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = 0;
      break;
    case GROUP_ENDING_NOP:
      reg = 2;
      break;
    case DEBUG_BREAK_NOP:
      reg = 3;
      break;
    default:
      UNIMPLEMENTED();
  }

  return instr == (ORI | reg * B21 | reg * B16);
}
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }

  // Some internal data structures overflow for very large buffers,
  // so they must ensure that kMaximalBufferSize is not too large.
  if (desc.buffer_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);
}
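// Growth policy example: a 256 KB buffer doubles to 512 KB, while a 4 MB
// buffer grows only to 5 MB; growth is capped by kMaximalBufferSize.  No
// reference fix-ups are needed here because internal references and deferred
// relocation entries stay buffer-relative until EmitRelocations().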
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}

void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}

void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}

void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (!ShouldRecordRelocInfo(rmode)) return;
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = reinterpret_cast<Address>(buffer_) + it->position();
    RelocInfo rinfo(pc, rmode, it->data(), Code());

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
      Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
      set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // both of which are checked here.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit the trampoline once, we need to prevent
    // any further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit a jump over the pool, then we emit the trampoline pool
    // itself.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
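// Pool layout: one unconditional branch over the pool, followed by one
// unconditional-branch slot per tracked branch.  bind_to() later retargets
// individual slots via target_at_put() whenever a fixup turns out to be out
// of range for its original branch.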
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     byte* address, int instructions)
    : Assembler(options, address, instructions * kInstrSize + kGap) {
  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}

PatchingAssembler::~PatchingAssembler() {
  // Check that the code was patched as expected.
  DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC