37 #include "src/arm/assembler-arm.h" 39 #if V8_TARGET_ARCH_ARM 41 #include "src/arm/assembler-arm-inl.h" 42 #include "src/assembler-inl.h" 43 #include "src/base/bits.h" 44 #include "src/base/cpu.h" 45 #include "src/code-stubs.h" 46 #include "src/deoptimizer.h" 47 #include "src/macro-assembler.h" 48 #include "src/objects-inl.h" 49 #include "src/string-constants.h" 54 static const unsigned kArmv6 = 0u;
55 static const unsigned kArmv7 = kArmv6 | (1u << ARMv7);
56 static const unsigned kArmv7WithSudiv = kArmv7 | (1u << ARMv7_SUDIV);
57 static const unsigned kArmv8 = kArmv7WithSudiv | (1u << ARMv8);
59 static unsigned CpuFeaturesFromCommandLine() {
61 if (strcmp(FLAG_arm_arch,
"armv8") == 0) {
63 }
else if (strcmp(FLAG_arm_arch,
"armv7+sudiv") == 0) {
64 result = kArmv7WithSudiv;
65 }
else if (strcmp(FLAG_arm_arch,
"armv7") == 0) {
67 }
else if (strcmp(FLAG_arm_arch,
"armv6") == 0) {
70 fprintf(stderr,
"Error: unrecognised value for --arm-arch ('%s').\n",
73 "Supported values are: armv8\n" 83 if (FLAG_enable_armv7.has_value || FLAG_enable_vfp3.has_value ||
84 FLAG_enable_32dregs.has_value || FLAG_enable_neon.has_value ||
85 FLAG_enable_sudiv.has_value || FLAG_enable_armv8.has_value) {
88 bool enable_armv7 = (result & (1u << ARMv7)) != 0;
89 bool enable_vfp3 = (result & (1u << ARMv7)) != 0;
90 bool enable_32dregs = (result & (1u << ARMv7)) != 0;
91 bool enable_neon = (result & (1u << ARMv7)) != 0;
92 bool enable_sudiv = (result & (1u << ARMv7_SUDIV)) != 0;
93 bool enable_armv8 = (result & (1u << ARMv8)) != 0;
94 if (FLAG_enable_armv7.has_value) {
96 "Warning: --enable_armv7 is deprecated. " 97 "Use --arm_arch instead.\n");
98 enable_armv7 = FLAG_enable_armv7.value;
100 if (FLAG_enable_vfp3.has_value) {
102 "Warning: --enable_vfp3 is deprecated. " 103 "Use --arm_arch instead.\n");
104 enable_vfp3 = FLAG_enable_vfp3.value;
106 if (FLAG_enable_32dregs.has_value) {
108 "Warning: --enable_32dregs is deprecated. " 109 "Use --arm_arch instead.\n");
110 enable_32dregs = FLAG_enable_32dregs.value;
112 if (FLAG_enable_neon.has_value) {
114 "Warning: --enable_neon is deprecated. " 115 "Use --arm_arch instead.\n");
116 enable_neon = FLAG_enable_neon.value;
118 if (FLAG_enable_sudiv.has_value) {
120 "Warning: --enable_sudiv is deprecated. " 121 "Use --arm_arch instead.\n");
122 enable_sudiv = FLAG_enable_sudiv.value;
124 if (FLAG_enable_armv8.has_value) {
126 "Warning: --enable_armv8 is deprecated. " 127 "Use --arm_arch instead.\n");
128 enable_armv8 = FLAG_enable_armv8.value;
134 enable_32dregs =
true;
138 if (enable_armv7 && enable_vfp3 && enable_32dregs && enable_neon) {
143 result = kArmv7WithSudiv;
164 static constexpr
unsigned CpuFeaturesFromCompiler() {
169 #if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_ARMV7_INSTRUCTIONS) 170 #error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_ARMV7_INSTRUCTIONS" 172 #if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_SUDIV) 173 #error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_SUDIV" 175 #if defined(CAN_USE_ARMV7_INSTRUCTIONS) != defined(CAN_USE_VFP3_INSTRUCTIONS) 178 #error "CAN_USE_ARMV7_INSTRUCTIONS should match CAN_USE_VFP3_INSTRUCTIONS" 180 #if defined(CAN_USE_NEON) && !defined(CAN_USE_ARMV7_INSTRUCTIONS) 181 #error "CAN_USE_NEON should imply CAN_USE_ARMV7_INSTRUCTIONS" 185 #if defined(CAN_USE_ARMV8_INSTRUCTIONS) && \ 186 defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \ 187 defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS) 189 #elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \ 190 defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS) 191 return kArmv7WithSudiv;
192 #elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \ 193 defined(CAN_USE_VFP3_INSTRUCTIONS) 201 void CpuFeatures::ProbeImpl(
bool cross_compile) {
202 dcache_line_size_ = 64;
204 unsigned command_line = CpuFeaturesFromCommandLine();
207 supported_ |= command_line & CpuFeaturesFromCompiler();
213 supported_ |= command_line;
219 unsigned runtime = kArmv6;
221 if (cpu.has_neon() && cpu.has_vfp3_d32()) {
222 DCHECK(cpu.has_vfp3());
224 if (cpu.has_idiva()) {
225 runtime |= kArmv7WithSudiv;
226 if (cpu.architecture() >= 8) {
236 supported_ |= command_line & CpuFeaturesFromCompiler();
237 supported_ |= command_line & runtime;
242 if (cpu.implementer() == base::CPU::ARM &&
243 (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
244 cpu.part() == base::CPU::ARM_CORTEX_A9)) {
245 dcache_line_size_ = 32;
249 DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7));
250 DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV));
254 void CpuFeatures::PrintTarget() {
255 const char* arm_arch =
nullptr;
256 const char* arm_target_type =
"";
257 const char* arm_no_probe =
"";
258 const char* arm_fpu =
"";
259 const char* arm_thumb =
"";
260 const char* arm_float_abi =
nullptr;
263 arm_target_type =
" simulator";
266 #if defined ARM_TEST_NO_FEATURE_PROBE 267 arm_no_probe =
" noprobe";
270 #if defined CAN_USE_ARMV8_INSTRUCTIONS 272 #elif defined CAN_USE_ARMV7_INSTRUCTIONS 278 #if defined CAN_USE_NEON 280 #elif defined CAN_USE_VFP3_INSTRUCTIONS 281 # if defined CAN_USE_VFP32DREGS 284 arm_fpu =
" vfp3-d16";
291 arm_float_abi = base::OS::ArmUsingHardFloat() ?
"hard" :
"softfp";
292 #elif USE_EABI_HARDFLOAT 293 arm_float_abi =
"hard";
295 arm_float_abi =
"softfp";
298 #if defined __arm__ && (defined __thumb__) || (defined __thumb2__) 299 arm_thumb =
" thumb";
302 printf(
"target%s%s %s%s%s %s\n",
303 arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
308 void CpuFeatures::PrintFeatures() {
309 printf(
"ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d",
310 CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
311 CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
312 CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV));
314 bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
315 #elif USE_EABI_HARDFLOAT 316 bool eabi_hardfloat =
true;
318 bool eabi_hardfloat =
false;
320 printf(
" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
328 const int RelocInfo::kApplyMask =
329 RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
331 bool RelocInfo::IsCodedSpecially() {
338 bool RelocInfo::IsInConstantPool() {
339 return Assembler::is_constant_pool_load(pc_);
342 int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
343 DCHECK(IsRuntimeEntry(rmode_));
344 return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
347 uint32_t RelocInfo::wasm_call_tag()
const {
348 DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
350 Assembler::target_address_at(pc_, constant_pool_));
357 Operand::Operand(Handle<HeapObject> handle) {
359 value_.immediate =
static_cast<intptr_t
>(handle.address());
360 rmode_ = RelocInfo::EMBEDDED_OBJECT;
364 Operand::Operand(Register rm, ShiftOp shift_op,
int shift_imm) {
365 DCHECK(is_uint5(shift_imm));
369 shift_op_ = shift_op;
370 shift_imm_ = shift_imm & 31;
372 if ((shift_op == ROR) && (shift_imm == 0)) {
376 }
else if (shift_op == RRX) {
378 DCHECK_EQ(shift_imm, 0);
385 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
386 DCHECK(shift_op != RRX);
389 shift_op_ = shift_op;
393 Operand Operand::EmbeddedNumber(
double value) {
395 if (DoubleToSmiInteger(value, &smi))
return Operand(Smi::FromInt(smi));
396 Operand result(0, RelocInfo::EMBEDDED_OBJECT);
397 result.is_heap_object_request_ =
true;
398 result.value_.heap_object_request = HeapObjectRequest(value);
402 Operand Operand::EmbeddedCode(CodeStub* stub) {
403 Operand result(0, RelocInfo::CODE_TARGET);
404 result.is_heap_object_request_ =
true;
405 result.value_.heap_object_request = HeapObjectRequest(stub);
409 Operand Operand::EmbeddedStringConstant(
const StringConstantBase* str) {
410 Operand result(0, RelocInfo::EMBEDDED_OBJECT);
411 result.is_heap_object_request_ =
true;
412 result.value_.heap_object_request = HeapObjectRequest(str);
416 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am)
417 : rn_(rn), rm_(no_reg), offset_(offset), am_(am) {
421 if (am == Offset) DCHECK_LE(0, offset);
422 if (am == NegOffset) DCHECK_GE(0, offset);
426 MemOperand::MemOperand(Register rn, Register rm, AddrMode am)
427 : rn_(rn), rm_(rm), shift_op_(LSL), shift_imm_(0), am_(am) {}
429 MemOperand::MemOperand(Register rn, Register rm, ShiftOp shift_op,
430 int shift_imm, AddrMode am)
434 shift_imm_(shift_imm & 31),
436 DCHECK(is_uint5(shift_imm));
439 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am,
int align)
440 : rn_(rn), rm_(am == Offset ? pc : sp) {
441 DCHECK((am == Offset) || (am == PostIndex));
445 NeonMemOperand::NeonMemOperand(Register rn, Register rm,
int align)
450 void NeonMemOperand::SetAlignment(
int align) {
470 void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
471 DCHECK_IMPLIES(isolate ==
nullptr, heap_object_requests_.empty());
472 for (
auto& request : heap_object_requests_) {
473 Handle<HeapObject> object;
474 switch (request.kind()) {
475 case HeapObjectRequest::kHeapNumber:
477 isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
479 case HeapObjectRequest::kCodeStub:
480 request.code_stub()->set_isolate(isolate);
481 object = request.code_stub()->GetCode();
483 case HeapObjectRequest::kStringConstant: {
484 const StringConstantBase* str = request.string();
486 object = str->AllocateStringConstant(isolate);
490 Address pc =
reinterpret_cast<Address
>(buffer_) + request.offset();
491 Memory<Address>(constant_pool_entry_address(pc, 0 )) =
501 const Instr kPushRegPattern = al | B26 | 4 | NegPreIndex | sp.code() * B16;
504 const Instr kPopRegPattern = al | B26 | L | 4 | PostIndex | sp.code() * B16;
506 const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
507 const Instr kLdrPCImmedPattern = 5 * B24 | L | pc.code() * B16;
509 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
510 const Instr kVldrDPCPattern = 13 * B24 | L | pc.code() * B16 | 11 * B8;
512 const Instr kBlxRegMask =
513 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
514 const Instr kBlxRegPattern =
515 B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
516 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
517 const Instr kMovMvnMask = 0x6D * B21 | 0xF * B16;
518 const Instr kMovMvnPattern = 0xD * B21;
519 const Instr kMovMvnFlip = B22;
520 const Instr kMovLeaveCCMask = 0xDFF * B16;
521 const Instr kMovLeaveCCPattern = 0x1A0 * B16;
522 const Instr kMovwPattern = 0x30 * B20;
523 const Instr kMovtPattern = 0x34 * B20;
524 const Instr kMovwLeaveCCFlip = 0x5 * B21;
525 const Instr kMovImmedMask = 0x7F * B21;
526 const Instr kMovImmedPattern = 0x1D * B21;
527 const Instr kOrrImmedMask = 0x7F * B21;
528 const Instr kOrrImmedPattern = 0x1C * B21;
529 const Instr kCmpCmnMask = 0xDD * B20 | 0xF * B12;
530 const Instr kCmpCmnPattern = 0x15 * B20;
531 const Instr kCmpCmnFlip = B21;
532 const Instr kAddSubFlip = 0x6 * B21;
533 const Instr kAndBicFlip = 0xE * B21;
536 const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
537 const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
538 const Instr kLdrRegFpNegOffsetPattern =
539 al | B26 | L | NegOffset | fp.code() * B16;
540 const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
541 const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
543 Assembler::Assembler(
const AssemblerOptions& options,
void* buffer,
545 : AssemblerBase(options, buffer, buffer_size),
546 pending_32_bit_constants_(),
547 scratch_register_list_(ip.bit()) {
548 pending_32_bit_constants_.reserve(kMinNumPendingConstants);
549 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
550 next_buffer_check_ = 0;
551 const_pool_blocked_nesting_ = 0;
552 no_const_pool_before_ = 0;
553 first_const_pool_32_use_ = -1;
555 if (CpuFeatures::IsSupported(VFP32DREGS)) {
559 EnableCpuFeature(VFP32DREGS);
562 scratch_vfp_register_list_ = d14.ToVfpRegList() | d15.ToVfpRegList();
566 scratch_vfp_register_list_ = d14.ToVfpRegList();
570 Assembler::~Assembler() {
571 DCHECK_EQ(const_pool_blocked_nesting_, 0);
574 void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
576 int constant_pool_offset = 0;
577 CheckConstPool(
true,
false);
578 DCHECK(pending_32_bit_constants_.empty());
580 AllocateAndInstallRequestedHeapObjects(isolate);
583 desc->buffer = buffer_;
584 desc->buffer_size = buffer_size_;
585 desc->instr_size = pc_offset();
586 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
587 desc->constant_pool_size =
588 (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
590 desc->unwinding_info_size = 0;
591 desc->unwinding_info =
nullptr;
595 void Assembler::Align(
int m) {
596 DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
597 DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
598 while ((pc_offset() & (m - 1)) != 0) {
604 void Assembler::CodeTargetAlign() {
610 Condition Assembler::GetCondition(Instr instr) {
611 return Instruction::ConditionField(instr);
614 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
615 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
619 bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
620 return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
624 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
625 DCHECK(IsLdrRegisterImmediate(instr));
626 bool positive = (instr & B23) == B23;
627 int offset = instr & kOff12Mask;
628 return positive ? offset : -offset;
632 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
633 DCHECK(IsVldrDRegisterImmediate(instr));
634 bool positive = (instr & B23) == B23;
635 int offset = instr & kOff8Mask;
637 return positive ? offset : -offset;
641 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr,
int offset) {
642 DCHECK(IsLdrRegisterImmediate(instr));
643 bool positive = offset >= 0;
644 if (!positive) offset = -offset;
645 DCHECK(is_uint12(offset));
647 instr = (instr & ~B23) | (positive ? B23 : 0);
649 return (instr & ~kOff12Mask) | offset;
653 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr,
int offset) {
654 DCHECK(IsVldrDRegisterImmediate(instr));
655 DCHECK((offset & ~3) == offset);
656 bool positive = offset >= 0;
657 if (!positive) offset = -offset;
658 DCHECK(is_uint10(offset));
660 instr = (instr & ~B23) | (positive ? B23 : 0);
662 return (instr & ~kOff8Mask) | (offset >> 2);
666 bool Assembler::IsStrRegisterImmediate(Instr instr) {
667 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
671 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr,
int offset) {
672 DCHECK(IsStrRegisterImmediate(instr));
673 bool positive = offset >= 0;
674 if (!positive) offset = -offset;
675 DCHECK(is_uint12(offset));
677 instr = (instr & ~B23) | (positive ? B23 : 0);
679 return (instr & ~kOff12Mask) | offset;
683 bool Assembler::IsAddRegisterImmediate(Instr instr) {
684 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
688 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr,
int offset) {
689 DCHECK(IsAddRegisterImmediate(instr));
690 DCHECK_GE(offset, 0);
691 DCHECK(is_uint12(offset));
693 return (instr & ~kOff12Mask) | offset;
697 Register Assembler::GetRd(Instr instr) {
698 return Register::from_code(Instruction::RdValue(instr));
702 Register Assembler::GetRn(Instr instr) {
703 return Register::from_code(Instruction::RnValue(instr));
707 Register Assembler::GetRm(Instr instr) {
708 return Register::from_code(Instruction::RmValue(instr));
712 bool Assembler::IsPush(Instr instr) {
713 return ((instr & ~kRdMask) == kPushRegPattern);
717 bool Assembler::IsPop(Instr instr) {
718 return ((instr & ~kRdMask) == kPopRegPattern);
722 bool Assembler::IsStrRegFpOffset(Instr instr) {
723 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
727 bool Assembler::IsLdrRegFpOffset(Instr instr) {
728 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
732 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
733 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
737 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
738 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
742 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
745 return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
749 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
752 return (instr & kVldrDPCMask) == kVldrDPCPattern;
756 bool Assembler::IsBlxReg(Instr instr) {
759 return (instr & kBlxRegMask) == kBlxRegPattern;
763 bool Assembler::IsBlxIp(Instr instr) {
766 return instr == kBlxIp;
770 bool Assembler::IsTstImmediate(Instr instr) {
771 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
776 bool Assembler::IsCmpRegister(Instr instr) {
777 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
782 bool Assembler::IsCmpImmediate(Instr instr) {
783 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
788 Register Assembler::GetCmpImmediateRegister(Instr instr) {
789 DCHECK(IsCmpImmediate(instr));
794 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
795 DCHECK(IsCmpImmediate(instr));
796 return instr & kOff12Mask;
818 int Assembler::target_at(
int pos) {
819 Instr instr = instr_at(pos);
820 if (is_uint24(instr)) {
824 DCHECK_EQ(5 * B25, instr & 7 * B25);
825 int imm26 = ((instr & kImm24Mask) << 8) >> 6;
826 if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
827 ((instr & B24) != 0)) {
831 return pos + Instruction::kPcLoadDelta + imm26;
835 void Assembler::target_at_put(
int pos,
int target_pos) {
836 Instr instr = instr_at(pos);
837 if (is_uint24(instr)) {
838 DCHECK(target_pos == pos || target_pos >= 0);
848 Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
851 DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
852 if (!CpuFeatures::IsSupported(ARMv7)) {
853 DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
865 uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
866 DCHECK(is_uint24(target24));
867 if (is_uint8(target24)) {
870 PatchingAssembler patcher(options(),
871 reinterpret_cast<byte*>(buffer_ + pos), 1);
872 patcher.mov(dst, Operand(target24));
874 uint16_t target16_0 = target24 & kImm16Mask;
875 uint16_t target16_1 = target24 >> 16;
876 if (CpuFeatures::IsSupported(ARMv7)) {
878 if (target16_1 == 0) {
879 PatchingAssembler patcher(options(),
880 reinterpret_cast<byte*>(buffer_ + pos), 1);
881 CpuFeatureScope scope(&patcher, ARMv7);
882 patcher.movw(dst, target16_0);
884 PatchingAssembler patcher(options(),
885 reinterpret_cast<byte*>(buffer_ + pos), 2);
886 CpuFeatureScope scope(&patcher, ARMv7);
887 patcher.movw(dst, target16_0);
888 patcher.movt(dst, target16_1);
892 uint8_t target8_0 = target16_0 & kImm8Mask;
893 uint8_t target8_1 = target16_0 >> 8;
894 uint8_t target8_2 = target16_1 & kImm8Mask;
895 if (target8_2 == 0) {
896 PatchingAssembler patcher(options(),
897 reinterpret_cast<byte*>(buffer_ + pos), 2);
898 patcher.mov(dst, Operand(target8_0));
899 patcher.orr(dst, dst, Operand(target8_1 << 8));
901 PatchingAssembler patcher(options(),
902 reinterpret_cast<byte*>(buffer_ + pos), 3);
903 patcher.mov(dst, Operand(target8_0));
904 patcher.orr(dst, dst, Operand(target8_1 << 8));
905 patcher.orr(dst, dst, Operand(target8_2 << 16));
911 int imm26 = target_pos - (pos + Instruction::kPcLoadDelta);
912 DCHECK_EQ(5 * B25, instr & 7 * B25);
913 if (Instruction::ConditionField(instr) == kSpecialCondition) {
915 DCHECK_EQ(0, imm26 & 1);
916 instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
918 DCHECK_EQ(0, imm26 & 3);
919 instr &= ~kImm24Mask;
921 int imm24 = imm26 >> 2;
922 DCHECK(is_int24(imm24));
923 instr_at_put(pos, instr | (imm24 & kImm24Mask));
926 void Assembler::print(
const Label* L) {
927 if (L->is_unused()) {
928 PrintF(
"unused label\n");
929 }
else if (L->is_bound()) {
930 PrintF(
"bound label to %d\n", L->pos());
931 }
else if (L->is_linked()) {
934 PrintF(
"unbound label");
935 while (l.is_linked()) {
936 PrintF(
"@ %d ", l.pos());
937 Instr instr = instr_at(l.pos());
938 if ((instr & ~kImm24Mask) == 0) {
941 DCHECK_EQ(instr & 7 * B25, 5 * B25);
942 Condition cond = Instruction::ConditionField(instr);
945 if (cond == kSpecialCondition) {
949 if ((instr & B24) != 0)
955 case eq: c =
"eq";
break;
956 case ne: c =
"ne";
break;
957 case hs: c =
"hs";
break;
958 case lo: c =
"lo";
break;
959 case mi: c =
"mi";
break;
960 case pl: c =
"pl";
break;
961 case vs: c =
"vs";
break;
962 case vc: c =
"vc";
break;
963 case hi: c =
"hi";
break;
964 case ls: c =
"ls";
break;
965 case ge: c =
"ge";
break;
966 case lt: c =
"lt";
break;
967 case gt: c =
"gt";
break;
968 case le: c =
"le";
break;
969 case al: c =
"";
break;
975 PrintF(
"%s%s\n", b, c);
980 PrintF(
"label in inconsistent state (pos = %d)\n", L->pos_);
985 void Assembler::bind_to(Label* L,
int pos) {
986 DCHECK(0 <= pos && pos <= pc_offset());
987 while (L->is_linked()) {
988 int fixup_pos = L->pos();
990 target_at_put(fixup_pos, pos);
996 if (pos > last_bound_pos_)
997 last_bound_pos_ = pos;
1001 void Assembler::bind(Label* L) {
1002 DCHECK(!L->is_bound());
1003 bind_to(L, pc_offset());
1007 void Assembler::next(Label* L) {
1008 DCHECK(L->is_linked());
1009 int link = target_at(L->pos());
1010 if (link == L->pos()) {
1029 for (
int rot = 0; rot < 16; rot++) {
1030 uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
1031 if ((imm8 <= 0xFF)) {
1039 if (instr !=
nullptr) {
1040 if ((*instr & kMovMvnMask) == kMovMvnPattern) {
1041 if (FitsShifter(~imm32, rotate_imm, immed_8,
nullptr)) {
1042 *instr ^= kMovMvnFlip;
1044 }
else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
1045 if (CpuFeatures::IsSupported(ARMv7)) {
1046 if (imm32 < 0x10000) {
1047 *instr ^= kMovwLeaveCCFlip;
1048 *instr |= Assembler::EncodeMovwImmediate(imm32);
1049 *rotate_imm = *immed_8 = 0;
1054 }
else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
1055 if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
nullptr)) {
1056 *instr ^= kCmpCmnFlip;
1060 Instr alu_insn = (*instr & kALUMask);
1061 if (alu_insn == ADD ||
1063 if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
1065 *instr ^= kAddSubFlip;
1068 }
else if (alu_insn == AND ||
1070 if (FitsShifter(~imm32, rotate_imm, immed_8,
nullptr)) {
1071 *instr ^= kAndBicFlip;
1084 bool MustOutputRelocInfo(RelocInfo::Mode rmode,
const Assembler* assembler) {
1085 if (RelocInfo::IsOnlyForSerializer(rmode)) {
1086 if (assembler->predictable_code_size())
return true;
1087 return assembler->options().record_reloc_info_for_serialization;
1088 }
else if (RelocInfo::IsNone(rmode)) {
1094 bool UseMovImmediateLoad(
const Operand& x,
const Assembler* assembler) {
1095 DCHECK_NOT_NULL(assembler);
1096 if (x.MustOutputRelocInfo(assembler)) {
1101 return CpuFeatures::IsSupported(ARMv7);
1107 bool Operand::MustOutputRelocInfo(
const Assembler* assembler)
const {
1108 return v8::internal::MustOutputRelocInfo(rmode_, assembler);
1111 int Operand::InstructionsRequired(
const Assembler* assembler,
1112 Instr instr)
const {
1113 DCHECK_NOT_NULL(assembler);
1114 if (rm_.is_valid())
return 1;
1116 if (MustOutputRelocInfo(assembler) ||
1117 !FitsShifter(immediate(), &dummy1, &dummy2, &instr)) {
1122 if (UseMovImmediateLoad(*
this, assembler)) {
1123 DCHECK(CpuFeatures::IsSupported(ARMv7));
1130 if ((instr & ~kCondMask) != 13 * B21) {
1136 return instructions;
1144 void Assembler::Move32BitImmediate(Register rd,
const Operand& x,
1146 if (UseMovImmediateLoad(x,
this)) {
1147 CpuFeatureScope scope(
this, ARMv7);
1151 DCHECK(!x.MustOutputRelocInfo(
this));
1152 UseScratchRegisterScope temps(
this);
1154 Register target = rd != pc && rd != sp ? rd : temps.Acquire();
1156 movw(target, imm32 & 0xFFFF, cond);
1157 movt(target, imm32 >> 16, cond);
1158 if (target.code() != rd.code()) {
1159 mov(rd, target, LeaveCC, cond);
1163 if (x.IsHeapObjectRequest()) {
1164 RequestHeapObject(x.heap_object_request());
1167 immediate = x.immediate();
1169 ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate);
1170 ldr_pcrel(rd, 0, cond);
1174 void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
1177 uint32_t opcode = instr & kOpCodeMask;
1178 bool set_flags = (instr & S) != 0;
1179 DCHECK((opcode == ADC) || (opcode == ADD) || (opcode == AND) ||
1180 (opcode == BIC) || (opcode == EOR) || (opcode == ORR) ||
1181 (opcode == RSB) || (opcode == RSC) || (opcode == SBC) ||
1182 (opcode == SUB) || (opcode == CMN) || (opcode == CMP) ||
1183 (opcode == TEQ) || (opcode == TST) || (opcode == MOV) ||
1186 DCHECK(rd.is_valid() || (opcode == CMN) || (opcode == CMP) ||
1187 (opcode == TEQ) || (opcode == TST));
1189 DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN));
1190 DCHECK(rd.is_valid() || rn.is_valid());
1191 DCHECK_EQ(instr & ~(kCondMask | kOpCodeMask | S), 0);
1192 if (!AddrMode1TryEncodeOperand(&instr, x)) {
1193 DCHECK(x.IsImmediate());
1195 DCHECK(opcode == (instr & kOpCodeMask));
1196 UseScratchRegisterScope temps(
this);
1197 Condition cond = Instruction::ConditionField(instr);
1198 if ((opcode == MOV) && !set_flags) {
1201 DCHECK(!rn.is_valid());
1202 Move32BitImmediate(rd, x, cond);
1203 }
else if ((opcode == ADD) && !set_flags && (rd == rn) &&
1204 !temps.CanAcquire()) {
1217 int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
1218 uint32_t mask = (0xFF << trailing_zeroes);
1219 add(rd, rd, Operand(imm & mask), LeaveCC, cond);
1221 }
while (!ImmediateFitsAddrMode1Instruction(imm));
1222 add(rd, rd, Operand(imm), LeaveCC, cond);
1228 Register scratch = (rd.is_valid() && rd != rn && rd != pc && rd != sp)
1231 mov(scratch, x, LeaveCC, cond);
1232 AddrMode1(instr, rd, rn, Operand(scratch));
1236 if (!rd.is_valid()) {
1238 emit(instr | rn.code() * B16);
1239 }
else if (!rn.is_valid()) {
1242 DCHECK(!x.IsRegisterShiftedRegister() || rd != pc);
1243 emit(instr | rd.code() * B12);
1245 emit(instr | rn.code() * B16 | rd.code() * B12);
1247 if (rn == pc || x.rm_ == pc) {
1249 BlockConstPoolFor(1);
1253 bool Assembler::AddrMode1TryEncodeOperand(Instr* instr,
const Operand& x) {
1254 if (x.IsImmediate()) {
1258 if (x.MustOutputRelocInfo(
this) ||
1259 !FitsShifter(x.immediate(), &rotate_imm, &immed_8, instr)) {
1263 *instr |= I | rotate_imm * B8 | immed_8;
1264 }
else if (x.IsImmediateShiftedRegister()) {
1265 *instr |= x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code();
1267 DCHECK(x.IsRegisterShiftedRegister());
1269 DCHECK(x.rm_ != pc && x.rs_ != pc);
1270 *instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code();
1276 void Assembler::AddrMode2(Instr instr, Register rd,
const MemOperand& x) {
1277 DCHECK((instr & ~(kCondMask | B | L)) == B26);
1280 DCHECK(x.rn_ != pc);
1282 if (!x.rm_.is_valid()) {
1284 int offset_12 = x.offset_;
1285 if (offset_12 < 0) {
1286 offset_12 = -offset_12;
1289 if (!is_uint12(offset_12)) {
1292 UseScratchRegisterScope temps(
this);
1294 bool is_load = (instr & L) == L;
1295 Register scratch = (is_load && rd != x.rn_ && rd != pc && rd != sp)
1298 mov(scratch, Operand(x.offset_), LeaveCC,
1299 Instruction::ConditionField(instr));
1300 AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
1303 DCHECK_GE(offset_12, 0);
1309 DCHECK(x.rm_ != pc);
1310 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
1312 DCHECK((am & (P | W)) == P || x.rn_ != pc);
1313 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
1316 void Assembler::AddrMode3(Instr instr, Register rd,
const MemOperand& x) {
1317 DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
1318 DCHECK(x.rn_.is_valid());
1321 DCHECK(x.rn_ != pc);
1323 bool is_load = (instr & L) == L;
1324 if (!x.rm_.is_valid()) {
1326 int offset_8 = x.offset_;
1328 offset_8 = -offset_8;
1331 if (!is_uint8(offset_8)) {
1334 UseScratchRegisterScope temps(
this);
1336 Register scratch = (is_load && rd != x.rn_ && rd != pc && rd != sp)
1339 mov(scratch, Operand(x.offset_), LeaveCC,
1340 Instruction::ConditionField(instr));
1341 AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
1344 DCHECK_GE(offset_8, 0);
1345 instr |= B | (offset_8 >> 4) * B8 | (offset_8 & 0xF);
1346 }
else if (x.shift_imm_ != 0) {
1349 UseScratchRegisterScope temps(
this);
1352 (is_load && rd != x.rn_ && rd != pc && rd != sp) ? rd : temps.Acquire();
1353 mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
1354 Instruction::ConditionField(instr));
1355 AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
1359 DCHECK((am & (P | W)) == P || x.rm_ != pc);
1360 instr |= x.rm_.code();
1362 DCHECK((am & (P | W)) == P || x.rn_ != pc);
1363 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
1366 void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
1367 DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
1370 emit(instr | rn.code()*B16 | rl);
1373 void Assembler::AddrMode5(Instr instr, CRegister crd,
const MemOperand& x) {
1375 DCHECK_EQ((B27 | B26),
1376 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
1377 DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
1379 int offset_8 = x.offset_;
1380 DCHECK_EQ(offset_8 & 3, 0);
1383 offset_8 = -offset_8;
1386 DCHECK(is_uint8(offset_8));
1387 DCHECK((am & (P | W)) == P || x.rn_ != pc);
1393 DCHECK_GE(offset_8, 0);
1394 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
1398 int Assembler::branch_offset(Label* L) {
1400 if (L->is_bound()) {
1401 target_pos = L->pos();
1403 if (L->is_linked()) {
1405 target_pos = L->pos();
1408 target_pos = pc_offset();
1410 L->link_to(pc_offset());
1415 if (!is_const_pool_blocked()) BlockConstPoolFor(1);
1417 return target_pos - (pc_offset() + Instruction::kPcLoadDelta);
1422 void Assembler::b(
int branch_offset, Condition cond, RelocInfo::Mode rmode) {
1423 if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
1424 DCHECK_EQ(branch_offset & 3, 0);
1425 int imm24 = branch_offset >> 2;
1426 const bool b_imm_check = is_int24(imm24);
1428 emit(cond | B27 | B25 | (imm24 & kImm24Mask));
1432 CheckConstPool(
false,
false);
1436 void Assembler::bl(
int branch_offset, Condition cond, RelocInfo::Mode rmode) {
1437 if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
1438 DCHECK_EQ(branch_offset & 3, 0);
1439 int imm24 = branch_offset >> 2;
1440 const bool bl_imm_check = is_int24(imm24);
1441 CHECK(bl_imm_check);
1442 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
1445 void Assembler::blx(
int branch_offset) {
1446 DCHECK_EQ(branch_offset & 1, 0);
1447 int h = ((branch_offset & 2) >> 1)*B24;
1448 int imm24 = branch_offset >> 2;
1449 const bool blx_imm_check = is_int24(imm24);
1450 CHECK(blx_imm_check);
1451 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
1454 void Assembler::blx(Register target, Condition cond) {
1455 DCHECK(target != pc);
1456 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1459 void Assembler::bx(Register target, Condition cond) {
1460 DCHECK(target != pc);
1461 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1465 void Assembler::b(Label* L, Condition cond) {
1467 b(branch_offset(L), cond);
1471 void Assembler::bl(Label* L, Condition cond) {
1473 bl(branch_offset(L), cond);
1477 void Assembler::blx(Label* L) {
1479 blx(branch_offset(L));
// Data-processing instructions (addressing mode 1). Each Register-Register
// overload simply wraps the Operand overload.
// NOTE(review): closing braces and some signature continuation lines are
// elided from this extraction.
// and_: dst = src1 AND src2.
1485 void Assembler::and_(Register dst, Register src1,
const Operand& src2,
1486 SBit s, Condition cond) {
1487 AddrMode1(cond | AND | s, dst, src1, src2);
1490 void Assembler::and_(Register dst, Register src1, Register src2, SBit s,
1492 and_(dst, src1, Operand(src2), s, cond);
// eor: dst = src1 XOR src2.
1495 void Assembler::eor(Register dst, Register src1,
const Operand& src2,
1496 SBit s, Condition cond) {
1497 AddrMode1(cond | EOR | s, dst, src1, src2);
1500 void Assembler::eor(Register dst, Register src1, Register src2, SBit s,
1502 AddrMode1(cond | EOR | s, dst, src1, Operand(src2));
// sub: dst = src1 - src2.
1505 void Assembler::sub(Register dst, Register src1,
const Operand& src2,
1506 SBit s, Condition cond) {
1507 AddrMode1(cond | SUB | s, dst, src1, src2);
1510 void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
1512 sub(dst, src1, Operand(src2), s, cond);
// rsb (reverse subtract): dst = src2 - src1.
1515 void Assembler::rsb(Register dst, Register src1,
const Operand& src2,
1516 SBit s, Condition cond) {
1517 AddrMode1(cond | RSB | s, dst, src1, src2);
// add: dst = src1 + src2.
1521 void Assembler::add(Register dst, Register src1,
const Operand& src2,
1522 SBit s, Condition cond) {
1523 AddrMode1(cond | ADD | s, dst, src1, src2);
1526 void Assembler::add(Register dst, Register src1, Register src2, SBit s,
1528 add(dst, src1, Operand(src2), s, cond);
// adc: add with carry.
1531 void Assembler::adc(Register dst, Register src1,
const Operand& src2,
1532 SBit s, Condition cond) {
1533 AddrMode1(cond | ADC | s, dst, src1, src2);
// sbc: subtract with carry.
1537 void Assembler::sbc(Register dst, Register src1,
const Operand& src2,
1538 SBit s, Condition cond) {
1539 AddrMode1(cond | SBC | s, dst, src1, src2);
// rsc: reverse subtract with carry.
1543 void Assembler::rsc(Register dst, Register src1,
const Operand& src2,
1544 SBit s, Condition cond) {
1545 AddrMode1(cond | RSC | s, dst, src1, src2);
1549 void Assembler::tst(Register src1,
const Operand& src2, Condition cond) {
1550 AddrMode1(cond | TST | S, no_reg, src1, src2);
1553 void Assembler::tst(Register src1, Register src2, Condition cond) {
1554 tst(src1, Operand(src2), cond);
1557 void Assembler::teq(Register src1,
const Operand& src2, Condition cond) {
1558 AddrMode1(cond | TEQ | S, no_reg, src1, src2);
1562 void Assembler::cmp(Register src1,
const Operand& src2, Condition cond) {
1563 AddrMode1(cond | CMP | S, no_reg, src1, src2);
1566 void Assembler::cmp(Register src1, Register src2, Condition cond) {
1567 cmp(src1, Operand(src2), cond);
1570 void Assembler::cmp_raw_immediate(
1571 Register src,
int raw_immediate, Condition cond) {
1572 DCHECK(is_uint12(raw_immediate));
1573 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
1577 void Assembler::cmn(Register src1,
const Operand& src2, Condition cond) {
1578 AddrMode1(cond | CMN | S, no_reg, src1, src2);
// orr: dst = src1 OR src2 (addressing mode 1).
// NOTE(review): signature continuation lines and closing braces are elided
// from this extraction.
1582 void Assembler::orr(Register dst, Register src1,
const Operand& src2,
1583 SBit s, Condition cond) {
1584 AddrMode1(cond | ORR | s, dst, src1, src2);
// Register-register form wraps the Operand overload.
1587 void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
1589 orr(dst, src1, Operand(src2), s, cond);
1592 void Assembler::mov(Register dst,
const Operand& src, SBit s, Condition cond) {
1595 DCHECK(!(src.IsRegister() && src.rm() == dst && s == LeaveCC && cond == al));
1596 AddrMode1(cond | MOV | s, dst, no_reg, src);
1599 void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
1600 mov(dst, Operand(src), s, cond);
// Loads the Code-object-relative offset of |label| into |dst|. For a bound
// label the offset is known and emitted directly; otherwise the label's link
// chain is threaded through the emitted instruction(s) for later patching.
// NOTE(review): most of the unbound-label emission path is elided from this
// extraction — confirm the patching scheme against the full file.
1603 void Assembler::mov_label_offset(Register dst, Label* label) {
1604 if (label->is_bound()) {
// Bound: offset is relative to the start of the Code payload.
1605 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
// Unbound: remember the previous link target and splice this site in.
1611 int link = label->is_linked() ? label->pos() : pc_offset();
1612 label->link_to(pc_offset());
1631 CHECK(is_uint24(link));
// Keep the const pool from splitting the instruction sequence.
1632 BlockConstPoolScope block_const_pool(
this);
1635 if (!CpuFeatures::IsSupported(ARMv7)) {
1642 void Assembler::movw(Register reg,
uint32_t immediate, Condition cond) {
1643 DCHECK(IsEnabled(ARMv7));
1644 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1648 void Assembler::movt(Register reg,
uint32_t immediate, Condition cond) {
1649 DCHECK(IsEnabled(ARMv7));
1650 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
1654 void Assembler::bic(Register dst, Register src1,
const Operand& src2,
1655 SBit s, Condition cond) {
1656 AddrMode1(cond | BIC | s, dst, src1, src2);
1660 void Assembler::mvn(Register dst,
const Operand& src, SBit s, Condition cond) {
1661 AddrMode1(cond | MVN | s, dst, no_reg, src);
// Shift pseudo-instructions, implemented as MOV with a shifted-register
// operand. src2 selects a register- or immediate-specified shift amount.
// NOTE(review): the "} else {" lines and closing braces are elided from
// this extraction.
// asr: arithmetic shift right.
1664 void Assembler::asr(Register dst, Register src1,
const Operand& src2, SBit s,
1666 if (src2.IsRegister()) {
1667 mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
1669 mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
// lsl: logical shift left.
1673 void Assembler::lsl(Register dst, Register src1,
const Operand& src2, SBit s,
1675 if (src2.IsRegister()) {
1676 mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
1678 mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
// lsr: logical shift right.
1682 void Assembler::lsr(Register dst, Register src1,
const Operand& src2, SBit s,
1684 if (src2.IsRegister()) {
1685 mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
1687 mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
1692 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
1693 SBit s, Condition cond) {
1694 DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
1695 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
1696 src2.code()*B8 | B7 | B4 | src1.code());
// Multiply / divide family. NOTE(review): signature continuation lines and
// closing braces are elided from this extraction.
// mls (multiply and subtract): dst = srcA - src1 * src2. ARMv7 only.
1700 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
1702 DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
1703 DCHECK(IsEnabled(ARMv7));
1704 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
1705 src2.code()*B8 | B7 | B4 | src1.code());
// sdiv: signed divide, requires the SUDIV feature.
1709 void Assembler::sdiv(Register dst, Register src1, Register src2,
1711 DCHECK(dst != pc && src1 != pc && src2 != pc);
1712 DCHECK(IsEnabled(SUDIV));
1713 emit(cond | B26 | B25 | B24 | B20 | dst.code() * B16 | 0xF * B12 |
1714 src2.code() * B8 | B4 | src1.code());
// udiv: unsigned divide, requires the SUDIV feature.
1718 void Assembler::udiv(Register dst, Register src1, Register src2,
1720 DCHECK(dst != pc && src1 != pc && src2 != pc);
1721 DCHECK(IsEnabled(SUDIV));
1722 emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xF * B12 |
1723 src2.code() * B8 | B4 | src1.code());
// mul: dst = src1 * src2.
1727 void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
1729 DCHECK(dst != pc && src1 != pc && src2 != pc);
1731 emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
// smmla: signed most-significant-word multiply accumulate.
1735 void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
1737 DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
1738 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
1739 srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
// smmul: signed most-significant-word multiply.
1743 void Assembler::smmul(Register dst, Register src1, Register src2,
1745 DCHECK(dst != pc && src1 != pc && src2 != pc);
1746 emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xF * B12 |
1747 src2.code() * B8 | B4 | src1.code());
// smlal: signed 32x32 -> 64 multiply, accumulating into dstH:dstL.
1751 void Assembler::smlal(Register dstL,
1757 DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
1758 DCHECK(dstL != dstH);
1759 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1760 src2.code()*B8 | B7 | B4 | src1.code());
// smull: signed 32x32 -> 64 multiply into dstH:dstL.
1764 void Assembler::smull(Register dstL,
1770 DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
1771 DCHECK(dstL != dstH);
1772 emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
1773 src2.code()*B8 | B7 | B4 | src1.code());
// umlal: unsigned 32x32 -> 64 multiply-accumulate into dstH:dstL.
1777 void Assembler::umlal(Register dstL,
1783 DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
1784 DCHECK(dstL != dstH);
1785 emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
1786 src2.code()*B8 | B7 | B4 | src1.code());
// umull: unsigned 32x32 -> 64 multiply into dstH:dstL.
1790 void Assembler::umull(Register dstL,
1796 DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
1797 DCHECK(dstL != dstH);
1798 emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
1799 src2.code()*B8 | B7 | B4 | src1.code());
1804 void Assembler::clz(Register dst, Register src, Condition cond) {
1805 DCHECK(dst != pc && src != pc);
1806 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1807 15*B8 | CLZ | src.code());
// Saturation, bitfield, pack and sign/zero-extend instructions (ARMv6+/v7).
// NOTE(review): parameter continuation lines, some bodies and closing braces
// are elided from this extraction.
// usat: unsigned saturate of a shifted register to [0, 2^satpos - 1].
1814 void Assembler::usat(Register dst,
1818 DCHECK(dst != pc && src.rm_ != pc);
1819 DCHECK((satpos >= 0) && (satpos <= 31));
1820 DCHECK(src.IsImmediateShiftedRegister());
1821 DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
// sh distinguishes ASR from LSL in the encoding (set on the ASR path).
1824 if (src.shift_op_ == ASR) {
1828 emit(cond | 0x6 * B24 | 0xE * B20 | satpos * B16 | dst.code() * B12 |
1829 src.shift_imm_ * B7 | sh * B6 | 0x1 * B4 | src.rm_.code());
// ubfx: unsigned bitfield extract of |width| bits starting at |lsb|. ARMv7.
1839 void Assembler::ubfx(Register dst,
1844 DCHECK(IsEnabled(ARMv7));
1845 DCHECK(dst != pc && src != pc);
1846 DCHECK((lsb >= 0) && (lsb <= 31));
1847 DCHECK((width >= 1) && (width <= (32 - lsb)));
1848 emit(cond | 0xF * B23 | B22 | B21 | (width - 1) * B16 | dst.code() * B12 |
1849 lsb * B7 | B6 | B4 | src.code());
// sbfx: signed bitfield extract (sign-extends the extracted field). ARMv7.
1858 void Assembler::sbfx(Register dst,
1863 DCHECK(IsEnabled(ARMv7));
1864 DCHECK(dst != pc && src != pc);
1865 DCHECK((lsb >= 0) && (lsb <= 31));
1866 DCHECK((width >= 1) && (width <= (32 - lsb)));
1867 emit(cond | 0xF * B23 | B21 | (width - 1) * B16 | dst.code() * B12 |
1868 lsb * B7 | B6 | B4 | src.code());
// bfc: bitfield clear — zeroes |width| bits of dst starting at |lsb|. ARMv7.
1876 void Assembler::bfc(Register dst,
int lsb,
int width, Condition cond) {
1877 DCHECK(IsEnabled(ARMv7));
1879 DCHECK((lsb >= 0) && (lsb <= 31));
1880 DCHECK((width >= 1) && (width <= (32 - lsb)));
// The encoding takes the most-significant bit of the field, not its width.
1881 int msb = lsb + width - 1;
1882 emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 | 0xF);
// bfi: bitfield insert — copies the low |width| bits of src into dst. ARMv7.
1890 void Assembler::bfi(Register dst,
1895 DCHECK(IsEnabled(ARMv7));
1896 DCHECK(dst != pc && src != pc);
1897 DCHECK((lsb >= 0) && (lsb <= 31));
1898 DCHECK((width >= 1) && (width <= (32 - lsb)));
1899 int msb = lsb + width - 1;
1900 emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 |
// pkhbt: pack halfword bottom-top; src2 must be an LSL-shifted register.
1905 void Assembler::pkhbt(Register dst,
1907 const Operand& src2,
1914 DCHECK(src2.IsImmediateShiftedRegister());
1915 DCHECK(src2.rm() != pc);
1916 DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
1917 DCHECK(src2.shift_op() == LSL);
1918 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1919 src2.shift_imm_*B7 | B4 | src2.rm().code());
// pkhtb: pack halfword top-bottom; src2 must be an ASR-shifted register.
1923 void Assembler::pkhtb(Register dst,
1925 const Operand& src2,
1932 DCHECK(src2.IsImmediateShiftedRegister());
1933 DCHECK(src2.rm() != pc);
1934 DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
1935 DCHECK(src2.shift_op() == ASR);
// A shift of 32 is encoded as 0 in the asr field.
1936 int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
1937 emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
1938 asr*B7 | B6 | B4 | src2.rm().code());
// sxtb: sign-extend byte, with optional byte rotation of the source.
1942 void Assembler::sxtb(Register dst, Register src,
int rotate, Condition cond) {
1948 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1949 emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
1950 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
// sxtab: sign-extend byte and add.
1954 void Assembler::sxtab(Register dst, Register src1, Register src2,
int rotate,
1962 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1963 emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
1964 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
// sxth: sign-extend halfword.
1968 void Assembler::sxth(Register dst, Register src,
int rotate, Condition cond) {
1974 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1975 emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
1976 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
// sxtah: sign-extend halfword and add.
1980 void Assembler::sxtah(Register dst, Register src1, Register src2,
int rotate,
1988 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
1989 emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
1990 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
// uxtb: zero-extend byte.
1994 void Assembler::uxtb(Register dst, Register src,
int rotate, Condition cond) {
2000 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
2001 emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
2002 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
// uxtab: zero-extend byte and add.
2006 void Assembler::uxtab(Register dst, Register src1, Register src2,
int rotate,
2014 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
2015 emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
2016 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
// uxtb16: zero-extend two bytes to two halfwords.
2020 void Assembler::uxtb16(Register dst, Register src,
int rotate, Condition cond) {
2026 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
2027 emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
2028 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
// uxth: zero-extend halfword.
2032 void Assembler::uxth(Register dst, Register src,
int rotate, Condition cond) {
2038 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
2039 emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
2040 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
// uxtah: zero-extend halfword and add.
2044 void Assembler::uxtah(Register dst, Register src1, Register src2,
int rotate,
2052 DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
2053 emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
2054 ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
// Bit/byte reversal and status-register access. NOTE(review): several
// interior lines (comments, additional DCHECKs, else-branch braces) are
// elided from this extraction.
// rbit: reverses the bit order of src into dst. ARMv7 only.
2058 void Assembler::rbit(Register dst, Register src, Condition cond) {
2061 DCHECK(IsEnabled(ARMv7));
2064 emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
// rev: reverses the byte order of src into dst.
2067 void Assembler::rev(Register dst, Register src, Condition cond) {
2072 emit(cond | 0x6BF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
// mrs: moves the selected status register (CPSR/SPSR) into dst.
2076 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
2078 emit(cond | B24 | s | 15*B16 | dst.code()*B12);
// msr: writes |src| to the masked fields of a status register. Immediates
// that don't fit the shifter encoding are first materialized in a scratch
// register, then the register form is emitted recursively.
2082 void Assembler::msr(SRegisterFieldMask fields,
const Operand& src,
2084 DCHECK_NE(fields & 0x000F0000, 0);
2085 DCHECK(((fields & 0xFFF0FFFF) == CPSR) || ((fields & 0xFFF0FFFF) == SPSR));
2087 if (src.IsImmediate()) {
// Immediate needs relocation or can't be encoded: go through a scratch reg.
2091 if (src.MustOutputRelocInfo(
this) ||
2092 !FitsShifter(src.immediate(), &rotate_imm, &immed_8,
nullptr)) {
2093 UseScratchRegisterScope temps(
this);
2094 Register scratch = temps.Acquire();
2097 Move32BitImmediate(scratch, src);
2098 msr(fields, Operand(scratch), cond);
2101 instr = I | rotate_imm*B8 | immed_8;
2103 DCHECK(src.IsRegister());
2104 instr = src.rm_.code();
2106 emit(cond | instr | B24 | B21 | fields | 15*B12);
2111 void Assembler::ldr(Register dst,
const MemOperand& src, Condition cond) {
2112 AddrMode2(cond | B26 | L, dst, src);
2116 void Assembler::str(Register src,
const MemOperand& dst, Condition cond) {
2117 AddrMode2(cond | B26, src, dst);
// Byte/halfword/doubleword loads and stores. NOTE(review): closing braces
// and a few interior comment lines are elided from this extraction.
// ldrb: load unsigned byte (addressing mode 2).
2121 void Assembler::ldrb(Register dst,
const MemOperand& src, Condition cond) {
2122 AddrMode2(cond | B26 | B | L, dst, src);
// strb: store byte.
2126 void Assembler::strb(Register src,
const MemOperand& dst, Condition cond) {
2127 AddrMode2(cond | B26 | B, src, dst);
// ldrh: load unsigned halfword (addressing mode 3).
2131 void Assembler::ldrh(Register dst,
const MemOperand& src, Condition cond) {
2132 AddrMode3(cond | L | B7 | H | B4, dst, src);
// strh: store halfword.
2136 void Assembler::strh(Register src,
const MemOperand& dst, Condition cond) {
2137 AddrMode3(cond | B7 | H | B4, src, dst);
// ldrsb: load signed byte.
2141 void Assembler::ldrsb(Register dst,
const MemOperand& src, Condition cond) {
2142 AddrMode3(cond | L | B7 | S6 | B4, dst, src);
// ldrsh: load signed halfword.
2146 void Assembler::ldrsh(Register dst,
const MemOperand& src, Condition cond) {
2147 AddrMode3(cond | L | B7 | S6 | H | B4, dst, src);
// ldrd: load doubleword into an even/odd register pair.
2151 void Assembler::ldrd(Register dst1, Register dst2,
2152 const MemOperand& src, Condition cond) {
2153 DCHECK(src.rm() == no_reg);
// Destination pair must be {even, even+1}.
2155 DCHECK_EQ(0, dst1.code() % 2);
2156 DCHECK_EQ(dst1.code() + 1, dst2.code());
2157 AddrMode3(cond | B7 | B6 | B4, dst1, src);
// strd: store doubleword from an even/odd register pair.
2161 void Assembler::strd(Register src1, Register src2,
2162 const MemOperand& dst, Condition cond) {
2163 DCHECK(dst.rm() == no_reg);
2165 DCHECK_EQ(0, src1.code() % 2);
2166 DCHECK_EQ(src1.code() + 1, src2.code());
2167 AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
// PC-relative load, exclusive (LDREX/STREX) accesses, preload, block
// transfers and software interrupts. NOTE(review): many interior lines
// (negative-offset handling, comments, #ifdef branches, closing braces)
// are elided from this extraction.
// ldr_pcrel: load relative to the pc with a 12-bit offset.
2170 void Assembler::ldr_pcrel(Register dst,
int imm12, Condition cond) {
2171 AddrMode am = Offset;
2176 DCHECK(is_uint12(imm12));
2177 emit(cond | B26 | am | L | pc.code() * B16 | dst.code() * B12 | imm12);
// ldrex: load-exclusive word.
2181 void Assembler::ldrex(Register dst, Register src, Condition cond) {
2186 emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xF9F);
// strex: store-exclusive word; src1 receives the success/failure status.
2189 void Assembler::strex(Register src1, Register src2, Register dst,
// Status register must be distinct from both address and data registers.
2197 DCHECK(src1 != dst);
2198 DCHECK(src1 != src2);
2199 emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xF9 * B4 |
// ldrexb: load-exclusive byte.
2203 void Assembler::ldrexb(Register dst, Register src, Condition cond) {
2208 emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
// strexb: store-exclusive byte.
2212 void Assembler::strexb(Register src1, Register src2, Register dst,
2220 DCHECK(src1 != dst);
2221 DCHECK(src1 != src2);
2222 emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
2223 0xF9 * B4 | src2.code());
// ldrexh: load-exclusive halfword.
2226 void Assembler::ldrexh(Register dst, Register src, Condition cond) {
2231 emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
2232 dst.code() * B12 | 0xF9F);
// strexh: store-exclusive halfword.
2235 void Assembler::strexh(Register src1, Register src2, Register dst,
2243 DCHECK(src1 != dst);
2244 DCHECK(src1 != src2);
2245 emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
2246 0xF9 * B4 | src2.code());
// ldrexd: load-exclusive doubleword into an even/odd register pair.
2249 void Assembler::ldrexd(Register dst1, Register dst2, Register src,
2255 DCHECK_EQ(0, dst1.code() % 2);
2256 DCHECK_EQ(dst1.code() + 1, dst2.code());
2257 emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
// strexd: store-exclusive doubleword; res receives the status.
2261 void Assembler::strexd(Register res, Register src1, Register src2, Register dst,
2267 DCHECK_EQ(0, src1.code() % 2);
2268 DCHECK_EQ(src1.code() + 1, src2.code());
2269 emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
2270 0xF9 * B4 | src1.code());
// pld: preload-data hint; only positive immediate offsets are supported here.
2274 void Assembler::pld(
const MemOperand& address) {
2278 DCHECK(address.rm() == no_reg);
2279 DCHECK(address.am() == Offset);
2281 int offset = address.offset();
2286 DCHECK_LT(offset, 4096);
2287 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
2288 address.rn().code() * B16 | 0xF * B12 | offset);
// ldm: block load (addressing mode 4).
2293 void Assembler::ldm(BlockAddrMode am,
// ABI: only sp-based block loads may include sp in the register list.
2298 DCHECK(base == sp || (dst & sp.bit()) == 0);
2300 AddrMode4(cond | B27 | am | L, base, dst);
// An unconditional load into pc ends the code stream: flush the const pool.
2303 if (cond == al && (dst & pc.bit()) != 0) {
2309 CheckConstPool(
true, no_const_pool_before_ == pc_offset() - kInstrSize);
// stm: block store (addressing mode 4).
2314 void Assembler::stm(BlockAddrMode am,
2318 AddrMode4(cond | B27 | am, base, src);
// stop: emits a stop marker (svc-based under the simulator).
// NOTE(review): the #ifdef __arm__ / simulator split is elided here.
2325 void Assembler::stop(
const char* msg, Condition cond, int32_t code) {
2327 DCHECK_GE(code, kDefaultStopCode);
2329 BlockConstPoolScope block_const_pool(
this);
2331 svc(kStopCode + code, cond);
2333 svc(kStopCode + kMaxStopCode, cond);
2336 #else // def __arm__ 2339 b(&skip, NegateCondition(cond));
2345 #endif // def __arm__ 2348 void Assembler::bkpt(
uint32_t imm16) {
// bkpt: breakpoint with a 16-bit immediate split across the encoding.
2349 DCHECK(is_uint16(imm16));
2350 emit(al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xF));
// svc: supervisor call with a 24-bit immediate.
2354 void Assembler::svc(
uint32_t imm24, Condition cond) {
2355 DCHECK(is_uint24(imm24));
2356 emit(cond | 15*B24 | imm24);
// Memory barriers and generic coprocessor instructions. NOTE(review): the
// "} else {" lines of the barrier fallbacks, some parameter continuations
// and closing braces are elided from this extraction.
// dmb: data memory barrier; pre-ARMv7 falls back to the CP15 equivalent.
2360 void Assembler::dmb(BarrierOption option) {
2361 if (CpuFeatures::IsSupported(ARMv7)) {
2363 emit(kSpecialCondition | 0x57FF * B12 | 5 * B4 | option);
2367 mcr(p15, 0, r0, cr7, cr10, 5);
// dsb: data synchronization barrier.
2372 void Assembler::dsb(BarrierOption option) {
2373 if (CpuFeatures::IsSupported(ARMv7)) {
2375 emit(kSpecialCondition | 0x57FF * B12 | 4 * B4 | option);
2379 mcr(p15, 0, r0, cr7, cr10, 4);
// isb: instruction synchronization barrier.
2384 void Assembler::isb(BarrierOption option) {
2385 if (CpuFeatures::IsSupported(ARMv7)) {
2387 emit(kSpecialCondition | 0x57FF * B12 | 6 * B4 | option);
2391 mcr(p15, 0, r0, cr7, cr5, 4);
// csdb: consumption-of-speculative-data barrier (body elided in this view).
2395 void Assembler::csdb() {
// cdp: generic coprocessor data processing.
2402 void Assembler::cdp(Coprocessor coproc,
2409 DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
2410 emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
2411 crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
// cdp2: unconditional form of cdp.
2414 void Assembler::cdp2(Coprocessor coproc,
int opcode_1, CRegister crd,
2415 CRegister crn, CRegister crm,
int opcode_2) {
2416 cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
// mcr: move from ARM register to coprocessor register.
2420 void Assembler::mcr(Coprocessor coproc,
2427 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
2428 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
2429 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
// mcr2: unconditional form of mcr.
2432 void Assembler::mcr2(Coprocessor coproc,
int opcode_1, Register rd,
2433 CRegister crn, CRegister crm,
int opcode_2) {
2434 mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
// mrc: move from coprocessor register to ARM register.
2438 void Assembler::mrc(Coprocessor coproc,
2445 DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
2446 emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
2447 rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
// mrc2: unconditional form of mrc.
2450 void Assembler::mrc2(Coprocessor coproc,
int opcode_1, Register rd,
2451 CRegister crn, CRegister crm,
int opcode_2) {
2452 mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
// ldc: coprocessor load (addressing mode 5).
2456 void Assembler::ldc(Coprocessor coproc,
2458 const MemOperand& src,
2461 AddrMode5(cond | B27 | B26 | l | L | coproc * B8, crd, src);
// ldc (unindexed form): base register plus an 8-bit option field.
2465 void Assembler::ldc(Coprocessor coproc,
2472 DCHECK(is_uint8(option));
2473 emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
2474 coproc*B8 | (option & 255));
// ldc2: unconditional forms of ldc.
2477 void Assembler::ldc2(Coprocessor coproc, CRegister crd,
const MemOperand& src,
2479 ldc(coproc, crd, src, l, kSpecialCondition);
2482 void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn,
int option,
2484 ldc(coproc, crd, rn, option, l, kSpecialCondition);
// VFP load/store instructions. Offsets that fit the 8-bit scaled immediate
// are encoded directly; larger or unaligned offsets are materialized into a
// scratch register first. NOTE(review): sign-handling lines (offset < 0
// negation and the U-bit setup), else-branch braces and closing braces are
// elided from this extraction.
// vldr: load a double-precision register from base + offset.
2490 void Assembler::vldr(
const DwVfpRegister dst,
2491 const Register base,
2493 const Condition cond) {
2498 DCHECK(VfpRegisterIsAvailable(dst));
// kMinInt cannot be negated below.
2501 CHECK_NE(offset, kMinInt);
2506 dst.split_code(&vd, &d);
2508 DCHECK_GE(offset, 0);
// Fast path: word-aligned offset that fits the scaled 8-bit immediate.
2509 if ((offset % 4) == 0 && (offset / 4) < 256) {
2510 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
2511 0xB*B8 | ((offset / 4) & 255));
// Slow path: compute the address into a scratch register.
2513 UseScratchRegisterScope temps(
this);
2514 Register scratch = temps.Acquire();
2517 DCHECK(base != scratch);
2519 add(scratch, base, Operand(offset));
2521 sub(scratch, base, Operand(offset));
2523 emit(cond | 0xD * B24 | d * B22 | B20 | scratch.code() * B16 | vd * B12 |
// vldr (MemOperand form): resolves a register-offset operand via a scratch.
2529 void Assembler::vldr(
const DwVfpRegister dst,
2530 const MemOperand& operand,
2531 const Condition cond) {
2532 DCHECK(VfpRegisterIsAvailable(dst));
2533 DCHECK(operand.am_ == Offset);
2534 if (operand.rm().is_valid()) {
2535 UseScratchRegisterScope temps(
this);
2536 Register scratch = temps.Acquire();
2537 add(scratch, operand.rn(),
2538 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2539 vldr(dst, scratch, 0, cond);
2541 vldr(dst, operand.rn(), operand.offset(), cond);
// vldr: single-precision variant.
2546 void Assembler::vldr(
const SwVfpRegister dst,
2547 const Register base,
2549 const Condition cond) {
2560 dst.split_code(&sd, &d);
2561 DCHECK_GE(offset, 0);
2563 if ((offset % 4) == 0 && (offset / 4) < 256) {
2564 emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
2565 0xA*B8 | ((offset / 4) & 255));
2569 UseScratchRegisterScope temps(
this);
2570 Register scratch = temps.Acquire();
2571 DCHECK(base != scratch);
2573 add(scratch, base, Operand(offset));
2575 sub(scratch, base, Operand(offset));
2577 emit(cond | d * B22 | 0xD1 * B20 | scratch.code() * B16 | sd * B12 |
// vldr (single-precision, MemOperand form).
2583 void Assembler::vldr(
const SwVfpRegister dst,
2584 const MemOperand& operand,
2585 const Condition cond) {
2586 DCHECK(operand.am_ == Offset);
2587 if (operand.rm().is_valid()) {
2588 UseScratchRegisterScope temps(
this);
2589 Register scratch = temps.Acquire();
2590 add(scratch, operand.rn(),
2591 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2592 vldr(dst, scratch, 0, cond);
2594 vldr(dst, operand.rn(), operand.offset(), cond);
// vstr: store a double-precision register to base + offset.
2599 void Assembler::vstr(
const DwVfpRegister src,
2600 const Register base,
2602 const Condition cond) {
2607 DCHECK(VfpRegisterIsAvailable(src));
2610 CHECK_NE(offset, kMinInt);
2614 DCHECK_GE(offset, 0);
2616 src.split_code(&vd, &d);
2618 if ((offset % 4) == 0 && (offset / 4) < 256) {
2619 emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
2620 ((offset / 4) & 255));
2624 UseScratchRegisterScope temps(
this);
2625 Register scratch = temps.Acquire();
2626 DCHECK(base != scratch);
2628 add(scratch, base, Operand(offset));
2630 sub(scratch, base, Operand(offset));
2632 emit(cond | 0xD * B24 | d * B22 | scratch.code() * B16 | vd * B12 |
// vstr (double-precision, MemOperand form).
2638 void Assembler::vstr(
const DwVfpRegister src,
2639 const MemOperand& operand,
2640 const Condition cond) {
2641 DCHECK(VfpRegisterIsAvailable(src));
2642 DCHECK(operand.am_ == Offset);
2643 if (operand.rm().is_valid()) {
2644 UseScratchRegisterScope temps(
this);
2645 Register scratch = temps.Acquire();
2646 add(scratch, operand.rn(),
2647 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2648 vstr(src, scratch, 0, cond);
2650 vstr(src, operand.rn(), operand.offset(), cond);
// vstr: single-precision variant.
2655 void Assembler::vstr(
const SwVfpRegister src,
2656 const Register base,
2658 const Condition cond) {
2665 CHECK_NE(offset, kMinInt);
2670 src.split_code(&sd, &d);
2671 DCHECK_GE(offset, 0);
2672 if ((offset % 4) == 0 && (offset / 4) < 256) {
2673 emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
2674 0xA*B8 | ((offset / 4) & 255));
2678 UseScratchRegisterScope temps(
this);
2679 Register scratch = temps.Acquire();
2680 DCHECK(base != scratch);
2682 add(scratch, base, Operand(offset));
2684 sub(scratch, base, Operand(offset));
2686 emit(cond | d * B22 | 0xD0 * B20 | scratch.code() * B16 | sd * B12 |
// vstr (single-precision, MemOperand form).
2692 void Assembler::vstr(
const SwVfpRegister src,
2693 const MemOperand& operand,
2694 const Condition cond) {
2695 DCHECK(operand.am_ == Offset);
2696 if (operand.rm().is_valid()) {
2697 UseScratchRegisterScope temps(
this);
2698 Register scratch = temps.Acquire();
2699 add(scratch, operand.rn(),
2700 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
2701 vstr(src, scratch, 0, cond);
2703 vstr(src, operand.rn(), operand.offset(), cond);
// vldm/vstm: block load/store of a contiguous range of VFP registers.
2707 void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
2708 DwVfpRegister last, Condition cond) {
2712 DCHECK_LE(first.code(), last.code());
2713 DCHECK(VfpRegisterIsAvailable(last));
2714 DCHECK(am == ia || am == ia_w || am == db_w);
2718 first.split_code(&sd, &d);
2719 int count = last.code() - first.code() + 1;
// At most 16 double registers per transfer.
2720 DCHECK_LE(count, 16);
2721 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2725 void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
2726 DwVfpRegister last, Condition cond) {
2730 DCHECK_LE(first.code(), last.code());
2731 DCHECK(VfpRegisterIsAvailable(last));
2732 DCHECK(am == ia || am == ia_w || am == db_w);
2736 first.split_code(&sd, &d);
2737 int count = last.code() - first.code() + 1;
2738 DCHECK_LE(count, 16);
2739 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
// Single-precision vldm/vstm.
2743 void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
2744 SwVfpRegister last, Condition cond) {
2748 DCHECK_LE(first.code(), last.code());
2749 DCHECK(am == ia || am == ia_w || am == db_w);
2753 first.split_code(&sd, &d);
2754 int count = last.code() - first.code() + 1;
2755 emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
2759 void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
2760 SwVfpRegister last, Condition cond) {
2764 DCHECK_LE(first.code(), last.code());
2765 DCHECK(am == ia || am == ia_w || am == db_w);
2769 first.split_code(&sd, &d);
2770 int count = last.code() - first.code() + 1;
2771 emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
// Splits the raw bits of a double into low/high 32-bit halves.
// NOTE(review): the signature and the *hi assignment are elided from this
// extraction.
uint64_t
i = d.AsUint64();
2778 *lo =
i & 0xFFFFFFFF;
// Checks whether |d| can be encoded as a VFP immediate (VMOV.F64 #imm):
// the mantissa may only use its top 4 bits and the exponent must lie in a
// narrow range around the bias. On success writes the 8-bit encoding split
// into the instruction's imm4H/imm4L positions.
2784 static bool FitsVmovFPImmediate(Double d,
uint32_t* encoding) {
2804 DoubleAsTwoUInt32(d, &lo, &hi);
// Only the top 4 mantissa bits may be set.
2807 if ((lo != 0) || ((hi & 0xFFFF) != 0)) {
// Exponent bits must be all-equal outside the allowed window.
2812 if (((hi & 0x3FC00000) != 0) && ((hi & 0x3FC00000) != 0x3FC00000)) {
2817 if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
// Assemble the abcdefgh immediate from sign, exponent and mantissa bits.
2823 *encoding = (hi >> 16) & 0xF;
2824 *encoding |= (hi >> 4) & 0x70000;
2825 *encoding |= (hi >> 12) & 0x80000;
// VMOV variants: immediates, register-register moves and core<->VFP
// transfers. NOTE(review): else-branch braces, local declarations and
// closing braces are elided from this extraction.
// vmov s-reg <- float immediate; uses the VFPv3 encoded-immediate form when
// possible, otherwise materializes the bits through a core scratch register.
2830 void Assembler::vmov(
const SwVfpRegister dst, Float32 imm) {
2832 if (CpuFeatures::IsSupported(VFPv3) &&
2833 FitsVmovFPImmediate(Double(imm.get_scalar()), &enc)) {
2834 CpuFeatureScope scope(
this, VFPv3);
2842 dst.split_code(&vd, &d);
2843 emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
// Fallback: move the raw bits via a core register.
2845 UseScratchRegisterScope temps(
this);
2846 Register scratch = temps.Acquire();
2847 mov(scratch, Operand(imm.get_bits()));
// vmov d-reg <- double immediate. Tries, in order: the VFPv3 encoded
// immediate, equal halves via one scratch, one scratch with movt reuse,
// or two scratch registers.
2852 void Assembler::vmov(
const DwVfpRegister dst, Double imm,
2853 const Register extra_scratch) {
2854 DCHECK(VfpRegisterIsAvailable(dst));
2856 if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
2857 CpuFeatureScope scope(
this, VFPv3);
2865 dst.split_code(&vd, &d);
2866 emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
2870 DoubleAsTwoUInt32(imm, &lo, &hi);
2871 UseScratchRegisterScope temps(
this);
2872 Register scratch = temps.Acquire();
// Both halves equal: one scratch fills both words.
2877 mov(scratch, Operand(lo));
2878 vmov(dst, scratch, scratch);
2879 }
else if (extra_scratch == no_reg) {
// Single-scratch path: move each half in turn; reuse via movt when the
// low halfwords coincide (ARMv7 only).
2881 mov(scratch, Operand(lo));
2882 vmov(NeonS32, dst, 0, scratch);
2883 if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
2884 CpuFeatureScope scope(
this, ARMv7);
2885 movt(scratch, hi >> 16);
2887 mov(scratch, Operand(hi));
2889 vmov(NeonS32, dst, 1, scratch);
// Two scratch registers: a single doubleword transfer.
2893 mov(scratch, Operand(lo));
2894 mov(extra_scratch, Operand(hi));
2895 vmov(dst, scratch, extra_scratch);
// vmov s-reg <- s-reg.
2900 void Assembler::vmov(
const SwVfpRegister dst,
2901 const SwVfpRegister src,
2902 const Condition cond) {
2906 dst.split_code(&sd, &d);
2907 src.split_code(&sm, &m);
2908 emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
// vmov d-reg <- d-reg.
2912 void Assembler::vmov(
const DwVfpRegister dst,
2913 const DwVfpRegister src,
2914 const Condition cond) {
2919 DCHECK(VfpRegisterIsAvailable(dst));
2920 DCHECK(VfpRegisterIsAvailable(src));
2922 dst.split_code(&vd, &d);
2924 src.split_code(&vm, &m);
2925 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
// vmov d-reg <- two core registers (low word from src1, high from src2).
2929 void Assembler::vmov(
const DwVfpRegister dst,
2930 const Register src1,
2931 const Register src2,
2932 const Condition cond) {
2937 DCHECK(VfpRegisterIsAvailable(dst));
2938 DCHECK(src1 != pc && src2 != pc);
2940 dst.split_code(&vm, &m);
2941 emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
2942 src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
// vmov two core registers <- d-reg.
2946 void Assembler::vmov(
const Register dst1,
2947 const Register dst2,
2948 const DwVfpRegister src,
2949 const Condition cond) {
2954 DCHECK(VfpRegisterIsAvailable(src));
2955 DCHECK(dst1 != pc && dst2 != pc);
2957 src.split_code(&vm, &m);
2958 emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
2959 dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
// vmov s-reg <- core register.
2963 void Assembler::vmov(
const SwVfpRegister dst,
2965 const Condition cond) {
2972 dst.split_code(&sn, &n);
2973 emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
// vmov core register <- s-reg.
2977 void Assembler::vmov(
const Register dst,
2978 const SwVfpRegister src,
2979 const Condition cond) {
2986 src.split_code(&sn, &n);
2987 emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
// VFP conversion support: a small type tag plus predicate helpers feed the
// shared EncodeVCVT() encoder used by every vcvt_* wrapper below.
// NOTE(review): the predicate bodies, several locals (Vd/Vm/D/M, opc2, sz,
// op) and closing braces are elided from this extraction.
2992 enum VFPType { S32, U32, F32, F64 };
// True for signed types (body elided in this view).
2995 static bool IsSignedVFPType(VFPType type) {
// True for the integer types S32/U32 (body elided).
3007 static bool IsIntegerVFPType(VFPType type) {
// True for F64 (body elided).
3021 static bool IsDoubleVFPType(VFPType type) {
// Splits a register code into the encoding's base/top-bit fields; the split
// rule differs for single vs. double registers.
3037 static void SplitRegCode(VFPType reg_type,
3041 DCHECK((reg_code >= 0) && (reg_code <= 31));
3042 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
3043 SwVfpRegister::split_code(reg_code, vm, m);
3045 DwVfpRegister::split_code(reg_code, vm, m);
// Builds a VCVT instruction for any int<->float / float<->float pairing.
3051 static Instr EncodeVCVT(
const VFPType dst_type,
3053 const VFPType src_type,
3055 VFPConversionMode mode,
3056 const Condition cond) {
3057 DCHECK(src_type != dst_type);
3059 SplitRegCode(src_type, src_code, &Vm, &M);
3060 SplitRegCode(dst_type, dst_code, &Vd, &D);
// Integer <-> floating-point conversion.
3062 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
// int -> int is not a VCVT.
3067 DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
3071 if (IsIntegerVFPType(dst_type)) {
3072 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
3073 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
3076 DCHECK(IsIntegerVFPType(src_type));
3078 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
3079 op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
3082 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
3083 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
// float <-> double conversion: sz reflects the source width.
3089 int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
3090 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
3091 Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
// The vcvt_* wrappers below name the destination/source types explicitly
// and forward to EncodeVCVT().
3096 void Assembler::vcvt_f64_s32(
const DwVfpRegister dst,
3097 const SwVfpRegister src,
3098 VFPConversionMode mode,
3099 const Condition cond) {
3100 DCHECK(VfpRegisterIsAvailable(dst));
3101 emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
3105 void Assembler::vcvt_f32_s32(
const SwVfpRegister dst,
3106 const SwVfpRegister src,
3107 VFPConversionMode mode,
3108 const Condition cond) {
3109 emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
3113 void Assembler::vcvt_f64_u32(
const DwVfpRegister dst,
3114 const SwVfpRegister src,
3115 VFPConversionMode mode,
3116 const Condition cond) {
3117 DCHECK(VfpRegisterIsAvailable(dst));
3118 emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
3122 void Assembler::vcvt_f32_u32(
const SwVfpRegister dst,
const SwVfpRegister src,
3123 VFPConversionMode mode,
const Condition cond) {
3124 emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
3128 void Assembler::vcvt_s32_f32(
const SwVfpRegister dst,
const SwVfpRegister src,
3129 VFPConversionMode mode,
const Condition cond) {
3130 emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
3134 void Assembler::vcvt_u32_f32(
const SwVfpRegister dst,
const SwVfpRegister src,
3135 VFPConversionMode mode,
const Condition cond) {
3136 emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
3140 void Assembler::vcvt_s32_f64(
const SwVfpRegister dst,
3141 const DwVfpRegister src,
3142 VFPConversionMode mode,
3143 const Condition cond) {
3144 DCHECK(VfpRegisterIsAvailable(src));
3145 emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
3149 void Assembler::vcvt_u32_f64(
const SwVfpRegister dst,
3150 const DwVfpRegister src,
3151 VFPConversionMode mode,
3152 const Condition cond) {
3153 DCHECK(VfpRegisterIsAvailable(src));
3154 emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
3158 void Assembler::vcvt_f64_f32(
const DwVfpRegister dst,
3159 const SwVfpRegister src,
3160 VFPConversionMode mode,
3161 const Condition cond) {
3162 DCHECK(VfpRegisterIsAvailable(dst));
3163 emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
3167 void Assembler::vcvt_f32_f64(
const SwVfpRegister dst,
3168 const DwVfpRegister src,
3169 VFPConversionMode mode,
3170 const Condition cond) {
3171 DCHECK(VfpRegisterIsAvailable(src));
3172 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
// Fixed-point to double conversion: interprets the source as a signed
// fixed-point value with the given number of fraction bits.  VFPv3 only.
// The bit position 32 - fraction_bits is split into an imm4 field and a
// single low bit (i) in the encoding.
3176 void Assembler::vcvt_f64_s32(
const DwVfpRegister dst,
3178 const Condition cond) {
3182 DCHECK(IsEnabled(VFPv3));
3183 DCHECK(VfpRegisterIsAvailable(dst));
3184 DCHECK(fraction_bits > 0 && fraction_bits <= 32);
3186 dst.split_code(&vd, &d);
3187 int imm5 = 32 - fraction_bits;
3189 int imm4 = (imm5 >> 1) & 0xF;
3190 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
3191 vd*B12 | 0x5*B9 | B8 | B7 | B6 |
i*B5 | imm4);
// Floating-point negate, double-precision: Dd = -Dm.
3195 void Assembler::vneg(
const DwVfpRegister dst,
3196 const DwVfpRegister src,
3197 const Condition cond) {
3201 DCHECK(VfpRegisterIsAvailable(dst));
3202 DCHECK(VfpRegisterIsAvailable(src));
3204 dst.split_code(&vd, &d);
3206 src.split_code(&vm, &m);
3208 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
// Floating-point negate, single-precision: Sd = -Sm.
3213 void Assembler::vneg(
const SwVfpRegister dst,
const SwVfpRegister src,
3214 const Condition cond) {
3219 dst.split_code(&vd, &d);
3221 src.split_code(&vm, &m);
3223 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
// Floating-point absolute value, double-precision: Dd = |Dm|.
3228 void Assembler::vabs(
const DwVfpRegister dst,
3229 const DwVfpRegister src,
3230 const Condition cond) {
3234 DCHECK(VfpRegisterIsAvailable(dst));
3235 DCHECK(VfpRegisterIsAvailable(src));
3237 dst.split_code(&vd, &d);
3239 src.split_code(&vm, &m);
3240 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
// Floating-point absolute value, single-precision: Sd = |Sm|.
3245 void Assembler::vabs(
const SwVfpRegister dst,
const SwVfpRegister src,
3246 const Condition cond) {
3251 dst.split_code(&vd, &d);
3253 src.split_code(&vm, &m);
3254 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B7 | B6 |
// Floating-point add, double-precision: Dd = Dn + Dm.
3259 void Assembler::vadd(
const DwVfpRegister dst,
3260 const DwVfpRegister src1,
3261 const DwVfpRegister src2,
3262 const Condition cond) {
3268 DCHECK(VfpRegisterIsAvailable(dst));
3269 DCHECK(VfpRegisterIsAvailable(src1));
3270 DCHECK(VfpRegisterIsAvailable(src2));
3272 dst.split_code(&vd, &d);
3274 src1.split_code(&vn, &n);
3276 src2.split_code(&vm, &m);
3277 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
// Floating-point add, single-precision: Sd = Sn + Sm.
3282 void Assembler::vadd(
const SwVfpRegister dst,
const SwVfpRegister src1,
3283 const SwVfpRegister src2,
const Condition cond) {
3290 dst.split_code(&vd, &d);
3292 src1.split_code(&vn, &n);
3294 src2.split_code(&vm, &m);
3295 emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
3296 0x5 * B9 | n * B7 | m * B5 | vm);
// Floating-point subtract, double-precision: Dd = Dn - Dm.
3300 void Assembler::vsub(
const DwVfpRegister dst,
3301 const DwVfpRegister src1,
3302 const DwVfpRegister src2,
3303 const Condition cond) {
3309 DCHECK(VfpRegisterIsAvailable(dst));
3310 DCHECK(VfpRegisterIsAvailable(src1));
3311 DCHECK(VfpRegisterIsAvailable(src2));
3313 dst.split_code(&vd, &d);
3315 src1.split_code(&vn, &n);
3317 src2.split_code(&vm, &m);
3318 emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
3319 n*B7 | B6 | m*B5 | vm);
// Floating-point subtract, single-precision: Sd = Sn - Sm.
3323 void Assembler::vsub(
const SwVfpRegister dst,
const SwVfpRegister src1,
3324 const SwVfpRegister src2,
const Condition cond) {
3331 dst.split_code(&vd, &d);
3333 src1.split_code(&vn, &n);
3335 src2.split_code(&vm, &m);
3336 emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
3337 0x5 * B9 | n * B7 | B6 | m * B5 | vm);
// Floating-point multiply, double-precision: Dd = Dn * Dm.
3341 void Assembler::vmul(
const DwVfpRegister dst,
3342 const DwVfpRegister src1,
3343 const DwVfpRegister src2,
3344 const Condition cond) {
3350 DCHECK(VfpRegisterIsAvailable(dst));
3351 DCHECK(VfpRegisterIsAvailable(src1));
3352 DCHECK(VfpRegisterIsAvailable(src2));
3354 dst.split_code(&vd, &d);
3356 src1.split_code(&vn, &n);
3358 src2.split_code(&vm, &m);
3359 emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
// Floating-point multiply, single-precision: Sd = Sn * Sm.
3364 void Assembler::vmul(
const SwVfpRegister dst,
const SwVfpRegister src1,
3365 const SwVfpRegister src2,
const Condition cond) {
3372 dst.split_code(&vd, &d);
3374 src1.split_code(&vn, &n);
3376 src2.split_code(&vm, &m);
3377 emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
3378 0x5 * B9 | n * B7 | m * B5 | vm);
// Floating-point multiply-accumulate, double-precision: Dd += Dn * Dm.
3382 void Assembler::vmla(
const DwVfpRegister dst,
3383 const DwVfpRegister src1,
3384 const DwVfpRegister src2,
3385 const Condition cond) {
3389 DCHECK(VfpRegisterIsAvailable(dst));
3390 DCHECK(VfpRegisterIsAvailable(src1));
3391 DCHECK(VfpRegisterIsAvailable(src2));
3393 dst.split_code(&vd, &d);
3395 src1.split_code(&vn, &n);
3397 src2.split_code(&vm, &m);
3398 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
// Floating-point multiply-accumulate, single-precision: Sd += Sn * Sm.
3403 void Assembler::vmla(
const SwVfpRegister dst,
const SwVfpRegister src1,
3404 const SwVfpRegister src2,
const Condition cond) {
3409 dst.split_code(&vd, &d);
3411 src1.split_code(&vn, &n);
3413 src2.split_code(&vm, &m);
3414 emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
// Floating-point multiply-subtract, double-precision: Dd -= Dn * Dm.
3419 void Assembler::vmls(
const DwVfpRegister dst,
3420 const DwVfpRegister src1,
3421 const DwVfpRegister src2,
3422 const Condition cond) {
3426 DCHECK(VfpRegisterIsAvailable(dst));
3427 DCHECK(VfpRegisterIsAvailable(src1));
3428 DCHECK(VfpRegisterIsAvailable(src2));
3430 dst.split_code(&vd, &d);
3432 src1.split_code(&vn, &n);
3434 src2.split_code(&vm, &m);
3435 emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
// Floating-point multiply-subtract, single-precision: Sd -= Sn * Sm.
3440 void Assembler::vmls(
const SwVfpRegister dst,
const SwVfpRegister src1,
3441 const SwVfpRegister src2,
const Condition cond) {
3446 dst.split_code(&vd, &d);
3448 src1.split_code(&vn, &n);
3450 src2.split_code(&vm, &m);
3451 emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
// Floating-point divide, double-precision: Dd = Dn / Dm.
3456 void Assembler::vdiv(
const DwVfpRegister dst,
3457 const DwVfpRegister src1,
3458 const DwVfpRegister src2,
3459 const Condition cond) {
3465 DCHECK(VfpRegisterIsAvailable(dst));
3466 DCHECK(VfpRegisterIsAvailable(src1));
3467 DCHECK(VfpRegisterIsAvailable(src2));
3469 dst.split_code(&vd, &d);
3471 src1.split_code(&vn, &n);
3473 src2.split_code(&vm, &m);
3474 emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
// Floating-point divide, single-precision: Sd = Sn / Sm.
3479 void Assembler::vdiv(
const SwVfpRegister dst,
const SwVfpRegister src1,
3480 const SwVfpRegister src2,
const Condition cond) {
3487 dst.split_code(&vd, &d);
3489 src1.split_code(&vn, &n);
3491 src2.split_code(&vm, &m);
3492 emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
// Floating-point compare, double-precision: sets FPSCR flags from Dd ? Dm.
3497 void Assembler::vcmp(
const DwVfpRegister src1,
3498 const DwVfpRegister src2,
3499 const Condition cond) {
3504 DCHECK(VfpRegisterIsAvailable(src1));
3505 DCHECK(VfpRegisterIsAvailable(src2));
3507 src1.split_code(&vd, &d);
3509 src2.split_code(&vm, &m);
3510 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
// Floating-point compare, single-precision.
3515 void Assembler::vcmp(
const SwVfpRegister src1,
const SwVfpRegister src2,
3516 const Condition cond) {
3522 src1.split_code(&vd, &d);
3524 src2.split_code(&vm, &m);
3525 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
3526 0x5 * B9 | B6 | m * B5 | vm);
// Compare double-precision register against zero.  The immediate form of
// vcmp only supports #0.0, hence the DCHECK.
3530 void Assembler::vcmp(
const DwVfpRegister src1,
3532 const Condition cond) {
3537 DCHECK(VfpRegisterIsAvailable(src1));
3538 DCHECK_EQ(src2, 0.0);
3540 src1.split_code(&vd, &d);
3541 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
// Compare single-precision register against zero (only #0.0 is encodable).
3545 void Assembler::vcmp(
const SwVfpRegister src1,
const float src2,
3546 const Condition cond) {
3551 DCHECK_EQ(src2, 0.0);
3553 src1.split_code(&vd, &d);
3554 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
// ARMv8 IEEE-754 maxNum, double-precision.  These instructions are
// unconditional only, hence kSpecialCondition instead of a cond parameter.
3558 void Assembler::vmaxnm(
const DwVfpRegister dst,
const DwVfpRegister src1,
3559 const DwVfpRegister src2) {
3562 DCHECK(IsEnabled(ARMv8));
3564 dst.split_code(&vd, &d);
3566 src1.split_code(&vn, &n);
3568 src2.split_code(&vm, &m);
3570 emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
3571 0x5 * B9 | B8 | n * B7 | m * B5 | vm);
// ARMv8 maxNum, single-precision.
3574 void Assembler::vmaxnm(
const SwVfpRegister dst,
const SwVfpRegister src1,
3575 const SwVfpRegister src2) {
3578 DCHECK(IsEnabled(ARMv8));
3580 dst.split_code(&vd, &d);
3582 src1.split_code(&vn, &n);
3584 src2.split_code(&vm, &m);
3586 emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
3587 0x5 * B9 | n * B7 | m * B5 | vm);
// ARMv8 minNum, double-precision (B6 distinguishes min from max).
3590 void Assembler::vminnm(
const DwVfpRegister dst,
const DwVfpRegister src1,
3591 const DwVfpRegister src2) {
3594 DCHECK(IsEnabled(ARMv8));
3596 dst.split_code(&vd, &d);
3598 src1.split_code(&vn, &n);
3600 src2.split_code(&vm, &m);
3602 emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
3603 0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
// ARMv8 minNum, single-precision.
3606 void Assembler::vminnm(
const SwVfpRegister dst,
const SwVfpRegister src1,
3607 const SwVfpRegister src2) {
3610 DCHECK(IsEnabled(ARMv8));
3612 dst.split_code(&vd, &d);
3614 src1.split_code(&vn, &n);
3616 src2.split_code(&vm, &m);
3618 emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
3619 0x5 * B9 | n * B7 | B6 | m * B5 | vm);
// ARMv8 conditional select, double-precision: dst = cond ? src1 : src2.
// VSEL only encodes eq/ge/gt/vs; the other four conditions are realized by
// swapping the operands (swap statements not visible in this extract —
// confirm against the full file).
3622 void Assembler::vsel(Condition cond,
const DwVfpRegister dst,
3623 const DwVfpRegister src1,
const DwVfpRegister src2) {
3627 DCHECK(IsEnabled(ARMv8));
3629 dst.split_code(&vd, &d);
3631 src1.split_code(&vn, &n);
3633 src2.split_code(&vm, &m);
// cond bits 31:30 map onto the two-bit vsel condition field.
3642 int vsel_cond = (cond >> 30) & 0x3;
3643 if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
// NOTE(review): bitwise '|' on bool operands — correct here, but '||' is
// the conventional spelling.
3645 DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
3650 emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
3651 vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
// ARMv8 conditional select, single-precision.
3654 void Assembler::vsel(Condition cond,
const SwVfpRegister dst,
3655 const SwVfpRegister src1,
const SwVfpRegister src2) {
3659 DCHECK(IsEnabled(ARMv8));
3661 dst.split_code(&vd, &d);
3663 src1.split_code(&vn, &n);
3665 src2.split_code(&vm, &m);
3674 int vsel_cond = (cond >> 30) & 0x3;
3675 if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
// NOTE(review): same '|' vs '||' remark as the double-precision overload.
3677 DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
3682 emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
3683 vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
// Floating-point square root, double-precision: Dd = sqrt(Dm).
3686 void Assembler::vsqrt(
const DwVfpRegister dst,
3687 const DwVfpRegister src,
3688 const Condition cond) {
3692 DCHECK(VfpRegisterIsAvailable(dst));
3693 DCHECK(VfpRegisterIsAvailable(src));
3695 dst.split_code(&vd, &d);
3697 src.split_code(&vm, &m);
3698 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
// Floating-point square root, single-precision: Sd = sqrt(Sm).
3703 void Assembler::vsqrt(
const SwVfpRegister dst,
const SwVfpRegister src,
3704 const Condition cond) {
3709 dst.split_code(&vd, &d);
3711 src.split_code(&vm, &m);
3712 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
3713 0x3 * B6 | m * B5 | vm);
// Move core register to VFP system register (FPSCR, per the vmsr mnemonic).
3717 void Assembler::vmsr(Register dst, Condition cond) {
3721 emit(cond | 0xE * B24 | 0xE * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
// Move VFP system register (FPSCR) to core register.
3725 void Assembler::vmrs(Register dst, Condition cond) {
3729 emit(cond | 0xE * B24 | 0xF * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
// ARMv8 round-to-integral family.  Per the ARM mnemonics: vrinta = ties
// away from zero, vrintn = to nearest even, vrintp = toward +infinity,
// vrintm = toward -infinity.  All are unconditional (kSpecialCondition);
// the rounding mode selects bits 17:16, and B8 marks double-precision.
3733 void Assembler::vrinta(
const SwVfpRegister dst,
const SwVfpRegister src) {
3737 DCHECK(IsEnabled(ARMv8));
3739 dst.split_code(&vd, &d);
3741 src.split_code(&vm, &m);
3742 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
3743 0x5 * B9 | B6 | m * B5 | vm);
3747 void Assembler::vrinta(
const DwVfpRegister dst,
const DwVfpRegister src) {
3751 DCHECK(IsEnabled(ARMv8));
3753 dst.split_code(&vd, &d);
3755 src.split_code(&vm, &m);
3756 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
3757 0x5 * B9 | B8 | B6 | m * B5 | vm);
3761 void Assembler::vrintn(
const SwVfpRegister dst,
const SwVfpRegister src) {
3765 DCHECK(IsEnabled(ARMv8));
3767 dst.split_code(&vd, &d);
3769 src.split_code(&vm, &m);
3770 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
3771 vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
3775 void Assembler::vrintn(
const DwVfpRegister dst,
const DwVfpRegister src) {
3779 DCHECK(IsEnabled(ARMv8));
3781 dst.split_code(&vd, &d);
3783 src.split_code(&vm, &m);
3784 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
3785 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3789 void Assembler::vrintp(
const SwVfpRegister dst,
const SwVfpRegister src) {
3793 DCHECK(IsEnabled(ARMv8));
3795 dst.split_code(&vd, &d);
3797 src.split_code(&vm, &m);
3798 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
3799 vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
3803 void Assembler::vrintp(
const DwVfpRegister dst,
const DwVfpRegister src) {
3807 DCHECK(IsEnabled(ARMv8));
3809 dst.split_code(&vd, &d);
3811 src.split_code(&vm, &m);
3812 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
3813 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
3817 void Assembler::vrintm(
const SwVfpRegister dst,
const SwVfpRegister src) {
3821 DCHECK(IsEnabled(ARMv8));
3823 dst.split_code(&vd, &d);
3825 src.split_code(&vm, &m);
3826 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
3827 vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
3831 void Assembler::vrintm(
const DwVfpRegister dst,
const DwVfpRegister src) {
3835 DCHECK(IsEnabled(ARMv8));
3837 dst.split_code(&vd, &d);
3839 src.split_code(&vm, &m);
3840 emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
3841 vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
// ARMv8 round toward zero.  Unlike the vrinta/n/p/m family above, vrintz
// accepts a condition code.
3845 void Assembler::vrintz(
const SwVfpRegister dst,
const SwVfpRegister src,
3846 const Condition cond) {
3849 DCHECK(IsEnabled(ARMv8));
3851 dst.split_code(&vd, &d);
3853 src.split_code(&vm, &m);
3854 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
3855 0x5 * B9 | B7 | B6 | m * B5 | vm);
// Double-precision variant (B8 set).
3859 void Assembler::vrintz(
const DwVfpRegister dst,
const DwVfpRegister src,
3860 const Condition cond) {
3863 DCHECK(IsEnabled(ARMv8));
3865 dst.split_code(&vd, &d);
3867 src.split_code(&vm, &m);
3868 emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
3869 0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
// NEON multiple-single-element load: loads the register list in dst from
// memory at src.  Alignment and post-index register come from the memory
// operand.
3875 void Assembler::vld1(NeonSize size,
3876 const NeonListOperand& dst,
3877 const NeonMemOperand& src) {
3881 DCHECK(IsEnabled(NEON));
3883 dst.base().split_code(&vd, &d);
3884 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
3885 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
// NEON multiple-single-element store: mirror of vld1 (bit 21 clear for
// store direction).
3888 void Assembler::vst1(NeonSize size,
const NeonListOperand& src,
3889 const NeonMemOperand& dst) {
3893 DCHECK(IsEnabled(NEON));
3895 src.base().split_code(&vd, &d);
3896 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
3897 size*B6 | dst.align()*B4 | dst.rm().code());
// NEON lengthening move: widens each element of the D source into the Q
// destination; imm3 encodes the source element size, U the signedness.
3901 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
3905 DCHECK(IsEnabled(NEON));
3907 dst.split_code(&vd, &d);
3909 src.split_code(&vm, &m);
3911 int imm3 = 1 << NeonSz(dt);
3912 emit(0xFU * B28 | B25 | U * B24 | B23 | d * B22 | imm3 * B19 | vd * B12 |
3913 0xA * B8 | m * B5 | B4 | vm);
// NEON saturating narrowing move: narrows each element of the Q source into
// the D destination; op selects signed (2) vs unsigned (3) saturation.
3916 void Assembler::vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src) {
3919 DCHECK(IsEnabled(NEON));
3921 dst.split_code(&vd, &d);
3923 src.split_code(&vm, &m);
3924 int size = NeonSz(dt);
3926 int op = u != 0 ? 3 : 2;
3927 emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | 0x2 * B16 | vd * B12 |
3928 0x2 * B8 | op * B6 | m * B5 | vm);
// Packs a lane index into the combined opc1:opc2 field used by the scalar
// vmov encodings; valid index range depends on the element size
// (8-bit: 0-7, 16-bit: 0-3, 32-bit: 0-1).
3931 static int EncodeScalar(NeonDataType dt,
int index) {
3933 DCHECK_LE(0, index);
3937 DCHECK_GT(8, index);
3938 opc1_opc2 = 0x8 | index;
3942 DCHECK_GT(4, index);
3943 opc1_opc2 = 0x1 | (index << 1);
3947 DCHECK_GT(2, index);
3948 opc1_opc2 = index << 2;
3954 return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
// Move core register into one lane of a D register (vmov Dd[index], Rt).
// The S32/U32 forms predate NEON, hence the relaxed DCHECK.
3957 void Assembler::vmov(NeonDataType dt, DwVfpRegister dst,
int index,
3961 DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
3963 dst.split_code(&vd, &d);
3964 int opc1_opc2 = EncodeScalar(dt, index);
3965 emit(0xEEu * B24 | vd * B16 | src.code() * B12 | 0xB * B8 | d * B7 | B4 |
// Move one lane of a D register into a core register (vmov Rt, Dn[index]);
// u carries sign/zero-extension for sub-word lanes.
3969 void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
3973 DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
3975 src.split_code(&vn, &n);
3976 int opc1_opc2 = EncodeScalar(dt, index);
3978 emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
3979 n * B7 | B4 | opc1_opc2);
// Q-register move, implemented as vorr with both sources the same.
3982 void Assembler::vmov(QwNeonRegister dst, QwNeonRegister src) {
3985 vorr(dst, src, src);
// Duplicate a core register into every lane of a Q register.
3988 void Assembler::vdup(NeonSize size, QwNeonRegister dst, Register src) {
3989 DCHECK(IsEnabled(NEON));
4006 dst.split_code(&vd, &d);
4008 emit(al | 0x1D * B23 | B * B22 | B21 | vd * B16 | src.code() * B12 |
4009 0xB * B8 | d * B7 | E * B5 | B4);
// Distinguishes 64-bit D and 128-bit Q register operands for the shared
// NEON encoders.
4012 enum NeonRegType { NEON_D, NEON_Q };
// Splits a register code per its type; for Q registers the extra Q bit is
// OR-ed into *encoding.
4014 void NeonSplitCode(NeonRegType type,
int code,
int* vm,
int* m,
int* encoding) {
4015 if (type == NEON_D) {
4016 DwVfpRegister::split_code(code, vm, m);
4018 DCHECK_EQ(type, NEON_Q);
4019 QwNeonRegister::split_code(code, vm, m);
// Encodes vdup from a D-register lane; imm4 combines the element size and
// the lane index.  Neon64 is invalid for dup.
4024 static Instr EncodeNeonDupOp(NeonSize size, NeonRegType reg_type,
int dst_code,
4025 DwVfpRegister src,
int index) {
4026 DCHECK_NE(Neon64, size);
4027 int sz =
static_cast<int>(size);
4028 DCHECK_LE(0, index);
4029 DCHECK_GT(kSimd128Size / (1 << sz), index);
4030 int imm4 = (1 << sz) | ((index << (sz + 1)) & 0xF);
4033 NeonSplitCode(reg_type, dst_code, &vd, &d, &qbit);
4035 src.split_code(&vm, &m);
4037 return 0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 |
4038 0x18 * B7 | qbit | m * B5 | vm;
// Duplicate one lane of src into every lane of a D register.
4041 void Assembler::vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
4043 DCHECK(IsEnabled(NEON));
4045 emit(EncodeNeonDupOp(size, NEON_D, dst.code(), src, index));
// Duplicate one lane of src into every lane of a Q register.
4048 void Assembler::vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src,
4051 DCHECK(IsEnabled(NEON));
4052 emit(EncodeNeonDupOp(size, NEON_Q, dst.code(), src, index));
// Encodes a NEON vector conversion; exactly one side must be F32, the
// other S32 or U32.  op selects the direction and signedness.
4056 static Instr EncodeNeonVCVT(VFPType dst_type, QwNeonRegister dst,
4057 VFPType src_type, QwNeonRegister src) {
4058 DCHECK(src_type != dst_type);
4059 DCHECK(src_type == F32 || dst_type == F32);
4062 dst.split_code(&vd, &d);
4064 src.split_code(&vm, &m);
4067 if (src_type == F32) {
4068 DCHECK(dst_type == S32 || dst_type == U32);
4069 op = dst_type == U32 ? 3 : 2;
4071 DCHECK(src_type == S32 || src_type == U32);
4072 op = src_type == U32 ? 1 : 0;
4075 return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x3 * B9 | op * B7 |
// Qd(F32) <- Qm(S32), per-lane.
4079 void Assembler::vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src) {
4080 DCHECK(IsEnabled(NEON));
4081 DCHECK(VfpRegisterIsAvailable(dst));
4082 DCHECK(VfpRegisterIsAvailable(src));
4083 emit(EncodeNeonVCVT(F32, dst, S32, src));
// Qd(F32) <- Qm(U32), per-lane.
4086 void Assembler::vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src) {
4087 DCHECK(IsEnabled(NEON));
4088 DCHECK(VfpRegisterIsAvailable(dst));
4089 DCHECK(VfpRegisterIsAvailable(src));
4090 emit(EncodeNeonVCVT(F32, dst, U32, src));
// Qd(S32) <- Qm(F32), per-lane.
4093 void Assembler::vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src) {
4094 DCHECK(IsEnabled(NEON));
4095 DCHECK(VfpRegisterIsAvailable(dst));
4096 DCHECK(VfpRegisterIsAvailable(src));
4097 emit(EncodeNeonVCVT(S32, dst, F32, src));
// Qd(U32) <- Qm(F32), per-lane.
4100 void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
4101 DCHECK(IsEnabled(NEON));
4102 DCHECK(VfpRegisterIsAvailable(dst));
4103 DCHECK(VfpRegisterIsAvailable(src));
4104 emit(EncodeNeonVCVT(U32, dst, F32, src));
// NEON one-operand ops.  The ...F variants are the float forms of abs/neg.
4107 enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
// Builds the common two-register-misc encoding for the unary ops above;
// the switch-case labels are not visible in this extract, but each branch
// sets the distinguishing op bits.  VMVN/VSWP are size-agnostic and require
// size == Neon8.
4109 static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
4110 int dst_code,
int src_code) {
4111 int op_encoding = 0;
4114 DCHECK_EQ(Neon8, size);
4115 op_encoding = B10 | 0x3 * B7;
4118 DCHECK_EQ(Neon8, size);
4122 op_encoding = B16 | 0x6 * B7;
4125 DCHECK_EQ(Neon32, size);
4126 op_encoding = B16 | B10 | 0x6 * B7;
4129 op_encoding = B16 | 0x7 * B7;
4132 DCHECK_EQ(Neon32, size);
4133 op_encoding = B16 | B10 | 0x7 * B7;
4140 NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
4142 NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
4144 return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | vd * B12 | m * B5 |
// Bitwise NOT of a Q register: Qd = ~Qm.
4148 void Assembler::vmvn(QwNeonRegister dst, QwNeonRegister src) {
4151 DCHECK(IsEnabled(NEON));
4152 emit(EncodeNeonUnaryOp(VMVN, NEON_Q, Neon8, dst.code(), src.code()));
4155 void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
4156 DCHECK(IsEnabled(NEON));
4159 DCHECK(IsEnabled(NEON));
4160 emit(EncodeNeonUnaryOp(VSWP, NEON_D, Neon8, dst.code(), src.code()));
// Q-register swap: Qd <-> Qm.
4163 void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
4166 DCHECK(IsEnabled(NEON));
4167 emit(EncodeNeonUnaryOp(VSWP, NEON_Q, Neon8, dst.code(), src.code()));
// Per-lane float absolute value (32-bit lanes).
4170 void Assembler::vabs(QwNeonRegister dst, QwNeonRegister src) {
4173 DCHECK(IsEnabled(NEON));
4174 emit(EncodeNeonUnaryOp(VABSF, NEON_Q, Neon32, dst.code(), src.code()));
// Per-lane integer absolute value at the given element size.
4177 void Assembler::vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
4180 DCHECK(IsEnabled(NEON));
4181 emit(EncodeNeonUnaryOp(VABS, NEON_Q, size, dst.code(), src.code()));
// Per-lane float negate (32-bit lanes).
4184 void Assembler::vneg(QwNeonRegister dst, QwNeonRegister src) {
4187 DCHECK(IsEnabled(NEON));
4188 emit(EncodeNeonUnaryOp(VNEGF, NEON_Q, Neon32, dst.code(), src.code()));
// Per-lane integer negate at the given element size.
4191 void Assembler::vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
4194 DCHECK(IsEnabled(NEON));
4195 emit(EncodeNeonUnaryOp(VNEG, NEON_Q, size, dst.code(), src.code()));
// NEON three-register bitwise ops.
4198 enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
// Builds the shared three-register-same-length bitwise encoding; op selects
// bits 24 and 21:20 (switch-case labels not visible in this extract).
4200 static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op, NeonRegType reg_type,
4201 int dst_code,
int src_code1,
4203 int op_encoding = 0;
4206 op_encoding = 0x1 * B20;
4209 op_encoding = B24 | 0x3 * B20;
4212 op_encoding = B24 | 0x2 * B20;
4215 op_encoding = B24 | 0x1 * B20;
4221 op_encoding = 0x2 * B20;
4224 op_encoding = 0x3 * B20;
4234 NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
4236 NeonSplitCode(reg_type, src_code1, &vn, &n, &op_encoding);
4238 NeonSplitCode(reg_type, src_code2, &vm, &m, &op_encoding);
4240 return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
4241 n * B7 | m * B5 | B4 | vm;
// Bitwise AND: Qd = Qn & Qm.
4244 void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
4245 QwNeonRegister src2) {
4248 DCHECK(IsEnabled(NEON));
4249 emit(EncodeNeonBinaryBitwiseOp(VAND, NEON_Q, dst.code(), src1.code(),
// Bitwise select: lanes of Qd chosen from Qn/Qm under the Qd mask.
4253 void Assembler::vbsl(QwNeonRegister dst, QwNeonRegister src1,
4254 QwNeonRegister src2) {
4257 DCHECK(IsEnabled(NEON));
4258 emit(EncodeNeonBinaryBitwiseOp(VBSL, NEON_Q, dst.code(), src1.code(),
// Bitwise XOR on D registers: Dd = Dn ^ Dm.
4262 void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
4263 DwVfpRegister src2) {
4266 DCHECK(IsEnabled(NEON));
4267 emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_D, dst.code(), src1.code(),
// Bitwise XOR on Q registers: Qd = Qn ^ Qm.
4271 void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
4272 QwNeonRegister src2) {
4275 DCHECK(IsEnabled(NEON));
4276 emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_Q, dst.code(), src1.code(),
// Bitwise OR: Qd = Qn | Qm.
4280 void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
4281 QwNeonRegister src2) {
4284 DCHECK(IsEnabled(NEON));
4285 emit(EncodeNeonBinaryBitwiseOp(VORR, NEON_Q, dst.code(), src1.code(),
// Builds the three-register-same-length encoding for NEON float binary ops
// (32-bit lanes); op selects the distinguishing opcode bits (switch-case
// labels not visible in this extract).
4302 static Instr EncodeNeonBinOp(FPBinOp op, QwNeonRegister dst,
4303 QwNeonRegister src1, QwNeonRegister src2) {
4304 int op_encoding = 0;
4307 op_encoding = 0xD * B8;
4310 op_encoding = B21 | 0xD * B8;
4313 op_encoding = B24 | 0xD * B8 | B4;
4316 op_encoding = B21 | 0xF * B8;
4319 op_encoding = 0xF * B8;
4322 op_encoding = 0xF * B8 | B4;
4325 op_encoding = B21 | 0xF * B8 | B4;
4328 op_encoding = 0xE * B8;
4331 op_encoding = B24 | 0xE * B8;
4334 op_encoding = B24 | B21 | 0xE * B8;
4341 dst.split_code(&vd, &d);
4343 src1.split_code(&vn, &n);
4345 src2.split_code(&vm, &m);
4346 return 0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | n * B7 | B6 | m * B5 |
// Integer-lane variant: the NeonDataType supplies both the element size
// (bits 21:20) and, via u, the signed/unsigned bit (B24).
4364 static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
4365 QwNeonRegister dst, QwNeonRegister src1,
4366 QwNeonRegister src2) {
4367 int op_encoding = 0;
4370 op_encoding = 0x8 * B8;
4376 op_encoding = B24 | 0x8 * B8;
4379 op_encoding = 0x2 * B8 | B4;
4382 op_encoding = 0x9 * B8 | B4;
4385 op_encoding = 0x6 * B8 | B4;
4388 op_encoding = 0x6 * B8;
4391 op_encoding = 0x8 * B8 | B4;
4394 op_encoding = B24 | 0x8 * B8 | B4;
4397 op_encoding = 0x3 * B8 | B4;
4400 op_encoding = 0x3 * B8;
4407 dst.split_code(&vd, &d);
4409 src1.split_code(&vn, &n);
4411 src2.split_code(&vm, &m);
4412 int size = NeonSz(dt);
4414 return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
4415 n * B7 | B6 | m * B5 | vm | op_encoding;
// Convenience overload for ops where signedness is irrelevant: reuses the
// NeonDataType variant with the size reinterpreted as an (unsigned) type.
4418 static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size, QwNeonRegister dst,
4419 QwNeonRegister src1, QwNeonRegister src2) {
4422 return EncodeNeonBinOp(op, static_cast<NeonDataType>(size), dst, src1, src2);
// Per-lane float add: Qd = Qn + Qm (32-bit lanes).
4425 void Assembler::vadd(QwNeonRegister dst, QwNeonRegister src1,
4426 QwNeonRegister src2) {
4427 DCHECK(IsEnabled(NEON));
4430 emit(EncodeNeonBinOp(VADDF, dst, src1, src2));
// Per-lane integer add at the given element size.
4433 void Assembler::vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
4434 QwNeonRegister src2) {
4435 DCHECK(IsEnabled(NEON));
4438 emit(EncodeNeonBinOp(VADD, size, dst, src1, src2));
// Per-lane saturating add; signedness from dt.
4441 void Assembler::vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
4442 QwNeonRegister src2) {
4443 DCHECK(IsEnabled(NEON));
4446 emit(EncodeNeonBinOp(VQADD, dt, dst, src1, src2));
// Per-lane float subtract: Qd = Qn - Qm.
4449 void Assembler::vsub(QwNeonRegister dst, QwNeonRegister src1,
4450 QwNeonRegister src2) {
4451 DCHECK(IsEnabled(NEON));
4454 emit(EncodeNeonBinOp(VSUBF, dst, src1, src2));
// Per-lane integer subtract at the given element size.
4457 void Assembler::vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
4458 QwNeonRegister src2) {
4459 DCHECK(IsEnabled(NEON));
4462 emit(EncodeNeonBinOp(VSUB, size, dst, src1, src2));
// Per-lane saturating subtract; signedness from dt.
4465 void Assembler::vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
4466 QwNeonRegister src2) {
4467 DCHECK(IsEnabled(NEON));
4470 emit(EncodeNeonBinOp(VQSUB, dt, dst, src1, src2));
// Per-lane float multiply: Qd = Qn * Qm.
4473 void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
4474 QwNeonRegister src2) {
4475 DCHECK(IsEnabled(NEON));
4478 emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
// Per-lane integer multiply at the given element size.
4481 void Assembler::vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
4482 QwNeonRegister src2) {
4483 DCHECK(IsEnabled(NEON));
4486 emit(EncodeNeonBinOp(VMUL, size, dst, src1, src2));
// Per-lane float minimum.
4489 void Assembler::vmin(QwNeonRegister dst, QwNeonRegister src1,
4490 QwNeonRegister src2) {
4491 DCHECK(IsEnabled(NEON));
4494 emit(EncodeNeonBinOp(VMINF, dst, src1, src2));
// Per-lane integer minimum; signedness from dt.
4497 void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
4498 QwNeonRegister src2) {
4499 DCHECK(IsEnabled(NEON));
4502 emit(EncodeNeonBinOp(VMIN, dt, dst, src1, src2));
// Per-lane float maximum.
4505 void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
4506 QwNeonRegister src2) {
4507 DCHECK(IsEnabled(NEON));
4510 emit(EncodeNeonBinOp(VMAXF, dst, src1, src2));
// Per-lane integer maximum; signedness from dt.
4513 void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
4514 QwNeonRegister src2) {
4515 DCHECK(IsEnabled(NEON));
4518 emit(EncodeNeonBinOp(VMAX, dt, dst, src1, src2));
// NEON immediate shift ops: left shift, right shift, shift-left-insert,
// shift-right-insert.
4521 enum NeonShiftOp { VSHL, VSHR, VSLI, VSRI };
// Builds the two-register-and-shift-amount encoding.  Left shifts encode
// imm6 = size + shift (shift in [0, size)); right shifts encode
// imm6 = 2*size - shift (shift in (0, size]).
4523 static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonSize size,
bool is_unsigned,
4524 NeonRegType reg_type,
int dst_code,
int src_code,
4527 int size_in_bits = kBitsPerByte << static_cast<int>(size);
4528 int op_encoding = 0;
4531 DCHECK(shift >= 0 && size_in_bits > shift);
4532 imm6 = size_in_bits + shift;
4533 op_encoding = 0x5 * B8;
4537 DCHECK(shift > 0 && size_in_bits >= shift);
4538 imm6 = 2 * size_in_bits - shift;
4539 if (is_unsigned) op_encoding |= B24;
4543 DCHECK(shift >= 0 && size_in_bits > shift);
4544 imm6 = size_in_bits + shift;
4547 op_encoding = B24 | 0x5 * B8 | L * B7;
4551 DCHECK(shift > 0 && size_in_bits >= shift);
4552 imm6 = 2 * size_in_bits - shift;
4555 op_encoding = B24 | 0x4 * B8 | L * B7;
4564 NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
4566 NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
4568 return 0x1E5U * B23 | d * B22 | imm6 * B16 | vd * B12 | m * B5 | B4 | vm |
// Per-lane shift left by immediate (signedness irrelevant for left shift).
4572 void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
4574 DCHECK(IsEnabled(NEON));
4577 emit(EncodeNeonShiftOp(VSHL, NeonDataTypeToSize(dt),
false, NEON_Q,
4578 dst.code(), src.code(), shift));
// Per-lane shift right by immediate; arithmetic vs logical from NeonU(dt).
4581 void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
4583 DCHECK(IsEnabled(NEON));
4586 emit(EncodeNeonShiftOp(VSHR, NeonDataTypeToSize(dt), NeonU(dt), NEON_Q,
4587 dst.code(), src.code(), shift));
// Shift left and insert: shifted src bits replace the low bits of dst.
4590 void Assembler::vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
4592 DCHECK(IsEnabled(NEON));
4595 emit(EncodeNeonShiftOp(VSLI, size,
false, NEON_D, dst.code(), src.code(),
// Shift right and insert: shifted src bits replace the high bits of dst.
4599 void Assembler::vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
4601 DCHECK(IsEnabled(NEON));
4604 emit(EncodeNeonShiftOp(VSRI, size,
false, NEON_D, dst.code(), src.code(),
// Encodes the reciprocal / reciprocal-square-root estimate instructions;
// bit 7 distinguishes vrsqrte from vrecpe.
4608 static Instr EncodeNeonEstimateOp(
bool is_rsqrt, QwNeonRegister dst,
4609 QwNeonRegister src) {
4611 dst.split_code(&vd, &d);
4613 src.split_code(&vm, &m);
4614 int rsqrt = is_rsqrt ? 1 : 0;
4615 return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x5 * B8 |
4616 rsqrt * B7 | B6 | m * B5 | vm;
// Per-lane reciprocal estimate.
4619 void Assembler::vrecpe(QwNeonRegister dst, QwNeonRegister src) {
4620 DCHECK(IsEnabled(NEON));
4623 emit(EncodeNeonEstimateOp(
false, dst, src));
// Per-lane reciprocal square-root estimate.
4626 void Assembler::vrsqrte(QwNeonRegister dst, QwNeonRegister src) {
4627 DCHECK(IsEnabled(NEON));
4630 emit(EncodeNeonEstimateOp(
true, dst, src));
// Newton-Raphson refinement step for reciprocal estimate.
4633 void Assembler::vrecps(QwNeonRegister dst, QwNeonRegister src1,
4634 QwNeonRegister src2) {
4635 DCHECK(IsEnabled(NEON));
4638 emit(EncodeNeonBinOp(VRECPS, dst, src1, src2));
// Newton-Raphson refinement step for reciprocal square-root estimate.
4641 void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
4642 QwNeonRegister src2) {
4643 DCHECK(IsEnabled(NEON));
4646 emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
4649 enum NeonPairwiseOp { VPADD, VPMIN, VPMAX };
4651 static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
4652 DwVfpRegister dst, DwVfpRegister src1,
4653 DwVfpRegister src2) {
4654 int op_encoding = 0;
4657 op_encoding = 0xB * B8 | B4;
4660 op_encoding = 0xA * B8 | B4;
4663 op_encoding = 0xA * B8;
4670 dst.split_code(&vd, &d);
4672 src1.split_code(&vn, &n);
4674 src2.split_code(&vm, &m);
4675 int size = NeonSz(dt);
4677 return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
4678 n * B7 | m * B5 | vm | op_encoding;
4681 void Assembler::vpadd(DwVfpRegister dst, DwVfpRegister src1,
4682 DwVfpRegister src2) {
4683 DCHECK(IsEnabled(NEON));
4687 dst.split_code(&vd, &d);
4689 src1.split_code(&vn, &n);
4691 src2.split_code(&vm, &m);
4693 emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
4697 void Assembler::vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
4698 DwVfpRegister src2) {
4699 DCHECK(IsEnabled(NEON));
4702 emit(EncodeNeonPairwiseOp(VPADD, NeonSizeToDataType(size), dst, src1, src2));
4705 void Assembler::vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
4706 DwVfpRegister src2) {
4707 DCHECK(IsEnabled(NEON));
4710 emit(EncodeNeonPairwiseOp(VPMIN, dt, dst, src1, src2));
4713 void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
4714 DwVfpRegister src2) {
4715 DCHECK(IsEnabled(NEON));
4718 emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
4721 void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
4722 QwNeonRegister src2) {
4723 DCHECK(IsEnabled(NEON));
4726 emit(EncodeNeonBinOp(VTST, size, dst, src1, src2));
4729 void Assembler::vceq(QwNeonRegister dst, QwNeonRegister src1,
4730 QwNeonRegister src2) {
4731 DCHECK(IsEnabled(NEON));
4734 emit(EncodeNeonBinOp(VCEQF, dst, src1, src2));
4737 void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
4738 QwNeonRegister src2) {
4739 DCHECK(IsEnabled(NEON));
4742 emit(EncodeNeonBinOp(VCEQ, size, dst, src1, src2));
4745 void Assembler::vcge(QwNeonRegister dst, QwNeonRegister src1,
4746 QwNeonRegister src2) {
4747 DCHECK(IsEnabled(NEON));
4750 emit(EncodeNeonBinOp(VCGEF, dst, src1, src2));
4753 void Assembler::vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
4754 QwNeonRegister src2) {
4755 DCHECK(IsEnabled(NEON));
4758 emit(EncodeNeonBinOp(VCGE, dt, dst, src1, src2));
4761 void Assembler::vcgt(QwNeonRegister dst, QwNeonRegister src1,
4762 QwNeonRegister src2) {
4763 DCHECK(IsEnabled(NEON));
4766 emit(EncodeNeonBinOp(VCGTF, dst, src1, src2));
4769 void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
4770 QwNeonRegister src2) {
4771 DCHECK(IsEnabled(NEON));
4774 emit(EncodeNeonBinOp(VCGT, dt, dst, src1, src2));
4777 void Assembler::vext(QwNeonRegister dst, QwNeonRegister src1,
4778 QwNeonRegister src2,
int bytes) {
4779 DCHECK(IsEnabled(NEON));
4783 dst.split_code(&vd, &d);
4785 src1.split_code(&vn, &n);
4787 src2.split_code(&vm, &m);
4788 DCHECK_GT(16, bytes);
4789 emit(0x1E5U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | bytes * B8 |
4790 n * B7 | B6 | m * B5 | vm);
4793 enum NeonSizedOp { VZIP, VUZP, VREV16, VREV32, VREV64, VTRN };
4795 static Instr EncodeNeonSizedOp(NeonSizedOp op, NeonRegType reg_type,
4796 NeonSize size,
int dst_code,
int src_code) {
4797 int op_encoding = 0;
4800 op_encoding = 0x2 * B16 | 0x3 * B7;
4803 op_encoding = 0x2 * B16 | 0x2 * B7;
4806 op_encoding = 0x2 * B7;
4809 op_encoding = 0x1 * B7;
4815 op_encoding = 0x2 * B16 | B7;
4822 NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
4824 NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
4826 int sz =
static_cast<int>(size);
4827 return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | m * B5 |
4831 void Assembler::vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
4832 if (size == Neon32) {
4833 vtrn(size, src1, src2);
4835 DCHECK(IsEnabled(NEON));
4838 emit(EncodeNeonSizedOp(VZIP, NEON_D, size, src1.code(), src2.code()));
4842 void Assembler::vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
4843 DCHECK(IsEnabled(NEON));
4846 emit(EncodeNeonSizedOp(VZIP, NEON_Q, size, src1.code(), src2.code()));
4849 void Assembler::vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
4850 if (size == Neon32) {
4851 vtrn(size, src1, src2);
4853 DCHECK(IsEnabled(NEON));
4856 emit(EncodeNeonSizedOp(VUZP, NEON_D, size, src1.code(), src2.code()));
4860 void Assembler::vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
4861 DCHECK(IsEnabled(NEON));
4864 emit(EncodeNeonSizedOp(VUZP, NEON_Q, size, src1.code(), src2.code()));
4867 void Assembler::vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
4868 DCHECK(IsEnabled(NEON));
4871 emit(EncodeNeonSizedOp(VREV16, NEON_Q, size, dst.code(), src.code()));
4874 void Assembler::vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
4875 DCHECK(IsEnabled(NEON));
4878 emit(EncodeNeonSizedOp(VREV32, NEON_Q, size, dst.code(), src.code()));
4881 void Assembler::vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
4882 DCHECK(IsEnabled(NEON));
4885 emit(EncodeNeonSizedOp(VREV64, NEON_Q, size, dst.code(), src.code()));
4888 void Assembler::vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
4889 DCHECK(IsEnabled(NEON));
4892 emit(EncodeNeonSizedOp(VTRN, NEON_D, size, src1.code(), src2.code()));
4895 void Assembler::vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
4896 DCHECK(IsEnabled(NEON));
4899 emit(EncodeNeonSizedOp(VTRN, NEON_Q, size, src1.code(), src2.code()));
4903 static Instr EncodeNeonVTB(DwVfpRegister dst,
const NeonListOperand& list,
4904 DwVfpRegister index,
bool vtbx) {
4910 dst.split_code(&vd, &d);
4912 list.base().split_code(&vn, &n);
4914 index.split_code(&vm, &m);
4915 int op = vtbx ? 1 : 0;
4916 return 0x1E7U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | 0x2 * B10 |
4917 list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
4920 void Assembler::vtbl(DwVfpRegister dst,
const NeonListOperand& list,
4921 DwVfpRegister index) {
4922 DCHECK(IsEnabled(NEON));
4923 emit(EncodeNeonVTB(dst, list, index,
false));
4926 void Assembler::vtbx(DwVfpRegister dst,
const NeonListOperand& list,
4927 DwVfpRegister index) {
4928 DCHECK(IsEnabled(NEON));
4929 emit(EncodeNeonVTB(dst, list, index,
true));
4933 void Assembler::nop(
int type) {
4939 DCHECK(0 <= type && type <= 14);
4940 emit(al | 13*B21 | type*B12 | type);
4943 void Assembler::pop() { add(sp, sp, Operand(kPointerSize)); }
4945 bool Assembler::IsMovT(Instr instr) {
4946 instr &= ~(((kNumberOfConditions - 1) << 28) |
4947 ((kNumRegisters-1)*B12) |
4948 EncodeMovwImmediate(0xFFFF));
4949 return instr == kMovtPattern;
4953 bool Assembler::IsMovW(Instr instr) {
4954 instr &= ~(((kNumberOfConditions - 1) << 28) |
4955 ((kNumRegisters-1)*B12) |
4956 EncodeMovwImmediate(0xFFFF));
4957 return instr == kMovwPattern;
4961 Instr Assembler::GetMovTPattern() {
return kMovtPattern; }
4964 Instr Assembler::GetMovWPattern() {
return kMovwPattern; }
4967 Instr Assembler::EncodeMovwImmediate(
uint32_t immediate) {
4968 DCHECK_LT(immediate, 0x10000);
4969 return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
4973 Instr Assembler::PatchMovwImmediate(Instr instruction,
uint32_t immediate) {
4974 instruction &= ~EncodeMovwImmediate(0xFFFF);
4975 return instruction | EncodeMovwImmediate(immediate);
4979 int Assembler::DecodeShiftImm(Instr instr) {
4980 int rotate = Instruction::RotateValue(instr) * 2;
4981 int immed8 = Instruction::Immed8Value(instr);
4982 return base::bits::RotateRight32(immed8, rotate);
4986 Instr Assembler::PatchShiftImm(Instr instr,
int immed) {
4989 bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8,
nullptr);
4992 return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
4996 bool Assembler::IsNop(Instr instr,
int type) {
4997 DCHECK(0 <= type && type <= 14);
4999 return instr == (al | 13*B21 | type*B12 | type);
5003 bool Assembler::IsMovImmed(Instr instr) {
5004 return (instr & kMovImmedMask) == kMovImmedPattern;
5008 bool Assembler::IsOrrImmed(Instr instr) {
5009 return (instr & kOrrImmedMask) == kOrrImmedPattern;
5014 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
5017 return FitsShifter(imm32, &dummy1, &dummy2,
nullptr);
5021 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
5022 return is_uint12(abs(imm32));
5027 void Assembler::RecordConstPool(
int size) {
5030 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
5034 void Assembler::GrowBuffer() {
5035 if (!own_buffer_) FATAL(
"external code buffer is too small");
5039 if (buffer_size_ < 1 * MB) {
5040 desc.buffer_size = 2*buffer_size_;
5042 desc.buffer_size = buffer_size_ + 1*MB;
5047 if (desc.buffer_size > kMaximalBufferSize) {
5048 V8::FatalProcessOutOfMemory(
nullptr,
"Assembler::GrowBuffer");
5052 desc.buffer = NewArray<byte>(desc.buffer_size);
5054 desc.instr_size = pc_offset();
5055 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
5059 int pc_delta = desc.buffer - buffer_;
5060 int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
5061 MemMove(desc.buffer, buffer_, desc.instr_size);
5062 MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
5066 DeleteArray(buffer_);
5067 buffer_ = desc.buffer;
5068 buffer_size_ = desc.buffer_size;
5070 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
5071 reloc_info_writer.last_pc() + pc_delta);
5079 void Assembler::db(uint8_t data) {
5082 DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
5084 *
reinterpret_cast<uint8_t*
>(pc_) = data;
5085 pc_ +=
sizeof(uint8_t);
5089 void Assembler::dd(
uint32_t data) {
5092 DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
5094 *
reinterpret_cast<uint32_t*
>(pc_) = data;
5099 void Assembler::dq(uint64_t value) {
5102 DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
5104 *
reinterpret_cast<uint64_t*
>(pc_) = value;
5105 pc_ +=
sizeof(uint64_t);
5108 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
5109 if (!ShouldRecordRelocInfo(rmode))
return;
5110 DCHECK_GE(buffer_space(), kMaxRelocSize);
5111 RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
5112 reloc_info_writer.Write(&rinfo);
5115 void Assembler::ConstantPoolAddEntry(
int position, RelocInfo::Mode rmode,
5117 DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
5122 bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) ||
5123 (rmode == RelocInfo::CODE_TARGET && value != 0);
5124 DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
5125 if (pending_32_bit_constants_.empty()) {
5126 first_const_pool_32_use_ = position;
5128 ConstantPoolEntry entry(position, value, sharing_ok, rmode);
5130 bool shared =
false;
5133 for (
size_t i = 0;
i < pending_32_bit_constants_.size();
i++) {
5134 ConstantPoolEntry& current_entry = pending_32_bit_constants_[
i];
5135 if (!current_entry.sharing_ok())
continue;
5136 if (entry.value() == current_entry.value() &&
5137 entry.rmode() == current_entry.rmode()) {
5138 entry.set_merged_index(
i);
5145 pending_32_bit_constants_.push_back(entry);
5149 BlockConstPoolFor(1);
5152 if (MustOutputRelocInfo(rmode,
this) && !shared) {
5153 RecordRelocInfo(rmode);
5157 void Assembler::BlockConstPoolFor(
int instructions) {
5158 int pc_limit = pc_offset() + instructions * kInstrSize;
5159 if (no_const_pool_before_ < pc_limit) {
5162 int start = pc_limit + kInstrSize + 2 * kPointerSize;
5163 DCHECK(pending_32_bit_constants_.empty() ||
5164 (start < first_const_pool_32_use_ + kMaxDistToIntPool));
5166 no_const_pool_before_ = pc_limit;
5169 if (next_buffer_check_ < no_const_pool_before_) {
5170 next_buffer_check_ = no_const_pool_before_;
5175 void Assembler::CheckConstPool(
bool force_emit,
bool require_jump) {
5179 if (is_const_pool_blocked()) {
5181 DCHECK(!force_emit);
5186 if (pending_32_bit_constants_.empty()) {
5188 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
5195 int jump_instr = require_jump ? kInstrSize : 0;
5196 int size_up_to_marker = jump_instr + kInstrSize;
5197 int estimated_size_after_marker =
5198 pending_32_bit_constants_.size() * kPointerSize;
5199 int estimated_size = size_up_to_marker + estimated_size_after_marker;
5210 DCHECK(!pending_32_bit_constants_.empty());
5211 bool need_emit =
false;
5212 int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
5213 if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
5214 (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
5217 if (!need_emit)
return;
5221 int size_after_marker = estimated_size_after_marker;
5223 for (
size_t i = 0;
i < pending_32_bit_constants_.size();
i++) {
5224 ConstantPoolEntry& entry = pending_32_bit_constants_[
i];
5225 if (entry.is_merged()) size_after_marker -= kPointerSize;
5228 int size = size_up_to_marker + size_after_marker;
5230 int needed_space = size + kGap;
5231 while (buffer_space() <= needed_space) GrowBuffer();
5235 BlockConstPoolScope block_const_pool(
this);
5236 RecordComment(
"[ Constant Pool");
5237 RecordConstPool(size);
5250 emit(kConstantPoolMarker |
5251 EncodeConstantPoolLength(size_after_marker / kPointerSize));
5254 for (
size_t i = 0;
i < pending_32_bit_constants_.size();
i++) {
5255 ConstantPoolEntry& entry = pending_32_bit_constants_[
i];
5256 Instr instr = instr_at(entry.position());
5259 DCHECK(!IsVldrDPcImmediateOffset(instr));
5260 DCHECK(!IsMovW(instr));
5261 DCHECK(IsLdrPcImmediateOffset(instr) &&
5262 GetLdrRegisterImmediateOffset(instr) == 0);
5264 int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
5265 DCHECK(is_uint12(delta));
5271 if (entry.is_merged()) {
5272 DCHECK(entry.sharing_ok());
5273 ConstantPoolEntry& merged =
5274 pending_32_bit_constants_[entry.merged_index()];
5275 DCHECK(entry.value() == merged.value());
5276 Instr merged_instr = instr_at(merged.position());
5277 DCHECK(IsLdrPcImmediateOffset(merged_instr));
5278 delta = GetLdrRegisterImmediateOffset(merged_instr);
5279 delta += merged.position() - entry.position();
5281 instr_at_put(entry.position(),
5282 SetLdrRegisterImmediateOffset(instr, delta));
5283 if (!entry.is_merged()) {
5284 emit(entry.value());
5288 pending_32_bit_constants_.clear();
5290 first_const_pool_32_use_ = -1;
5294 DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
5296 if (after_pool.is_linked()) {
5303 next_buffer_check_ = pc_offset() + kCheckPoolInterval;
5306 PatchingAssembler::PatchingAssembler(
const AssemblerOptions& options,
5307 byte* address,
int instructions)
5308 : Assembler(options, address, instructions * kInstrSize + kGap) {
5309 DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
5312 PatchingAssembler::~PatchingAssembler() {
5314 DCHECK(pending_32_bit_constants_.empty());
5317 DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
5318 DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
5321 void PatchingAssembler::Emit(Address addr) { emit(static_cast<Instr>(addr)); }
5323 void PatchingAssembler::PadWithNops() {
5324 DCHECK_LE(pc_, buffer_ + buffer_size_ - kGap);
5325 while (pc_ < buffer_ + buffer_size_ - kGap) {
5330 UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
5331 : assembler_(assembler),
5332 old_available_(*assembler->GetScratchRegisterList()),
5333 old_available_vfp_(*assembler->GetScratchVfpRegisterList()) {}
5335 UseScratchRegisterScope::~UseScratchRegisterScope() {
5336 *assembler_->GetScratchRegisterList() = old_available_;
5337 *assembler_->GetScratchVfpRegisterList() = old_available_vfp_;
5340 Register UseScratchRegisterScope::Acquire() {
5341 RegList* available = assembler_->GetScratchRegisterList();
5342 DCHECK_NOT_NULL(available);
5343 DCHECK_NE(*available, 0);
5344 int index =
static_cast<int>(base::bits::CountTrailingZeros32(*available));
5345 Register reg = Register::from_code(index);
5346 *available &= ~reg.bit();
5353 #endif // V8_TARGET_ARCH_ARM