8 #if V8_TARGET_ARCH_S390 10 #include "src/base/bits.h" 11 #include "src/base/division-by-constant.h" 12 #include "src/bootstrapper.h" 13 #include "src/callable.h" 14 #include "src/code-factory.h" 15 #include "src/code-stubs.h" 16 #include "src/counters.h" 17 #include "src/debug/debug.h" 18 #include "src/external-reference-table.h" 19 #include "src/frames-inl.h" 20 #include "src/macro-assembler.h" 21 #include "src/objects/smi.h" 22 #include "src/register-configuration.h" 23 #include "src/runtime/runtime.h" 24 #include "src/snapshot/embedded-data.h" 25 #include "src/snapshot/snapshot.h" 26 #include "src/wasm/wasm-code-manager.h" 31 #include "src/s390/macro-assembler-s390.h" 37 MacroAssembler::MacroAssembler(Isolate* isolate,
38 const AssemblerOptions& options,
void* buffer,
39 int size, CodeObjectRequired create_code_object)
40 : TurboAssembler(isolate, options, buffer, size, create_code_object) {
41 if (create_code_object == CodeObjectRequired::kYes) {
47 code_object_ = Handle<HeapObject>::New(
48 *isolate->factory()->NewSelfReferenceMarker(), isolate);
// Computes (without emitting any code) how many bytes of stack
// PushCallerSaved/PopCallerSaved will consume for the same arguments:
// one kPointerSize slot per JS caller-saved GP register not excluded,
// plus the caller-saved double registers when fp_mode == kSaveFPRegs.
// NOTE(review): this extraction is missing lines (the declaration of
// `bytes`, closing braces and the return statement are not visible) --
// confirm against the full file before editing.
52 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
55 Register exclusion3)
const {
57 RegList exclusions = 0;
// The exclusion checks are intentionally nested: exclusion2 is only
// honoured when exclusion1 was supplied, and exclusion3 only when
// exclusion2 was (mirrors PushCallerSaved/PopCallerSaved below).
58 if (exclusion1 != no_reg) {
59 exclusions |= exclusion1.bit();
60 if (exclusion2 != no_reg) {
61 exclusions |= exclusion2.bit();
62 if (exclusion3 != no_reg) {
63 exclusions |= exclusion3.bit();
// Count the caller-saved GP registers that remain after exclusions.
68 RegList list = kJSCallerSaved & ~exclusions;
69 bytes += NumRegs(list) * kPointerSize;
71 if (fp_mode == kSaveFPRegs) {
// Doubles are saved in full kDoubleSize slots.
72 bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
// Pushes all JS caller-saved GP registers (minus up to three exclusions)
// and, when fp_mode == kSaveFPRegs, the caller-saved double registers.
// Returns the number of bytes pushed, matching
// RequiredStackSizeForCallerSaved for the same arguments.
// NOTE(review): extraction truncated -- the MultiPush(list) call, the
// `bytes` declaration, closing braces and the return are not visible here.
78 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
79 Register exclusion2, Register exclusion3) {
81 RegList exclusions = 0;
// Nested on purpose: a later exclusion only counts if the earlier
// ones were supplied (same pattern as RequiredStackSizeForCallerSaved).
82 if (exclusion1 != no_reg) {
83 exclusions |= exclusion1.bit();
84 if (exclusion2 != no_reg) {
85 exclusions |= exclusion2.bit();
86 if (exclusion3 != no_reg) {
87 exclusions |= exclusion3.bit();
92 RegList list = kJSCallerSaved & ~exclusions;
94 bytes += NumRegs(list) * kPointerSize;
96 if (fp_mode == kSaveFPRegs) {
// Doubles are pushed after the GP registers.
97 MultiPushDoubles(kCallerSavedDoubles);
98 bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
// Inverse of PushCallerSaved: pops in the reverse order (doubles first,
// then the GP registers). Returns the number of bytes popped.
// NOTE(review): extraction truncated -- the MultiPop(list) call, `bytes`
// declaration, closing braces and the return are not visible here.
104 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
105 Register exclusion2, Register exclusion3) {
107 if (fp_mode == kSaveFPRegs) {
// Doubles were pushed last, so they are popped first.
108 MultiPopDoubles(kCallerSavedDoubles);
109 bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
112 RegList exclusions = 0;
// Same nested-exclusion pattern as PushCallerSaved; the two must stay
// in sync or the pop will restore the wrong registers.
113 if (exclusion1 != no_reg) {
114 exclusions |= exclusion1.bit();
115 if (exclusion2 != no_reg) {
116 exclusions |= exclusion2.bit();
117 if (exclusion3 != no_reg) {
118 exclusions |= exclusion3.bit();
123 RegList list = kJSCallerSaved & ~exclusions;
125 bytes += NumRegs(list) * kPointerSize;
// Loads entry |constant_index| of the builtins constants table into
// |destination|. The table is itself loaded via the root register, so
// this works in isolate-independent code.
130 void TurboAssembler::LoadFromConstantsTable(Register destination,
131 int constant_index) {
// The table must never move, otherwise the offset computed below
// would be invalidated.
132 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
// Offset of the element inside the FixedArray, pre-biased by the heap
// object tag so it can be used directly in a MemOperand.
135 FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
137 CHECK(is_uint19(offset));
// r0 would clash with its use as an implicit scratch/zero register.
138 DCHECK_NE(destination, r0);
139 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
// r1 is passed as scratch for the load.
140 LoadP(destination, MemOperand(destination, offset), r1);
// Loads a pointer-sized value located at |offset| from the root
// register (kRootRegister) into |destination|.
143 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
144 LoadP(destination, MemOperand(kRootRegister, offset));
// Materializes kRootRegister + offset into |destination|, picking the
// shortest s390 encoding: plain register copy for offset 0, LA for
// unsigned 12-bit offsets, LAY for signed 20-bit offsets.
// NOTE(review): the `if (offset == 0)` guard for the first branch is on
// a line dropped by this extraction -- confirm against the full file.
147 void TurboAssembler::LoadRootRegisterOffset(Register destination,
150 LoadRR(destination, kRootRegister);
151 }
else if (is_uint12(offset)) {
// LA: 12-bit unsigned displacement.
152 la(destination, MemOperand(kRootRegister, offset));
// LAY: 20-bit signed displacement; anything larger is a bug.
154 DCHECK(is_int20(offset));
155 lay(destination, MemOperand(kRootRegister, offset));
159 void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); }
161 void MacroAssembler::JumpToJSEntry(Register target) {
166 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
170 if (cond != al) b(NegateCondition(cond), &skip);
172 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
174 mov(ip, Operand(target, rmode));
180 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
182 DCHECK(!RelocInfo::IsCodeTarget(rmode));
183 Jump(static_cast<intptr_t>(target), rmode, cond);
186 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
188 DCHECK(RelocInfo::IsCodeTarget(rmode));
189 if (FLAG_embedded_builtins) {
190 if (root_array_available_ && options().isolate_independent_code) {
191 Register scratch = r1;
192 IndirectLoadConstant(scratch, code);
193 la(scratch, MemOperand(scratch, Code::kHeaderSize - kHeapObjectTag));
196 }
else if (options().inline_offheap_trampolines) {
197 int builtin_index = Builtins::kNoBuiltinId;
198 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
199 Builtins::IsIsolateIndependent(builtin_index)) {
201 RecordCommentForOffHeapTrampoline(builtin_index);
202 EmbeddedData d = EmbeddedData::FromBlob();
203 Address entry = d.InstructionStartOfBuiltin(builtin_index);
206 mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
212 jump(code, rmode, cond);
215 void TurboAssembler::Call(Register target) {
220 void MacroAssembler::CallJSEntry(Register target) {
221 DCHECK(target == r4);
225 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
226 RelocInfo::Mode rmode,
230 #if V8_TARGET_ARCH_S390X 238 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
242 mov(ip, Operand(target, rmode));
246 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
248 DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al);
250 if (FLAG_embedded_builtins) {
251 if (root_array_available_ && options().isolate_independent_code) {
254 IndirectLoadConstant(ip, code);
255 la(ip, MemOperand(ip, Code::kHeaderSize - kHeapObjectTag));
258 }
else if (options().inline_offheap_trampolines) {
259 int builtin_index = Builtins::kNoBuiltinId;
260 if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
261 Builtins::IsIsolateIndependent(builtin_index)) {
263 RecordCommentForOffHeapTrampoline(builtin_index);
264 DCHECK(Builtins::IsBuiltinId(builtin_index));
265 EmbeddedData d = EmbeddedData::FromBlob();
266 Address entry = d.InstructionStartOfBuiltin(builtin_index);
269 mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
278 void TurboAssembler::Drop(
int count) {
280 int total = count * kPointerSize;
281 if (is_uint12(total)) {
282 la(sp, MemOperand(sp, total));
283 }
else if (is_int20(total)) {
284 lay(sp, MemOperand(sp, total));
286 AddP(sp, Operand(total));
291 void TurboAssembler::Drop(Register count, Register scratch) {
292 ShiftLeftP(scratch, count, Operand(kPointerSizeLog2));
293 AddP(sp, sp, scratch);
296 void TurboAssembler::Call(Label* target) { b(r14, target); }
298 void TurboAssembler::Push(Handle<HeapObject> handle) {
299 mov(r0, Operand(handle));
303 void TurboAssembler::Push(Smi smi) {
304 mov(r0, Operand(smi));
308 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
309 if (FLAG_embedded_builtins) {
310 if (root_array_available_ && options().isolate_independent_code) {
311 IndirectLoadConstant(dst, value);
315 mov(dst, Operand(value));
318 void TurboAssembler::Move(Register dst, ExternalReference reference) {
319 if (FLAG_embedded_builtins) {
320 if (root_array_available_ && options().isolate_independent_code) {
321 IndirectLoadExternalReference(dst, reference);
325 mov(dst, Operand(reference));
328 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
333 LoadOnConditionP(cond, dst, src);
338 void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
345 void TurboAssembler::MoveChar(
const MemOperand& opnd1,
346 const MemOperand& opnd2,
347 const Operand& length) {
348 mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
352 void TurboAssembler::CompareLogicalChar(
const MemOperand& opnd1,
353 const MemOperand& opnd2,
354 const Operand& length) {
355 clc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
359 void TurboAssembler::ExclusiveOrChar(
const MemOperand& opnd1,
360 const MemOperand& opnd2,
361 const Operand& length) {
362 xc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
366 void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
367 const Operand& startBit,
const Operand& endBit,
368 const Operand& shiftAmt,
bool zeroBits) {
371 risbg(dst, src, startBit,
372 Operand(static_cast<intptr_t>(endBit.immediate() | 0x80)), shiftAmt);
374 risbg(dst, src, startBit, endBit, shiftAmt);
377 void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
379 #if V8_TARGET_ARCH_S390X 383 #endif // V8_TARGET_ARCH_S390X 386 void TurboAssembler::MultiPush(RegList regs, Register location) {
387 int16_t num_to_push = base::bits::CountPopulation(regs);
388 int16_t stack_offset = num_to_push * kPointerSize;
390 SubP(location, location, Operand(stack_offset));
391 for (int16_t
i = Register::kNumRegisters - 1;
i >= 0;
i--) {
392 if ((regs & (1 <<
i)) != 0) {
393 stack_offset -= kPointerSize;
394 StoreP(ToRegister(
i), MemOperand(location, stack_offset));
// Pops every GP register whose bit is set in |regs| from the stack area
// at |location| (default sp in the full file -- confirm), walking from
// the lowest register code upwards (inverse order of MultiPush), then
// releases the consumed stack space.
399 void TurboAssembler::MultiPop(RegList regs, Register location) {
400 int16_t stack_offset = 0;
// Ascending register codes: mirrors MultiPush's descending store order.
402 for (int16_t
i = 0;
i < Register::kNumRegisters;
i++) {
403 if ((regs & (1 <<
i)) != 0) {
404 LoadP(ToRegister(
i), MemOperand(location, stack_offset));
405 stack_offset += kPointerSize;
// Deallocate the whole area in one add.
408 AddP(location, location, Operand(stack_offset));
// Pushes every double register whose bit is set in |dregs| onto the
// stack area at |location|: reserves the full area first, then stores
// from the highest register code downwards so the lowest-coded register
// ends up at the lowest address.
411 void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
412 int16_t num_to_push = base::bits::CountPopulation(dregs);
413 int16_t stack_offset = num_to_push * kDoubleSize;
// Reserve space for all doubles up front.
415 SubP(location, location, Operand(stack_offset));
416 for (int16_t
i = DoubleRegister::kNumRegisters - 1;
i >= 0;
i--) {
417 if ((dregs & (1 <<
i)) != 0) {
418 DoubleRegister dreg = DoubleRegister::from_code(
i);
419 stack_offset -= kDoubleSize;
420 StoreDouble(dreg, MemOperand(location, stack_offset));
// Inverse of MultiPushDoubles: reloads each double register whose bit is
// set in |dregs|, walking register codes upwards, then releases the
// stack area in a single add.
425 void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
426 int16_t stack_offset = 0;
428 for (int16_t
i = 0;
i < DoubleRegister::kNumRegisters;
i++) {
429 if ((dregs & (1 <<
i)) != 0) {
430 DoubleRegister dreg = DoubleRegister::from_code(
i);
431 LoadDouble(dreg, MemOperand(location, stack_offset));
432 stack_offset += kDoubleSize;
// Deallocate everything that was reserved by MultiPushDoubles.
435 AddP(location, location, Operand(stack_offset));
438 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
441 MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
444 void MacroAssembler::RecordWriteField(Register
object,
int offset,
445 Register value, Register dst,
446 LinkRegisterStatus lr_status,
447 SaveFPRegsMode save_fp,
448 RememberedSetAction remembered_set_action,
449 SmiCheck smi_check) {
455 if (smi_check == INLINE_SMI_CHECK) {
456 JumpIfSmi(value, &done);
461 DCHECK(IsAligned(offset, kPointerSize));
463 lay(dst, MemOperand(
object, offset - kHeapObjectTag));
464 if (emit_debug_code()) {
466 AndP(r0, dst, Operand(kPointerSize - 1));
467 beq(&ok, Label::kNear);
468 stop(
"Unaligned cell in write barrier");
472 RecordWrite(
object, dst, value, lr_status, save_fp, remembered_set_action,
479 if (emit_debug_code()) {
480 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
481 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
485 void TurboAssembler::SaveRegisters(RegList registers) {
486 DCHECK_GT(NumRegs(registers), 0);
488 for (
int i = 0;
i < Register::kNumRegisters; ++
i) {
489 if ((registers >>
i) & 1u) {
490 regs |= Register::from_code(
i).bit();
496 void TurboAssembler::RestoreRegisters(RegList registers) {
497 DCHECK_GT(NumRegs(registers), 0);
499 for (
int i = 0;
i < Register::kNumRegisters; ++
i) {
500 if ((registers >>
i) & 1u) {
501 regs |= Register::from_code(
i).bit();
507 void TurboAssembler::CallRecordWriteStub(
508 Register
object, Register address,
509 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
511 object, address, remembered_set_action, fp_mode,
512 isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
516 void TurboAssembler::CallRecordWriteStub(
517 Register
object, Register address,
518 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
519 Address wasm_target) {
520 CallRecordWriteStub(
object, address, remembered_set_action, fp_mode,
521 Handle<Code>::null(), wasm_target);
524 void TurboAssembler::CallRecordWriteStub(
525 Register
object, Register address,
526 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
527 Handle<Code> code_target, Address wasm_target) {
528 DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
534 RecordWriteDescriptor descriptor;
535 RegList registers = descriptor.allocatable_registers();
537 SaveRegisters(registers);
538 Register object_parameter(
539 descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
540 Register slot_parameter(
541 descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
542 Register remembered_set_parameter(
543 descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
544 Register fp_mode_parameter(
545 descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
551 Pop(object_parameter);
553 Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
554 Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
555 if (code_target.is_null()) {
556 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
558 Call(code_target, RelocInfo::CODE_TARGET);
561 RestoreRegisters(registers);
567 void MacroAssembler::RecordWrite(Register
object, Register address,
568 Register value, LinkRegisterStatus lr_status,
569 SaveFPRegsMode fp_mode,
570 RememberedSetAction remembered_set_action,
571 SmiCheck smi_check) {
572 DCHECK(
object != value);
573 if (emit_debug_code()) {
574 CmpP(value, MemOperand(address));
575 Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
578 if (remembered_set_action == OMIT_REMEMBERED_SET &&
579 !FLAG_incremental_marking) {
586 if (smi_check == INLINE_SMI_CHECK) {
587 JumpIfSmi(value, &done);
592 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
593 CheckPageFlag(
object,
595 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
598 if (lr_status == kLRHasNotBeenSaved) {
601 CallRecordWriteStub(
object, address, remembered_set_action, fp_mode);
602 if (lr_status == kLRHasNotBeenSaved) {
609 isolate()->counters()->write_barriers_static()->Increment();
610 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
615 if (emit_debug_code()) {
616 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
617 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
621 void TurboAssembler::PushCommonFrame(Register marker_reg) {
624 if (marker_reg.is_valid()) {
625 Push(r14, fp, marker_reg);
631 la(fp, MemOperand(sp, fp_delta * kPointerSize));
634 void TurboAssembler::PopCommonFrame(Register marker_reg) {
635 if (marker_reg.is_valid()) {
636 Pop(r14, fp, marker_reg);
642 void TurboAssembler::PushStandardFrame(Register function_reg) {
645 if (function_reg.is_valid()) {
646 Push(r14, fp, cp, function_reg);
652 la(fp, MemOperand(sp, fp_delta * kPointerSize));
655 void TurboAssembler::RestoreFrameStateForTailCall() {
661 DCHECK(!FLAG_enable_embedded_constant_pool);
662 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
663 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
667 void MacroAssembler::PushSafepointRegisters() {
670 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
671 DCHECK_GE(num_unsaved, 0);
672 if (num_unsaved > 0) {
673 lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
675 MultiPush(kSafepointSavedRegisters);
678 void MacroAssembler::PopSafepointRegisters() {
679 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
680 MultiPop(kSafepointSavedRegisters);
681 if (num_unsaved > 0) {
682 la(sp, MemOperand(sp, num_unsaved * kPointerSize));
686 int MacroAssembler::SafepointRegisterStackIndex(
int reg_code) {
689 RegList regs = kSafepointSavedRegisters;
692 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
694 for (int16_t
i = 0;
i < reg_code;
i++) {
695 if ((regs & (1 <<
i)) != 0) {
703 void TurboAssembler::CanonicalizeNaN(
const DoubleRegister dst,
704 const DoubleRegister src) {
706 if (dst != src) ldr(dst, src);
707 lzdr(kDoubleRegZero);
708 sdbr(dst, kDoubleRegZero);
711 void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) {
715 void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst,
717 if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) {
718 cdlfbr(Condition(5), Condition(0), dst, src);
727 void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) {
728 cefbra(Condition(4), dst, src);
731 void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst,
733 celfbr(Condition(4), Condition(0), dst, src);
736 void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst,
738 cegbr(double_dst, src);
741 void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst,
743 cdgbr(double_dst, src);
746 void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst,
748 celgbr(Condition(0), Condition(0), double_dst, src);
751 void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst,
753 cdlgbr(Condition(0), Condition(0), double_dst, src);
756 void TurboAssembler::ConvertFloat32ToInt64(
const Register dst,
757 const DoubleRegister double_input,
758 FPRoundingMode rounding_mode) {
759 Condition m = Condition(0);
760 switch (rounding_mode) {
764 case kRoundToNearest:
767 case kRoundToPlusInf:
770 case kRoundToMinusInf:
777 cgebr(m, dst, double_input);
780 void TurboAssembler::ConvertDoubleToInt64(
const Register dst,
781 const DoubleRegister double_input,
782 FPRoundingMode rounding_mode) {
783 Condition m = Condition(0);
784 switch (rounding_mode) {
788 case kRoundToNearest:
791 case kRoundToPlusInf:
794 case kRoundToMinusInf:
801 cgdbr(m, dst, double_input);
804 void TurboAssembler::ConvertDoubleToInt32(
const Register dst,
805 const DoubleRegister double_input,
806 FPRoundingMode rounding_mode) {
807 Condition m = Condition(0);
808 switch (rounding_mode) {
812 case kRoundToNearest:
815 case kRoundToPlusInf:
818 case kRoundToMinusInf:
825 #ifdef V8_TARGET_ARCH_S390X 826 lghi(dst, Operand::Zero());
828 cfdbr(m, dst, double_input);
831 void TurboAssembler::ConvertFloat32ToInt32(
const Register result,
832 const DoubleRegister double_input,
833 FPRoundingMode rounding_mode) {
834 Condition m = Condition(0);
835 switch (rounding_mode) {
839 case kRoundToNearest:
842 case kRoundToPlusInf:
845 case kRoundToMinusInf:
852 #ifdef V8_TARGET_ARCH_S390X 853 lghi(result, Operand::Zero());
855 cfebr(m, result, double_input);
858 void TurboAssembler::ConvertFloat32ToUnsignedInt32(
859 const Register result,
const DoubleRegister double_input,
860 FPRoundingMode rounding_mode) {
861 Condition m = Condition(0);
862 switch (rounding_mode) {
866 case kRoundToNearest:
869 case kRoundToPlusInf:
872 case kRoundToMinusInf:
879 #ifdef V8_TARGET_ARCH_S390X 880 lghi(result, Operand::Zero());
882 clfebr(m, Condition(0), result, double_input);
885 void TurboAssembler::ConvertFloat32ToUnsignedInt64(
886 const Register result,
const DoubleRegister double_input,
887 FPRoundingMode rounding_mode) {
888 Condition m = Condition(0);
889 switch (rounding_mode) {
893 case kRoundToNearest:
896 case kRoundToPlusInf:
899 case kRoundToMinusInf:
906 clgebr(m, Condition(0), result, double_input);
909 void TurboAssembler::ConvertDoubleToUnsignedInt64(
910 const Register dst,
const DoubleRegister double_input,
911 FPRoundingMode rounding_mode) {
912 Condition m = Condition(0);
913 switch (rounding_mode) {
917 case kRoundToNearest:
920 case kRoundToPlusInf:
923 case kRoundToMinusInf:
930 clgdbr(m, Condition(0), dst, double_input);
933 void TurboAssembler::ConvertDoubleToUnsignedInt32(
934 const Register dst,
const DoubleRegister double_input,
935 FPRoundingMode rounding_mode) {
936 Condition m = Condition(0);
937 switch (rounding_mode) {
941 case kRoundToNearest:
944 case kRoundToPlusInf:
947 case kRoundToMinusInf:
954 #ifdef V8_TARGET_ARCH_S390X 955 lghi(dst, Operand::Zero());
957 clfdbr(m, Condition(0), dst, double_input);
960 #if !V8_TARGET_ARCH_S390X 961 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
962 Register src_low, Register src_high,
963 Register scratch, Register shift) {
964 LoadRR(r0, src_high);
966 sldl(r0, shift, Operand::Zero());
967 LoadRR(dst_high, r0);
971 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
972 Register src_low, Register src_high,
974 LoadRR(r0, src_high);
976 sldl(r0, r0, Operand(shift));
977 LoadRR(dst_high, r0);
981 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
982 Register src_low, Register src_high,
983 Register scratch, Register shift) {
984 LoadRR(r0, src_high);
986 srdl(r0, shift, Operand::Zero());
987 LoadRR(dst_high, r0);
991 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
992 Register src_low, Register src_high,
994 LoadRR(r0, src_high);
996 srdl(r0, Operand(shift));
997 LoadRR(dst_high, r0);
1001 void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
1002 Register src_low, Register src_high,
1003 Register scratch, Register shift) {
1004 LoadRR(r0, src_high);
1005 LoadRR(r1, src_low);
1006 srda(r0, shift, Operand::Zero());
1007 LoadRR(dst_high, r0);
1008 LoadRR(dst_low, r1);
1011 void TurboAssembler::ShiftRightArithPair(Register dst_low, Register dst_high,
1012 Register src_low, Register src_high,
1014 LoadRR(r0, src_high);
1015 LoadRR(r1, src_low);
1016 srda(r0, r0, Operand(shift));
1017 LoadRR(dst_high, r0);
1018 LoadRR(dst_low, r1);
1022 void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) {
1026 void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) {
1030 void TurboAssembler::StubPrologue(StackFrame::Type type, Register base,
1031 int prologue_offset) {
1033 ConstantPoolUnavailableScope constant_pool_unavailable(
this);
1034 Load(r1, Operand(StackFrame::TypeToMarker(type)));
1035 PushCommonFrame(r1);
1039 void TurboAssembler::Prologue(Register base,
int prologue_offset) {
1040 DCHECK(base != no_reg);
1041 PushStandardFrame(r3);
1044 void TurboAssembler::EnterFrame(StackFrame::Type type,
1045 bool load_constant_pool_pointer_reg) {
1053 Load(ip, Operand(StackFrame::TypeToMarker(type)));
1054 PushCommonFrame(ip);
1057 int TurboAssembler::LeaveFrame(StackFrame::Type type,
int stack_adjustment) {
1060 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1061 if (is_int20(StandardFrameConstants::kCallerSPOffset + stack_adjustment)) {
1062 lay(r1, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
1066 Operand(StandardFrameConstants::kCallerSPOffset + stack_adjustment));
1068 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1070 int frame_ends = pc_offset();
1098 void MacroAssembler::EnterExitFrame(
bool save_doubles,
int stack_space,
1099 StackFrame::Type frame_type) {
1100 DCHECK(frame_type == StackFrame::EXIT ||
1101 frame_type == StackFrame::BUILTIN_EXIT);
1103 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1104 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1105 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1106 DCHECK_GT(stack_space, 0);
1112 Load(r1, Operand(StackFrame::TypeToMarker(frame_type)));
1113 PushCommonFrame(r1);
1115 lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
1117 if (emit_debug_code()) {
1118 StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1);
1120 Move(r1, CodeObject());
1121 StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1124 Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1126 StoreP(fp, MemOperand(r1));
1128 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1129 StoreP(cp, MemOperand(r1));
1133 MultiPushDoubles(kCallerSavedDoubles);
1140 lay(sp, MemOperand(sp, -stack_space * kPointerSize));
1144 const int frame_alignment = TurboAssembler::ActivationFrameAlignment();
1145 if (frame_alignment > 0) {
1146 DCHECK_EQ(frame_alignment, 8);
1147 ClearRightImm(sp, sp, Operand(3));
1150 lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
1151 StoreP(MemOperand(sp), Operand::Zero(), r0);
1154 lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize));
1155 StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
1158 int TurboAssembler::ActivationFrameAlignment() {
1159 #if !defined(USE_SIMULATOR) 1164 return base::OS::ActivationFrameAlignment();
1170 return FLAG_sim_stack_alignment;
1174 void MacroAssembler::LeaveExitFrame(
bool save_doubles, Register argument_count,
1175 bool argument_count_is_length) {
1179 const int kNumRegs = kNumCallerSavedDoubles;
1180 lay(r5, MemOperand(fp, -(ExitFrameConstants::kFixedFrameSizeFromFp +
1181 kNumRegs * kDoubleSize)));
1182 MultiPopDoubles(kCallerSavedDoubles, r5);
1186 Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1188 StoreP(MemOperand(ip), Operand(0, RelocInfo::NONE), r0);
1192 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1193 LoadP(cp, MemOperand(ip));
1196 mov(r1, Operand(Context::kInvalidContext));
1198 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1199 StoreP(r1, MemOperand(ip));
1203 LeaveFrame(StackFrame::EXIT);
1205 if (argument_count.is_valid()) {
1206 if (!argument_count_is_length) {
1207 ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2));
1209 la(sp, MemOperand(sp, argument_count));
1213 void TurboAssembler::MovFromFloatResult(
const DoubleRegister dst) {
1217 void TurboAssembler::MovFromFloatParameter(
const DoubleRegister dst) {
1221 void TurboAssembler::PrepareForTailCall(
const ParameterCount& callee_args_count,
1222 Register caller_args_count_reg,
1223 Register scratch0, Register scratch1) {
1225 if (callee_args_count.is_reg()) {
1226 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1229 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1236 Register dst_reg = scratch0;
1237 ShiftLeftP(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
1238 AddP(dst_reg, fp, dst_reg);
1239 AddP(dst_reg, dst_reg,
1240 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1242 Register src_reg = caller_args_count_reg;
1244 if (callee_args_count.is_reg()) {
1245 ShiftLeftP(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
1246 AddP(src_reg, sp, src_reg);
1247 AddP(src_reg, src_reg, Operand(kPointerSize));
1249 mov(src_reg, Operand((callee_args_count.immediate() + 1) * kPointerSize));
1250 AddP(src_reg, src_reg, sp);
1253 if (FLAG_debug_code) {
1254 CmpLogicalP(src_reg, dst_reg);
1255 Check(lt, AbortReason::kStackAccessBelowStackPointer);
1260 RestoreFrameStateForTailCall();
1267 Register tmp_reg = scratch1;
1269 if (callee_args_count.is_reg()) {
1270 AddP(tmp_reg, callee_args_count.reg(), Operand(1));
1272 mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
1274 LoadRR(r1, tmp_reg);
1276 LoadP(tmp_reg, MemOperand(src_reg, -kPointerSize));
1277 StoreP(tmp_reg, MemOperand(dst_reg, -kPointerSize));
1278 lay(src_reg, MemOperand(src_reg, -kPointerSize));
1279 lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
1280 BranchOnCount(r1, &loop);
1283 LoadRR(sp, dst_reg);
1286 void MacroAssembler::InvokePrologue(
const ParameterCount& expected,
1287 const ParameterCount& actual, Label* done,
1288 bool* definitely_mismatches,
1290 bool definitely_matches =
false;
1291 *definitely_mismatches =
false;
1292 Label regular_invoke;
1305 DCHECK(actual.is_immediate() || actual.reg() == r2);
1306 DCHECK(expected.is_immediate() || expected.reg() == r4);
1308 if (expected.is_immediate()) {
1309 DCHECK(actual.is_immediate());
1310 mov(r2, Operand(actual.immediate()));
1311 if (expected.immediate() == actual.immediate()) {
1312 definitely_matches =
true;
1314 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1315 if (expected.immediate() == sentinel) {
1320 definitely_matches =
true;
1322 *definitely_mismatches =
true;
1323 mov(r4, Operand(expected.immediate()));
1327 if (actual.is_immediate()) {
1328 mov(r2, Operand(actual.immediate()));
1329 CmpPH(expected.reg(), Operand(actual.immediate()));
1330 beq(®ular_invoke);
1332 CmpP(expected.reg(), actual.reg());
1333 beq(®ular_invoke);
1337 if (!definitely_matches) {
1338 Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1339 if (flag == CALL_FUNCTION) {
1341 if (!*definitely_mismatches) {
1345 Jump(adaptor, RelocInfo::CODE_TARGET);
1347 bind(®ular_invoke);
1351 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1352 const ParameterCount& expected,
1353 const ParameterCount& actual) {
1356 ExternalReference debug_hook_active =
1357 ExternalReference::debug_hook_on_function_call_address(isolate());
1358 Move(r6, debug_hook_active);
1359 tm(MemOperand(r6), Operand::Zero());
1364 if (actual.is_reg()) {
1365 LoadRR(r6, actual.reg());
1367 mov(r6, Operand(actual.immediate()));
1369 ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
1370 LoadP(r6, MemOperand(sp, r6));
1371 FrameScope frame(
this,
1372 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1373 if (expected.is_reg()) {
1374 SmiTag(expected.reg());
1375 Push(expected.reg());
1377 if (actual.is_reg()) {
1378 SmiTag(actual.reg());
1381 if (new_target.is_valid()) {
1385 CallRuntime(Runtime::kDebugOnFunctionCall);
1387 if (new_target.is_valid()) {
1390 if (actual.is_reg()) {
1392 SmiUntag(actual.reg());
1394 if (expected.is_reg()) {
1395 Pop(expected.reg());
1396 SmiUntag(expected.reg());
1402 void MacroAssembler::InvokeFunctionCode(Register
function, Register new_target,
1403 const ParameterCount& expected,
1404 const ParameterCount& actual,
1407 DCHECK(flag == JUMP_FUNCTION || has_frame());
1409 DCHECK(
function == r3);
1410 DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);
1413 CheckDebugHook(
function, new_target, expected, actual);
1416 if (!new_target.is_valid()) {
1417 LoadRoot(r5, RootIndex::kUndefinedValue);
1421 bool definitely_mismatches =
false;
1422 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
1423 if (!definitely_mismatches) {
1427 Register code = kJavaScriptCallCodeStartRegister;
1428 LoadP(code, FieldMemOperand(
function, JSFunction::kCodeOffset));
1429 AddP(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1430 if (flag == CALL_FUNCTION) {
1433 DCHECK(flag == JUMP_FUNCTION);
1434 JumpToJSEntry(code);
1443 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1444 const ParameterCount& actual,
1447 DCHECK(flag == JUMP_FUNCTION || has_frame());
1452 Register expected_reg = r4;
1453 Register temp_reg = r6;
1454 LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
1455 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1456 LoadLogicalHalfWordP(
1458 FieldMemOperand(temp_reg,
1459 SharedFunctionInfo::kFormalParameterCountOffset));
1461 ParameterCount expected(expected_reg);
1462 InvokeFunctionCode(fun, new_target, expected, actual, flag);
1465 void MacroAssembler::InvokeFunction(Register
function,
1466 const ParameterCount& expected,
1467 const ParameterCount& actual,
1470 DCHECK(flag == JUMP_FUNCTION || has_frame());
1473 DCHECK(
function == r3);
1476 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
1478 InvokeFunctionCode(r3, no_reg, expected, actual, flag);
1481 void MacroAssembler::MaybeDropFrames() {
1483 ExternalReference restart_fp =
1484 ExternalReference::debug_restart_fp_address(isolate());
1485 Move(r3, restart_fp);
1486 LoadP(r3, MemOperand(r3));
1487 CmpP(r3, Operand::Zero());
1488 Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1492 void MacroAssembler::PushStackHandler() {
1494 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1495 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1498 mov(r7, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1502 lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
1505 lghi(r0, Operand::Zero());
1506 StoreP(r0, MemOperand(sp));
1509 MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
1510 Operand(kPointerSize));
1512 StoreP(sp, MemOperand(r7));
1515 void MacroAssembler::PopStackHandler() {
1516 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1517 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1521 mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1523 StoreP(r3, MemOperand(ip));
1528 void MacroAssembler::CompareObjectType(Register
object, Register map,
1529 Register type_reg, InstanceType type) {
1530 const Register temp = type_reg == no_reg ? r0 : type_reg;
1532 LoadP(map, FieldMemOperand(
object, HeapObject::kMapOffset));
1533 CompareInstanceType(map, temp, type);
1536 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1537 InstanceType type) {
1538 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1539 STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1540 LoadHalfWordP(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1541 CmpP(type_reg, Operand(type));
1544 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1545 CmpP(obj, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
1548 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1549 DCHECK(AllowThisStubCall(stub));
1550 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1553 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1554 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1557 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
1558 return has_frame_ || !stub->SometimesSetsUpAFrame();
1561 void MacroAssembler::TryDoubleToInt32Exact(Register result,
1562 DoubleRegister double_input,
1564 DoubleRegister double_scratch) {
1566 DCHECK(double_input != double_scratch);
1568 ConvertDoubleToInt64(result, double_input);
1570 TestIfInt32(result);
1574 cdfbr(double_scratch, result);
1575 cdbr(double_scratch, double_input);
1579 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1581 DoubleRegister double_input,
1582 StubCallMode stub_mode) {
1585 TryInlineTruncateDoubleToI(result, double_input, &done);
1590 lay(sp, MemOperand(sp, -kDoubleSize));
1591 StoreDouble(double_input, MemOperand(sp));
1593 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1594 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1596 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1599 LoadP(result, MemOperand(sp, 0));
1600 la(sp, MemOperand(sp, kDoubleSize));
1606 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1607 DoubleRegister double_input,
1609 ConvertDoubleToInt64(result, double_input);
1612 TestIfInt32(result);
1616 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1618 const Runtime::Function* f = Runtime::FunctionForId(fid);
1619 mov(r2, Operand(f->nargs));
1620 Move(r3, ExternalReference::Create(f));
1621 DCHECK(!AreAliased(centry, r2, r3));
1622 la(centry, MemOperand(centry, Code::kHeaderSize - kHeapObjectTag));
1626 void MacroAssembler::CallRuntime(
const Runtime::Function* f,
int num_arguments,
1627 SaveFPRegsMode save_doubles) {
1633 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1639 mov(r2, Operand(num_arguments));
1640 Move(r3, ExternalReference::Create(f));
1641 #if V8_TARGET_ARCH_S390X 1643 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1645 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1648 Call(code, RelocInfo::CODE_TARGET);
1651 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1652 const Runtime::Function*
function = Runtime::FunctionForId(fid);
1653 DCHECK_EQ(1, function->result_size);
1654 if (function->nargs >= 0) {
1655 mov(r2, Operand(function->nargs));
1657 JumpToExternalReference(ExternalReference::Create(fid));
1660 void MacroAssembler::JumpToExternalReference(
const ExternalReference& builtin,
1661 bool builtin_exit_frame) {
1663 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1664 kArgvOnStack, builtin_exit_frame);
1665 Jump(code, RelocInfo::CODE_TARGET);
1668 void MacroAssembler::JumpToInstructionStream(Address entry) {
1669 mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1670 Jump(kOffHeapTrampolineRegister);
1673 void MacroAssembler::LoadWeakValue(Register out, Register in,
1674 Label* target_if_cleared) {
1675 Cmp32(in, Operand(kClearedWeakHeapObjectLower32));
1676 beq(target_if_cleared);
1678 AndP(out, in, Operand(~kWeakHeapObjectMask));
1681 void MacroAssembler::IncrementCounter(StatsCounter* counter,
int value,
1682 Register scratch1, Register scratch2) {
1683 DCHECK(value > 0 && is_int8(value));
1684 if (FLAG_native_code_counters && counter->Enabled()) {
1685 Move(scratch2, ExternalReference::Create(counter));
1687 LoadW(scratch1, MemOperand(scratch2));
1688 AddP(scratch1, Operand(value));
1689 StoreW(scratch1, MemOperand(scratch2));
1693 void MacroAssembler::DecrementCounter(StatsCounter* counter,
int value,
1694 Register scratch1, Register scratch2) {
1695 DCHECK(value > 0 && is_int8(value));
1696 if (FLAG_native_code_counters && counter->Enabled()) {
1697 Move(scratch2, ExternalReference::Create(counter));
1699 LoadW(scratch1, MemOperand(scratch2));
1700 AddP(scratch1, Operand(-value));
1701 StoreW(scratch1, MemOperand(scratch2));
1705 void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
1706 if (emit_debug_code()) Check(cond, reason, cr);
1709 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1717 void TurboAssembler::Abort(AbortReason reason) {
1720 const char* msg = GetAbortReason(reason);
1722 RecordComment(
"Abort message: ");
1727 if (trap_on_abort()) {
1732 if (should_abort_hard()) {
1734 FrameScope assume_frame(
this, StackFrame::NONE);
1735 lgfi(r2, Operand(static_cast<int>(reason)));
1736 PrepareCallCFunction(1, 0, r3);
1737 Move(r3, ExternalReference::abort_with_reason());
1744 LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
1750 FrameScope scope(
this, StackFrame::NONE);
1751 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1753 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1758 void MacroAssembler::LoadNativeContextSlot(
int index, Register dst) {
1759 LoadP(dst, NativeContextMemOperand());
1760 LoadP(dst, ContextMemOperand(dst, index));
1763 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
1765 STATIC_ASSERT(kSmiTag == 0);
1766 STATIC_ASSERT(kSmiTagSize == 1);
1768 DCHECK(src.code() != dst.code());
1774 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
1775 Label* on_either_smi) {
1776 STATIC_ASSERT(kSmiTag == 0);
1777 JumpIfSmi(reg1, on_either_smi);
1778 JumpIfSmi(reg2, on_either_smi);
1781 void MacroAssembler::AssertNotSmi(Register
object) {
1782 if (emit_debug_code()) {
1783 STATIC_ASSERT(kSmiTag == 0);
1785 Check(ne, AbortReason::kOperandIsASmi, cr0);
1789 void MacroAssembler::AssertSmi(Register
object) {
1790 if (emit_debug_code()) {
1791 STATIC_ASSERT(kSmiTag == 0);
1793 Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1797 void MacroAssembler::AssertConstructor(Register
object, Register scratch) {
1798 if (emit_debug_code()) {
1799 STATIC_ASSERT(kSmiTag == 0);
1801 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
1802 LoadP(scratch, FieldMemOperand(
object, HeapObject::kMapOffset));
1803 tm(FieldMemOperand(scratch, Map::kBitFieldOffset),
1804 Operand(Map::IsConstructorBit::kMask));
1805 Check(ne, AbortReason::kOperandIsNotAConstructor);
1809 void MacroAssembler::AssertFunction(Register
object) {
1810 if (emit_debug_code()) {
1811 STATIC_ASSERT(kSmiTag == 0);
1813 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1815 CompareObjectType(
object,
object,
object, JS_FUNCTION_TYPE);
1817 Check(eq, AbortReason::kOperandIsNotAFunction);
1821 void MacroAssembler::AssertBoundFunction(Register
object) {
1822 if (emit_debug_code()) {
1823 STATIC_ASSERT(kSmiTag == 0);
1825 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1827 CompareObjectType(
object,
object,
object, JS_BOUND_FUNCTION_TYPE);
1829 Check(eq, AbortReason::kOperandIsNotABoundFunction);
1833 void MacroAssembler::AssertGeneratorObject(Register
object) {
1834 if (!emit_debug_code())
return;
1836 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1839 Register map = object;
1841 LoadP(map, FieldMemOperand(
object, HeapObject::kMapOffset));
1845 Register instance_type = object;
1846 CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1850 CmpP(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
1854 CmpP(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1859 Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1862 void MacroAssembler::AssertUndefinedOrAllocationSite(Register
object,
1864 if (emit_debug_code()) {
1865 Label done_checking;
1866 AssertNotSmi(
object);
1867 CompareRoot(
object, RootIndex::kUndefinedValue);
1868 beq(&done_checking, Label::kNear);
1869 LoadP(scratch, FieldMemOperand(
object, HeapObject::kMapOffset));
1870 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1871 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1872 bind(&done_checking);
1876 static const int kRegisterPassedArguments = 5;
1878 int TurboAssembler::CalculateStackPassedWords(
int num_reg_arguments,
1879 int num_double_arguments) {
1880 int stack_passed_words = 0;
1881 if (num_double_arguments > DoubleRegister::kNumRegisters) {
1882 stack_passed_words +=
1883 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1886 if (num_reg_arguments > kRegisterPassedArguments) {
1887 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1889 return stack_passed_words;
1892 void TurboAssembler::PrepareCallCFunction(
int num_reg_arguments,
1893 int num_double_arguments,
1895 int frame_alignment = ActivationFrameAlignment();
1896 int stack_passed_arguments =
1897 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1898 int stack_space = kNumRequiredStackFrameSlots;
1899 if (frame_alignment > kPointerSize) {
1902 LoadRR(scratch, sp);
1903 lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize));
1904 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1905 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1906 StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize));
1908 stack_space += stack_passed_arguments;
1910 lay(sp, MemOperand(sp, (-stack_space) * kPointerSize));
1913 void TurboAssembler::PrepareCallCFunction(
int num_reg_arguments,
1915 PrepareCallCFunction(num_reg_arguments, 0, scratch);
1918 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); }
1920 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); }
1922 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
1923 DoubleRegister src2) {
1934 void TurboAssembler::CallCFunction(ExternalReference
function,
1935 int num_reg_arguments,
1936 int num_double_arguments) {
1938 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
1941 void TurboAssembler::CallCFunction(Register
function,
int num_reg_arguments,
1942 int num_double_arguments) {
1943 CallCFunctionHelper(
function, num_reg_arguments, num_double_arguments);
1946 void TurboAssembler::CallCFunction(ExternalReference
function,
1947 int num_arguments) {
1948 CallCFunction(
function, num_arguments, 0);
1951 void TurboAssembler::CallCFunction(Register
function,
int num_arguments) {
1952 CallCFunction(
function, num_arguments, 0);
1955 void TurboAssembler::CallCFunctionHelper(Register
function,
1956 int num_reg_arguments,
1957 int num_double_arguments) {
1958 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
1959 DCHECK(has_frame());
1964 Register dest =
function;
1965 if (ABI_CALL_VIA_IP) {
1972 int stack_passed_arguments =
1973 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1974 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
1975 if (ActivationFrameAlignment() > kPointerSize) {
1977 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
1979 la(sp, MemOperand(sp, stack_space * kPointerSize));
1983 void TurboAssembler::CheckPageFlag(
1986 int mask, Condition cc, Label* condition_met) {
1987 DCHECK(cc == ne || cc == eq);
1988 ClearRightImm(scratch,
object, Operand(kPageSizeBits));
1990 if (base::bits::IsPowerOfTwo(mask)) {
1993 int32_t byte_offset = 4;
1997 byte_offset = kPointerSize - 1;
1998 }
else if (mask < 0x8000) {
1999 byte_offset = kPointerSize - 2;
2000 shifted_mask = mask >> 8;
2001 }
else if (mask < 0x800000) {
2002 byte_offset = kPointerSize - 3;
2003 shifted_mask = mask >> 16;
2005 byte_offset = kPointerSize - 4;
2006 shifted_mask = mask >> 24;
2008 #if V8_TARGET_LITTLE_ENDIAN 2010 byte_offset = kPointerSize - byte_offset - 1;
2012 tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
2013 Operand(shifted_mask));
2015 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2016 AndP(r0, scratch, Operand(mask));
2039 void MacroAssembler::LoadRepresentation(Register dst,
const MemOperand& mem,
2040 Representation r, Register scratch) {
2041 DCHECK(!r.IsDouble());
2042 if (r.IsInteger8()) {
2044 }
else if (r.IsUInteger8()) {
2046 }
else if (r.IsInteger16()) {
2047 LoadHalfWordP(dst, mem, scratch);
2048 }
else if (r.IsUInteger16()) {
2049 LoadHalfWordP(dst, mem, scratch);
2050 #if V8_TARGET_ARCH_S390X 2051 }
else if (r.IsInteger32()) {
2052 LoadW(dst, mem, scratch);
2055 LoadP(dst, mem, scratch);
2059 void MacroAssembler::StoreRepresentation(Register src,
const MemOperand& mem,
2060 Representation r, Register scratch) {
2061 DCHECK(!r.IsDouble());
2062 if (r.IsInteger8() || r.IsUInteger8()) {
2063 StoreByte(src, mem, scratch);
2064 }
else if (r.IsInteger16() || r.IsUInteger16()) {
2065 StoreHalfWord(src, mem, scratch);
2066 #if V8_TARGET_ARCH_S390X 2067 }
else if (r.IsInteger32()) {
2068 StoreW(src, mem, scratch);
2071 if (r.IsHeapObject()) {
2073 }
else if (r.IsSmi()) {
2076 StoreP(src, mem, scratch);
2080 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2081 Register reg4, Register reg5,
2084 if (reg1.is_valid()) regs |= reg1.bit();
2085 if (reg2.is_valid()) regs |= reg2.bit();
2086 if (reg3.is_valid()) regs |= reg3.bit();
2087 if (reg4.is_valid()) regs |= reg4.bit();
2088 if (reg5.is_valid()) regs |= reg5.bit();
2089 if (reg6.is_valid()) regs |= reg6.bit();
2091 const RegisterConfiguration* config = RegisterConfiguration::Default();
2092 for (
int i = 0;
i < config->num_allocatable_general_registers(); ++
i) {
2093 int code = config->GetAllocatableGeneralCode(
i);
2094 Register candidate = Register::from_code(code);
2095 if (regs & candidate.bit())
continue;
2101 void TurboAssembler::mov(Register dst,
const Operand& src) {
2102 #if V8_TARGET_ARCH_S390X 2107 if (src.is_heap_object_request()) {
2108 RequestHeapObject(src.heap_object_request());
2111 value = src.immediate();
2114 if (src.rmode() != RelocInfo::NONE) {
2116 RecordRelocInfo(src.rmode(), value);
2119 #if V8_TARGET_ARCH_S390X 2120 int32_t hi_32 =
static_cast<int64_t>(value) >> 32;
2121 int32_t lo_32 =
static_cast<int32_t
>(value);
2123 iihf(dst, Operand(hi_32));
2124 iilf(dst, Operand(lo_32));
2126 iilf(dst, Operand(value));
2130 void TurboAssembler::Mul32(Register dst,
const MemOperand& src1) {
2131 if (is_uint12(src1.offset())) {
2133 }
else if (is_int20(src1.offset())) {
2140 void TurboAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
2142 void TurboAssembler::Mul32(Register dst,
const Operand& src1) {
2146 #define Generate_MulHigh32(instr) \ 2150 srlg(dst, dst, Operand(32)); \ 2153 void TurboAssembler::MulHigh32(Register dst, Register src1,
2154 const MemOperand& src2) {
2155 Generate_MulHigh32(msgf);
2158 void TurboAssembler::MulHigh32(Register dst, Register src1, Register src2) {
2160 std::swap(src1, src2);
2162 Generate_MulHigh32(msgfr);
2165 void TurboAssembler::MulHigh32(Register dst, Register src1,
2166 const Operand& src2) {
2167 Generate_MulHigh32(msgfi);
2170 #undef Generate_MulHigh32 2172 #define Generate_MulHighU32(instr) \ 2179 void TurboAssembler::MulHighU32(Register dst, Register src1,
2180 const MemOperand& src2) {
2181 Generate_MulHighU32(ml);
2184 void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) {
2185 Generate_MulHighU32(mlr);
2188 void TurboAssembler::MulHighU32(Register dst, Register src1,
2189 const Operand& src2) {
2196 #undef Generate_MulHighU32 2198 #define Generate_Mul32WithOverflowIfCCUnequal(instr) \ 2205 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2206 const MemOperand& src2) {
2207 Register result = dst;
2208 if (src2.rx() == dst || src2.rb() == dst) dst = r0;
2209 Generate_Mul32WithOverflowIfCCUnequal(msgf);
2210 if (result != dst) llgfr(result, dst);
2213 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2216 std::swap(src1, src2);
2218 Generate_Mul32WithOverflowIfCCUnequal(msgfr);
2221 void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
2222 const Operand& src2) {
2223 Generate_Mul32WithOverflowIfCCUnequal(msgfi);
2226 #undef Generate_Mul32WithOverflowIfCCUnequal 2228 void TurboAssembler::Mul64(Register dst,
const MemOperand& src1) {
2229 if (is_int20(src1.offset())) {
2236 void TurboAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
2238 void TurboAssembler::Mul64(Register dst,
const Operand& src1) {
2242 void TurboAssembler::Mul(Register dst, Register src1, Register src2) {
2243 if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
2244 MulPWithCondition(dst, src1, src2);
2248 }
else if (dst == src1) {
2257 void TurboAssembler::DivP(Register dividend, Register divider) {
2259 DCHECK_EQ(dividend.code() % 2, 0);
2260 #if V8_TARGET_ARCH_S390X 2261 dsgr(dividend, divider);
2263 dr(dividend, divider);
2267 #define Generate_Div32(instr) \ 2274 void TurboAssembler::Div32(Register dst, Register src1,
2275 const MemOperand& src2) {
2276 Generate_Div32(dsgf);
2279 void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
2280 Generate_Div32(dsgfr);
2283 #undef Generate_Div32 2285 #define Generate_DivU32(instr) \ 2288 srdl(r0, Operand(32)); \ 2293 void TurboAssembler::DivU32(Register dst, Register src1,
2294 const MemOperand& src2) {
2295 Generate_DivU32(dl);
2298 void TurboAssembler::DivU32(Register dst, Register src1, Register src2) {
2299 Generate_DivU32(dlr);
2302 #undef Generate_DivU32 2304 #define Generate_Div64(instr) \ 2311 void TurboAssembler::Div64(Register dst, Register src1,
2312 const MemOperand& src2) {
2313 Generate_Div64(dsg);
2316 void TurboAssembler::Div64(Register dst, Register src1, Register src2) {
2317 Generate_Div64(dsgr);
2320 #undef Generate_Div64 2322 #define Generate_DivU64(instr) \ 2325 lghi(r0, Operand::Zero()); \ 2330 void TurboAssembler::DivU64(Register dst, Register src1,
2331 const MemOperand& src2) {
2332 Generate_DivU64(dlg);
2335 void TurboAssembler::DivU64(Register dst, Register src1, Register src2) {
2336 Generate_DivU64(dlgr);
2339 #undef Generate_DivU64 2341 #define Generate_Mod32(instr) \ 2348 void TurboAssembler::Mod32(Register dst, Register src1,
2349 const MemOperand& src2) {
2350 Generate_Mod32(dsgf);
2353 void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
2354 Generate_Mod32(dsgfr);
2357 #undef Generate_Mod32 2359 #define Generate_ModU32(instr) \ 2362 srdl(r0, Operand(32)); \ 2367 void TurboAssembler::ModU32(Register dst, Register src1,
2368 const MemOperand& src2) {
2369 Generate_ModU32(dl);
2372 void TurboAssembler::ModU32(Register dst, Register src1, Register src2) {
2373 Generate_ModU32(dlr);
2376 #undef Generate_ModU32 2378 #define Generate_Mod64(instr) \ 2385 void TurboAssembler::Mod64(Register dst, Register src1,
2386 const MemOperand& src2) {
2387 Generate_Mod64(dsg);
2390 void TurboAssembler::Mod64(Register dst, Register src1, Register src2) {
2391 Generate_Mod64(dsgr);
2394 #undef Generate_Mod64 2396 #define Generate_ModU64(instr) \ 2399 lghi(r0, Operand::Zero()); \ 2404 void TurboAssembler::ModU64(Register dst, Register src1,
2405 const MemOperand& src2) {
2406 Generate_ModU64(dlg);
2409 void TurboAssembler::ModU64(Register dst, Register src1, Register src2) {
2410 Generate_ModU64(dlgr);
2413 #undef Generate_ModU64 2415 void TurboAssembler::MulP(Register dst,
const Operand& opnd) {
2416 #if V8_TARGET_ARCH_S390X 2423 void TurboAssembler::MulP(Register dst, Register src) {
2424 #if V8_TARGET_ARCH_S390X 2431 void TurboAssembler::MulPWithCondition(Register dst, Register src1,
2433 CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
2434 #if V8_TARGET_ARCH_S390X 2435 msgrkc(dst, src1, src2);
2437 msrkc(dst, src1, src2);
2441 void TurboAssembler::MulP(Register dst,
const MemOperand& opnd) {
2442 #if V8_TARGET_ARCH_S390X 2443 if (is_uint16(opnd.offset())) {
2445 }
else if (is_int20(opnd.offset())) {
2451 if (is_int20(opnd.offset())) {
2459 void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
2460 sqdbr(result, input);
2462 void TurboAssembler::Sqrt(DoubleRegister result,
const MemOperand& input) {
2463 if (is_uint12(input.offset())) {
2464 sqdb(result, input);
2467 sqdbr(result, result);
2475 void TurboAssembler::Add32(Register dst,
const Operand& opnd) {
2476 if (is_int16(opnd.immediate()))
2483 void TurboAssembler::Add32_RI(Register dst,
const Operand& opnd) {
2489 void TurboAssembler::AddP(Register dst,
const Operand& opnd) {
2490 #if V8_TARGET_ARCH_S390X 2491 if (is_int16(opnd.immediate()))
2501 void TurboAssembler::Add32(Register dst, Register src,
const Operand& opnd) {
2503 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
2504 ahik(dst, src, opnd);
2513 void TurboAssembler::Add32_RRI(Register dst, Register src,
2514 const Operand& opnd) {
2516 Add32(dst, src, opnd);
2520 void TurboAssembler::AddP(Register dst, Register src,
const Operand& opnd) {
2522 if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
2523 AddPImm_RRI(dst, src, opnd);
2532 void TurboAssembler::Add32(Register dst, Register src) { ar(dst, src); }
2535 void TurboAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }
2541 void TurboAssembler::AddP_ExtendSrc(Register dst, Register src) {
2542 #if V8_TARGET_ARCH_S390X 2550 void TurboAssembler::Add32(Register dst, Register src1, Register src2) {
2551 if (dst != src1 && dst != src2) {
2554 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2555 ark(dst, src1, src2);
2560 }
else if (dst == src2) {
2567 void TurboAssembler::AddP(Register dst, Register src1, Register src2) {
2568 if (dst != src1 && dst != src2) {
2571 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2572 AddP_RRR(dst, src1, src2);
2577 }
else if (dst == src2) {
2588 void TurboAssembler::AddP_ExtendSrc(Register dst, Register src1,
2590 #if V8_TARGET_ARCH_S390X 2596 if (dst != src1) LoadRR(dst, src1);
2600 AddP(dst, src1, src2);
2605 void TurboAssembler::Add32(Register dst,
const MemOperand& opnd) {
2606 DCHECK(is_int20(opnd.offset()));
2607 if (is_uint12(opnd.offset()))
2614 void TurboAssembler::AddP(Register dst,
const MemOperand& opnd) {
2615 #if V8_TARGET_ARCH_S390X 2616 DCHECK(is_int20(opnd.offset()));
2627 void TurboAssembler::AddP_ExtendSrc(Register dst,
const MemOperand& opnd) {
2628 #if V8_TARGET_ARCH_S390X 2629 DCHECK(is_int20(opnd.offset()));
2637 void TurboAssembler::Add32(
const MemOperand& opnd,
const Operand& imm) {
2638 DCHECK(is_int8(imm.immediate()));
2639 DCHECK(is_int20(opnd.offset()));
2640 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
2645 void TurboAssembler::AddP(
const MemOperand& opnd,
const Operand& imm) {
2646 DCHECK(is_int8(imm.immediate()));
2647 DCHECK(is_int20(opnd.offset()));
2648 DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
2649 #if V8_TARGET_ARCH_S390X 2661 void TurboAssembler::AddLogicalWithCarry32(Register dst, Register src1,
2663 if (dst != src2 && dst != src1) {
2666 }
else if (dst != src2) {
2668 DCHECK(dst == src1);
2672 DCHECK(dst == src2);
2678 void TurboAssembler::AddLogical32(Register dst, Register src1, Register src2) {
2679 if (dst != src2 && dst != src1) {
2682 }
else if (dst != src2) {
2684 DCHECK(dst == src1);
2688 DCHECK(dst == src2);
2694 void TurboAssembler::AddLogical(Register dst,
const Operand& imm) {
2699 void TurboAssembler::AddLogicalP(Register dst,
const Operand& imm) {
2700 #ifdef V8_TARGET_ARCH_S390X 2703 AddLogical(dst, imm);
2708 void TurboAssembler::AddLogical(Register dst,
const MemOperand& opnd) {
2709 DCHECK(is_int20(opnd.offset()));
2710 if (is_uint12(opnd.offset()))
2717 void TurboAssembler::AddLogicalP(Register dst,
const MemOperand& opnd) {
2718 #if V8_TARGET_ARCH_S390X 2719 DCHECK(is_int20(opnd.offset()));
2722 AddLogical(dst, opnd);
2732 void TurboAssembler::SubLogicalWithBorrow32(Register dst, Register src1,
2734 if (dst != src2 && dst != src1) {
2737 }
else if (dst != src2) {
2739 DCHECK(dst == src1);
2743 DCHECK(dst == src2);
2745 SubLogicalWithBorrow32(dst, src1, r0);
2750 void TurboAssembler::SubLogical32(Register dst, Register src1, Register src2) {
2751 if (dst != src2 && dst != src1) {
2754 }
else if (dst != src2) {
2756 DCHECK(dst == src1);
2760 DCHECK(dst == src2);
2762 SubLogical32(dst, src1, r0);
2767 void TurboAssembler::Sub32(Register dst,
const Operand& imm) {
2768 Add32(dst, Operand(-(imm.immediate())));
2772 void TurboAssembler::SubP(Register dst,
const Operand& imm) {
2773 AddP(dst, Operand(-(imm.immediate())));
2777 void TurboAssembler::Sub32(Register dst, Register src,
const Operand& imm) {
2778 Add32(dst, src, Operand(-(imm.immediate())));
2782 void TurboAssembler::SubP(Register dst, Register src,
const Operand& imm) {
2783 AddP(dst, src, Operand(-(imm.immediate())));
2787 void TurboAssembler::Sub32(Register dst, Register src) { sr(dst, src); }
2790 void TurboAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
2796 void TurboAssembler::SubP_ExtendSrc(Register dst, Register src) {
2797 #if V8_TARGET_ARCH_S390X 2805 void TurboAssembler::Sub32(Register dst, Register src1, Register src2) {
2807 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2808 srk(dst, src1, src2);
2811 if (dst != src1 && dst != src2) lr(dst, src1);
2813 if (dst != src1 && dst == src2) {
2825 void TurboAssembler::SubP(Register dst, Register src1, Register src2) {
2827 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2828 SubP_RRR(dst, src1, src2);
2831 if (dst != src1 && dst != src2) LoadRR(dst, src1);
2833 if (dst != src1 && dst == src2) {
2835 LoadComplementRR(dst, dst);
2848 void TurboAssembler::SubP_ExtendSrc(Register dst, Register src1,
2850 #if V8_TARGET_ARCH_S390X 2851 if (dst != src1 && dst != src2) LoadRR(dst, src1);
2854 if (dst != src1 && dst == src2) {
2856 LoadComplementRR(dst, dst);
2862 SubP(dst, src1, src2);
2867 void TurboAssembler::Sub32(Register dst,
const MemOperand& opnd) {
2868 DCHECK(is_int20(opnd.offset()));
2869 if (is_uint12(opnd.offset()))
2876 void TurboAssembler::SubP(Register dst,
const MemOperand& opnd) {
2877 #if V8_TARGET_ARCH_S390X 2884 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2885 sllg(r0, src, Operand(32));
2889 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2891 srlg(dst, dst, Operand(32));
2894 void TurboAssembler::SubP_ExtendSrc(Register dst,
const MemOperand& opnd) {
2895 #if V8_TARGET_ARCH_S390X 2896 DCHECK(is_int20(opnd.offset()));
2904 void TurboAssembler::LoadAndSub32(Register dst, Register src,
2905 const MemOperand& opnd) {
2907 laa(dst, dst, opnd);
2910 void TurboAssembler::LoadAndSub64(Register dst, Register src,
2911 const MemOperand& opnd) {
2913 laag(dst, dst, opnd);
2921 void TurboAssembler::SubLogical(Register dst,
const MemOperand& opnd) {
2922 DCHECK(is_int20(opnd.offset()));
2923 if (is_uint12(opnd.offset()))
2930 void TurboAssembler::SubLogicalP(Register dst,
const MemOperand& opnd) {
2931 DCHECK(is_int20(opnd.offset()));
2932 #if V8_TARGET_ARCH_S390X 2935 SubLogical(dst, opnd);
2943 void TurboAssembler::SubLogicalP_ExtendSrc(Register dst,
2944 const MemOperand& opnd) {
2945 #if V8_TARGET_ARCH_S390X 2946 DCHECK(is_int20(opnd.offset()));
2949 SubLogical(dst, opnd);
2958 void TurboAssembler::And(Register dst, Register src) { nr(dst, src); }
2961 void TurboAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
2964 void TurboAssembler::And(Register dst, Register src1, Register src2) {
2965 if (dst != src1 && dst != src2) {
2968 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2969 nrk(dst, src1, src2);
2974 }
else if (dst == src2) {
2981 void TurboAssembler::AndP(Register dst, Register src1, Register src2) {
2982 if (dst != src1 && dst != src2) {
2985 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
2986 AndP_RRR(dst, src1, src2);
2991 }
else if (dst == src2) {
2998 void TurboAssembler::And(Register dst,
const MemOperand& opnd) {
2999 DCHECK(is_int20(opnd.offset()));
3000 if (is_uint12(opnd.offset()))
3007 void TurboAssembler::AndP(Register dst,
const MemOperand& opnd) {
3008 DCHECK(is_int20(opnd.offset()));
3009 #if V8_TARGET_ARCH_S390X 3017 void TurboAssembler::And(Register dst,
const Operand& opnd) { nilf(dst, opnd); }
3020 void TurboAssembler::AndP(Register dst,
const Operand& opnd) {
3021 #if V8_TARGET_ARCH_S390X 3022 intptr_t value = opnd.immediate();
3023 if (value >> 32 != -1) {
3025 nihf(dst, Operand(value >> 32));
3027 nilf(dst, Operand(value & 0xFFFFFFFF));
3034 void TurboAssembler::And(Register dst, Register src,
const Operand& opnd) {
3035 if (dst != src) lr(dst, src);
3040 void TurboAssembler::AndP(Register dst, Register src,
const Operand& opnd) {
3042 intptr_t value = opnd.immediate();
3043 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
3044 intptr_t shifted_value = value;
3045 int trailing_zeros = 0;
3048 while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
3050 shifted_value >>= 1;
3057 if ((0 != shifted_value) && base::bits::IsPowerOfTwo(shifted_value + 1)) {
3059 base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
3060 int endBit = 63 - trailing_zeros;
3062 RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
3063 Operand::Zero(),
true);
3065 }
else if (-1 == shifted_value) {
3068 int endBit = 63 - trailing_zeros;
3069 RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
3070 Operand::Zero(),
true);
3076 if (dst != src && (0 != value)) LoadRR(dst, src);
3081 void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); }
3084 void TurboAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
3087 void TurboAssembler::Or(Register dst, Register src1, Register src2) {
3088 if (dst != src1 && dst != src2) {
3091 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3092 ork(dst, src1, src2);
3097 }
else if (dst == src2) {
3104 void TurboAssembler::OrP(Register dst, Register src1, Register src2) {
3105 if (dst != src1 && dst != src2) {
3108 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3109 OrP_RRR(dst, src1, src2);
3114 }
else if (dst == src2) {
3121 void TurboAssembler::Or(Register dst,
const MemOperand& opnd) {
3122 DCHECK(is_int20(opnd.offset()));
3123 if (is_uint12(opnd.offset()))
3130 void TurboAssembler::OrP(Register dst,
const MemOperand& opnd) {
3131 DCHECK(is_int20(opnd.offset()));
3132 #if V8_TARGET_ARCH_S390X 3140 void TurboAssembler::Or(Register dst,
const Operand& opnd) { oilf(dst, opnd); }
3143 void TurboAssembler::OrP(Register dst,
const Operand& opnd) {
3144 #if V8_TARGET_ARCH_S390X 3145 intptr_t value = opnd.immediate();
3146 if (value >> 32 != 0) {
3148 oihf(dst, Operand(value >> 32));
3150 oilf(dst, Operand(value & 0xFFFFFFFF));
3157 void TurboAssembler::Or(Register dst, Register src,
const Operand& opnd) {
3158 if (dst != src) lr(dst, src);
3163 void TurboAssembler::OrP(Register dst, Register src,
const Operand& opnd) {
3164 if (dst != src) LoadRR(dst, src);
3169 void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); }
3172 void TurboAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
3175 void TurboAssembler::Xor(Register dst, Register src1, Register src2) {
3176 if (dst != src1 && dst != src2) {
3179 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3180 xrk(dst, src1, src2);
3185 }
else if (dst == src2) {
3192 void TurboAssembler::XorP(Register dst, Register src1, Register src2) {
3193 if (dst != src1 && dst != src2) {
3196 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3197 XorP_RRR(dst, src1, src2);
3202 }
else if (dst == src2) {
3209 void TurboAssembler::Xor(Register dst,
const MemOperand& opnd) {
3210 DCHECK(is_int20(opnd.offset()));
3211 if (is_uint12(opnd.offset()))
3218 void TurboAssembler::XorP(Register dst,
const MemOperand& opnd) {
3219 DCHECK(is_int20(opnd.offset()));
3220 #if V8_TARGET_ARCH_S390X 3228 void TurboAssembler::Xor(Register dst,
const Operand& opnd) { xilf(dst, opnd); }
3231 void TurboAssembler::XorP(Register dst,
const Operand& opnd) {
3232 #if V8_TARGET_ARCH_S390X 3233 intptr_t value = opnd.immediate();
3234 xihf(dst, Operand(value >> 32));
3235 xilf(dst, Operand(value & 0xFFFFFFFF));
3242 void TurboAssembler::Xor(Register dst, Register src,
const Operand& opnd) {
3243 if (dst != src) lr(dst, src);
3248 void TurboAssembler::XorP(Register dst, Register src,
const Operand& opnd) {
3249 if (dst != src) LoadRR(dst, src);
3253 void TurboAssembler::Not32(Register dst, Register src) {
3254 if (src != no_reg && src != dst) lr(dst, src);
3255 xilf(dst, Operand(0xFFFFFFFF));
3258 void TurboAssembler::Not64(Register dst, Register src) {
3259 if (src != no_reg && src != dst) lgr(dst, src);
3260 xihf(dst, Operand(0xFFFFFFFF));
3261 xilf(dst, Operand(0xFFFFFFFF));
3264 void TurboAssembler::NotP(Register dst, Register src) {
3265 #if V8_TARGET_ARCH_S390X 3273 void TurboAssembler::Load(Register dst,
const Operand& opnd) {
3274 intptr_t value = opnd.immediate();
3275 if (is_int16(value)) {
3276 #if V8_TARGET_ARCH_S390X 3281 }
else if (is_int32(value)) {
3282 #if V8_TARGET_ARCH_S390X 3287 }
else if (is_uint32(value)) {
3288 #if V8_TARGET_ARCH_S390X 3294 int32_t hi_32 =
static_cast<int64_t>(value) >> 32;
3295 int32_t lo_32 =
static_cast<int32_t
>(value);
3297 iihf(dst, Operand(hi_32));
3298 iilf(dst, Operand(lo_32));
3302 void TurboAssembler::Load(Register dst,
const MemOperand& opnd) {
3303 DCHECK(is_int20(opnd.offset()));
3304 #if V8_TARGET_ARCH_S390X 3307 if (is_uint12(opnd.offset())) {
3315 void TurboAssembler::LoadPositiveP(Register result, Register input) {
3316 #if V8_TARGET_ARCH_S390X 3317 lpgr(result, input);
3323 void TurboAssembler::LoadPositive32(Register result, Register input) {
3325 lgfr(result, result);
3333 void TurboAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }
3336 void TurboAssembler::CmpP(Register src1, Register src2) {
3337 #if V8_TARGET_ARCH_S390X 3346 void TurboAssembler::Cmp32(Register dst,
const Operand& opnd) {
3347 if (opnd.rmode() == RelocInfo::NONE) {
3348 intptr_t value = opnd.immediate();
3349 if (is_int16(value))
3355 RecordRelocInfo(opnd.rmode(), opnd.immediate());
3362 void TurboAssembler::CmpP(Register dst,
const Operand& opnd) {
3363 #if V8_TARGET_ARCH_S390X 3364 if (opnd.rmode() == RelocInfo::NONE) {
3376 void TurboAssembler::Cmp32(Register dst,
const MemOperand& opnd) {
3378 DCHECK(is_int20(opnd.offset()));
3379 if (is_uint12(opnd.offset()))
3386 void TurboAssembler::CmpP(Register dst,
const MemOperand& opnd) {
3388 DCHECK(is_int20(opnd.offset()));
3389 #if V8_TARGET_ARCH_S390X 3397 void TurboAssembler::CmpAndSwap(Register old_val, Register new_val,
3398 const MemOperand& opnd) {
3399 if (is_uint12(opnd.offset())) {
3400 cs(old_val, new_val, opnd);
3402 csy(old_val, new_val, opnd);
3406 void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val,
3407 const MemOperand& opnd) {
3408 DCHECK(is_int20(opnd.offset()));
3409 csg(old_val, new_val, opnd);
3417 void TurboAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }
3420 void TurboAssembler::CmpLogicalP(Register dst, Register src) {
3421 #ifdef V8_TARGET_ARCH_S390X 3424 CmpLogical32(dst, src);
3429 void TurboAssembler::CmpLogical32(Register dst,
const Operand& opnd) {
3434 void TurboAssembler::CmpLogicalP(Register dst,
const Operand& opnd) {
3435 #if V8_TARGET_ARCH_S390X 3436 DCHECK_EQ(static_cast<uint32_t>(opnd.immediate() >> 32), 0);
3439 CmpLogical32(dst, opnd);
3444 void TurboAssembler::CmpLogical32(Register dst,
const MemOperand& opnd) {
3446 DCHECK(is_int20(opnd.offset()));
3447 if (is_uint12(opnd.offset()))
3454 void TurboAssembler::CmpLogicalP(Register dst,
const MemOperand& opnd) {
3456 DCHECK(is_int20(opnd.offset()));
3457 #if V8_TARGET_ARCH_S390X 3460 CmpLogical32(dst, opnd);
3465 void TurboAssembler::CmpLogicalByte(
const MemOperand& mem,
const Operand& imm) {
3466 DCHECK(is_uint8(imm.immediate()));
3467 if (is_uint12(mem.offset()))
3473 void TurboAssembler::Branch(Condition c,
const Operand& opnd) {
3474 intptr_t value = opnd.immediate();
3475 if (is_int16(value))
3482 void TurboAssembler::BranchOnCount(Register r1, Label* l) {
3483 int32_t offset = branch_offset(l);
3484 if (is_int16(offset)) {
3485 #if V8_TARGET_ARCH_S390X 3486 brctg(r1, Operand(offset));
3488 brct(r1, Operand(offset));
3491 AddP(r1, Operand(-1));
3492 Branch(ne, Operand(offset));
3496 void TurboAssembler::LoadIntLiteral(Register dst,
int value) {
3497 Load(dst, Operand(value));
3500 void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
3501 intptr_t value =
static_cast<intptr_t
>(smi.ptr());
3502 #if V8_TARGET_ARCH_S390X 3503 DCHECK_EQ(value & 0xFFFFFFFF, 0);
3505 llihf(dst, Operand(value >> 32));
3507 llilf(dst, Operand(value));
3511 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
3519 }
else if (lo_32 == 0) {
3520 llihf(scratch, Operand(hi_32));
3521 ldgr(result, scratch);
3523 iihf(scratch, Operand(hi_32));
3524 iilf(scratch, Operand(lo_32));
3525 ldgr(result, scratch);
3529 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result,
double value,
3531 uint64_t int_val = bit_cast<uint64_t,
double>(value);
3532 LoadDoubleLiteral(result, int_val, scratch);
3535 void TurboAssembler::LoadFloat32Literal(DoubleRegister result,
float value,
3537 uint64_t int_val =
static_cast<uint64_t
>(bit_cast<
uint32_t,
float>(value))
3539 LoadDoubleLiteral(result, int_val, scratch);
3542 void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) {
3543 #if V8_TARGET_ARCH_S390X 3544 if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
3545 cih(src1, Operand(static_cast<intptr_t>(smi.ptr()) >> 32));
3547 LoadSmiLiteral(scratch, smi);
3552 cfi(src1, Operand(smi));
3557 void TurboAssembler::LoadP(Register dst,
const MemOperand& mem,
3559 int offset = mem.offset();
3561 #if V8_TARGET_ARCH_S390X 3562 MemOperand src = mem;
3563 if (!is_int20(offset)) {
3564 DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
3565 DCHECK(scratch != mem.rb());
3566 LoadIntLiteral(scratch, offset);
3567 src = MemOperand(mem.rb(), scratch);
3571 if (is_uint12(offset)) {
3573 }
else if (is_int20(offset)) {
3576 DCHECK(scratch != no_reg && scratch != r0 && mem.rx() == r0);
3577 DCHECK(scratch != mem.rb());
3578 LoadIntLiteral(scratch, offset);
3579 l(dst, MemOperand(mem.rb(), scratch));
3585 void TurboAssembler::StoreP(Register src,
const MemOperand& mem,
3587 if (!is_int20(mem.offset())) {
3588 DCHECK(scratch != no_reg);
3589 DCHECK(scratch != r0);
3590 LoadIntLiteral(scratch, mem.offset());
3591 #if V8_TARGET_ARCH_S390X 3592 stg(src, MemOperand(mem.rb(), scratch));
3594 st(src, MemOperand(mem.rb(), scratch));
3597 #if V8_TARGET_ARCH_S390X 3608 void TurboAssembler::StoreP(
const MemOperand& mem,
const Operand& opnd,
3611 DCHECK_EQ(opnd.rmode(), RelocInfo::NONE);
3614 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
3615 mem.getIndexRegister() == r0 && is_int16(opnd.immediate())) {
3616 #if V8_TARGET_ARCH_S390X 3622 LoadImmP(scratch, opnd);
3623 StoreP(scratch, mem);
3627 void TurboAssembler::LoadMultipleP(Register dst1, Register dst2,
3628 const MemOperand& mem) {
3629 #if V8_TARGET_ARCH_S390X 3630 DCHECK(is_int20(mem.offset()));
3631 lmg(dst1, dst2, mem);
3633 if (is_uint12(mem.offset())) {
3634 lm(dst1, dst2, mem);
3636 DCHECK(is_int20(mem.offset()));
3637 lmy(dst1, dst2, mem);
3642 void TurboAssembler::StoreMultipleP(Register src1, Register src2,
3643 const MemOperand& mem) {
3644 #if V8_TARGET_ARCH_S390X 3645 DCHECK(is_int20(mem.offset()));
3646 stmg(src1, src2, mem);
3648 if (is_uint12(mem.offset())) {
3649 stm(src1, src2, mem);
3651 DCHECK(is_int20(mem.offset()));
3652 stmy(src1, src2, mem);
3657 void TurboAssembler::LoadMultipleW(Register dst1, Register dst2,
3658 const MemOperand& mem) {
3659 if (is_uint12(mem.offset())) {
3660 lm(dst1, dst2, mem);
3662 DCHECK(is_int20(mem.offset()));
3663 lmy(dst1, dst2, mem);
3667 void TurboAssembler::StoreMultipleW(Register src1, Register src2,
3668 const MemOperand& mem) {
3669 if (is_uint12(mem.offset())) {
3670 stm(src1, src2, mem);
3672 DCHECK(is_int20(mem.offset()));
3673 stmy(src1, src2, mem);
3678 void TurboAssembler::LoadW(Register dst, Register src) {
3679 #if V8_TARGET_ARCH_S390X 3682 if (dst != src) lr(dst, src);
3687 void TurboAssembler::LoadW(Register dst,
const MemOperand& mem,
3689 int offset = mem.offset();
3691 if (!is_int20(offset)) {
3692 DCHECK(scratch != no_reg);
3693 LoadIntLiteral(scratch, offset);
3694 #if V8_TARGET_ARCH_S390X 3695 lgf(dst, MemOperand(mem.rb(), scratch));
3697 l(dst, MemOperand(mem.rb(), scratch));
3700 #if V8_TARGET_ARCH_S390X 3703 if (is_uint12(offset)) {
3713 void TurboAssembler::LoadlW(Register dst, Register src) {
3714 #if V8_TARGET_ARCH_S390X 3717 if (dst != src) lr(dst, src);
3723 void TurboAssembler::LoadlW(Register dst,
const MemOperand& mem,
3725 Register base = mem.rb();
3726 int offset = mem.offset();
3728 #if V8_TARGET_ARCH_S390X 3729 if (is_int20(offset)) {
3731 }
else if (scratch != no_reg) {
3733 LoadIntLiteral(scratch, offset);
3734 llgf(dst, MemOperand(base, scratch));
3739 bool use_RXform =
false;
3740 bool use_RXYform =
false;
3741 if (is_uint12(offset)) {
3744 }
else if (is_int20(offset)) {
3747 }
else if (scratch != no_reg) {
3749 LoadIntLiteral(scratch, offset);
3756 }
else if (use_RXYform) {
3759 ly(dst, MemOperand(base, scratch));
3764 void TurboAssembler::LoadLogicalHalfWordP(Register dst,
const MemOperand& mem) {
3765 #if V8_TARGET_ARCH_S390X 3772 void TurboAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
3773 #if V8_TARGET_ARCH_S390X 3780 void TurboAssembler::LoadB(Register dst,
const MemOperand& mem) {
3781 #if V8_TARGET_ARCH_S390X 3788 void TurboAssembler::LoadB(Register dst, Register src) {
3789 #if V8_TARGET_ARCH_S390X 3796 void TurboAssembler::LoadlB(Register dst,
const MemOperand& mem) {
3797 #if V8_TARGET_ARCH_S390X 3804 void TurboAssembler::LoadlB(Register dst, Register src) {
3805 #if V8_TARGET_ARCH_S390X 3812 void TurboAssembler::LoadLogicalReversedWordP(Register dst,
3813 const MemOperand& mem) {
3818 void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
3819 const MemOperand& mem) {
3821 LoadLogicalHalfWordP(dst, dst);
3826 void TurboAssembler::LoadAndTest32(Register dst, Register src) {
3834 void TurboAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
3835 #if V8_TARGET_ARCH_S390X 3843 void TurboAssembler::LoadAndTestP(Register dst, Register src) {
3844 #if V8_TARGET_ARCH_S390X 3852 void TurboAssembler::LoadAndTest32(Register dst,
const MemOperand& mem) {
3857 void TurboAssembler::LoadAndTestP(Register dst,
const MemOperand& mem) {
3858 #if V8_TARGET_ARCH_S390X 3866 void TurboAssembler::LoadOnConditionP(Condition cond, Register dst,
3868 #if V8_TARGET_ARCH_S390X 3869 locgr(cond, dst, src);
3871 locr(cond, dst, src);
3876 void TurboAssembler::LoadDouble(DoubleRegister dst,
const MemOperand& mem) {
3878 if (is_uint12(mem.offset())) {
3886 void TurboAssembler::LoadFloat32(DoubleRegister dst,
const MemOperand& mem) {
3887 if (is_uint12(mem.offset())) {
3890 DCHECK(is_int20(mem.offset()));
3897 void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
3898 const MemOperand& mem) {
3899 LoadFloat32(dst, mem);
3904 void TurboAssembler::StoreDouble(DoubleRegister dst,
const MemOperand& mem) {
3905 if (is_uint12(mem.offset())) {
3913 void TurboAssembler::StoreFloat32(DoubleRegister src,
const MemOperand& mem) {
3914 if (is_uint12(mem.offset())) {
3923 void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
3924 const MemOperand& mem,
3925 DoubleRegister scratch) {
3926 ledbr(scratch, src);
3927 StoreFloat32(scratch, mem);
3930 void TurboAssembler::AddFloat32(DoubleRegister dst,
const MemOperand& opnd,
3931 DoubleRegister scratch) {
3932 if (is_uint12(opnd.offset())) {
3940 void TurboAssembler::AddFloat64(DoubleRegister dst,
const MemOperand& opnd,
3941 DoubleRegister scratch) {
3942 if (is_uint12(opnd.offset())) {
3950 void TurboAssembler::SubFloat32(DoubleRegister dst,
const MemOperand& opnd,
3951 DoubleRegister scratch) {
3952 if (is_uint12(opnd.offset())) {
3960 void TurboAssembler::SubFloat64(DoubleRegister dst,
const MemOperand& opnd,
3961 DoubleRegister scratch) {
3962 if (is_uint12(opnd.offset())) {
3970 void TurboAssembler::MulFloat32(DoubleRegister dst,
const MemOperand& opnd,
3971 DoubleRegister scratch) {
3972 if (is_uint12(opnd.offset())) {
3976 meebr(dst, scratch);
3980 void TurboAssembler::MulFloat64(DoubleRegister dst,
const MemOperand& opnd,
3981 DoubleRegister scratch) {
3982 if (is_uint12(opnd.offset())) {
3990 void TurboAssembler::DivFloat32(DoubleRegister dst,
const MemOperand& opnd,
3991 DoubleRegister scratch) {
3992 if (is_uint12(opnd.offset())) {
4000 void TurboAssembler::DivFloat64(DoubleRegister dst,
const MemOperand& opnd,
4001 DoubleRegister scratch) {
4002 if (is_uint12(opnd.offset())) {
4010 void TurboAssembler::LoadFloat32ToDouble(DoubleRegister dst,
4011 const MemOperand& opnd,
4012 DoubleRegister scratch) {
4013 if (is_uint12(opnd.offset())) {
4017 ldebr(dst, scratch);
4023 void TurboAssembler::StoreW(Register src,
const MemOperand& mem,
4025 Register base = mem.rb();
4026 int offset = mem.offset();
4028 bool use_RXform =
false;
4029 bool use_RXYform =
false;
4031 if (is_uint12(offset)) {
4034 }
else if (is_int20(offset)) {
4037 }
else if (scratch != no_reg) {
4039 LoadIntLiteral(scratch, offset);
4047 }
else if (use_RXYform) {
4050 StoreW(src, MemOperand(base, scratch));
4054 void TurboAssembler::LoadHalfWordP(Register dst, Register src) {
4055 #if V8_TARGET_ARCH_S390X 4064 void TurboAssembler::LoadHalfWordP(Register dst,
const MemOperand& mem,
4066 Register base = mem.rb();
4067 int offset = mem.offset();
4069 if (!is_int20(offset)) {
4070 DCHECK(scratch != no_reg);
4071 LoadIntLiteral(scratch, offset);
4072 #if V8_TARGET_ARCH_S390X 4073 lgh(dst, MemOperand(base, scratch));
4075 lh(dst, MemOperand(base, scratch));
4078 #if V8_TARGET_ARCH_S390X 4081 if (is_uint12(offset)) {
4092 void TurboAssembler::StoreHalfWord(Register src,
const MemOperand& mem,
4094 Register base = mem.rb();
4095 int offset = mem.offset();
4097 if (is_uint12(offset)) {
4099 }
else if (is_int20(offset)) {
4102 DCHECK(scratch != no_reg);
4103 LoadIntLiteral(scratch, offset);
4104 sth(src, MemOperand(base, scratch));
4110 void TurboAssembler::StoreByte(Register src,
const MemOperand& mem,
4112 Register base = mem.rb();
4113 int offset = mem.offset();
4115 if (is_uint12(offset)) {
4117 }
else if (is_int20(offset)) {
4120 DCHECK(scratch != no_reg);
4121 LoadIntLiteral(scratch, offset);
4122 stc(src, MemOperand(base, scratch));
4127 void TurboAssembler::ShiftLeft(Register dst, Register src,
const Operand& val) {
4130 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4131 sllk(dst, src, val);
4139 void TurboAssembler::ShiftLeft(Register dst, Register src, Register val) {
4142 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4143 sllk(dst, src, val);
4152 void TurboAssembler::ShiftRight(Register dst, Register src,
4153 const Operand& val) {
4156 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4157 srlk(dst, src, val);
4165 void TurboAssembler::ShiftRight(Register dst, Register src, Register val) {
4168 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4169 srlk(dst, src, val);
4178 void TurboAssembler::ShiftLeftArith(Register dst, Register src,
4179 const Operand& val) {
4182 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4183 slak(dst, src, val);
4191 void TurboAssembler::ShiftLeftArith(Register dst, Register src, Register val) {
4194 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4195 slak(dst, src, val);
4204 void TurboAssembler::ShiftRightArith(Register dst, Register src,
4205 const Operand& val) {
4208 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4209 srak(dst, src, val);
4217 void TurboAssembler::ShiftRightArith(Register dst, Register src, Register val) {
4220 }
else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
4221 srak(dst, src, val);
4230 void TurboAssembler::ClearRightImm(Register dst, Register src,
4231 const Operand& val) {
4232 int numBitsToClear = val.immediate() % (kPointerSize * 8);
4235 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
4236 int endBit = 63 - numBitsToClear;
4237 RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
4238 Operand::Zero(),
true);
4242 uint64_t hexMask = ~((1L << numBitsToClear) - 1);
4245 if (dst != src) LoadRR(dst, src);
4247 if (numBitsToClear <= 16) {
4248 nill(dst, Operand(static_cast<uint16_t>(hexMask)));
4249 }
else if (numBitsToClear <= 32) {
4250 nilf(dst, Operand(static_cast<uint32_t>(hexMask)));
4251 }
else if (numBitsToClear <= 64) {
4252 nilf(dst, Operand(static_cast<intptr_t>(0)));
4253 nihf(dst, Operand(hexMask >> 32));
4257 void TurboAssembler::Popcnt32(Register dst, Register src) {
4262 ShiftRight(r0, dst, Operand(16));
4264 ShiftRight(r0, dst, Operand(8));
4269 #ifdef V8_TARGET_ARCH_S390X 4270 void TurboAssembler::Popcnt64(Register dst, Register src) {
4275 ShiftRightP(r0, dst, Operand(32));
4277 ShiftRightP(r0, dst, Operand(16));
4279 ShiftRightP(r0, dst, Operand(8));
4285 void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
4286 if (src == dst)
return;
4287 DCHECK(!AreAliased(src, dst, scratch));
4288 LoadRR(scratch, src);
4290 LoadRR(dst, scratch);
4293 void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
4294 if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch));
4295 if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
4296 DCHECK(!AreAliased(src, scratch));
4297 LoadRR(scratch, src);
4299 StoreP(scratch, dst);
4302 void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
4303 Register scratch_1) {
4304 if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1));
4305 if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
4306 if (dst.rx() != r0) DCHECK(!AreAliased(dst.rx(), scratch_0, scratch_1));
4307 if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
4308 DCHECK(!AreAliased(scratch_0, scratch_1));
4309 LoadP(scratch_0, src);
4310 LoadP(scratch_1, dst);
4311 StoreP(scratch_0, dst);
4312 StoreP(scratch_1, src);
4315 void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
4316 DoubleRegister scratch) {
4317 if (src == dst)
return;
4318 DCHECK(!AreAliased(src, dst, scratch));
4324 void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
4325 DoubleRegister scratch) {
4326 DCHECK(!AreAliased(src, scratch));
4328 LoadFloat32(src, dst);
4329 StoreFloat32(scratch, dst);
4332 void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
4333 DoubleRegister scratch_0,
4334 DoubleRegister scratch_1) {
4335 DCHECK(!AreAliased(scratch_0, scratch_1));
4336 LoadFloat32(scratch_0, src);
4337 LoadFloat32(scratch_1, dst);
4338 StoreFloat32(scratch_0, dst);
4339 StoreFloat32(scratch_1, src);
4342 void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
4343 DoubleRegister scratch) {
4344 if (src == dst)
return;
4345 DCHECK(!AreAliased(src, dst, scratch));
4351 void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
4352 DoubleRegister scratch) {
4353 DCHECK(!AreAliased(src, scratch));
4355 LoadDouble(src, dst);
4356 StoreDouble(scratch, dst);
4359 void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
4360 DoubleRegister scratch_0,
4361 DoubleRegister scratch_1) {
4362 DCHECK(!AreAliased(scratch_0, scratch_1));
4363 LoadDouble(scratch_0, src);
4364 LoadDouble(scratch_1, dst);
4365 StoreDouble(scratch_0, dst);
4366 StoreDouble(scratch_1, src);
4369 void TurboAssembler::ResetSpeculationPoisonRegister() {
4370 mov(kSpeculationPoisonRegister, Operand(-1));
4373 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
4374 larl(dst, Operand(-pc_offset() / 2));
4377 void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
4378 Cmp32(x, Operand(y));
4382 void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
4383 Cmp32(x, Operand(y));
4390 #endif // V8_TARGET_ARCH_S390