// NOTE(review): this chunk is a lossy extraction of V8's src/arm/codegen.cc.
// Original file line numbers have leaked into the text, several source lines
// are fused together, and many lines (register definitions for src/dest/chars,
// label binds, the conditional branches paired with each cmp below, and the
// #else/#endif arms) were dropped. Diff against the upstream file before
// changing any code here.
//
// Builds an out-of-line memcpy(uint8_t* dest, const uint8_t* src, size_t chars)
// stub: allocates a page, emits either a NEON bulk-copy (when NEON is
// supported) or a scalar word/halfword/byte copy, flushes the I-cache, flips
// the page to read+execute, and returns it as a C function pointer. Returns
// the caller-supplied C++ `stub` fallback if page allocation fails (and,
// presumably, on the simulator — that #if arm is not visible here).
9 #include "src/arm/assembler-arm-inl.h" 10 #include "src/arm/simulator-arm.h" 11 #include "src/codegen.h" 12 #include "src/macro-assembler.h" 19 #if defined(V8_HOST_ARCH_ARM) 21 MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
// Grab a fresh writable page for the generated code; bail out to the C++
// fallback if the allocation fails.
22 #if defined(USE_SIMULATOR) 27 byte* buffer = AllocatePage(page_allocator,
29 if (buffer ==
nullptr)
return stub;
// Assemble directly into the freshly allocated buffer.
31 MacroAssembler masm(AssemblerOptions{}, buffer,
static_cast<int>(allocated));
// Fast path: bulk copy via NEON 64/32/16/8-byte vector moves.
39 if (CpuFeatures::IsSupported(NEON)) {
40 CpuFeatureScope scope(&masm, NEON);
// Size-bucket labels; the branches that target most of them were dropped by
// the extraction.
41 Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
42 Label size_less_than_8;
// pld = ARM cache-preload hint. The ladder below prefetches progressively
// further ahead (up to 256 bytes) as larger size thresholds are passed; on a
// 32-byte D-cache line the intermediate lines are prefetched explicitly too.
43 __ pld(MemOperand(src, 0));
45 __ cmp(chars, Operand(8));
46 __ b(lt, &size_less_than_8);
47 __ cmp(chars, Operand(32));
49 if (CpuFeatures::dcache_line_size() == 32) {
50 __ pld(MemOperand(src, 32));
52 __ cmp(chars, Operand(64));
54 __ pld(MemOperand(src, 64));
55 if (CpuFeatures::dcache_line_size() == 32) {
56 __ pld(MemOperand(src, 96));
58 __ cmp(chars, Operand(128));
60 __ pld(MemOperand(src, 128));
61 if (CpuFeatures::dcache_line_size() == 32) {
62 __ pld(MemOperand(src, 160));
64 __ pld(MemOperand(src, 192));
65 if (CpuFeatures::dcache_line_size() == 32) {
66 __ pld(MemOperand(src, 224));
68 __ cmp(chars, Operand(256));
// Main >=256-byte loop: keep prefetching 256 bytes ahead and move 64 bytes
// per iteration (two vld1/vst1 pairs of four d-registers = 2 x 32 bytes),
// post-indexing src/dest. `chars` is biased by -256 so the SetCC subtract can
// terminate the loop (the loop-exit branch itself was dropped in extraction).
70 __ sub(chars, chars, Operand(256));
73 __ pld(MemOperand(src, 256));
74 __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
75 if (CpuFeatures::dcache_line_size() == 32) {
76 __ pld(MemOperand(src, 256));
78 __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
79 __ sub(chars, chars, Operand(64), SetCC);
80 __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
81 __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
// Undo the -256 bias once the loop exits.
83 __ add(chars, chars, Operand(256));
// 128..255 bytes: copy a straight-line 128 bytes (two 64-byte groups).
86 __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
87 __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
88 __ sub(chars, chars, Operand(128));
89 __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
90 __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
91 __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
92 __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
93 __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
94 __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
95 __ cmp(chars, Operand(64));
// 64..127 bytes: one 64-byte group.
99 __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
100 __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
101 __ sub(chars, chars, Operand(64));
102 __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
103 __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
// 32..63 bytes: one 32-byte move (four d-registers).
106 __ cmp(chars, Operand(32));
108 __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
109 __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
110 __ sub(chars, chars, Operand(32));
// 17..32 bytes: one 16-byte move (two d-registers).
113 __ cmp(chars, Operand(16));
114 __ b(le, &_16_or_less);
115 __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
116 __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
117 __ sub(chars, chars, Operand(16));
// 9..16 bytes: one 8-byte move.
119 __ bind(&_16_or_less);
120 __ cmp(chars, Operand(8));
121 __ b(le, &_8_or_less);
122 __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
123 __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
124 __ sub(chars, chars, Operand(8));
// Final 1..8 bytes: rsb computes (8 - chars), and both pointers are backed up
// by that amount so a single 8-byte copy ends exactly at the buffer end. This
// deliberately re-copies a few bytes that were already copied — safe because
// the overall size was >= 8 on this path.
127 __ bind(&_8_or_less);
128 __ rsb(chars, chars, Operand(8));
129 __ sub(src, src, Operand(chars));
130 __ sub(dest, dest, Operand(chars));
131 __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
132 __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
// Scalar path for sizes < 8 (also the non-NEON fallback; the enclosing
// control flow was partially dropped by the extraction): copy whole words,
// then the halfword/byte tail.
136 __ bind(&size_less_than_8);
// bic(..., 0x3, SetCC): number of bytes in whole words; flags tell whether
// there are any (the paired branch is missing from this extraction).
138 __ bic(temp1, chars, Operand(0x3), SetCC);
140 __ ldr(temp1, MemOperand(src, 4, PostIndex));
141 __ str(temp1, MemOperand(dest, 4, PostIndex));
// temp2 = dest + (chars & ~3): end address of the word-copy loop below.
143 UseScratchRegisterScope temps(&masm);
144 Register temp2 = temps.Acquire();
147 __ bic(temp2, chars, Operand(0x3), SetCC);
149 __ add(temp2, dest, temp2);
152 __ ldr(temp1, MemOperand(src, 4, PostIndex));
153 __ str(temp1, MemOperand(dest, 4, PostIndex));
// chars << 31 with SetCC: carry-out = bit 1 of chars, N/Z reflect bit 0.
// So: cs => at least 2 trailing bytes -> copy a halfword; ne => odd count ->
// copy the final byte.
159 __ mov(chars, Operand(chars, LSL, 31), SetCC);
161 __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
162 __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
163 __ ldrb(temp1, MemOperand(src), ne);
164 __ strb(temp1, MemOperand(dest), ne);
// Finalize: materialize the code, verify it needs no relocation, flush the
// instruction cache, and seal the page as read+execute before handing it out.
168 masm.GetCode(
nullptr, &desc);
169 DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
171 Assembler::FlushICache(buffer, allocated);
172 CHECK(SetPermissions(page_allocator, buffer, allocated,
173 PageAllocator::kReadExecute));
174 return FUNCTION_CAST<MemCopyUint8Function>(buffer);
// Builds a widening-copy stub: copies `chars` uint8 values into a uint16
// buffer, zero-extending each byte (used e.g. to widen one-byte strings to
// two-byte strings). Same page-allocate / emit / flush / seal lifecycle as
// CreateMemCopyUint8Function above, returning the C++ `stub` fallback if the
// page allocation fails.
// NOTE(review): lossy extraction — loop labels, branches, and the non-NEON
// `else` scaffolding are missing here; compare with upstream before editing.
180 MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
181 MemCopyUint16Uint8Function stub) {
182 #if defined(USE_SIMULATOR) 186 size_t allocated = 0;
187 byte* buffer = AllocatePage(page_allocator,
189 if (buffer ==
nullptr)
return stub;
191 MacroAssembler masm(AssemblerOptions{}, buffer,
static_cast<int>(allocated));
// NEON path: widen 8 bytes per iteration.
196 if (CpuFeatures::IsSupported(NEON)) {
197 CpuFeatureScope scope(&masm, NEON);
// temp = chars & ~7 (count handled by the vector loop); chars keeps the
// remainder; temp becomes the dest end address (uint16 elements, hence LSL 1).
201 __ bic(temp, chars, Operand(0x7));
202 __ sub(chars, chars, Operand(temp));
203 __ add(temp, dest, Operand(temp, LSL, 1));
// Loop body: load 8 bytes, vmovl(NeonU8) zero-extends them to 8 halfwords in
// q0, store 16 bytes to dest.
206 __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
207 __ vmovl(NeonU8, q0, d0);
208 __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
// Tail (1..8 leftover bytes): back up src by (8 - chars) and dest by twice
// that, then redo one full 8-byte widen ending exactly at the buffer end
// (re-writes some already-written elements; same trick as the uint8 copy).
213 __ rsb(chars, chars, Operand(8));
214 __ sub(src, src, Operand(chars));
215 __ sub(dest, dest, Operand(chars, LSL, 1));
216 __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
217 __ vmovl(NeonU8, q0, d0);
218 __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
// Scalar (non-NEON) path: widen 4 bytes per iteration using the ARMv6 SIMD
// extract/pack instructions.
221 UseScratchRegisterScope temps(&masm);
224 Register temp2 = temps.Acquire();
// temp2 = dest + 2 * (chars & ~3): end address for the 4-bytes-at-a-time loop.
231 __ bic(temp2, chars, Operand(0x3));
232 __ add(temp2, dest, Operand(temp2, LSL, 1));
// Load 4 bytes as one word; uxtb16 extracts bytes 0/2 (temp3) and — with the
// rotate-by-8 form — bytes 1/3 (temp4); pkhbt/pkhtb re-interleave them into
// two words holding four zero-extended halfwords, stored to dest+0 / dest+4.
235 __ ldr(temp1, MemOperand(src, 4, PostIndex));
236 __ uxtb16(temp3, temp1);
237 __ uxtb16(temp4, temp1, 8);
238 __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
239 __ str(temp1, MemOperand(dest));
240 __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
241 __ str(temp1, MemOperand(dest, 4));
242 __ add(dest, dest, Operand(8));
// chars << 31, SetCC: carry = bit 1 (>=2 bytes left), N/Z = bit 0 (odd).
// The cs-guarded half below handles a 2-byte pair; same flag trick as the
// uint8 copy above (the cs condition on the ldrh was dropped in extraction —
// TODO confirm against upstream).
246 __ mov(chars, Operand(chars, LSL, 31), SetCC);
// Widen a pair of bytes: extract the high byte, shift into the upper
// halfword, then uxtab adds the zero-extended low byte — one word holding two
// uint16s, stored with post-increment.
248 __ ldrh(temp1, MemOperand(src, 2, PostIndex));
249 __ uxtb(temp3, temp1, 8);
250 __ mov(temp3, Operand(temp3, LSL, 16));
251 __ uxtab(temp3, temp3, temp1);
252 __ str(temp3, MemOperand(dest, 4, PostIndex));
// Final odd byte: ldrb zero-extends, strh writes it as a uint16.
254 __ ldrb(temp1, MemOperand(src), ne);
255 __ strh(temp1, MemOperand(dest), ne);
// Finalize exactly as in CreateMemCopyUint8Function: emit, flush I-cache,
// seal page read+execute, cast to the function-pointer type.
260 masm.GetCode(
nullptr, &desc);
262 Assembler::FlushICache(buffer, allocated);
263 CHECK(SetPermissions(page_allocator, buffer, allocated,
264 PageAllocator::kReadExecute));
265 return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
// Builds a double -> double square-root stub. Note the failure behavior
// differs from the memcpy builders: there is no C++ fallback parameter, so an
// allocation failure returns nullptr (callers must handle that).
// NOTE(review): the actual square-root instruction (presumably vsqrt d0, d0)
// is among the lines dropped by this extraction — confirm against upstream
// before treating this listing as complete.
270 UnaryMathFunction CreateSqrtFunction() {
271 #if defined(USE_SIMULATOR) 275 size_t allocated = 0;
276 byte* buffer = AllocatePage(page_allocator,
278 if (buffer ==
nullptr)
return nullptr;
280 MacroAssembler masm(AssemblerOptions{}, buffer,
static_cast<int>(allocated));
// Move the incoming double into d0 per the calling convention, (sqrt — line
// missing), and move d0 back out as the result.
282 __ MovFromFloatParameter(d0);
284 __ MovToFloatResult(d0);
// Standard finalize sequence: emit, check no relocation needed, flush
// I-cache, seal the page read+execute.
288 masm.GetCode(
nullptr, &desc);
289 DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
291 Assembler::FlushICache(buffer, allocated);
292 CHECK(SetPermissions(page_allocator, buffer, allocated,
293 PageAllocator::kReadExecute));
294 return FUNCTION_CAST<UnaryMathFunction>(buffer);
303 #endif // V8_TARGET_ARCH_ARM
virtual void * GetRandomMmapAddr()=0