V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
codegen-arm.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include <memory>

#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

#define __ masm.

#if defined(V8_HOST_ARCH_ARM)

MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return stub;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&masm, NEON);
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
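    // NEON fast path: prefetch ahead with pld (issuing extra prefetches when
    // the data cache line is only 32 bytes) and dispatch on the size in
    // `chars`. Copies of 256 bytes or more run the main loop, which moves
    // 64 bytes per iteration using two 32-byte vld1/vst1 pairs; smaller
    // sizes fall through a cascade of 128/64/32/16/8-byte block copies, and
    // the last (up to) 8 bytes are finished with an overlapping 8-byte copy
    // at `_8_or_less`. Sizes below 8 bytes skip the NEON block copies
    // entirely and jump to `size_less_than_8`.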
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::dcache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    UseScratchRegisterScope temps(&masm);
    Register temp2 = temps.Acquire();
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

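  // At most 3 bytes remain here. Shifting `chars` left by 31 moves bit 1
  // into the carry flag and leaves Z clear exactly when bit 0 was set, so the
  // conditional halfword copy below runs when 2 bytes remain (cs) and the
  // conditional byte copy runs when 1 byte remains (ne).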
  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
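
// For reference, the stub generated above behaves like a plain forward byte
// copy. A portable sketch of the observable behaviour (the helper name is
// illustrative; the parameter order is assumed from the MemCopyUint8Function
// typedef in src/codegen.h, with dest in r0, src in r1 and the byte count in
// r2 per the ARM calling convention):
//
//   #include <cstddef>
//   #include <cstdint>
//
//   void MemCopyUint8Sketch(uint8_t* dest, const uint8_t* src, size_t chars) {
//     for (size_t i = 0; i < chars; i++) dest[i] = src[i];
//   }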


// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return stub;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    CpuFeatureScope scope(&masm, NEON);
    Register temp = r3;
    Label loop;

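    // temp = chars & ~7 is the number of source bytes in complete groups of
    // eight; chars keeps the 0-7 remaining bytes, and temp is then turned
    // into the destination address at which the full groups end (each source
    // byte becomes two destination bytes). The loop widens 8 bytes per
    // iteration with vmovl; the final bytes are redone below with an
    // overlapping 8-byte load, which is why the caller must pass at least 8.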
    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    UseScratchRegisterScope temps(&masm);

    Register temp1 = r3;
    Register temp2 = temps.Acquire();
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

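    // Non-NEON fallback using ARMv6 media instructions: each iteration loads
    // four source bytes, splits them into the even bytes (uxtb16) and the odd
    // bytes (uxtb16 with a rotation of 8), re-interleaves them with
    // pkhbt/pkhtb into two words of zero-extended halfwords, and stores eight
    // destination bytes. The tail uses the same `chars << 31` flag trick as
    // above to widen a final pair of bytes and/or a single trailing byte.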
    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, temp1);
    __ uxtb16(temp4, temp1, 8);
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, temp1, 8);
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, temp1);
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
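
// For reference, the stub generated above zero-extends each source byte into
// a 16-bit destination element; as noted above, the byte count must be at
// least 8. A portable sketch (the helper name is illustrative; the signature
// is assumed from the MemCopyUint16Uint8Function typedef in src/codegen.h):
//
//   #include <cstddef>
//   #include <cstdint>
//
//   void MemCopyUint16Uint8Sketch(uint16_t* dest, const uint8_t* src,
//                                 size_t chars) {
//     for (size_t i = 0; i < chars; i++) dest[i] = static_cast<uint16_t>(src[i]);
//   }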
#endif

UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
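
// For reference, the generated code simply applies the VFP square root to its
// double argument, i.e. it is equivalent to the following (assuming the
// UnaryMathFunction typedef double (*)(double) in src/codegen.h):
//
//   #include <cmath>
//
//   double SqrtSketch(double x) { return std::sqrt(x); }  // name illustrative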

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM