V8 API Reference, 7.2.502.16 (for Deno 0.2.4)

codegen-ia32.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

#define __ masm.
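// V8 codegen convention: `__` abbreviates the active assembler, so
// `__ movsd(...)` below expands to `masm.movsd(...)`.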

UnaryMathFunction CreateSqrtFunction() {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
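    // The result is stored back to the stack and reloaded below because
    // the IA-32 cdecl convention returns doubles in x87 st(0), not in an
    // SSE register.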
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
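
// Usage sketch (hypothetical; assumes UnaryMathFunction is declared in
// src/codegen.h as `double (*UnaryMathFunction)(double)`):
//
//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
//   double r = (fast_sqrt != nullptr) ? fast_sqrt(2.0)   // generated stub
//                                     : std::sqrt(2.0);  // fallback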


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
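// The main loop below moves 64 bytes per iteration through xmm0-xmm3; the
// move_last_63/move_last_31 tails then peel off remaining 32- and 16-byte
// blocks, leaving at most 15 bytes for the caller's move_last_15 handler.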
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};
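// LabelConverter turns a Label's buffer-relative offset into an absolute
// 32-bit code address, so the dispatch tables below can embed raw handler
// addresses via dd(). This is only valid because the buffer never moves.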

MemMoveFunction CreateMemMoveFunction() {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The 32-bit cdecl calling convention passes all arguments on the stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When the distance between src and dst is less than this, use a more
  // careful (and slightly slower) implementation.
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below;
  // do not just change them and hope things will work!
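  // Resulting size classes: 0..8 bytes go to the small handlers, 9..63
  // bytes to the medium handlers, and 64+ bytes through the 64-byte main
  // loop in MemMoveEmitMainLoop.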

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);

  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
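    // Up to 15 of the first 16 bytes may be copied again by the aligned
    // loop below. The double copy is safe here because src is at least
    // kMinMoveDistance (16) bytes ahead of dst, so no source byte has been
    // overwritten yet.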
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at the end of the region.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);

    // Less than kMinMoveDistance offset between dst and src.
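    // With less than 16 bytes between dst and src, the realign-and-recopy
    // scheme above would re-read bytes that have already been overwritten,
    // so this path copies one byte at a time until dst is 16-byte aligned.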
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }

  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Find distance to alignment: dst & 0xF.
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at the beginning of the region.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
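    // Loading everything into registers before storing anything makes each
    // handler safe for overlapping regions: no source byte can be
    // clobbered by an earlier store.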
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
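    // eax = (count - 1) >> 4 selects the handler: 9..16 -> 0 (f9_16),
    // 17..32 -> 1 (f17_32), 33..48 -> 2 (f33_48), 49..63 -> 3 (f49_63).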
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
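    // The table is indexed directly by count (0..8), so f5_8 is entered
    // four times to cover sizes 5, 6, 7, and 8.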

    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}
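
// Usage sketch (hypothetical; assumes MemMoveFunction is declared in
// src/codegen.h as `void (*MemMoveFunction)(void* dest, const void* src,
// size_t size)`):
//
//   MemMoveFunction mem_move = CreateMemMoveFunction();
//   char scratch[64] = {0};
//   if (mem_move != nullptr) {
//     mem_move(scratch, scratch + 8, 16);  // overlapping regions are fine
//   }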


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32