#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"

namespace v8 {
namespace internal {

#define __ masm.

#if defined(V8_HOST_ARCH_MIPS)

MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return stub;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
  Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop,
      skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref,
      ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
  // The size of each prefetch, and the largest prefetch the hardware may
  // actually perform.
  uint32_t pref_chunk = 32;
  uint32_t max_pref_size = 128;
  DCHECK(pref_chunk < max_pref_size);

  // pref_limit is set based on the fact that we never use an offset greater
  // than 5 on a store prefetch and that a single prefetch can never be larger
  // than max_pref_size.
  uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
  int32_t pref_hint_load = kPrefHintLoadStreamed;
  int32_t pref_hint_store = kPrefHintPrepareForStore;
  uint32_t loadstore_chunk = 4;  // Bytes moved per load/store.

  // If the real size of a prefetch is greater than max_pref_size and the
  // kPrefHintPrepareForStore hint is used, the code will not work correctly.
  DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
         pref_chunk * 4 >= max_pref_size);
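
  // Register conventions in this stub: a0 = dst, a1 = src, a2 = size in bytes
  // (the standard MIPS argument registers). If there are fewer than two words
  // to copy, jump straight to the byte-by-byte loop at lastb; the
  // word-oriented machinery below is not worth setting up.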
  __ slti(a6, a2, 2 * loadstore_chunk);
  __ bne(a6, zero_reg, &lastb);
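
  // If src and dst have different word alignments, take the unaligned path.
  // If they share the same (mis)alignment, copy a few leading bytes so both
  // become word aligned, then fall through to the aligned path.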
  __ xor_(t8, a1, a0);
  __ andi(t8, t8, loadstore_chunk - 1);  // t8 is the src/dst word displacement.
  __ bne(t8, zero_reg, &unaligned);
  __ subu(a3, zero_reg, a0);  // In delay slot.

  __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to word-align a0/a1.
  __ beq(a3, zero_reg, &aligned);  // Already aligned.
  if (kArchEndian == kLittle) {
    __ lwr(t8, MemOperand(a1));
    __ swr(t8, MemOperand(a0));
  } else {
    __ lwl(t8, MemOperand(a1));
    __ swl(t8, MemOperand(a0));
  }
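
  // dst and src are now word aligned. The main loop copies 64 bytes
  // (16 words) per iteration, so first work out how many bytes will be left
  // over once all full 64-byte chunks have been moved.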
  __ bind(&aligned);
  __ andi(t8, a2, 0x3F);  // t8 is the byte count left after the 64-byte chunks.
  __ beq(a2, t8, &chkw);  // Less than 64 bytes?
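
  // When the kPrefHintPrepareForStore hint is in use, a store prefetch must
  // never reach past the end of the destination buffer, so compute the last
  // address at which such a prefetch is still safe.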
  if (pref_hint_store == kPrefHintPrepareForStore) {
    __ addu(a4, a0, a2);          // a4 is the "past the end" dst address.
    __ Subu(t9, a4, pref_limit);  // t9 is the "last safe prefetch" address.
  }
  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
  if (pref_hint_store != kPrefHintPrepareForStore) {
    __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
  }
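
  // Main aligned loop: each iteration loads 16 words into a4-a7/t0-t3 in two
  // batches of eight, stores them, and prefetches ahead on both the source
  // and the destination.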
  __ bind(&loop16w);
  __ Lw(a4, MemOperand(a1));

  if (pref_hint_store == kPrefHintPrepareForStore) {
    __ sltu(v1, t9, a0);  // If a0 > t9, don't use the next store prefetch.
    __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
  }
  __ Lw(a5, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

  __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
  __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
  __ bind(&skip_pref);
  __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
  __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
  __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
  __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
  __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
  __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
  __ Sw(a4, MemOperand(a0));
  __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
  __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
  __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
  __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
  __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
  __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
  __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
  __ Lw(a4, MemOperand(a1, 8, loadstore_chunk));
  __ Lw(a5, MemOperand(a1, 9, loadstore_chunk));
  __ Lw(a6, MemOperand(a1, 10, loadstore_chunk));
  __ Lw(a7, MemOperand(a1, 11, loadstore_chunk));
  __ Lw(t0, MemOperand(a1, 12, loadstore_chunk));
  __ Lw(t1, MemOperand(a1, 13, loadstore_chunk));
  __ Lw(t2, MemOperand(a1, 14, loadstore_chunk));
  __ Lw(t3, MemOperand(a1, 15, loadstore_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
  __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
  __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
  __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
  __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
  __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
  __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
  __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
  __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
  __ addiu(a0, a0, 16 * loadstore_chunk);
  __ bne(a0, a3, &loop16w);
  __ addiu(a1, a1, 16 * loadstore_chunk);
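
  // Fewer than 64 bytes remain. Copy one more 32-byte block if present, then
  // fall through to the word-at-a-time loop.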
  __ bind(&chkw);
  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ andi(t8, a2, 0x1F);
  __ beq(a2, t8, &chk1w);  // Less than 32 bytes?
  __ Lw(a4, MemOperand(a1));
  __ Lw(a5, MemOperand(a1, 1, loadstore_chunk));
  __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
  __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
  __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
  __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
  __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
  __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
  __ addiu(a1, a1, 8 * loadstore_chunk);
  __ Sw(a4, MemOperand(a0));
  __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
  __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
  __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
  __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
  __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
  __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
  __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
  __ addiu(a0, a0, 8 * loadstore_chunk);
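
  // Fewer than 32 bytes remain; copy the remaining full words one at a time.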
  __ bind(&chk1w);
  __ andi(a2, t8, loadstore_chunk - 1);
  __ beq(a2, t8, &lastb);
  __ bind(&wordCopy_loop);
  __ Lw(a7, MemOperand(a1));
  __ addiu(a0, a0, loadstore_chunk);
  __ addiu(a1, a1, loadstore_chunk);
  __ bne(a0, a3, &wordCopy_loop);
  __ Sw(a7, MemOperand(a0, -1, loadstore_chunk));
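
  // Copy any trailing bytes (fewer than one word) one byte at a time, then
  // return.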
  __ bind(&lastb);
  __ Branch(&leave, le, a2, Operand(zero_reg));
  __ bind(&lastbloop);
  __ Lb(v1, MemOperand(a1));
  __ bne(a0, a3, &lastbloop);
  __ Sb(v1, MemOperand(a0, -1));  // In delay slot.
  __ jr(ra);
  __ nop();
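
  // Unaligned path: src and dst have different word alignments. Align the
  // destination first; every source word is then assembled with an lwr/lwl
  // (or lwl/lwr on big-endian) pair before being stored with a normal store.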
  __ bind(&unaligned);
  __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
  __ beq(a3, zero_reg, &ua_chk16w);
  if (kArchEndian == kLittle) {
    __ lwr(v1, MemOperand(a1));
    __ lwl(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ swr(v1, MemOperand(a0));
  } else {
    __ lwl(v1, MemOperand(a1));
    __ lwr(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ swl(v1, MemOperand(a0));
  }
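
  // Unaligned main loop: the same 64-bytes-per-iteration structure as the
  // aligned loop, except that each load is an lwr/lwl pair reading the two
  // halves of a misaligned source word.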
  __ bind(&ua_chk16w);
  __ andi(t8, a2, 0x3F);
  __ beq(a2, t8, &ua_chkw);  // Less than 64 bytes?
  if (pref_hint_store == kPrefHintPrepareForStore) {
    __ addu(a4, a0, a2);          // a4 is the "past the end" dst address.
    __ Subu(t9, a4, pref_limit);  // t9 is the "last safe prefetch" address.
  }
  __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
  __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
  if (pref_hint_store != kPrefHintPrepareForStore) {
    __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
  }
  __ bind(&ua_loop16w);
  if (kArchEndian == kLittle) {
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    __ lwr(a4, MemOperand(a1));
    __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
    __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use the next store prefetch.
      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&ua_skip_pref);
    __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
    __ lwl(a4,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a5,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a6,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a7,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t0,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
  } else {
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    __ lwl(a4, MemOperand(a1));
    __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
    __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use the next store prefetch.
      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&ua_skip_pref);
    __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
    __ lwr(a4,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a5,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a6,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a7,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t0,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t1,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t2,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t3,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
  }
  __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
  __ Sw(a4, MemOperand(a0));
  __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
  __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
  __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
  __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
  __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
  __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
  __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
  if (kArchEndian == kLittle) {
    __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
    __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
    __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
    __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
    __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
    __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
    __ lwl(a4,
           MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a5,
           MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a6,
           MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a7,
           MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t0,
           MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
  } else {
    __ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
    __ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
    __ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
    __ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
    __ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
    __ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
    __ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
    __ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
    __ lwr(a4,
           MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a5,
           MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a6,
           MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a7,
           MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t0,
           MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t1,
           MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t2,
           MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t3,
           MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
  }
  __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
  __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
  __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
  __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
  __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
  __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
  __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
  __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
  __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
  __ addiu(a0, a0, 16 * loadstore_chunk);
  __ bne(a0, a3, &ua_loop16w);
  __ addiu(a1, a1, 16 * loadstore_chunk);
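
  // Fewer than 64 unaligned bytes remain. Copy one more 32-byte block if
  // present, again using lwr/lwl pairs for the loads.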
  __ bind(&ua_chkw);
  __ Pref(pref_hint_load, MemOperand(a1));
  __ andi(t8, a2, 0x1F);
  __ beq(a2, t8, &ua_chk1w);  // Less than 32 bytes?
  if (kArchEndian == kLittle) {
    __ lwr(a4, MemOperand(a1));
    __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
    __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
    __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
    __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
    __ lwl(a4,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a5,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a6,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(a7,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t0,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
  } else {
    __ lwl(a4, MemOperand(a1));
    __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
    __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
    __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
    __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
    __ lwr(a4,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a5,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a6,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(a7,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t0,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t1,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t2,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwr(t3,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
  }
  __ addiu(a1, a1, 8 * loadstore_chunk);
  __ Sw(a4, MemOperand(a0));
  __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
  __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
  __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
  __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
  __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
  __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
  __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
  __ addiu(a0, a0, 8 * loadstore_chunk);
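
  // Fewer than 32 bytes remain; copy the remaining full words one at a time
  // with unaligned loads.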
  __ bind(&ua_chk1w);
  __ andi(a2, t8, loadstore_chunk - 1);
  __ beq(a2, t8, &ua_smallCopy);
  __ bind(&ua_wordCopy_loop);
  if (kArchEndian == kLittle) {
    __ lwr(v1, MemOperand(a1));
    __ lwl(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
  } else {
    __ lwl(v1, MemOperand(a1));
    __ lwr(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
  }
  __ addiu(a0, a0, loadstore_chunk);
  __ addiu(a1, a1, loadstore_chunk);
  __ bne(a0, a3, &ua_wordCopy_loop);
  __ Sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
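
  // Copy the final few bytes (fewer than one word) one byte at a time.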
  __ bind(&ua_smallCopy);
  __ beq(a2, zero_reg, &leave);

  __ bind(&ua_smallCopy_loop);
  __ Lb(v1, MemOperand(a1));
  __ bne(a0, a3, &ua_smallCopy_loop);
  __ Sb(v1, MemOperand(a0, -1));  // In delay slot.

  __ bind(&leave);
  __ jr(ra);
  __ nop();
  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif  // V8_HOST_ARCH_MIPS
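
// A minimal usage sketch (not part of this file; the fallback name below is
// an assumption): the embedder creates the optimized routine once, keeping
// the passed-in C++ stub as the result under the simulator or on allocation
// failure, and then calls it like an ordinary memcpy for uint8_t buffers:
//
//   MemCopyUint8Function fast_copy = CreateMemCopyUint8Function(&SlowMemCopy);
//   fast_copy(dest, src, size);  // dst arrives in a0, src in a1, size in a2.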
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  size_t allocated = 0;
  byte* buffer = AllocatePage(page_allocator,
                              page_allocator->GetRandomMmapAddr(), &allocated);
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
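
  // The generated thunk follows the MIPS floating-point calling convention:
  // the incoming double argument is placed in f12, sqrt_d writes the result
  // to f0, and MovToFloatResult exposes f0 as the return value.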
  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();
  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));

  Assembler::FlushICache(buffer, allocated);
  CHECK(SetPermissions(page_allocator, buffer, allocated,
                       PageAllocator::kReadExecute));
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64