V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
macro-assembler-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 
7 #if V8_TARGET_ARCH_ARM
8 
9 #include "src/assembler-inl.h"
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/base/utils/random-number-generator.h"
13 #include "src/bootstrapper.h"
14 #include "src/callable.h"
15 #include "src/code-factory.h"
16 #include "src/code-stubs.h"
17 #include "src/counters.h"
18 #include "src/debug/debug.h"
19 #include "src/double.h"
20 #include "src/external-reference-table.h"
21 #include "src/frames-inl.h"
22 #include "src/macro-assembler.h"
23 #include "src/objects-inl.h"
24 #include "src/register-configuration.h"
25 #include "src/runtime/runtime.h"
26 #include "src/snapshot/embedded-data.h"
27 #include "src/snapshot/snapshot.h"
28 #include "src/wasm/wasm-code-manager.h"
29 
30 // Satisfy cpplint check, but don't include platform-specific header. It is
31 // included recursively via macro-assembler.h.
32 #if 0
33 #include "src/arm/macro-assembler-arm.h"
34 #endif
35 
36 namespace v8 {
37 namespace internal {
38 
39 MacroAssembler::MacroAssembler(Isolate* isolate,
40  const AssemblerOptions& options, void* buffer,
41  int size, CodeObjectRequired create_code_object)
42  : TurboAssembler(isolate, options, buffer, size, create_code_object) {
43  if (create_code_object == CodeObjectRequired::kYes) {
44  // Unlike TurboAssembler, which can be used off the main thread and may not
45  // allocate, macro assembler creates its own copy of the self-reference
46  // marker in order to disambiguate between self-references during nested
47  // code generation (e.g.: codegen of the current object triggers stub
48  // compilation through CodeStub::GetCode()).
49  code_object_ = Handle<HeapObject>::New(
50  *isolate->factory()->NewSelfReferenceMarker(), isolate);
51  }
52 }
53 
54 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
55  Register exclusion1,
56  Register exclusion2,
57  Register exclusion3) const {
58  int bytes = 0;
59  RegList exclusions = 0;
60  if (exclusion1 != no_reg) {
61  exclusions |= exclusion1.bit();
62  if (exclusion2 != no_reg) {
63  exclusions |= exclusion2.bit();
64  if (exclusion3 != no_reg) {
65  exclusions |= exclusion3.bit();
66  }
67  }
68  }
69 
70  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
71 
72  bytes += NumRegs(list) * kPointerSize;
73 
74  if (fp_mode == kSaveFPRegs) {
75  bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
76  }
77 
78  return bytes;
79 }
80 
81 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
82  Register exclusion2, Register exclusion3) {
83  int bytes = 0;
84  RegList exclusions = 0;
85  if (exclusion1 != no_reg) {
86  exclusions |= exclusion1.bit();
87  if (exclusion2 != no_reg) {
88  exclusions |= exclusion2.bit();
89  if (exclusion3 != no_reg) {
90  exclusions |= exclusion3.bit();
91  }
92  }
93  }
94 
95  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
96  stm(db_w, sp, list);
97 
98  bytes += NumRegs(list) * kPointerSize;
99 
100  if (fp_mode == kSaveFPRegs) {
101  SaveFPRegs(sp, lr);
102  bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
103  }
104 
105  return bytes;
106 }
107 
108 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
109  Register exclusion2, Register exclusion3) {
110  int bytes = 0;
111  if (fp_mode == kSaveFPRegs) {
112  RestoreFPRegs(sp, lr);
113  bytes += DwVfpRegister::NumRegisters() * DwVfpRegister::kSizeInBytes;
114  }
115 
116  RegList exclusions = 0;
117  if (exclusion1 != no_reg) {
118  exclusions |= exclusion1.bit();
119  if (exclusion2 != no_reg) {
120  exclusions |= exclusion2.bit();
121  if (exclusion3 != no_reg) {
122  exclusions |= exclusion3.bit();
123  }
124  }
125  }
126 
127  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
128  ldm(ia_w, sp, list);
129 
130  bytes += NumRegs(list) * kPointerSize;
131 
132  return bytes;
133 }
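// Usage sketch (illustrative, not part of the original file): the three
// helpers above form a matched set around a call into C code. 'masm' is a
// hypothetical TurboAssembler*; r0 is excluded here because it will carry the
// call's result.
//
//   int bytes = masm->PushCallerSaved(kSaveFPRegs, r0);
//   // ... emit the call ...
//   masm->PopCallerSaved(kSaveFPRegs, r0);
//
// RequiredStackSizeForCallerSaved(kSaveFPRegs, r0) returns the same byte count
// without emitting any code, which is useful for stack bookkeeping.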
134 
135 void TurboAssembler::LoadFromConstantsTable(Register destination,
136  int constant_index) {
137  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
138 
139  // The ldr call below could end up clobbering ip when the offset does not fit
140  // into 12 bits (and thus needs to be loaded from the constant pool). In that
141  // case, we need to be extra-careful and temporarily use another register as
142  // the target.
143 
144  const uint32_t offset =
145  FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
146  const bool could_clobber_ip = !is_uint12(offset);
147 
148  Register reg = destination;
149  if (could_clobber_ip) {
150  Push(r7);
151  reg = r7;
152  }
153 
154  LoadRoot(reg, RootIndex::kBuiltinsConstantsTable);
155  ldr(destination, MemOperand(reg, offset));
156 
157  if (could_clobber_ip) {
158  DCHECK_EQ(reg, r7);
159  Pop(r7);
160  }
161 }
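// Worked example (illustrative): on 32-bit ARM kPointerSize is 4 and
// kHeapObjectTag is 1; assuming FixedArray::kHeaderSize is 8, the offset
// computed above is 8 + 4 * constant_index - 1. is_uint12(offset) holds as
// long as the offset fits the 12-bit ldr immediate (offset < 4096), i.e. up to
// constant_index == 1022. From index 1023 onwards the ldr may need a
// constant-pool load that clobbers ip, so the r7 spill path is used instead.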
162 
163 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
164  ldr(destination, MemOperand(kRootRegister, offset));
165 }
166 
167 void TurboAssembler::LoadRootRegisterOffset(Register destination,
168  intptr_t offset) {
169  if (offset == 0) {
170  Move(destination, kRootRegister);
171  } else {
172  add(destination, kRootRegister, Operand(offset));
173  }
174 }
175 
176 void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
177 
178 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
179  Condition cond) {
180  mov(pc, Operand(target, rmode), LeaveCC, cond);
181 }
182 
183 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
184  Condition cond) {
185  DCHECK(!RelocInfo::IsCodeTarget(rmode));
186  Jump(static_cast<intptr_t>(target), rmode, cond);
187 }
188 
189 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
190  Condition cond) {
191  DCHECK(RelocInfo::IsCodeTarget(rmode));
192  if (FLAG_embedded_builtins) {
193  int builtin_index = Builtins::kNoBuiltinId;
194  bool target_is_isolate_independent_builtin =
195  isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
196  Builtins::IsIsolateIndependent(builtin_index);
197  if (target_is_isolate_independent_builtin &&
198  options().use_pc_relative_calls_and_jumps) {
199  int32_t code_target_index = AddCodeTarget(code);
200  b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
201  return;
202  } else if (root_array_available_ && options().isolate_independent_code) {
203  UseScratchRegisterScope temps(this);
204  Register scratch = temps.Acquire();
205  IndirectLoadConstant(scratch, code);
206  add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
207  Jump(scratch, cond);
208  return;
209  } else if (target_is_isolate_independent_builtin &&
210  options().inline_offheap_trampolines) {
211  // Inline the trampoline.
212  RecordCommentForOffHeapTrampoline(builtin_index);
213  EmbeddedData d = EmbeddedData::FromBlob();
214  Address entry = d.InstructionStartOfBuiltin(builtin_index);
215  // Use ip directly instead of using UseScratchRegisterScope, as we do not
216  // preserve scratch registers across calls.
217  mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
218  Jump(ip, cond);
219  return;
220  }
221  }
222  // 'code' is always generated ARM code, never THUMB code
223  Jump(static_cast<intptr_t>(code.address()), rmode, cond);
224 }
225 
226 void TurboAssembler::Call(Register target, Condition cond) {
227  // Block constant pool for the call instruction sequence.
228  BlockConstPoolScope block_const_pool(this);
229  blx(target, cond);
230 }
231 
232 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
233  TargetAddressStorageMode mode,
234  bool check_constant_pool) {
235  // Check if we have to emit the constant pool before we block it.
236  if (check_constant_pool) MaybeCheckConstPool();
237  // Block constant pool for the call instruction sequence.
238  BlockConstPoolScope block_const_pool(this);
239 
240  bool old_predictable_code_size = predictable_code_size();
241  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
242  set_predictable_code_size(true);
243  }
244 
245  // Use ip directly instead of using UseScratchRegisterScope, as we do not
246  // preserve scratch registers across calls.
247 
248  // Call sequence on V7 or later may be:
249  // movw ip, #... @ call address low 16
250  // movt ip, #... @ call address high 16
251  // blx ip
252  // @ return address
253  // Or for pre-V7 or values that may be back-patched
254  // to avoid ICache flushes:
255  // ldr ip, [pc, #...] @ call address
256  // blx ip
257  // @ return address
258 
259  mov(ip, Operand(target, rmode));
260  blx(ip, cond);
261 
262  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
263  set_predictable_code_size(old_predictable_code_size);
264  }
265 }
266 
267 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
268  Condition cond, TargetAddressStorageMode mode,
269  bool check_constant_pool) {
270  DCHECK(RelocInfo::IsCodeTarget(rmode));
271  if (FLAG_embedded_builtins) {
272  int builtin_index = Builtins::kNoBuiltinId;
273  bool target_is_isolate_independent_builtin =
274  isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
275  Builtins::IsIsolateIndependent(builtin_index);
276  if (target_is_isolate_independent_builtin &&
277  options().use_pc_relative_calls_and_jumps) {
278  int32_t code_target_index = AddCodeTarget(code);
279  bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
280  return;
281  } else if (root_array_available_ && options().isolate_independent_code) {
282  // Use ip directly instead of using UseScratchRegisterScope, as we do not
283  // preserve scratch registers across calls.
284  IndirectLoadConstant(ip, code);
285  add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
286  Call(ip, cond);
287  return;
288  } else if (target_is_isolate_independent_builtin &&
289  options().inline_offheap_trampolines) {
290  // Inline the trampoline.
291  RecordCommentForOffHeapTrampoline(builtin_index);
292  EmbeddedData d = EmbeddedData::FromBlob();
293  Address entry = d.InstructionStartOfBuiltin(builtin_index);
294  // Use ip directly instead of using UseScratchRegisterScope, as we do not
295  // preserve scratch registers across calls.
296  mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
297  Call(ip, cond);
298  return;
299  }
300  }
301  // 'code' is always generated ARM code, never THUMB code
302  Call(code.address(), rmode, cond, mode);
303 }
304 
305 void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }
306 
307 void TurboAssembler::Drop(int count, Condition cond) {
308  if (count > 0) {
309  add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
310  }
311 }
312 
313 void TurboAssembler::Drop(Register count, Condition cond) {
314  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
315 }
316 
317 void TurboAssembler::Ret(int drop, Condition cond) {
318  Drop(drop, cond);
319  Ret(cond);
320 }
321 
322 void TurboAssembler::Call(Label* target) { bl(target); }
323 
324 void TurboAssembler::Push(Handle<HeapObject> handle) {
325  UseScratchRegisterScope temps(this);
326  Register scratch = temps.Acquire();
327  mov(scratch, Operand(handle));
328  push(scratch);
329 }
330 
331 void TurboAssembler::Push(Smi smi) {
332  UseScratchRegisterScope temps(this);
333  Register scratch = temps.Acquire();
334  mov(scratch, Operand(smi));
335  push(scratch);
336 }
337 
338 void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }
339 
340 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
341  if (FLAG_embedded_builtins) {
342  if (root_array_available_ && options().isolate_independent_code) {
343  IndirectLoadConstant(dst, value);
344  return;
345  }
346  }
347  mov(dst, Operand(value));
348 }
349 
350 void TurboAssembler::Move(Register dst, ExternalReference reference) {
351  if (FLAG_embedded_builtins) {
352  if (root_array_available_ && options().isolate_independent_code) {
353  IndirectLoadExternalReference(dst, reference);
354  return;
355  }
356  }
357  mov(dst, Operand(reference));
358 }
359 
360 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
361  if (dst != src) {
362  mov(dst, src, LeaveCC, cond);
363  }
364 }
365 
366 void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
367  Condition cond) {
368  if (dst != src) {
369  vmov(dst, src, cond);
370  }
371 }
372 
373 void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
374  Condition cond) {
375  if (dst != src) {
376  vmov(dst, src, cond);
377  }
378 }
379 
380 void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
381  if (dst != src) {
382  vmov(dst, src);
383  }
384 }
385 
386 void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
387  DCHECK(srcdst0 != srcdst1);
388  UseScratchRegisterScope temps(this);
389  Register scratch = temps.Acquire();
390  mov(scratch, srcdst0);
391  mov(srcdst0, srcdst1);
392  mov(srcdst1, scratch);
393 }
394 
395 void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
396  DCHECK(srcdst0 != srcdst1);
397  DCHECK(VfpRegisterIsAvailable(srcdst0));
398  DCHECK(VfpRegisterIsAvailable(srcdst1));
399 
400  if (CpuFeatures::IsSupported(NEON)) {
401  vswp(srcdst0, srcdst1);
402  } else {
403  UseScratchRegisterScope temps(this);
404  DwVfpRegister scratch = temps.AcquireD();
405  vmov(scratch, srcdst0);
406  vmov(srcdst0, srcdst1);
407  vmov(srcdst1, scratch);
408  }
409 }
410 
411 void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
412  DCHECK(srcdst0 != srcdst1);
413  vswp(srcdst0, srcdst1);
414 }
415 
416 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
417  Register srcA, Condition cond) {
418  if (CpuFeatures::IsSupported(ARMv7)) {
419  CpuFeatureScope scope(this, ARMv7);
420  mls(dst, src1, src2, srcA, cond);
421  } else {
422  UseScratchRegisterScope temps(this);
423  Register scratch = temps.Acquire();
424  DCHECK(srcA != scratch);
425  mul(scratch, src1, src2, LeaveCC, cond);
426  sub(dst, srcA, scratch, LeaveCC, cond);
427  }
428 }
429 
430 
431 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
432  Condition cond) {
433  if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
434  src2.immediate() == 0) {
435  mov(dst, Operand::Zero(), LeaveCC, cond);
436  } else if (!(src2.InstructionsRequired(this) == 1) &&
437  !src2.MustOutputRelocInfo(this) &&
438  CpuFeatures::IsSupported(ARMv7) &&
439  base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
440  CpuFeatureScope scope(this, ARMv7);
441  ubfx(dst, src1, 0,
442  WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
443  } else {
444  and_(dst, src1, src2, LeaveCC, cond);
445  }
446 }
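// Worked example (illustrative): And(dst, src1, Operand(0xFFFF), cond) on an
// ARMv7 target takes the ubfx branch above, because 0xFFFF cannot be encoded
// as a single ARM immediate while 0xFFFF + 1 == 0x10000 is a power of two.
// WhichPowerOf2(0x10000) == 16, so the emitted instruction is effectively
//   ubfx dst, src1, #0, #16
// which extracts the low 16 bits in one instruction instead of materializing
// the mask first.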
447 
448 
449 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
450  Condition cond) {
451  DCHECK_LT(lsb, 32);
452  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
453  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
454  and_(dst, src1, Operand(mask), LeaveCC, cond);
455  if (lsb != 0) {
456  mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
457  }
458  } else {
459  CpuFeatureScope scope(this, ARMv7);
460  ubfx(dst, src1, lsb, width, cond);
461  }
462 }
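// Worked example (illustrative) of the pre-ARMv7 fallback above: extracting
// width == 8 bits starting at lsb == 4 builds
//   mask = (1 << 12) - 1 - ((1 << 4) - 1) = 0xFFF - 0xF = 0xFF0
// so the fallback is an 'and' with 0xFF0 followed by a logical shift right by
// 4, leaving bits [11:4] of src1 in the low byte of dst, the same result the
// single ubfx instruction produces on ARMv7.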
463 
464 
465 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
466  Condition cond) {
467  DCHECK_LT(lsb, 32);
468  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
469  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
470  and_(dst, src1, Operand(mask), LeaveCC, cond);
471  int shift_up = 32 - lsb - width;
472  int shift_down = lsb + shift_up;
473  if (shift_up != 0) {
474  mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
475  }
476  if (shift_down != 0) {
477  mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
478  }
479  } else {
480  CpuFeatureScope scope(this, ARMv7);
481  sbfx(dst, src1, lsb, width, cond);
482  }
483 }
484 
485 
486 void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
487  Condition cond) {
488  DCHECK_LT(lsb, 32);
489  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
490  int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
491  bic(dst, src, Operand(mask));
492  } else {
493  CpuFeatureScope scope(this, ARMv7);
494  Move(dst, src, cond);
495  bfc(dst, lsb, width, cond);
496  }
497 }
498 
499 void MacroAssembler::Load(Register dst,
500  const MemOperand& src,
501  Representation r) {
502  DCHECK(!r.IsDouble());
503  if (r.IsInteger8()) {
504  ldrsb(dst, src);
505  } else if (r.IsUInteger8()) {
506  ldrb(dst, src);
507  } else if (r.IsInteger16()) {
508  ldrsh(dst, src);
509  } else if (r.IsUInteger16()) {
510  ldrh(dst, src);
511  } else {
512  ldr(dst, src);
513  }
514 }
515 
516 void MacroAssembler::Store(Register src,
517  const MemOperand& dst,
518  Representation r) {
519  DCHECK(!r.IsDouble());
520  if (r.IsInteger8() || r.IsUInteger8()) {
521  strb(src, dst);
522  } else if (r.IsInteger16() || r.IsUInteger16()) {
523  strh(src, dst);
524  } else {
525  if (r.IsHeapObject()) {
526  AssertNotSmi(src);
527  } else if (r.IsSmi()) {
528  AssertSmi(src);
529  }
530  str(src, dst);
531  }
532 }
533 
534 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
535  Condition cond) {
536  ldr(destination,
537  MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
538 }
539 
540 
541 void MacroAssembler::RecordWriteField(Register object, int offset,
542  Register value, Register dst,
543  LinkRegisterStatus lr_status,
544  SaveFPRegsMode save_fp,
545  RememberedSetAction remembered_set_action,
546  SmiCheck smi_check) {
547  // First, check if a write barrier is even needed. The tests below
548  // catch stores of Smis.
549  Label done;
550 
551  // Skip barrier if writing a smi.
552  if (smi_check == INLINE_SMI_CHECK) {
553  JumpIfSmi(value, &done);
554  }
555 
556  // Although the object register is tagged, the offset is relative to the start
557  // of the object, so the offset must be a multiple of kPointerSize.
558  DCHECK(IsAligned(offset, kPointerSize));
559 
560  add(dst, object, Operand(offset - kHeapObjectTag));
561  if (emit_debug_code()) {
562  Label ok;
563  tst(dst, Operand(kPointerSize - 1));
564  b(eq, &ok);
565  stop("Unaligned cell in write barrier");
566  bind(&ok);
567  }
568 
569  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
570  OMIT_SMI_CHECK);
571 
572  bind(&done);
573 
574  // Clobber clobbered input registers when running with the debug-code flag
575  // turned on to provoke errors.
576  if (emit_debug_code()) {
577  mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
578  mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
579  }
580 }
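// Usage sketch (illustrative, not part of the original file): the barrier is
// emitted immediately after the store it protects. 'offset' and the registers
// below are placeholders; 'scratch' must be a register the caller may clobber.
//
//   str(value, FieldMemOperand(object, offset));
//   RecordWriteField(object, offset, value, scratch, kLRHasNotBeenSaved,
//                    kSaveFPRegs, EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
//
// INLINE_SMI_CHECK makes the barrier skip itself when 'value' is a Smi;
// OMIT_SMI_CHECK is only safe when the caller has already ruled that out.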
581 
582 void TurboAssembler::SaveRegisters(RegList registers) {
583  DCHECK_GT(NumRegs(registers), 0);
584  RegList regs = 0;
585  for (int i = 0; i < Register::kNumRegisters; ++i) {
586  if ((registers >> i) & 1u) {
587  regs |= Register::from_code(i).bit();
588  }
589  }
590 
591  stm(db_w, sp, regs);
592 }
593 
594 void TurboAssembler::RestoreRegisters(RegList registers) {
595  DCHECK_GT(NumRegs(registers), 0);
596  RegList regs = 0;
597  for (int i = 0; i < Register::kNumRegisters; ++i) {
598  if ((registers >> i) & 1u) {
599  regs |= Register::from_code(i).bit();
600  }
601  }
602  ldm(ia_w, sp, regs);
603 }
604 
605 void TurboAssembler::CallRecordWriteStub(
606  Register object, Register address,
607  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
608  CallRecordWriteStub(
609  object, address, remembered_set_action, fp_mode,
610  isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
611  kNullAddress);
612 }
613 
614 void TurboAssembler::CallRecordWriteStub(
615  Register object, Register address,
616  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
617  Address wasm_target) {
618  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
619  Handle<Code>::null(), wasm_target);
620 }
621 
622 void TurboAssembler::CallRecordWriteStub(
623  Register object, Register address,
624  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
625  Handle<Code> code_target, Address wasm_target) {
626  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
627  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
628  // i.e. always emit the remembered set and save FP registers in RecordWriteStub. If a
629  // large performance regression is observed, we should use these values to
630  // avoid unnecessary work.
631 
632  RecordWriteDescriptor descriptor;
633  RegList registers = descriptor.allocatable_registers();
634 
635  SaveRegisters(registers);
636 
637  Register object_parameter(
638  descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
639  Register slot_parameter(
640  descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
641  Register remembered_set_parameter(
642  descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
643  Register fp_mode_parameter(
644  descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
645 
646  Push(object);
647  Push(address);
648 
649  Pop(slot_parameter);
650  Pop(object_parameter);
651 
652  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
653  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
654  if (code_target.is_null()) {
655  Call(wasm_target, RelocInfo::WASM_STUB_CALL);
656  } else {
657  Call(code_target, RelocInfo::CODE_TARGET);
658  }
659 
660  RestoreRegisters(registers);
661 }
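// Note on the push/pop pair above (an observation, not part of the original
// comments): routing 'object' and 'address' through the stack instead of using
// two register moves keeps the transfer correct even if the incoming registers
// alias the descriptor's parameter registers in either order, since both
// values are read before either destination is written.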
662 
663 // Will clobber 3 registers: object, address, and value. The register 'object'
664 // contains a heap object pointer. The heap object tag is shifted away.
665 // A scratch register also needs to be available.
666 void MacroAssembler::RecordWrite(Register object, Register address,
667  Register value, LinkRegisterStatus lr_status,
668  SaveFPRegsMode fp_mode,
669  RememberedSetAction remembered_set_action,
670  SmiCheck smi_check) {
671  DCHECK(object != value);
672  if (emit_debug_code()) {
673  {
674  UseScratchRegisterScope temps(this);
675  Register scratch = temps.Acquire();
676  ldr(scratch, MemOperand(address));
677  cmp(scratch, value);
678  }
679  Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
680  }
681 
682  if (remembered_set_action == OMIT_REMEMBERED_SET &&
683  !FLAG_incremental_marking) {
684  return;
685  }
686 
687  // First, check if a write barrier is even needed. The tests below
688  // catch stores of smis and stores into the young generation.
689  Label done;
690 
691  if (smi_check == INLINE_SMI_CHECK) {
692  JumpIfSmi(value, &done);
693  }
694 
695  CheckPageFlag(value,
696  value, // Used as scratch.
697  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
698  CheckPageFlag(object,
699  value, // Used as scratch.
700  MemoryChunk::kPointersFromHereAreInterestingMask,
701  eq,
702  &done);
703 
704  // Record the actual write.
705  if (lr_status == kLRHasNotBeenSaved) {
706  push(lr);
707  }
708  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
709  if (lr_status == kLRHasNotBeenSaved) {
710  pop(lr);
711  }
712 
713  bind(&done);
714 
715  // Count number of write barriers in generated code.
716  isolate()->counters()->write_barriers_static()->Increment();
717  {
718  UseScratchRegisterScope temps(this);
719  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
720  temps.Acquire(), value);
721  }
722 
723  // Clobber clobbered registers when running with the debug-code flag
724  // turned on to provoke errors.
725  if (emit_debug_code()) {
726  mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
727  mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
728  }
729 }
730 
731 void TurboAssembler::PushCommonFrame(Register marker_reg) {
732  if (marker_reg.is_valid()) {
733  if (marker_reg.code() > fp.code()) {
734  stm(db_w, sp, fp.bit() | lr.bit());
735  mov(fp, Operand(sp));
736  Push(marker_reg);
737  } else {
738  stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
739  add(fp, sp, Operand(kPointerSize));
740  }
741  } else {
742  stm(db_w, sp, fp.bit() | lr.bit());
743  mov(fp, sp);
744  }
745 }
746 
747 void TurboAssembler::PushStandardFrame(Register function_reg) {
748  DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
749  stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
750  fp.bit() | lr.bit());
751  int offset = -StandardFrameConstants::kContextOffset;
752  offset += function_reg.is_valid() ? kPointerSize : 0;
753  add(fp, sp, Operand(offset));
754 }
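// Layout sketch (illustrative; assumes the usual ARM standard-frame layout
// with the context slot at fp - kPointerSize): after PushStandardFrame(r1) the
// stack holds, from higher to lower addresses,
//   lr (return address), caller fp, cp (context), r1 (function)
// and fp is adjusted to point at the saved caller fp, so the context is found
// at fp - kPointerSize and the function at fp - 2 * kPointerSize.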
755 
756 
757 // Push and pop all registers that can hold pointers.
758 void MacroAssembler::PushSafepointRegisters() {
759  // Safepoints expect a block of contiguous register values starting with r0.
760  DCHECK_EQ(kSafepointSavedRegisters, (1 << kNumSafepointSavedRegisters) - 1);
761  // Safepoints expect a block of kNumSafepointRegisters values on the
762  // stack, so adjust the stack for unsaved registers.
763  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
764  DCHECK_GE(num_unsaved, 0);
765  sub(sp, sp, Operand(num_unsaved * kPointerSize));
766  stm(db_w, sp, kSafepointSavedRegisters);
767 }
768 
769 void MacroAssembler::PopSafepointRegisters() {
770  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
771  ldm(ia_w, sp, kSafepointSavedRegisters);
772  add(sp, sp, Operand(num_unsaved * kPointerSize));
773 }
774 
775 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
776  // The registers are pushed starting with the highest encoding,
777  // which means that lowest encodings are closest to the stack pointer.
778  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
779  return reg_code;
780 }
781 
782 void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
783  const DwVfpRegister src,
784  const Condition cond) {
785  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
786  // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
787  // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
788  vsub(dst, src, kDoubleRegZero, cond);
789 }
790 
791 void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
792  const SwVfpRegister src2,
793  const Condition cond) {
794  // Compare and move FPSCR flags to the normal condition flags.
795  VFPCompareAndLoadFlags(src1, src2, pc, cond);
796 }
797 
798 void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
799  const float src2,
800  const Condition cond) {
801  // Compare and move FPSCR flags to the normal condition flags.
802  VFPCompareAndLoadFlags(src1, src2, pc, cond);
803 }
804 
805 void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
806  const DwVfpRegister src2,
807  const Condition cond) {
808  // Compare and move FPSCR flags to the normal condition flags.
809  VFPCompareAndLoadFlags(src1, src2, pc, cond);
810 }
811 
812 void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
813  const double src2,
814  const Condition cond) {
815  // Compare and move FPSCR flags to the normal condition flags.
816  VFPCompareAndLoadFlags(src1, src2, pc, cond);
817 }
818 
819 void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
820  const SwVfpRegister src2,
821  const Register fpscr_flags,
822  const Condition cond) {
823  // Compare and load FPSCR.
824  vcmp(src1, src2, cond);
825  vmrs(fpscr_flags, cond);
826 }
827 
828 void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
829  const float src2,
830  const Register fpscr_flags,
831  const Condition cond) {
832  // Compare and load FPSCR.
833  vcmp(src1, src2, cond);
834  vmrs(fpscr_flags, cond);
835 }
836 
837 void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
838  const DwVfpRegister src2,
839  const Register fpscr_flags,
840  const Condition cond) {
841  // Compare and load FPSCR.
842  vcmp(src1, src2, cond);
843  vmrs(fpscr_flags, cond);
844 }
845 
846 void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
847  const double src2,
848  const Register fpscr_flags,
849  const Condition cond) {
850  // Compare and load FPSCR.
851  vcmp(src1, src2, cond);
852  vmrs(fpscr_flags, cond);
853 }
854 
855 void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
856  if (src.code() < 16) {
857  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
858  vmov(dst, loc.high());
859  } else {
860  vmov(NeonS32, dst, src, 1);
861  }
862 }
863 
864 void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
865  if (dst.code() < 16) {
866  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
867  vmov(loc.high(), src);
868  } else {
869  vmov(NeonS32, dst, 1, src);
870  }
871 }
872 
873 void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
874  if (src.code() < 16) {
875  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
876  vmov(dst, loc.low());
877  } else {
878  vmov(NeonS32, dst, src, 0);
879  }
880 }
881 
882 void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
883  if (dst.code() < 16) {
884  const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
885  vmov(loc.low(), src);
886  } else {
887  vmov(NeonS32, dst, 0, src);
888  }
889 }
890 
891 void TurboAssembler::VmovExtended(Register dst, int src_code) {
892  DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
893  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
894  if (src_code & 0x1) {
895  VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
896  } else {
897  VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
898  }
899 }
900 
901 void TurboAssembler::VmovExtended(int dst_code, Register src) {
902  DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
903  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
904  if (dst_code & 0x1) {
905  VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
906  } else {
907  VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
908  }
909 }
910 
911 void TurboAssembler::VmovExtended(int dst_code, int src_code) {
912  if (src_code == dst_code) return;
913 
914  if (src_code < SwVfpRegister::kNumRegisters &&
915  dst_code < SwVfpRegister::kNumRegisters) {
916  // src and dst are both s-registers.
917  vmov(SwVfpRegister::from_code(dst_code),
918  SwVfpRegister::from_code(src_code));
919  return;
920  }
921  DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
922  DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
923  int dst_offset = dst_code & 1;
924  int src_offset = src_code & 1;
925  if (CpuFeatures::IsSupported(NEON)) {
926  UseScratchRegisterScope temps(this);
927  DwVfpRegister scratch = temps.AcquireD();
928  // On Neon we can shift and insert from d-registers.
929  if (src_offset == dst_offset) {
930  // Offsets are the same, use vdup to copy the source to the opposite lane.
931  vdup(Neon32, scratch, src_d_reg, src_offset);
932  // Here we are extending the lifetime of scratch.
933  src_d_reg = scratch;
934  src_offset = dst_offset ^ 1;
935  }
936  if (dst_offset) {
937  if (dst_d_reg == src_d_reg) {
938  vdup(Neon32, dst_d_reg, src_d_reg, 0);
939  } else {
940  vsli(Neon64, dst_d_reg, src_d_reg, 32);
941  }
942  } else {
943  if (dst_d_reg == src_d_reg) {
944  vdup(Neon32, dst_d_reg, src_d_reg, 1);
945  } else {
946  vsri(Neon64, dst_d_reg, src_d_reg, 32);
947  }
948  }
949  return;
950  }
951 
952  // Without Neon, use the scratch registers to move src and/or dst into
953  // s-registers.
954  UseScratchRegisterScope temps(this);
955  LowDwVfpRegister d_scratch = temps.AcquireLowD();
956  LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
957  int s_scratch_code = d_scratch.low().code();
958  int s_scratch_code2 = d_scratch2.low().code();
959  if (src_code < SwVfpRegister::kNumRegisters) {
960  // src is an s-register, dst is not.
961  vmov(d_scratch, dst_d_reg);
962  vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
963  SwVfpRegister::from_code(src_code));
964  vmov(dst_d_reg, d_scratch);
965  } else if (dst_code < SwVfpRegister::kNumRegisters) {
966  // dst is an s-register, src is not.
967  vmov(d_scratch, src_d_reg);
968  vmov(SwVfpRegister::from_code(dst_code),
969  SwVfpRegister::from_code(s_scratch_code + src_offset));
970  } else {
971  // Neither src nor dst is an s-register. Both scratch double registers are
972  // available when there are 32 VFP registers.
973  vmov(d_scratch, src_d_reg);
974  vmov(d_scratch2, dst_d_reg);
975  vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
976  SwVfpRegister::from_code(s_scratch_code2 + src_offset));
977  vmov(dst_d_reg, d_scratch2);
978  }
979 }
980 
981 void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
982  if (dst_code < SwVfpRegister::kNumRegisters) {
983  vldr(SwVfpRegister::from_code(dst_code), src);
984  } else {
985  UseScratchRegisterScope temps(this);
986  LowDwVfpRegister scratch = temps.AcquireLowD();
987  // TODO(bbudge) If Neon supported, use load single lane form of vld1.
988  int dst_s_code = scratch.low().code() + (dst_code & 1);
989  vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
990  vldr(SwVfpRegister::from_code(dst_s_code), src);
991  vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
992  }
993 }
994 
995 void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
996  if (src_code < SwVfpRegister::kNumRegisters) {
997  vstr(SwVfpRegister::from_code(src_code), dst);
998  } else {
999  // TODO(bbudge) If Neon supported, use store single lane form of vst1.
1000  UseScratchRegisterScope temps(this);
1001  LowDwVfpRegister scratch = temps.AcquireLowD();
1002  int src_s_code = scratch.low().code() + (src_code & 1);
1003  vmov(scratch, DwVfpRegister::from_code(src_code / 2));
1004  vstr(SwVfpRegister::from_code(src_s_code), dst);
1005  }
1006 }
1007 
1008 void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
1009  NeonDataType dt, int lane) {
1010  int size = NeonSz(dt); // 0, 1, 2
1011  int byte = lane << size;
1012  int double_word = byte >> kDoubleSizeLog2;
1013  int double_byte = byte & (kDoubleSize - 1);
1014  int double_lane = double_byte >> size;
1015  DwVfpRegister double_source =
1016  DwVfpRegister::from_code(src.code() * 2 + double_word);
1017  vmov(dt, dst, double_source, double_lane);
1018 }
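// Worked example (illustrative) of the lane arithmetic above: extracting
// NeonS16 lane 5 of q1 gives size == 1, byte == 10, double_word == 1,
// double_byte == 2 and double_lane == 1, so the vmov reads 16-bit lane 1 of
// d3 (the upper half of q1), which is overall lane 5 of the q register.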
1019 
1020 void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
1021  NeonDataType dt, int lane) {
1022  int size = NeonSz(dt); // 0, 1, 2
1023  int byte = lane << size;
1024  int double_byte = byte & (kDoubleSize - 1);
1025  int double_lane = double_byte >> size;
1026  vmov(dt, dst, src, double_lane);
1027 }
1028 
1029 void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
1030  int lane) {
1031  int s_code = src.code() * 4 + lane;
1032  VmovExtended(dst.code(), s_code);
1033 }
1034 
1035 void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1036  Register src_lane, NeonDataType dt, int lane) {
1037  Move(dst, src);
1038  int size = NeonSz(dt); // 0, 1, 2
1039  int byte = lane << size;
1040  int double_word = byte >> kDoubleSizeLog2;
1041  int double_byte = byte & (kDoubleSize - 1);
1042  int double_lane = double_byte >> size;
1043  DwVfpRegister double_dst =
1044  DwVfpRegister::from_code(dst.code() * 2 + double_word);
1045  vmov(dt, double_dst, double_lane, src_lane);
1046 }
1047 
1048 void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
1049  SwVfpRegister src_lane, int lane) {
1050  Move(dst, src);
1051  int s_code = dst.code() * 4 + lane;
1052  VmovExtended(s_code, src_lane.code());
1053 }
1054 
1055 void TurboAssembler::LslPair(Register dst_low, Register dst_high,
1056  Register src_low, Register src_high,
1057  Register shift) {
1058  DCHECK(!AreAliased(dst_high, src_low));
1059  DCHECK(!AreAliased(dst_high, shift));
1060  UseScratchRegisterScope temps(this);
1061  Register scratch = temps.Acquire();
1062 
1063  Label less_than_32;
1064  Label done;
1065  rsb(scratch, shift, Operand(32), SetCC);
1066  b(gt, &less_than_32);
1067  // If shift >= 32
1068  and_(scratch, shift, Operand(0x1F));
1069  lsl(dst_high, src_low, Operand(scratch));
1070  mov(dst_low, Operand(0));
1071  jmp(&done);
1072  bind(&less_than_32);
1073  // If shift < 32
1074  lsl(dst_high, src_high, Operand(shift));
1075  orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
1076  lsl(dst_low, src_low, Operand(shift));
1077  bind(&done);
1078 }
1079 
1080 void TurboAssembler::LslPair(Register dst_low, Register dst_high,
1081  Register src_low, Register src_high,
1082  uint32_t shift) {
1083  DCHECK(!AreAliased(dst_high, src_low));
1084  Label less_than_32;
1085  Label done;
1086  if (shift == 0) {
1087  Move(dst_high, src_high);
1088  Move(dst_low, src_low);
1089  } else if (shift == 32) {
1090  Move(dst_high, src_low);
1091  Move(dst_low, Operand(0));
1092  } else if (shift >= 32) {
1093  shift &= 0x1F;
1094  lsl(dst_high, src_low, Operand(shift));
1095  mov(dst_low, Operand(0));
1096  } else {
1097  lsl(dst_high, src_high, Operand(shift));
1098  orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
1099  lsl(dst_low, src_low, Operand(shift));
1100  }
1101 }
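// Worked example (illustrative): shifting the 64-bit value whose halves are
// src_high:src_low == 0x01234567:0x89ABCDEF left by 8 takes the final branch
// above and yields
//   dst_high = (0x01234567 << 8) | (0x89ABCDEF >> 24) = 0x23456789
//   dst_low  =  0x89ABCDEF << 8                        = 0xABCDEF00
// i.e. the bits shifted out of the low word are shifted into the high word.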
1102 
1103 void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
1104  Register src_low, Register src_high,
1105  Register shift) {
1106  DCHECK(!AreAliased(dst_low, src_high));
1107  DCHECK(!AreAliased(dst_low, shift));
1108  UseScratchRegisterScope temps(this);
1109  Register scratch = temps.Acquire();
1110 
1111  Label less_than_32;
1112  Label done;
1113  rsb(scratch, shift, Operand(32), SetCC);
1114  b(gt, &less_than_32);
1115  // If shift >= 32
1116  and_(scratch, shift, Operand(0x1F));
1117  lsr(dst_low, src_high, Operand(scratch));
1118  mov(dst_high, Operand(0));
1119  jmp(&done);
1120  bind(&less_than_32);
1121  // If shift < 32
1122 
1123  lsr(dst_low, src_low, Operand(shift));
1124  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1125  lsr(dst_high, src_high, Operand(shift));
1126  bind(&done);
1127 }
1128 
1129 void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
1130  Register src_low, Register src_high,
1131  uint32_t shift) {
1132  DCHECK(!AreAliased(dst_low, src_high));
1133  Label less_than_32;
1134  Label done;
1135  if (shift == 32) {
1136  mov(dst_low, src_high);
1137  mov(dst_high, Operand(0));
1138  } else if (shift > 32) {
1139  shift &= 0x1F;
1140  lsr(dst_low, src_high, Operand(shift));
1141  mov(dst_high, Operand(0));
1142  } else if (shift == 0) {
1143  Move(dst_low, src_low);
1144  Move(dst_high, src_high);
1145  } else {
1146  lsr(dst_low, src_low, Operand(shift));
1147  orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1148  lsr(dst_high, src_high, Operand(shift));
1149  }
1150 }
1151 
1152 void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
1153  Register src_low, Register src_high,
1154  Register shift) {
1155  DCHECK(!AreAliased(dst_low, src_high));
1156  DCHECK(!AreAliased(dst_low, shift));
1157  UseScratchRegisterScope temps(this);
1158  Register scratch = temps.Acquire();
1159 
1160  Label less_than_32;
1161  Label done;
1162  rsb(scratch, shift, Operand(32), SetCC);
1163  b(gt, &less_than_32);
1164  // If shift >= 32
1165  and_(scratch, shift, Operand(0x1F));
1166  asr(dst_low, src_high, Operand(scratch));
1167  asr(dst_high, src_high, Operand(31));
1168  jmp(&done);
1169  bind(&less_than_32);
1170  // If shift < 32
1171  lsr(dst_low, src_low, Operand(shift));
1172  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
1173  asr(dst_high, src_high, Operand(shift));
1174  bind(&done);
1175 }
1176 
1177 void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
1178  Register src_low, Register src_high,
1179  uint32_t shift) {
1180  DCHECK(!AreAliased(dst_low, src_high));
1181  Label less_than_32;
1182  Label done;
1183  if (shift == 32) {
1184  mov(dst_low, src_high);
1185  asr(dst_high, src_high, Operand(31));
1186  } else if (shift > 32) {
1187  shift &= 0x1F;
1188  asr(dst_low, src_high, Operand(shift));
1189  asr(dst_high, src_high, Operand(31));
1190  } else if (shift == 0) {
1191  Move(dst_low, src_low);
1192  Move(dst_high, src_high);
1193  } else {
1194  lsr(dst_low, src_low, Operand(shift));
1195  orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
1196  asr(dst_high, src_high, Operand(shift));
1197  }
1198 }
1199 
1200 void TurboAssembler::StubPrologue(StackFrame::Type type) {
1201  UseScratchRegisterScope temps(this);
1202  Register scratch = temps.Acquire();
1203  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1204  PushCommonFrame(scratch);
1205 }
1206 
1207 void TurboAssembler::Prologue() { PushStandardFrame(r1); }
1208 
1209 void TurboAssembler::EnterFrame(StackFrame::Type type,
1210  bool load_constant_pool_pointer_reg) {
1211  // r0-r3: preserved
1212  UseScratchRegisterScope temps(this);
1213  Register scratch = temps.Acquire();
1214  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
1215  PushCommonFrame(scratch);
1216 }
1217 
1218 int TurboAssembler::LeaveFrame(StackFrame::Type type) {
1219  // r0: preserved
1220  // r1: preserved
1221  // r2: preserved
1222 
1223  // Drop the execution stack down to the frame pointer and restore
1224  // the caller frame pointer and return address.
1225  mov(sp, fp);
1226  int frame_ends = pc_offset();
1227  ldm(ia_w, sp, fp.bit() | lr.bit());
1228  return frame_ends;
1229 }
1230 
1231 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1232  StackFrame::Type frame_type) {
1233  DCHECK(frame_type == StackFrame::EXIT ||
1234  frame_type == StackFrame::BUILTIN_EXIT);
1235  UseScratchRegisterScope temps(this);
1236  Register scratch = temps.Acquire();
1237 
1238  // Set up the frame structure on the stack.
1239  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1240  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1241  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1242  mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
1243  PushCommonFrame(scratch);
1244  // Reserve room for saved entry sp and code object.
1245  sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1246  if (emit_debug_code()) {
1247  mov(scratch, Operand::Zero());
1248  str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
1249  }
1250  Move(scratch, CodeObject());
1251  str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1252 
1253  // Save the frame pointer and the context in top.
1254  Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1255  isolate()));
1256  str(fp, MemOperand(scratch));
1257  Move(scratch,
1258  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1259  str(cp, MemOperand(scratch));
1260 
1261  // Optionally save all double registers.
1262  if (save_doubles) {
1263  SaveFPRegs(sp, scratch);
1264  // Note that d0 will be accessible at
1265  // fp - ExitFrameConstants::kFrameSize -
1266  // DwVfpRegister::kNumRegisters * kDoubleSize,
1267  // since the sp slot and code slot were pushed after the fp.
1268  }
1269 
1270  // Reserve space for the return address and the requested stack space, and
1271  // align the frame in preparation for calling the runtime function.
1272  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1273  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1274  if (frame_alignment > 0) {
1275  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1276  and_(sp, sp, Operand(-frame_alignment));
1277  }
1278 
1279  // Set the exit frame sp value to point just before the return address
1280  // location.
1281  add(scratch, sp, Operand(kPointerSize));
1282  str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
1283 }
1284 
1285 int TurboAssembler::ActivationFrameAlignment() {
1286 #if V8_HOST_ARCH_ARM
1287  // Running on the real platform. Use the alignment as mandated by the local
1288  // environment.
1289  // Note: This will break if we ever start generating snapshots on one ARM
1290  // platform for another ARM platform with a different alignment.
1291  return base::OS::ActivationFrameAlignment();
1292 #else // V8_HOST_ARCH_ARM
1293  // If we are using the simulator then we should always align to the expected
1294  // alignment. As the simulator is used to generate snapshots we do not know
1295  // if the target platform will need alignment, so this is controlled from a
1296  // flag.
1297  return FLAG_sim_stack_alignment;
1298 #endif // V8_HOST_ARCH_ARM
1299 }
1300 
1301 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1302  bool argument_count_is_length) {
1303  ConstantPoolUnavailableScope constant_pool_unavailable(this);
1304  UseScratchRegisterScope temps(this);
1305  Register scratch = temps.Acquire();
1306 
1307  // Optionally restore all double registers.
1308  if (save_doubles) {
1309  // Calculate the stack location of the saved doubles and restore them.
1310  const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
1311  sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
1312  RestoreFPRegs(r3, scratch);
1313  }
1314 
1315  // Clear top frame.
1316  mov(r3, Operand::Zero());
1317  Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1318  isolate()));
1319  str(r3, MemOperand(scratch));
1320 
1321  // Restore current context from top and clear it in debug mode.
1322  Move(scratch,
1323  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1324  ldr(cp, MemOperand(scratch));
1325 #ifdef DEBUG
1326  mov(r3, Operand(Context::kInvalidContext));
1327  Move(scratch,
1328  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1329  str(r3, MemOperand(scratch));
1330 #endif
1331 
1332  // Tear down the exit frame, pop the arguments, and return.
1333  mov(sp, Operand(fp));
1334  ldm(ia_w, sp, fp.bit() | lr.bit());
1335  if (argument_count.is_valid()) {
1336  if (argument_count_is_length) {
1337  add(sp, sp, argument_count);
1338  } else {
1339  add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1340  }
1341  }
1342 }
1343 
1344 void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
1345  if (use_eabi_hardfloat()) {
1346  Move(dst, d0);
1347  } else {
1348  vmov(dst, r0, r1);
1349  }
1350 }
1351 
1352 
1353 // On ARM this is just a synonym to make the purpose clear.
1354 void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1355  MovFromFloatResult(dst);
1356 }
1357 
1358 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1359  Register caller_args_count_reg,
1360  Register scratch0, Register scratch1) {
1361 #if DEBUG
1362  if (callee_args_count.is_reg()) {
1363  DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1364  scratch1));
1365  } else {
1366  DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1367  }
1368 #endif
1369 
1370  // Calculate the end of destination area where we will put the arguments
1371  // after we drop the current frame. We add kPointerSize to count the receiver
1372  // argument, which is not included in the formal parameter count.
1373  Register dst_reg = scratch0;
1374  add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
1375  add(dst_reg, dst_reg,
1376  Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1377 
1378  Register src_reg = caller_args_count_reg;
1379  // Calculate the end of source area. +kPointerSize is for the receiver.
1380  if (callee_args_count.is_reg()) {
1381  add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
1382  add(src_reg, src_reg, Operand(kPointerSize));
1383  } else {
1384  add(src_reg, sp,
1385  Operand((callee_args_count.immediate() + 1) * kPointerSize));
1386  }
1387 
1388  if (FLAG_debug_code) {
1389  cmp(src_reg, dst_reg);
1390  Check(lo, AbortReason::kStackAccessBelowStackPointer);
1391  }
1392 
1393  // Restore caller's frame pointer and return address now as they will be
1394  // overwritten by the copying loop.
1395  ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1396  ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1397 
1398  // Now copy callee arguments to the caller frame going backwards to avoid
1399  // callee arguments corruption (source and destination areas could overlap).
1400 
1401  // Both src_reg and dst_reg are pointing to the word after the one to copy,
1402  // so they must be pre-decremented in the loop.
1403  Register tmp_reg = scratch1;
1404  Label loop, entry;
1405  b(&entry);
1406  bind(&loop);
1407  ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
1408  str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
1409  bind(&entry);
1410  cmp(sp, src_reg);
1411  b(ne, &loop);
1412 
1413  // Leave current frame.
1414  mov(sp, dst_reg);
1415 }
1416 
1417 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1418  const ParameterCount& actual, Label* done,
1419  bool* definitely_mismatches,
1420  InvokeFlag flag) {
1421  bool definitely_matches = false;
1422  *definitely_mismatches = false;
1423  Label regular_invoke;
1424 
1425  // Check whether the expected and actual argument counts match. If not,
1426  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
1427  // r0: actual arguments count
1428  // r1: function (passed through to callee)
1429  // r2: expected arguments count
1430 
1431  // The code below is made a lot easier because the calling code already sets
1432  // up actual and expected registers according to the contract if values are
1433  // passed in registers.
1434  DCHECK(actual.is_immediate() || actual.reg() == r0);
1435  DCHECK(expected.is_immediate() || expected.reg() == r2);
1436 
1437  if (expected.is_immediate()) {
1438  DCHECK(actual.is_immediate());
1439  mov(r0, Operand(actual.immediate()));
1440  if (expected.immediate() == actual.immediate()) {
1441  definitely_matches = true;
1442  } else {
1443  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1444  if (expected.immediate() == sentinel) {
1445  // Don't worry about adapting arguments for builtins that
1446  // don't want that done. Skip the adaptation code by making it look
1447  // like we have a match between the expected and actual number of
1448  // arguments.
1449  definitely_matches = true;
1450  } else {
1451  *definitely_mismatches = true;
1452  mov(r2, Operand(expected.immediate()));
1453  }
1454  }
1455  } else {
1456  if (actual.is_immediate()) {
1457  mov(r0, Operand(actual.immediate()));
1458  cmp(expected.reg(), Operand(actual.immediate()));
1459  b(eq, &regular_invoke);
1460  } else {
1461  cmp(expected.reg(), Operand(actual.reg()));
1462  b(eq, &regular_invoke);
1463  }
1464  }
1465 
1466  if (!definitely_matches) {
1467  Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1468  if (flag == CALL_FUNCTION) {
1469  Call(adaptor);
1470  if (!*definitely_mismatches) {
1471  b(done);
1472  }
1473  } else {
1474  Jump(adaptor, RelocInfo::CODE_TARGET);
1475  }
1476  bind(&regular_invoke);
1477  }
1478 }
1479 
1480 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1481  const ParameterCount& expected,
1482  const ParameterCount& actual) {
1483  Label skip_hook;
1484 
1485  ExternalReference debug_hook_active =
1486  ExternalReference::debug_hook_on_function_call_address(isolate());
1487  Move(r4, debug_hook_active);
1488  ldrsb(r4, MemOperand(r4));
1489  cmp(r4, Operand(0));
1490  b(eq, &skip_hook);
1491 
1492  {
1493  // Load receiver to pass it later to DebugOnFunctionCall hook.
1494  if (actual.is_reg()) {
1495  mov(r4, actual.reg());
1496  } else {
1497  mov(r4, Operand(actual.immediate()));
1498  }
1499  ldr(r4, MemOperand(sp, r4, LSL, kPointerSizeLog2));
1500  FrameScope frame(this,
1501  has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1502  if (expected.is_reg()) {
1503  SmiTag(expected.reg());
1504  Push(expected.reg());
1505  }
1506  if (actual.is_reg()) {
1507  SmiTag(actual.reg());
1508  Push(actual.reg());
1509  }
1510  if (new_target.is_valid()) {
1511  Push(new_target);
1512  }
1513  Push(fun);
1514  Push(fun);
1515  Push(r4);
1516  CallRuntime(Runtime::kDebugOnFunctionCall);
1517  Pop(fun);
1518  if (new_target.is_valid()) {
1519  Pop(new_target);
1520  }
1521  if (actual.is_reg()) {
1522  Pop(actual.reg());
1523  SmiUntag(actual.reg());
1524  }
1525  if (expected.is_reg()) {
1526  Pop(expected.reg());
1527  SmiUntag(expected.reg());
1528  }
1529  }
1530  bind(&skip_hook);
1531 }
1532 
1533 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1534  const ParameterCount& expected,
1535  const ParameterCount& actual,
1536  InvokeFlag flag) {
1537  // You can't call a function without a valid frame.
1538  DCHECK(flag == JUMP_FUNCTION || has_frame());
1539  DCHECK(function == r1);
1540  DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
1541 
1542  // On function call, call into the debugger if necessary.
1543  CheckDebugHook(function, new_target, expected, actual);
1544 
1545  // Clear the new.target register if not given.
1546  if (!new_target.is_valid()) {
1547  LoadRoot(r3, RootIndex::kUndefinedValue);
1548  }
1549 
1550  Label done;
1551  bool definitely_mismatches = false;
1552  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
1553  if (!definitely_mismatches) {
1554  // We call indirectly through the code field in the function to
1555  // allow recompilation to take effect without changing any of the
1556  // call sites.
1557  Register code = kJavaScriptCallCodeStartRegister;
1558  ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1559  add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1560  if (flag == CALL_FUNCTION) {
1561  Call(code);
1562  } else {
1563  DCHECK(flag == JUMP_FUNCTION);
1564  Jump(code);
1565  }
1566 
1567  // Continue here if InvokePrologue has already handled the invocation
1568  // (through the arguments adaptor) because of mismatched parameter counts.
1569  bind(&done);
1570  }
1571 }
1572 
1573 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1574  const ParameterCount& actual,
1575  InvokeFlag flag) {
1576  // You can't call a function without a valid frame.
1577  DCHECK(flag == JUMP_FUNCTION || has_frame());
1578 
1579  // Contract with called JS functions requires that function is passed in r1.
1580  DCHECK(fun == r1);
1581 
1582  Register expected_reg = r2;
1583  Register temp_reg = r4;
1584 
1585  ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1586  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1587  ldrh(expected_reg,
1588  FieldMemOperand(temp_reg,
1589  SharedFunctionInfo::kFormalParameterCountOffset));
1590 
1591  ParameterCount expected(expected_reg);
1592  InvokeFunctionCode(fun, new_target, expected, actual, flag);
1593 }
1594 
1595 void MacroAssembler::InvokeFunction(Register function,
1596  const ParameterCount& expected,
1597  const ParameterCount& actual,
1598  InvokeFlag flag) {
1599  // You can't call a function without a valid frame.
1600  DCHECK(flag == JUMP_FUNCTION || has_frame());
1601 
1602  // Contract with called JS functions requires that function is passed in r1.
1603  DCHECK(function == r1);
1604 
1605  // Get the function and setup the context.
1606  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1607 
1608  InvokeFunctionCode(r1, no_reg, expected, actual, flag);
1609 }
1610 
1611 void MacroAssembler::MaybeDropFrames() {
1612  // Check whether we need to drop frames to restart a function on the stack.
1613  ExternalReference restart_fp =
1614  ExternalReference::debug_restart_fp_address(isolate());
1615  Move(r1, restart_fp);
1616  ldr(r1, MemOperand(r1));
1617  tst(r1, r1);
1618  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1619  ne);
1620 }
1621 
1622 void MacroAssembler::PushStackHandler() {
1623  // Adjust this code if not the case.
1624  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1625  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1626 
1627  Push(Smi::zero()); // Padding.
1628  // Link the current handler as the next handler.
1629  mov(r6, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1630  isolate())));
1631  ldr(r5, MemOperand(r6));
1632  push(r5);
1633  // Set this new handler as the current one.
1634  str(sp, MemOperand(r6));
1635 }
1636 
1637 
1638 void MacroAssembler::PopStackHandler() {
1639  UseScratchRegisterScope temps(this);
1640  Register scratch = temps.Acquire();
1641  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1642  pop(r1);
1643  mov(scratch, Operand(ExternalReference::Create(
1644  IsolateAddressId::kHandlerAddress, isolate())));
1645  str(r1, MemOperand(scratch));
1646  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1647 }
1648 
1649 
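// --- Editor's usage sketch (not part of the original source) ---
// PushStackHandler links a new two-word handler (padding plus the next
// pointer) into the isolate's handler chain, and PopStackHandler unlinks it
// again. Code that needs to catch exceptions from a nested call brackets that
// call with the pair:
//
//   masm->PushStackHandler();
//   // ... emit the call that may throw ...
//   masm->PopStackHandler();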
1650 void MacroAssembler::CompareObjectType(Register object,
1651  Register map,
1652  Register type_reg,
1653  InstanceType type) {
1654  UseScratchRegisterScope temps(this);
1655  const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
1656 
1657  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
1658  CompareInstanceType(map, temp, type);
1659 }
1660 
1661 
1662 void MacroAssembler::CompareInstanceType(Register map,
1663  Register type_reg,
1664  InstanceType type) {
1665  ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1666  cmp(type_reg, Operand(type));
1667 }
1668 
1669 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1670  UseScratchRegisterScope temps(this);
1671  Register scratch = temps.Acquire();
1672  DCHECK(obj != scratch);
1673  LoadRoot(scratch, index);
1674  cmp(obj, scratch);
1675 }
1676 
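// --- Editor's usage sketch (not part of the original source) ---
// CompareObjectType loads the object's map, then compares the map's instance
// type with the given InstanceType, leaving the result in the condition
// flags. Branching when a register does not hold a JSFunction (register
// choices are illustrative) looks like:
//
//   Label not_a_function;
//   masm->CompareObjectType(r0, r2, r3, JS_FUNCTION_TYPE);
//   masm->b(ne, &not_a_function);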
1677 void MacroAssembler::CallStub(CodeStub* stub,
1678  Condition cond) {
1679  DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1680  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, CAN_INLINE_TARGET_ADDRESS,
1681  false);
1682 }
1683 
1684 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1685  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1686 }
1687 
1688 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
1689  return has_frame() || !stub->SometimesSetsUpAFrame();
1690 }
1691 
1692 void MacroAssembler::TryDoubleToInt32Exact(Register result,
1693  DwVfpRegister double_input,
1694  LowDwVfpRegister double_scratch) {
1695  DCHECK(double_input != double_scratch);
1696  vcvt_s32_f64(double_scratch.low(), double_input);
1697  vmov(result, double_scratch.low());
1698  vcvt_f64_s32(double_scratch, double_scratch.low());
1699  VFPCompareAndSetFlags(double_input, double_scratch);
1700 }
1701 
1702 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1703  DwVfpRegister double_input,
1704  Label* done) {
1705  UseScratchRegisterScope temps(this);
1706  SwVfpRegister single_scratch = SwVfpRegister::no_reg();
1707  if (temps.CanAcquireVfp<SwVfpRegister>()) {
1708  single_scratch = temps.AcquireS();
1709  } else {
1710  // Re-use the input as a scratch register. However, this only works if the
1711  // input is d0-d15: only those alias single-precision registers (s0-s31).
1712  DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
1713  LowDwVfpRegister double_scratch =
1714  LowDwVfpRegister::from_code(double_input.code());
1715  single_scratch = double_scratch.low();
1716  }
1717  vcvt_s32_f64(single_scratch, double_input);
1718  vmov(result, single_scratch);
1719 
1720  Register scratch = temps.Acquire();
1721  // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
1722  sub(scratch, result, Operand(1));
1723  cmp(scratch, Operand(0x7FFFFFFE));
1724  b(lt, done);
1725 }
1726 
1727 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1728  Register result,
1729  DwVfpRegister double_input,
1730  StubCallMode stub_mode) {
1731  Label done;
1732 
1733  TryInlineTruncateDoubleToI(result, double_input, &done);
1734 
1735  // If we fell through, the inline version didn't succeed, so call the stub instead.
1736  push(lr);
1737  sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1738  vstr(double_input, MemOperand(sp, 0));
1739 
1740  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1741  Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1742  } else {
1743  Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1744  }
1745  ldr(result, MemOperand(sp, 0));
1746 
1747  add(sp, sp, Operand(kDoubleSize));
1748  pop(lr);
1749 
1750  bind(&done);
1751 }
1752 
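// --- Editor's usage sketch (not part of the original source) ---
// TryDoubleToInt32Exact converts double_input to an int32 in result and then
// compares the round-tripped value with the original, so eq in the flags means
// the conversion was exact. A caller guarding against lost precision or NaN
// (register choices illustrative; d2 is assumed usable as the LowDwVfpRegister
// scratch) could write:
//
//   Label lost_precision;
//   masm->TryDoubleToInt32Exact(r0, d1, d2);
//   masm->b(ne, &lost_precision);  // not exact, or NaN (vs implies ne)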
1753 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1754  Register centry) {
1755  const Runtime::Function* f = Runtime::FunctionForId(fid);
1756  // TODO(1236192): Most runtime routines don't need the number of
1757  // arguments passed in because it is constant. At some point we
1758  // should remove this need and make the runtime routine entry code
1759  // smarter.
1760  mov(r0, Operand(f->nargs));
1761  Move(r1, ExternalReference::Create(f));
1762  DCHECK(!AreAliased(centry, r0, r1));
1763  add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
1764  Call(centry);
1765 }
1766 
1767 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1768  int num_arguments,
1769  SaveFPRegsMode save_doubles) {
1770  // All parameters are on the stack. r0 has the return value after call.
1771 
1772  // If the expected number of arguments of the runtime function is
1773  // constant, we check that the actual number of arguments matches the
1774  // expectation.
1775  CHECK(f->nargs < 0 || f->nargs == num_arguments);
1776 
1777  // TODO(1236192): Most runtime routines don't need the number of
1778  // arguments passed in because it is constant. At some point we
1779  // should remove this need and make the runtime routine entry code
1780  // smarter.
1781  mov(r0, Operand(num_arguments));
1782  Move(r1, ExternalReference::Create(f));
1783  Handle<Code> code =
1784  CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1785  Call(code, RelocInfo::CODE_TARGET);
1786 }
1787 
1788 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1789  const Runtime::Function* function = Runtime::FunctionForId(fid);
1790  DCHECK_EQ(1, function->result_size);
1791  if (function->nargs >= 0) {
1792  // TODO(1236192): Most runtime routines don't need the number of
1793  // arguments passed in because it is constant. At some point we
1794  // should remove this need and make the runtime routine entry code
1795  // smarter.
1796  mov(r0, Operand(function->nargs));
1797  }
1798  JumpToExternalReference(ExternalReference::Create(fid));
1799 }
1800 
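// --- Editor's usage sketch (not part of the original source) ---
// CallRuntime puts the argument count in r0 and the runtime entry point in r1,
// then calls through a CEntry code object; the arguments themselves must
// already be on the stack. A zero-argument call (Runtime::kStackGuard is used
// here purely as an example of a 0-argument runtime function) would be:
//
//   masm->CallRuntime(Runtime::FunctionForId(Runtime::kStackGuard), 0,
//                     kDontSaveFPRegs);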
1801 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1802  bool builtin_exit_frame) {
1803 #if defined(__thumb__)
1804  // Thumb mode builtin.
1805  DCHECK_EQ(builtin.address() & 1, 1);
1806 #endif
1807  Move(r1, builtin);
1808  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1809  kArgvOnStack, builtin_exit_frame);
1810  Jump(code, RelocInfo::CODE_TARGET);
1811 }
1812 
1813 void MacroAssembler::JumpToInstructionStream(Address entry) {
1814  mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1815  Jump(kOffHeapTrampolineRegister);
1816 }
1817 
1818 void MacroAssembler::LoadWeakValue(Register out, Register in,
1819  Label* target_if_cleared) {
1820  cmp(in, Operand(kClearedWeakHeapObjectLower32));
1821  b(eq, target_if_cleared);
1822 
1823  and_(out, in, Operand(~kWeakHeapObjectMask));
1824 }
1825 
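// --- Editor's note (not part of the original source) ---
// LoadWeakValue expects a tagged weak reference in |in|: if it equals the
// cleared-reference sentinel it branches to target_if_cleared; otherwise it
// clears the weak-tag bit so that |out| holds an ordinary strong pointer.
//
//   Label cleared;
//   masm->LoadWeakValue(r2, r4, &cleared);
//   // r2 now holds the strong reference; fall through on the live path.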
1826 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1827  Register scratch1, Register scratch2) {
1828  DCHECK_GT(value, 0);
1829  if (FLAG_native_code_counters && counter->Enabled()) {
1830  Move(scratch2, ExternalReference::Create(counter));
1831  ldr(scratch1, MemOperand(scratch2));
1832  add(scratch1, scratch1, Operand(value));
1833  str(scratch1, MemOperand(scratch2));
1834  }
1835 }
1836 
1837 
1838 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1839  Register scratch1, Register scratch2) {
1840  DCHECK_GT(value, 0);
1841  if (FLAG_native_code_counters && counter->Enabled()) {
1842  Move(scratch2, ExternalReference::Create(counter));
1843  ldr(scratch1, MemOperand(scratch2));
1844  sub(scratch1, scratch1, Operand(value));
1845  str(scratch1, MemOperand(scratch2));
1846  }
1847 }
1848 
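// --- Editor's usage sketch (not part of the original source) ---
// Both counter helpers are no-ops unless --native-code-counters is enabled and
// the counter is live; otherwise they load, adjust, and store the counter
// value through its external reference. Given a StatsCounter* counter (from
// the isolate's Counters table; the accessor is omitted here) and two spare
// registers:
//
//   masm->IncrementCounter(counter, 1, r5, r6);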
1849 void TurboAssembler::Assert(Condition cond, AbortReason reason) {
1850  if (emit_debug_code())
1851  Check(cond, reason);
1852 }
1853 
1854 void TurboAssembler::AssertUnreachable(AbortReason reason) {
1855  if (emit_debug_code()) Abort(reason);
1856 }
1857 
1858 void TurboAssembler::Check(Condition cond, AbortReason reason) {
1859  Label L;
1860  b(cond, &L);
1861  Abort(reason);
1862  // will not return here
1863  bind(&L);
1864 }
1865 
1866 void TurboAssembler::Abort(AbortReason reason) {
1867  Label abort_start;
1868  bind(&abort_start);
1869  const char* msg = GetAbortReason(reason);
1870 #ifdef DEBUG
1871  RecordComment("Abort message: ");
1872  RecordComment(msg);
1873 #endif
1874 
1875  // Avoid emitting call to builtin if requested.
1876  if (trap_on_abort()) {
1877  stop(msg);
1878  return;
1879  }
1880 
1881  if (should_abort_hard()) {
1882  // We don't care if we constructed a frame. Just pretend we did.
1883  FrameScope assume_frame(this, StackFrame::NONE);
1884  Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
1885  PrepareCallCFunction(1, 0, r1);
1886  Move(r1, ExternalReference::abort_with_reason());
1887  // Use Call directly to avoid any unneeded overhead. The function won't
1888  // return anyway.
1889  Call(r1);
1890  return;
1891  }
1892 
1893  Move(r1, Smi::FromInt(static_cast<int>(reason)));
1894 
1895  // Disable stub call restrictions to always allow calls to abort.
1896  if (!has_frame()) {
1897  // We don't actually want to generate a pile of code for this, so just
1898  // claim there is a stack frame, without generating one.
1899  FrameScope scope(this, StackFrame::NONE);
1900  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1901  } else {
1902  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1903  }
1904  // will not return here
1905 }
1906 
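// --- Editor's usage sketch (not part of the original source) ---
// Assert emits its check only when emit_debug_code() is on, while Check always
// verifies the condition and falls through into Abort on failure. A debug-only
// smi check on a register named value (illustrative) is simply:
//
//   masm->tst(value, Operand(kSmiTagMask));
//   masm->Assert(eq, AbortReason::kOperandIsNotASmi);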
1907 void MacroAssembler::LoadGlobalProxy(Register dst) {
1908  LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
1909 }
1910 
1911 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1912  ldr(dst, NativeContextMemOperand());
1913  ldr(dst, ContextMemOperand(dst, index));
1914 }
1915 
1916 
1917 void TurboAssembler::InitializeRootRegister() {
1918  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
1919  mov(kRootRegister, Operand(isolate_root));
1920 }
1921 
1922 void MacroAssembler::SmiTag(Register reg, SBit s) {
1923  add(reg, reg, Operand(reg), s);
1924 }
1925 
1926 void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
1927  add(dst, src, Operand(src), s);
1928 }
1929 
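// --- Editor's note (not part of the original source) ---
// With kSmiTag == 0 and a one-bit tag, tagging a small integer is just a left
// shift by one, which the overloads above express as add(reg, reg, reg),
// i.e. reg + reg == reg << 1. The inverse, SmiUntag (assumed to be defined in
// the header), is the corresponding arithmetic shift right by one:
//
//   masm->SmiTag(r0);        // r0 <- r0 << 1, now a tagged smi
//   masm->SmiUntag(r0, r0);  // back to the untagged int32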
1930 void MacroAssembler::UntagAndJumpIfSmi(
1931  Register dst, Register src, Label* smi_case) {
1932  STATIC_ASSERT(kSmiTag == 0);
1933  SmiUntag(dst, src, SetCC);
1934  b(cc, smi_case); // Shifter carry is not set for a smi.
1935 }
1936 
1937 void MacroAssembler::SmiTst(Register value) {
1938  tst(value, Operand(kSmiTagMask));
1939 }
1940 
1941 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
1942  tst(value, Operand(kSmiTagMask));
1943  b(eq, smi_label);
1944 }
1945 
1946 void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
1947  cmp(x, Operand(y));
1948  b(eq, dest);
1949 }
1950 
1951 void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
1952  cmp(x, Operand(y));
1953  b(lt, dest);
1954 }
1955 
1956 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
1957  tst(value, Operand(kSmiTagMask));
1958  b(ne, not_smi_label);
1959 }
1960 
1961 void MacroAssembler::JumpIfEitherSmi(Register reg1,
1962  Register reg2,
1963  Label* on_either_smi) {
1964  STATIC_ASSERT(kSmiTag == 0);
1965  tst(reg1, Operand(kSmiTagMask));
1966  tst(reg2, Operand(kSmiTagMask), ne);
1967  b(eq, on_either_smi);
1968 }
1969 
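// --- Editor's note (not part of the original source) ---
// The smi tests above all use tst with kSmiTagMask: eq means the tag bit is
// clear (a smi), ne means it is set (a heap object). JumpIfEitherSmi runs the
// second tst conditionally on ne, so the flags end up eq when either register
// holds a smi. Taking a slow path unless both inputs are smis:
//
//   Label slow;
//   masm->JumpIfNotSmi(r0, &slow);
//   masm->JumpIfNotSmi(r1, &slow);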
1970 void MacroAssembler::AssertNotSmi(Register object) {
1971  if (emit_debug_code()) {
1972  STATIC_ASSERT(kSmiTag == 0);
1973  tst(object, Operand(kSmiTagMask));
1974  Check(ne, AbortReason::kOperandIsASmi);
1975  }
1976 }
1977 
1978 
1979 void MacroAssembler::AssertSmi(Register object) {
1980  if (emit_debug_code()) {
1981  STATIC_ASSERT(kSmiTag == 0);
1982  tst(object, Operand(kSmiTagMask));
1983  Check(eq, AbortReason::kOperandIsNotASmi);
1984  }
1985 }
1986 
1987 void MacroAssembler::AssertConstructor(Register object) {
1988  if (emit_debug_code()) {
1989  STATIC_ASSERT(kSmiTag == 0);
1990  tst(object, Operand(kSmiTagMask));
1991  Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
1992  push(object);
1993  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
1994  ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
1995  tst(object, Operand(Map::IsConstructorBit::kMask));
1996  pop(object);
1997  Check(ne, AbortReason::kOperandIsNotAConstructor);
1998  }
1999 }
2000 
2001 void MacroAssembler::AssertFunction(Register object) {
2002  if (emit_debug_code()) {
2003  STATIC_ASSERT(kSmiTag == 0);
2004  tst(object, Operand(kSmiTagMask));
2005  Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
2006  push(object);
2007  CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
2008  pop(object);
2009  Check(eq, AbortReason::kOperandIsNotAFunction);
2010  }
2011 }
2012 
2013 
2014 void MacroAssembler::AssertBoundFunction(Register object) {
2015  if (emit_debug_code()) {
2016  STATIC_ASSERT(kSmiTag == 0);
2017  tst(object, Operand(kSmiTagMask));
2018  Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
2019  push(object);
2020  CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2021  pop(object);
2022  Check(eq, AbortReason::kOperandIsNotABoundFunction);
2023  }
2024 }
2025 
2026 void MacroAssembler::AssertGeneratorObject(Register object) {
2027  if (!emit_debug_code()) return;
2028  tst(object, Operand(kSmiTagMask));
2029  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2030 
2031  // Load map
2032  Register map = object;
2033  push(object);
2034  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2035 
2036  // Check if JSGeneratorObject
2037  Label do_check;
2038  Register instance_type = object;
2039  CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
2040  b(eq, &do_check);
2041 
2042  // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
2043  cmp(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
2044  b(eq, &do_check);
2045 
2046  // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
2047  cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
2048 
2049  bind(&do_check);
2050  // Restore generator object to register and perform assertion
2051  pop(object);
2052  Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
2053 }
2054 
2055 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2056  Register scratch) {
2057  if (emit_debug_code()) {
2058  Label done_checking;
2059  AssertNotSmi(object);
2060  CompareRoot(object, RootIndex::kUndefinedValue);
2061  b(eq, &done_checking);
2062  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2063  CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2064  Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2065  bind(&done_checking);
2066  }
2067 }
2068 
2069 
2070 void TurboAssembler::CheckFor32DRegs(Register scratch) {
2071  Move(scratch, ExternalReference::cpu_features());
2072  ldr(scratch, MemOperand(scratch));
2073  tst(scratch, Operand(1u << VFP32DREGS));
2074 }
2075 
2076 void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
2077  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2078  CheckFor32DRegs(scratch);
2079  vstm(db_w, location, d16, d31, ne);
2080  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2081  vstm(db_w, location, d0, d15);
2082 }
2083 
2084 void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
2085  CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2086  CheckFor32DRegs(scratch);
2087  vldm(ia_w, location, d0, d15);
2088  vldm(ia_w, location, d16, d31, ne);
2089  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2090 }
2091 
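// --- Editor's note (not part of the original source) ---
// CheckFor32DRegs leaves ne in the flags when the VFP32DREGS feature bit is
// set, so SaveFPRegs/RestoreFPRegs transfer d16-d31 only in that case and
// otherwise just adjust the pointer by the same 16 * kDoubleSize. The two
// helpers are symmetric and are meant to be paired around a region that
// clobbers the FP registers, e.g. with sp as the location register:
//
//   masm->SaveFPRegs(sp, scratch);
//   // ... code that may clobber d0-d31 ...
//   masm->RestoreFPRegs(sp, scratch);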
2092 template <typename T>
2093 void TurboAssembler::FloatMaxHelper(T result, T left, T right,
2094  Label* out_of_line) {
2095  // This trivial case is caught sooner, so that the out-of-line code can be
2096  // completely avoided.
2097  DCHECK(left != right);
2098 
2099  if (CpuFeatures::IsSupported(ARMv8)) {
2100  CpuFeatureScope scope(this, ARMv8);
2101  VFPCompareAndSetFlags(left, right);
2102  b(vs, out_of_line);
2103  vmaxnm(result, left, right);
2104  } else {
2105  Label done;
2106  VFPCompareAndSetFlags(left, right);
2107  b(vs, out_of_line);
2108  // Avoid a conditional instruction if the result register is unique.
2109  bool aliased_result_reg = result == left || result == right;
2110  Move(result, right, aliased_result_reg ? mi : al);
2111  Move(result, left, gt);
2112  b(ne, &done);
2113  // Left and right are equal, but check for +/-0.
2114  VFPCompareAndSetFlags(left, 0.0);
2115  b(eq, out_of_line);
2116  // The arguments are equal and not zero, so it doesn't matter which input we
2117  // pick. We have already moved one input into the result (if it didn't
2118  // already alias) so there's nothing more to do.
2119  bind(&done);
2120  }
2121 }
2122 
2123 template <typename T>
2124 void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
2125  DCHECK(left != right);
2126 
2127  // ARMv8: At least one of left and right is a NaN.
2128  // Anything else: At least one of left and right is a NaN, or both left and
2129  // right are zeroes with unknown sign.
2130 
2131  // If left and right are +/-0, select the one with the most positive sign.
2132  // If left or right are NaN, vadd propagates the appropriate one.
2133  vadd(result, left, right);
2134 }
2135 
2136 template <typename T>
2137 void TurboAssembler::FloatMinHelper(T result, T left, T right,
2138  Label* out_of_line) {
2139  // This trivial case is caught sooner, so that the out-of-line code can be
2140  // completely avoided.
2141  DCHECK(left != right);
2142 
2143  if (CpuFeatures::IsSupported(ARMv8)) {
2144  CpuFeatureScope scope(this, ARMv8);
2145  VFPCompareAndSetFlags(left, right);
2146  b(vs, out_of_line);
2147  vminnm(result, left, right);
2148  } else {
2149  Label done;
2150  VFPCompareAndSetFlags(left, right);
2151  b(vs, out_of_line);
2152  // Avoid a conditional instruction if the result register is unique.
2153  bool aliased_result_reg = result == left || result == right;
2154  Move(result, left, aliased_result_reg ? mi : al);
2155  Move(result, right, gt);
2156  b(ne, &done);
2157  // Left and right are equal, but check for +/-0.
2158  VFPCompareAndSetFlags(left, 0.0);
2159  // If the arguments are equal and not zero, it doesn't matter which input we
2160  // pick. We have already moved one input into the result (if it didn't
2161  // already alias) so there's nothing more to do.
2162  b(ne, &done);
2163  // At this point, both left and right are either 0 or -0.
2164  // We could use a single 'vorr' instruction here if we had NEON support.
2165  // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
2166  // as -((-L) - R).
2167  if (left == result) {
2168  DCHECK(right != result);
2169  vneg(result, left);
2170  vsub(result, result, right);
2171  vneg(result, result);
2172  } else {
2173  DCHECK(left != result);
2174  vneg(result, right);
2175  vsub(result, result, left);
2176  vneg(result, result);
2177  }
2178  bind(&done);
2179  }
2180 }
2181 
2182 template <typename T>
2183 void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
2184  DCHECK(left != right);
2185 
2186  // At least one of left and right is a NaN. Use vadd to propagate the NaN
2187  // appropriately. +/-0 is handled inline.
2188  vadd(result, left, right);
2189 }
2190 
2191 void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
2192  SwVfpRegister right, Label* out_of_line) {
2193  FloatMaxHelper(result, left, right, out_of_line);
2194 }
2195 
2196 void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
2197  SwVfpRegister right, Label* out_of_line) {
2198  FloatMinHelper(result, left, right, out_of_line);
2199 }
2200 
2201 void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
2202  DwVfpRegister right, Label* out_of_line) {
2203  FloatMaxHelper(result, left, right, out_of_line);
2204 }
2205 
2206 void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
2207  DwVfpRegister right, Label* out_of_line) {
2208  FloatMinHelper(result, left, right, out_of_line);
2209 }
2210 
2211 void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
2212  SwVfpRegister right) {
2213  FloatMaxOutOfLineHelper(result, left, right);
2214 }
2215 
2216 void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
2217  SwVfpRegister right) {
2218  FloatMinOutOfLineHelper(result, left, right);
2219 }
2220 
2221 void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
2222  DwVfpRegister right) {
2223  FloatMaxOutOfLineHelper(result, left, right);
2224 }
2225 
2226 void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
2227  DwVfpRegister right) {
2228  FloatMinOutOfLineHelper(result, left, right);
2229 }
2230 
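// --- Editor's usage sketch (not part of the original source) ---
// FloatMax/FloatMin handle the common case inline and branch to out_of_line
// when the comparison is unordered (a NaN operand) and, for the max case
// without ARMv8, when both inputs are zero with unknown sign; the caller is
// expected to bind that label and emit the matching out-of-line variant:
//
//   Label ool_min, done;
//   masm->FloatMin(d0, d1, d2, &ool_min);
//   masm->b(&done);
//   masm->bind(&ool_min);
//   masm->FloatMinOutOfLine(d0, d1, d2);
//   masm->bind(&done);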
2231 static const int kRegisterPassedArguments = 4;
2232 
2233 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
2234  int num_double_arguments) {
2235  int stack_passed_words = 0;
2236  if (use_eabi_hardfloat()) {
2237  // In the hard floating point calling convention, we can use
2238  // all double registers to pass doubles.
2239  if (num_double_arguments > DoubleRegister::NumRegisters()) {
2240  stack_passed_words +=
2241  2 * (num_double_arguments - DoubleRegister::NumRegisters());
2242  }
2243  } else {
2244  // In the soft floating point calling convention, every double
2245  // argument is passed using two registers.
2246  num_reg_arguments += 2 * num_double_arguments;
2247  }
2248  // Up to four simple arguments are passed in registers r0..r3.
2249  if (num_reg_arguments > kRegisterPassedArguments) {
2250  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2251  }
2252  return stack_passed_words;
2253 }
2254 
2255 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
2256  int num_double_arguments,
2257  Register scratch) {
2258  int frame_alignment = ActivationFrameAlignment();
2259  int stack_passed_arguments = CalculateStackPassedWords(
2260  num_reg_arguments, num_double_arguments);
2261  if (frame_alignment > kPointerSize) {
2262  UseScratchRegisterScope temps(this);
2263  if (!scratch.is_valid()) scratch = temps.Acquire();
2264  // Make stack end at alignment and make room for num_arguments - 4 words
2265  // and the original value of sp.
2266  mov(scratch, sp);
2267  sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
2268  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2269  and_(sp, sp, Operand(-frame_alignment));
2270  str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
2271  } else if (stack_passed_arguments > 0) {
2272  sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
2273  }
2274 }
2275 
2276 void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
2277  DCHECK(src == d0);
2278  if (!use_eabi_hardfloat()) {
2279  vmov(r0, r1, src);
2280  }
2281 }
2282 
2283 
2284 // On ARM this is just a synonym to make the purpose clear.
2285 void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
2286  MovToFloatParameter(src);
2287 }
2288 
2289 void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
2290  DwVfpRegister src2) {
2291  DCHECK(src1 == d0);
2292  DCHECK(src2 == d1);
2293  if (!use_eabi_hardfloat()) {
2294  vmov(r0, r1, src1);
2295  vmov(r2, r3, src2);
2296  }
2297 }
2298 
2299 void TurboAssembler::CallCFunction(ExternalReference function,
2300  int num_reg_arguments,
2301  int num_double_arguments) {
2302  UseScratchRegisterScope temps(this);
2303  Register scratch = temps.Acquire();
2304  Move(scratch, function);
2305  CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
2306 }
2307 
2308 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
2309  int num_double_arguments) {
2310  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
2311 }
2312 
2313 void TurboAssembler::CallCFunction(ExternalReference function,
2314  int num_arguments) {
2315  CallCFunction(function, num_arguments, 0);
2316 }
2317 
2318 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2319  CallCFunction(function, num_arguments, 0);
2320 }
2321 
2322 void TurboAssembler::CallCFunctionHelper(Register function,
2323  int num_reg_arguments,
2324  int num_double_arguments) {
2325  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2326  DCHECK(has_frame());
2327  // Make sure that the stack is aligned before calling a C function unless
2328  // running in the simulator. The simulator has its own alignment check which
2329  // provides more information.
2330 #if V8_HOST_ARCH_ARM
2331  if (emit_debug_code()) {
2332  int frame_alignment = base::OS::ActivationFrameAlignment();
2333  int frame_alignment_mask = frame_alignment - 1;
2334  if (frame_alignment > kPointerSize) {
2335  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2336  Label alignment_as_expected;
2337  tst(sp, Operand(frame_alignment_mask));
2338  b(eq, &alignment_as_expected);
2339  // Don't use Check here, as it will call Runtime_Abort possibly
2340  // re-entering here.
2341  stop("Unexpected alignment");
2342  bind(&alignment_as_expected);
2343  }
2344  }
2345 #endif
2346 
2347  // Just call directly. The function called cannot cause a GC, or
2348  // allow preemption, so the return address in the link register
2349  // stays correct.
2350  Call(function);
2351  int stack_passed_arguments = CalculateStackPassedWords(
2352  num_reg_arguments, num_double_arguments);
2353  if (ActivationFrameAlignment() > kPointerSize) {
2354  ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
2355  } else {
2356  add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
2357  }
2358 }
2359 
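// --- Editor's usage sketch (not part of the original source) ---
// The C-call protocol is: PrepareCallCFunction aligns sp and reserves stack
// slots for any arguments beyond r0-r3 (and beyond the double registers under
// the hard-float ABI), the caller moves the arguments into place, and
// CallCFunction emits the call and releases the reserved slots. For a function
// taking two integers (the external reference name is hypothetical):
//
//   masm->PrepareCallCFunction(2, 0, r5);
//   masm->mov(r0, Operand(1));
//   masm->mov(r1, Operand(2));
//   masm->CallCFunction(ExternalReference::example_function(), 2, 0);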
2360 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
2361  Condition cc, Label* condition_met) {
2362  DCHECK(cc == eq || cc == ne);
2363  Bfc(scratch, object, 0, kPageSizeBits);
2364  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2365  tst(scratch, Operand(mask));
2366  b(cc, condition_met);
2367 }
2368 
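// --- Editor's usage sketch (not part of the original source) ---
// CheckPageFlag masks the object address down to the start of its page
// (MemoryChunk), loads the page's flag word, and tests it against mask,
// branching on eq or ne as requested. A write-barrier-style test might look
// like the following; the mask constant and register names are assumed here
// rather than taken from this file:
//
//   masm->CheckPageFlag(object, scratch,
//                       MemoryChunk::kPointersFromHereAreInterestingMask, eq,
//                       &done);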
2369 Register GetRegisterThatIsNotOneOf(Register reg1,
2370  Register reg2,
2371  Register reg3,
2372  Register reg4,
2373  Register reg5,
2374  Register reg6) {
2375  RegList regs = 0;
2376  if (reg1.is_valid()) regs |= reg1.bit();
2377  if (reg2.is_valid()) regs |= reg2.bit();
2378  if (reg3.is_valid()) regs |= reg3.bit();
2379  if (reg4.is_valid()) regs |= reg4.bit();
2380  if (reg5.is_valid()) regs |= reg5.bit();
2381  if (reg6.is_valid()) regs |= reg6.bit();
2382 
2383  const RegisterConfiguration* config = RegisterConfiguration::Default();
2384  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2385  int code = config->GetAllocatableGeneralCode(i);
2386  Register candidate = Register::from_code(code);
2387  if (regs & candidate.bit()) continue;
2388  return candidate;
2389  }
2390  UNREACHABLE();
2391 }
2392 
2393 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
2394  // Reading pc yields the current instruction's address plus 8 (Instruction::kPcLoadDelta), so pc - (pc_offset() + kPcLoadDelta) is the start of the code object.
2395  sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
2396 }
2397 
2398 void TurboAssembler::ResetSpeculationPoisonRegister() {
2399  mov(kSpeculationPoisonRegister, Operand(-1));
2400 }
2401 
2402 } // namespace internal
2403 } // namespace v8
2404 
2405 #endif // V8_TARGET_ARCH_ARM