V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
macro-assembler-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_X64
6 
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/base/utils/random-number-generator.h"
10 #include "src/bootstrapper.h"
11 #include "src/callable.h"
12 #include "src/code-factory.h"
13 #include "src/code-stubs.h"
14 #include "src/counters.h"
15 #include "src/debug/debug.h"
16 #include "src/external-reference-table.h"
17 #include "src/frames-inl.h"
18 #include "src/globals.h"
19 #include "src/macro-assembler.h"
20 #include "src/objects-inl.h"
21 #include "src/objects/smi.h"
22 #include "src/register-configuration.h"
23 #include "src/snapshot/embedded-data.h"
24 #include "src/snapshot/snapshot.h"
25 #include "src/string-constants.h"
26 #include "src/x64/assembler-x64.h"
27 
28 // Satisfy cpplint check, but don't include platform-specific header. It is
29 // included recursively via macro-assembler.h.
30 #if 0
31 #include "src/x64/macro-assembler-x64.h"
32 #endif
33 
34 namespace v8 {
35 namespace internal {
36 
37 Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
38  DCHECK_GE(index, 0);
39  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
40  int displacement_to_last_argument =
41  base_reg_ == rsp ? kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
42  displacement_to_last_argument += extra_displacement_to_last_argument_;
43  if (argument_count_reg_ == no_reg) {
44  // argument[0] is at base_reg_ + displacement_to_last_argument +
45  // (argument_count_immediate_ + receiver - 1) * kPointerSize.
46  DCHECK_GT(argument_count_immediate_ + receiver, 0);
47  return Operand(
48  base_reg_,
49  displacement_to_last_argument +
50  (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
51  } else {
52  // argument[0] is at base_reg_ + displacement_to_last_argument +
53  // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
54  return Operand(
55  base_reg_, argument_count_reg_, times_pointer_size,
56  displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
57  }
58 }
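
The operand computed above boils down to a plain byte offset from base_reg_ when the argument count is an immediate. The following is an illustrative sketch of that arithmetic in standalone C++ (not part of this file), assuming the x64 values kPointerSize == 8, kPCOnStackSize == 8 and kFPOnStackSize == 8:

    #include <cstdio>

    int ArgumentByteOffset(int index, int argument_count, bool has_receiver,
                           bool base_is_rsp) {
      const int kPointerSize = 8;
      int receiver = has_receiver ? 1 : 0;
      // rsp-relative accesses only skip the return address; rbp-relative
      // accesses also skip the saved frame pointer.
      int displacement_to_last_argument = base_is_rsp ? 8 : 8 + 8;
      return displacement_to_last_argument +
             (argument_count + receiver - 1 - index) * kPointerSize;
    }

    int main() {
      // Two arguments plus a receiver, addressed off rsp: argument 0 sits
      // deepest on the stack.
      std::printf("%d\n", ArgumentByteOffset(0, 2, true, true));  // prints 24
      std::printf("%d\n", ArgumentByteOffset(1, 2, true, true));  // prints 16
    }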
59 
60 StackArgumentsAccessor::StackArgumentsAccessor(
61  Register base_reg, const ParameterCount& parameter_count,
62  StackArgumentsAccessorReceiverMode receiver_mode,
63  int extra_displacement_to_last_argument)
64  : base_reg_(base_reg),
65  argument_count_reg_(parameter_count.is_reg() ? parameter_count.reg()
66  : no_reg),
67  argument_count_immediate_(
68  parameter_count.is_immediate() ? parameter_count.immediate() : 0),
69  receiver_mode_(receiver_mode),
70  extra_displacement_to_last_argument_(
71  extra_displacement_to_last_argument) {}
72 
73 MacroAssembler::MacroAssembler(Isolate* isolate,
74  const AssemblerOptions& options, void* buffer,
75  int size, CodeObjectRequired create_code_object)
76  : TurboAssembler(isolate, options, buffer, size, create_code_object) {
77  if (create_code_object == CodeObjectRequired::kYes) {
78  // Unlike TurboAssembler, which can be used off the main thread and may not
79  // allocate, macro assembler creates its own copy of the self-reference
80  // marker in order to disambiguate between self-references during nested
81  // code generation (e.g.: codegen of the current object triggers stub
82  // compilation through CodeStub::GetCode()).
83  code_object_ = Handle<HeapObject>::New(
84  *isolate->factory()->NewSelfReferenceMarker(), isolate);
85  }
86 }
87 
88 
89 void MacroAssembler::Load(Register destination, ExternalReference source) {
90  if (root_array_available_ && options().enable_root_array_delta_access) {
91  intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
92  if (is_int32(delta)) {
93  movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
94  return;
95  }
96  }
97  // Safe code.
98  if (FLAG_embedded_builtins) {
99  if (root_array_available_ && options().isolate_independent_code) {
100  IndirectLoadExternalReference(kScratchRegister, source);
101  movp(destination, Operand(kScratchRegister, 0));
102  return;
103  }
104  }
105  if (destination == rax) {
106  load_rax(source);
107  } else {
108  Move(kScratchRegister, source);
109  movp(destination, Operand(kScratchRegister, 0));
110  }
111 }
112 
113 
114 void MacroAssembler::Store(ExternalReference destination, Register source) {
115  if (root_array_available_ && options().enable_root_array_delta_access) {
116  intptr_t delta =
117  RootRegisterOffsetForExternalReference(isolate(), destination);
118  if (is_int32(delta)) {
119  movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
120  return;
121  }
122  }
123  // Safe code.
124  if (source == rax) {
125  store_rax(destination);
126  } else {
127  Move(kScratchRegister, destination);
128  movp(Operand(kScratchRegister, 0), source);
129  }
130 }
131 
132 void TurboAssembler::LoadFromConstantsTable(Register destination,
133  int constant_index) {
134  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
135  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
136  movp(destination,
137  FieldOperand(destination,
138  FixedArray::kHeaderSize + constant_index * kPointerSize));
139 }
140 
141 void TurboAssembler::LoadRootRegisterOffset(Register destination,
142  intptr_t offset) {
143  DCHECK(is_int32(offset));
144  if (offset == 0) {
145  Move(destination, kRootRegister);
146  } else {
147  leap(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
148  }
149 }
150 
151 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
152  movp(destination, Operand(kRootRegister, offset));
153 }
154 
155 void TurboAssembler::LoadAddress(Register destination,
156  ExternalReference source) {
157  if (root_array_available_ && options().enable_root_array_delta_access) {
158  intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
159  if (is_int32(delta)) {
160  leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
161  return;
162  }
163  }
164  // Safe code.
165  if (FLAG_embedded_builtins) {
166  if (root_array_available_ && options().isolate_independent_code) {
167  IndirectLoadExternalReference(destination, source);
168  return;
169  }
170  }
171  Move(destination, source);
172 }
173 
174 Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
175  Register scratch) {
176  if (root_array_available_ && options().enable_root_array_delta_access) {
177  int64_t delta =
178  RootRegisterOffsetForExternalReference(isolate(), reference);
179  if (is_int32(delta)) {
180  return Operand(kRootRegister, static_cast<int32_t>(delta));
181  }
182  }
183  if (root_array_available_ && options().isolate_independent_code) {
184  if (IsAddressableThroughRootRegister(isolate(), reference)) {
185  // Some external references can be efficiently loaded as an offset from
186  // kRootRegister.
187  intptr_t offset =
188  RootRegisterOffsetForExternalReference(isolate(), reference);
189  CHECK(is_int32(offset));
190  return Operand(kRootRegister, static_cast<int32_t>(offset));
191  } else {
192  // Otherwise, do a memory load from the external reference table.
193  movp(scratch, Operand(kRootRegister,
194  RootRegisterOffsetForExternalReferenceTableEntry(
195  isolate(), reference)));
196  return Operand(scratch, 0);
197  }
198  }
199  Move(scratch, reference);
200  return Operand(scratch, 0);
201 }
202 
203 void MacroAssembler::PushAddress(ExternalReference source) {
204  LoadAddress(kScratchRegister, source);
205  Push(kScratchRegister);
206 }
207 
208 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
209  DCHECK(root_array_available_);
210  movp(destination,
211  Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
212 }
213 
214 void MacroAssembler::PushRoot(RootIndex index) {
215  DCHECK(root_array_available_);
216  Push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
217 }
218 
219 void TurboAssembler::CompareRoot(Register with, RootIndex index) {
220  DCHECK(root_array_available_);
221  cmpp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
222 }
223 
224 void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
225  DCHECK(root_array_available_);
226  DCHECK(!with.AddressUsesRegister(kScratchRegister));
227  LoadRoot(kScratchRegister, index);
228  cmpp(with, kScratchRegister);
229 }
230 
231 void TurboAssembler::DecompressTaggedSigned(Register destination,
232  Operand field_operand,
233  Register scratch_for_debug) {
234  RecordComment("[ DecompressTaggedSigned");
235  if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
236  Register expected_value = scratch_for_debug;
237  movq(expected_value, field_operand);
238  movsxlq(destination, expected_value);
239  Label check_passed;
240  cmpq(destination, expected_value);
241  j(equal, &check_passed);
242  RecordComment("DecompressTaggedSigned failed");
243  int3();
244  bind(&check_passed);
245  } else {
246  movsxlq(destination, field_operand);
247  }
248  RecordComment("]");
249 }
250 
251 void TurboAssembler::DecompressTaggedPointer(Register destination,
252  Operand field_operand,
253  Register scratch_for_debug) {
254  RecordComment("[ DecompressTaggedPointer");
255  if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
256  Register expected_value = scratch_for_debug;
257  movq(expected_value, field_operand);
258  movsxlq(destination, expected_value);
259  addq(destination, kRootRegister);
260  Label check_passed;
261  cmpq(destination, expected_value);
262  j(equal, &check_passed);
263  RecordComment("DecompressTaggedPointer failed");
264  int3();
265  bind(&check_passed);
266  } else {
267  movsxlq(destination, field_operand);
268  addq(destination, kRootRegister);
269  }
270  RecordComment("]");
271 }
272 
273 void TurboAssembler::DecompressAnyTagged(Register destination,
274  Operand field_operand,
275  Register scratch,
276  Register scratch_for_debug) {
277  RecordComment("[ DecompressAnyTagged");
278  Register expected_value = scratch_for_debug;
279  if (DEBUG_BOOL && expected_value.is_valid()) {
280  movq(expected_value, field_operand);
281  movsxlq(destination, expected_value);
282  } else {
283  movsxlq(destination, field_operand);
284  }
285  // Branchlessly compute |masked_root|:
286  // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
287  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
288  Register masked_root = scratch;
289  movl(masked_root, destination);
290  andl(masked_root, Immediate(kSmiTagMask));
291  negq(masked_root);
292  andq(masked_root, kRootRegister);
293  // Now this add operation will either leave the value unchanged if it is a smi
294  // or add the isolate root if it is a heap object.
295  addq(destination, masked_root);
296  if (DEBUG_BOOL && expected_value.is_valid()) {
297  Label check_passed;
298  cmpq(destination, expected_value);
299  j(equal, &check_passed);
300  RecordComment("Decompression failed: Tagged");
301  int3();
302  bind(&check_passed);
303  }
304  RecordComment("]");
305 }
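
The branchless smi/heap-object dispatch above reads naturally as ordinary integer arithmetic. Below is a minimal sketch of the same computation in standalone C++ (not part of this file), assuming kSmiTag == 0 and kSmiTagSize == 1 as asserted above; roots_base stands in for the value held in kRootRegister:

    #include <cstdint>

    uint64_t DecompressAnyTagged(int32_t compressed, uint64_t roots_base) {
      // movsxlq: sign-extend the 32-bit field to 64 bits.
      uint64_t value = static_cast<uint64_t>(static_cast<int64_t>(compressed));
      // andl/negq/andq: 0 when the low (smi) tag bit is clear, roots_base when
      // it is set, with no branches taken.
      uint64_t masked_root = -(value & 1) & roots_base;
      // Smis pass through unchanged; heap object offsets get the isolate root
      // added back on.
      return value + masked_root;
    }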
306 
307 void MacroAssembler::RecordWriteField(Register object, int offset,
308  Register value, Register dst,
309  SaveFPRegsMode save_fp,
310  RememberedSetAction remembered_set_action,
311  SmiCheck smi_check) {
312  // First, check if a write barrier is even needed. The tests below
313  // catch stores of Smis.
314  Label done;
315 
316  // Skip barrier if writing a smi.
317  if (smi_check == INLINE_SMI_CHECK) {
318  JumpIfSmi(value, &done);
319  }
320 
321  // Although the object register is tagged, the offset is relative to the start
322  // of the object, so the offset must be a multiple of kPointerSize.
323  DCHECK(IsAligned(offset, kPointerSize));
324 
325  leap(dst, FieldOperand(object, offset));
326  if (emit_debug_code()) {
327  Label ok;
328  testb(dst, Immediate(kPointerSize - 1));
329  j(zero, &ok, Label::kNear);
330  int3();
331  bind(&ok);
332  }
333 
334  RecordWrite(object, dst, value, save_fp, remembered_set_action,
335  OMIT_SMI_CHECK);
336 
337  bind(&done);
338 
339  // Clobber clobbered input registers when running with the debug-code flag
340  // turned on to provoke errors.
341  if (emit_debug_code()) {
342  Move(value, kZapValue, RelocInfo::NONE);
343  Move(dst, kZapValue, RelocInfo::NONE);
344  }
345 }
346 
347 void TurboAssembler::SaveRegisters(RegList registers) {
348  DCHECK_GT(NumRegs(registers), 0);
349  for (int i = 0; i < Register::kNumRegisters; ++i) {
350  if ((registers >> i) & 1u) {
351  pushq(Register::from_code(i));
352  }
353  }
354 }
355 
356 void TurboAssembler::RestoreRegisters(RegList registers) {
357  DCHECK_GT(NumRegs(registers), 0);
358  for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
359  if ((registers >> i) & 1u) {
360  popq(Register::from_code(i));
361  }
362  }
363 }
364 
365 void TurboAssembler::CallRecordWriteStub(
366  Register object, Register address,
367  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
368  CallRecordWriteStub(
369  object, address, remembered_set_action, fp_mode,
370  isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
371  kNullAddress);
372 }
373 
374 void TurboAssembler::CallRecordWriteStub(
375  Register object, Register address,
376  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
377  Address wasm_target) {
378  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
379  Handle<Code>::null(), wasm_target);
380 }
381 
382 void TurboAssembler::CallRecordWriteStub(
383  Register object, Register address,
384  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
385  Handle<Code> code_target, Address wasm_target) {
386  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
387 
388  RecordWriteDescriptor descriptor;
389  RegList registers = descriptor.allocatable_registers();
390 
391  SaveRegisters(registers);
392 
393  Register object_parameter(
394  descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
395  Register slot_parameter(
396  descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
397  Register remembered_set_parameter(
398  descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
399  Register fp_mode_parameter(
400  descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
401 
402  // Prepare argument registers for calling RecordWrite
403  // slot_parameter <= address
404  // object_parameter <= object
405  if (slot_parameter != object) {
406  // Normal case
407  Move(slot_parameter, address);
408  Move(object_parameter, object);
409  } else if (object_parameter != address) {
410  // Only slot_parameter and object are the same register
411  // object_parameter <= object
412  // slot_parameter <= address
413  Move(object_parameter, object);
414  Move(slot_parameter, address);
415  } else {
416  // slot_parameter \/ address
417  // object_parameter /\ object
418  xchgq(slot_parameter, object_parameter);
419  }
420 
421  Smi smi_rsa = Smi::FromEnum(remembered_set_action);
422  Smi smi_fm = Smi::FromEnum(fp_mode);
423  Move(remembered_set_parameter, smi_rsa);
424  if (smi_rsa != smi_fm) {
425  Move(fp_mode_parameter, smi_fm);
426  } else {
427  movq(fp_mode_parameter, remembered_set_parameter);
428  }
429  if (code_target.is_null()) {
430  // Use {near_call} for direct Wasm call within a module.
431  near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
432  } else {
433  Call(code_target, RelocInfo::CODE_TARGET);
434  }
435 
436  RestoreRegisters(registers);
437 }
438 
439 void MacroAssembler::RecordWrite(Register object, Register address,
440  Register value, SaveFPRegsMode fp_mode,
441  RememberedSetAction remembered_set_action,
442  SmiCheck smi_check) {
443  DCHECK(object != value);
444  DCHECK(object != address);
445  DCHECK(value != address);
446  AssertNotSmi(object);
447 
448  if (remembered_set_action == OMIT_REMEMBERED_SET &&
449  !FLAG_incremental_marking) {
450  return;
451  }
452 
453  if (emit_debug_code()) {
454  Label ok;
455  cmpp(value, Operand(address, 0));
456  j(equal, &ok, Label::kNear);
457  int3();
458  bind(&ok);
459  }
460 
461  // First, check if a write barrier is even needed. The tests below
462  // catch stores of smis and stores into the young generation.
463  Label done;
464 
465  if (smi_check == INLINE_SMI_CHECK) {
466  // Skip barrier if writing a smi.
467  JumpIfSmi(value, &done);
468  }
469 
470  CheckPageFlag(value,
471  value, // Used as scratch.
472  MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
473  Label::kNear);
474 
475  CheckPageFlag(object,
476  value, // Used as scratch.
477  MemoryChunk::kPointersFromHereAreInterestingMask,
478  zero,
479  &done,
480  Label::kNear);
481 
482  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
483 
484  bind(&done);
485 
486  // Count number of write barriers in generated code.
487  isolate()->counters()->write_barriers_static()->Increment();
488  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
489 
490  // Clobber clobbered registers when running with the debug-code flag
491  // turned on to provoke errors.
492  if (emit_debug_code()) {
493  Move(address, kZapValue, RelocInfo::NONE);
494  Move(value, kZapValue, RelocInfo::NONE);
495  }
496 }
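
Before the stub is reached, the code above filters out the common cases with three cheap checks. A hedged sketch of that decision logic in plain C++ (the boolean parameters are illustrative stand-ins for the smi test and the two MemoryChunk page-flag tests, not V8 API):

    bool NeedsRecordWriteStub(bool value_is_smi,
                              bool value_page_pointers_to_here_interesting,
                              bool object_page_pointers_from_here_interesting) {
      if (value_is_smi) return false;                                 // INLINE_SMI_CHECK
      if (!value_page_pointers_to_here_interesting) return false;     // first CheckPageFlag
      if (!object_page_pointers_from_here_interesting) return false;  // second CheckPageFlag
      return true;  // fall through to CallRecordWriteStub
    }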
497 
498 void TurboAssembler::Assert(Condition cc, AbortReason reason) {
499  if (emit_debug_code()) Check(cc, reason);
500 }
501 
502 void TurboAssembler::AssertUnreachable(AbortReason reason) {
503  if (emit_debug_code()) Abort(reason);
504 }
505 
506 void TurboAssembler::Check(Condition cc, AbortReason reason) {
507  Label L;
508  j(cc, &L, Label::kNear);
509  Abort(reason);
510  // Control will not return here.
511  bind(&L);
512 }
513 
514 void TurboAssembler::CheckStackAlignment() {
515  int frame_alignment = base::OS::ActivationFrameAlignment();
516  int frame_alignment_mask = frame_alignment - 1;
517  if (frame_alignment > kPointerSize) {
518  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
519  Label alignment_as_expected;
520  testp(rsp, Immediate(frame_alignment_mask));
521  j(zero, &alignment_as_expected, Label::kNear);
522  // Abort if stack is not aligned.
523  int3();
524  bind(&alignment_as_expected);
525  }
526 }
527 
528 void TurboAssembler::Abort(AbortReason reason) {
529 #ifdef DEBUG
530  const char* msg = GetAbortReason(reason);
531  RecordComment("Abort message: ");
532  RecordComment(msg);
533 #endif
534 
535  // Avoid emitting call to builtin if requested.
536  if (trap_on_abort()) {
537  int3();
538  return;
539  }
540 
541  if (should_abort_hard()) {
542  // We don't care if we constructed a frame. Just pretend we did.
543  FrameScope assume_frame(this, StackFrame::NONE);
544  movl(arg_reg_1, Immediate(static_cast<int>(reason)));
545  PrepareCallCFunction(1);
546  LoadAddress(rax, ExternalReference::abort_with_reason());
547  call(rax);
548  return;
549  }
550 
551  Move(rdx, Smi::FromInt(static_cast<int>(reason)));
552 
553  if (!has_frame()) {
554  // We don't actually want to generate a pile of code for this, so just
555  // claim there is a stack frame, without generating one.
556  FrameScope scope(this, StackFrame::NONE);
557  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
558  } else {
559  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
560  }
561  // Control will not return here.
562  int3();
563 }
564 
565 void MacroAssembler::CallStub(CodeStub* stub) {
566  DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
567  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
568 }
569 
570 
571 void MacroAssembler::TailCallStub(CodeStub* stub) {
572  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
573 }
574 
575 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
576  return has_frame() || !stub->SometimesSetsUpAFrame();
577 }
578 
579 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
580  Register centry) {
581  const Runtime::Function* f = Runtime::FunctionForId(fid);
582  // TODO(1236192): Most runtime routines don't need the number of
583  // arguments passed in because it is constant. At some point we
584  // should remove this need and make the runtime routine entry code
585  // smarter.
586  Set(rax, f->nargs);
587  LoadAddress(rbx, ExternalReference::Create(f));
588  DCHECK(!AreAliased(centry, rax, rbx));
589  addp(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
590  Call(centry);
591 }
592 
593 void MacroAssembler::CallRuntime(const Runtime::Function* f,
594  int num_arguments,
595  SaveFPRegsMode save_doubles) {
596  // If the expected number of arguments of the runtime function is
597  // constant, we check that the actual number of arguments match the
598  // expectation.
599  CHECK(f->nargs < 0 || f->nargs == num_arguments);
600 
601  // TODO(1236192): Most runtime routines don't need the number of
602  // arguments passed in because it is constant. At some point we
603  // should remove this need and make the runtime routine entry code
604  // smarter.
605  Set(rax, num_arguments);
606  LoadAddress(rbx, ExternalReference::Create(f));
607  Handle<Code> code =
608  CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
609  Call(code, RelocInfo::CODE_TARGET);
610 }
611 
612 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
613  // ----------- S t a t e -------------
614  // -- rsp[0] : return address
615  // -- rsp[8] : argument num_arguments - 1
616  // ...
617  // -- rsp[8 * num_arguments] : argument 0 (receiver)
618  //
619  // For runtime functions with variable arguments:
620  // -- rax : number of arguments
621  // -----------------------------------
622 
623  const Runtime::Function* function = Runtime::FunctionForId(fid);
624  DCHECK_EQ(1, function->result_size);
625  if (function->nargs >= 0) {
626  Set(rax, function->nargs);
627  }
628  JumpToExternalReference(ExternalReference::Create(fid));
629 }
630 
631 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
632  bool builtin_exit_frame) {
633  // Set the entry point and jump to the C entry runtime stub.
634  LoadAddress(rbx, ext);
635  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
636  kArgvOnStack, builtin_exit_frame);
637  Jump(code, RelocInfo::CODE_TARGET);
638 }
639 
640 static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
641  rdi, r8, r9, r10, r11};
642 
643 static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
644 
645 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
646  Register exclusion1,
647  Register exclusion2,
648  Register exclusion3) const {
649  int bytes = 0;
650  for (int i = 0; i < kNumberOfSavedRegs; i++) {
651  Register reg = saved_regs[i];
652  if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
653  bytes += kPointerSize;
654  }
655  }
656 
657  // R12 to r15 are callee save on all platforms.
658  if (fp_mode == kSaveFPRegs) {
659  bytes += kDoubleSize * XMMRegister::kNumRegisters;
660  }
661 
662  return bytes;
663 }
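
As a worked example, with no exclusion registers the size computed above (and reserved by PushCallerSaved below) comes out as follows, assuming the x64 constants kPointerSize == 8, kDoubleSize == 8 and XMMRegister::kNumRegisters == 16:

    constexpr int kGPBytes = 11 * 8;     //  88: the 11 entries in saved_regs above
    constexpr int kXMMBytes = 16 * 8;    // 128: added only for kSaveFPRegs
    constexpr int kTotalBytes = kGPBytes + kXMMBytes;
    static_assert(kTotalBytes == 216,
                  "bytes reserved by PushCallerSaved(kSaveFPRegs)");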
664 
665 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
666  Register exclusion2, Register exclusion3) {
667  // We don't allow a GC during a store buffer overflow so there is no need to
668  // store the registers in any particular way, but we do have to store and
669  // restore them.
670  int bytes = 0;
671  for (int i = 0; i < kNumberOfSavedRegs; i++) {
672  Register reg = saved_regs[i];
673  if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
674  pushq(reg);
675  bytes += kPointerSize;
676  }
677  }
678 
679  // R12 to r15 are callee save on all platforms.
680  if (fp_mode == kSaveFPRegs) {
681  int delta = kDoubleSize * XMMRegister::kNumRegisters;
682  subp(rsp, Immediate(delta));
683  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
684  XMMRegister reg = XMMRegister::from_code(i);
685  Movsd(Operand(rsp, i * kDoubleSize), reg);
686  }
687  bytes += delta;
688  }
689 
690  return bytes;
691 }
692 
693 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
694  Register exclusion2, Register exclusion3) {
695  int bytes = 0;
696  if (fp_mode == kSaveFPRegs) {
697  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
698  XMMRegister reg = XMMRegister::from_code(i);
699  Movsd(reg, Operand(rsp, i * kDoubleSize));
700  }
701  int delta = kDoubleSize * XMMRegister::kNumRegisters;
702  addp(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
703  bytes += delta;
704  }
705 
706  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
707  Register reg = saved_regs[i];
708  if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
709  popq(reg);
710  bytes += kPointerSize;
711  }
712  }
713 
714  return bytes;
715 }
716 
717 void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
718  if (CpuFeatures::IsSupported(AVX)) {
719  CpuFeatureScope scope(this, AVX);
720  vcvtss2sd(dst, src, src);
721  } else {
722  cvtss2sd(dst, src);
723  }
724 }
725 
726 void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
727  if (CpuFeatures::IsSupported(AVX)) {
728  CpuFeatureScope scope(this, AVX);
729  vcvtss2sd(dst, dst, src);
730  } else {
731  cvtss2sd(dst, src);
732  }
733 }
734 
735 void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
736  if (CpuFeatures::IsSupported(AVX)) {
737  CpuFeatureScope scope(this, AVX);
738  vcvtsd2ss(dst, src, src);
739  } else {
740  cvtsd2ss(dst, src);
741  }
742 }
743 
744 void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
745  if (CpuFeatures::IsSupported(AVX)) {
746  CpuFeatureScope scope(this, AVX);
747  vcvtsd2ss(dst, dst, src);
748  } else {
749  cvtsd2ss(dst, src);
750  }
751 }
752 
753 void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
754  if (CpuFeatures::IsSupported(AVX)) {
755  CpuFeatureScope scope(this, AVX);
756  vxorpd(dst, dst, dst);
757  vcvtlsi2sd(dst, dst, src);
758  } else {
759  xorpd(dst, dst);
760  cvtlsi2sd(dst, src);
761  }
762 }
763 
764 void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
765  if (CpuFeatures::IsSupported(AVX)) {
766  CpuFeatureScope scope(this, AVX);
767  vxorpd(dst, dst, dst);
768  vcvtlsi2sd(dst, dst, src);
769  } else {
770  xorpd(dst, dst);
771  cvtlsi2sd(dst, src);
772  }
773 }
774 
775 void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
776  if (CpuFeatures::IsSupported(AVX)) {
777  CpuFeatureScope scope(this, AVX);
778  vxorps(dst, dst, dst);
779  vcvtlsi2ss(dst, dst, src);
780  } else {
781  xorps(dst, dst);
782  cvtlsi2ss(dst, src);
783  }
784 }
785 
786 void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
787  if (CpuFeatures::IsSupported(AVX)) {
788  CpuFeatureScope scope(this, AVX);
789  vxorps(dst, dst, dst);
790  vcvtlsi2ss(dst, dst, src);
791  } else {
792  xorps(dst, dst);
793  cvtlsi2ss(dst, src);
794  }
795 }
796 
797 void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
798  if (CpuFeatures::IsSupported(AVX)) {
799  CpuFeatureScope scope(this, AVX);
800  vxorps(dst, dst, dst);
801  vcvtqsi2ss(dst, dst, src);
802  } else {
803  xorps(dst, dst);
804  cvtqsi2ss(dst, src);
805  }
806 }
807 
808 void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
809  if (CpuFeatures::IsSupported(AVX)) {
810  CpuFeatureScope scope(this, AVX);
811  vxorps(dst, dst, dst);
812  vcvtqsi2ss(dst, dst, src);
813  } else {
814  xorps(dst, dst);
815  cvtqsi2ss(dst, src);
816  }
817 }
818 
819 void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
820  if (CpuFeatures::IsSupported(AVX)) {
821  CpuFeatureScope scope(this, AVX);
822  vxorpd(dst, dst, dst);
823  vcvtqsi2sd(dst, dst, src);
824  } else {
825  xorpd(dst, dst);
826  cvtqsi2sd(dst, src);
827  }
828 }
829 
830 void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
831  if (CpuFeatures::IsSupported(AVX)) {
832  CpuFeatureScope scope(this, AVX);
833  vxorpd(dst, dst, dst);
834  vcvtqsi2sd(dst, dst, src);
835  } else {
836  xorpd(dst, dst);
837  cvtqsi2sd(dst, src);
838  }
839 }
840 
841 void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
842  // Zero-extend the 32 bit value to 64 bit.
843  movl(kScratchRegister, src);
844  Cvtqsi2ss(dst, kScratchRegister);
845 }
846 
847 void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
848  // Zero-extend the 32 bit value to 64 bit.
849  movl(kScratchRegister, src);
850  Cvtqsi2ss(dst, kScratchRegister);
851 }
852 
853 void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
854  // Zero-extend the 32 bit value to 64 bit.
855  movl(kScratchRegister, src);
856  Cvtqsi2sd(dst, kScratchRegister);
857 }
858 
859 void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
860  // Zero-extend the 32 bit value to 64 bit.
861  movl(kScratchRegister, src);
862  Cvtqsi2sd(dst, kScratchRegister);
863 }
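
The four Cvtlui2* helpers above rest on one observation: a zero-extended 32-bit value always falls in the non-negative half of the int64 range, so a signed 64-bit convert yields the correct unsigned result. A one-function sketch in plain C++ (not part of this file):

    #include <cstdint>

    float Uint32ToFloat(uint32_t value) {
      // movl zero-extends, so int64_t(value) is always >= 0 and the signed
      // conversion (the job of cvtqsi2ss) cannot misread the sign bit.
      return static_cast<float>(static_cast<int64_t>(value));
    }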
864 
865 void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
866  Label done;
867  Cvtqsi2ss(dst, src);
868  testq(src, src);
869  j(positive, &done, Label::kNear);
870 
871  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
872  if (src != kScratchRegister) movq(kScratchRegister, src);
873  shrq(kScratchRegister, Immediate(1));
874  // The LSB is shifted into CF. If it is set, set the LSB in {kScratchRegister}.
875  Label msb_not_set;
876  j(not_carry, &msb_not_set, Label::kNear);
877  orq(kScratchRegister, Immediate(1));
878  bind(&msb_not_set);
879  Cvtqsi2ss(dst, kScratchRegister);
880  addss(dst, dst);
881  bind(&done);
882 }
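
When the top bit of the unsigned input is set, the code above halves the value (folding the low bit back in as a sticky bit so rounding is unaffected), converts, and doubles the result. A plain C++ sketch of the same idea, using a signed conversion as the only primitive:

    #include <cstdint>

    float Uint64ToFloat(uint64_t value) {
      int64_t as_signed = static_cast<int64_t>(value);
      if (as_signed >= 0) {
        return static_cast<float>(as_signed);    // fits the signed range: done
      }
      // shrq plus "or in the shifted-out bit": halve, but keep the LSB so the
      // final rounding sees the same sticky information.
      uint64_t halved = (value >> 1) | (value & 1);
      float half = static_cast<float>(static_cast<int64_t>(halved));
      return half + half;                        // addss dst, dst
    }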
883 
884 void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
885  movq(kScratchRegister, src);
886  Cvtqui2ss(dst, kScratchRegister);
887 }
888 
889 void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
890  Label done;
891  Cvtqsi2sd(dst, src);
892  testq(src, src);
893  j(positive, &done, Label::kNear);
894 
895  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
896  if (src != kScratchRegister) movq(kScratchRegister, src);
897  shrq(kScratchRegister, Immediate(1));
898  // The LSB is shifted into CF. If it is set, set the LSB in {kScratchRegister}.
899  Label msb_not_set;
900  j(not_carry, &msb_not_set, Label::kNear);
901  orq(kScratchRegister, Immediate(1));
902  bind(&msb_not_set);
903  Cvtqsi2sd(dst, kScratchRegister);
904  addsd(dst, dst);
905  bind(&done);
906 }
907 
908 void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
909  movq(kScratchRegister, src);
910  Cvtqui2sd(dst, kScratchRegister);
911 }
912 
913 void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
914  if (CpuFeatures::IsSupported(AVX)) {
915  CpuFeatureScope scope(this, AVX);
916  vcvttss2si(dst, src);
917  } else {
918  cvttss2si(dst, src);
919  }
920 }
921 
922 void TurboAssembler::Cvttss2si(Register dst, Operand src) {
923  if (CpuFeatures::IsSupported(AVX)) {
924  CpuFeatureScope scope(this, AVX);
925  vcvttss2si(dst, src);
926  } else {
927  cvttss2si(dst, src);
928  }
929 }
930 
931 void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
932  if (CpuFeatures::IsSupported(AVX)) {
933  CpuFeatureScope scope(this, AVX);
934  vcvttsd2si(dst, src);
935  } else {
936  cvttsd2si(dst, src);
937  }
938 }
939 
940 void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
941  if (CpuFeatures::IsSupported(AVX)) {
942  CpuFeatureScope scope(this, AVX);
943  vcvttsd2si(dst, src);
944  } else {
945  cvttsd2si(dst, src);
946  }
947 }
948 
949 void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
950  if (CpuFeatures::IsSupported(AVX)) {
951  CpuFeatureScope scope(this, AVX);
952  vcvttss2siq(dst, src);
953  } else {
954  cvttss2siq(dst, src);
955  }
956 }
957 
958 void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
959  if (CpuFeatures::IsSupported(AVX)) {
960  CpuFeatureScope scope(this, AVX);
961  vcvttss2siq(dst, src);
962  } else {
963  cvttss2siq(dst, src);
964  }
965 }
966 
967 void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
968  if (CpuFeatures::IsSupported(AVX)) {
969  CpuFeatureScope scope(this, AVX);
970  vcvttsd2siq(dst, src);
971  } else {
972  cvttsd2siq(dst, src);
973  }
974 }
975 
976 void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
977  if (CpuFeatures::IsSupported(AVX)) {
978  CpuFeatureScope scope(this, AVX);
979  vcvttsd2siq(dst, src);
980  } else {
981  cvttsd2siq(dst, src);
982  }
983 }
984 
985 namespace {
986 template <typename OperandOrXMMRegister, bool is_double>
987 void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
988  OperandOrXMMRegister src, Label* fail) {
989  Label success;
990  // There does not exist a native float-to-uint instruction, so we have to use
991  // a float-to-int, and postprocess the result.
992  if (is_double) {
993  tasm->Cvttsd2siq(dst, src);
994  } else {
995  tasm->Cvttss2siq(dst, src);
996  }
997  // If the result of the conversion is positive, we are already done.
998  tasm->testq(dst, dst);
999  tasm->j(positive, &success);
1000  // The result of the first conversion was negative, which means that the
1001  // input value was not within the positive int64 range. We subtract 2^63
1002  // and convert it again to see if it is within the uint64 range.
1003  if (is_double) {
1004  tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
1005  tasm->addsd(kScratchDoubleReg, src);
1006  tasm->Cvttsd2siq(dst, kScratchDoubleReg);
1007  } else {
1008  tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
1009  tasm->addss(kScratchDoubleReg, src);
1010  tasm->Cvttss2siq(dst, kScratchDoubleReg);
1011  }
1012  tasm->testq(dst, dst);
1013  // The only possible negative value here is 0x8000000000000000, which is
1014  // used on x64 to indicate an integer overflow.
1015  tasm->j(negative, fail ? fail : &success);
1016  // The input value is within uint64 range and the second conversion worked
1017  // successfully, but we still have to undo the subtraction we did
1018  // earlier.
1019  tasm->Set(kScratchRegister, 0x8000000000000000);
1020  tasm->orq(dst, kScratchRegister);
1021  tasm->bind(&success);
1022 }
1023 } // namespace
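
A minimal sketch of the same algorithm in standalone C++, with explicit range checks standing in for the hardware's overflow indicator (the 0x8000000000000000 result of cvttsd2siq); returning false plays the role of the fail label:

    #include <cstdint>

    bool DoubleToUint64(double input, uint64_t* out) {
      const double kTwoPow63 = 9223372036854775808.0;  // 2^63
      // First conversion: values below 2^63 truncate directly.
      if (input > -1.0 && input < kTwoPow63) {
        *out = static_cast<uint64_t>(static_cast<int64_t>(input));
        return true;
      }
      // Second try: subtract 2^63 and convert again; if that still does not
      // fit, the input is out of uint64 range (or NaN) and we fail.
      double adjusted = input - kTwoPow63;
      if (!(adjusted >= 0.0 && adjusted < kTwoPow63)) return false;
      // Undo the earlier subtraction by or-ing the top bit back in.
      *out = static_cast<uint64_t>(static_cast<int64_t>(adjusted)) |
             0x8000000000000000ull;
      return true;
    }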
1024 
1025 void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* success) {
1026  ConvertFloatToUint64<Operand, true>(this, dst, src, success);
1027 }
1028 
1029 void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* success) {
1030  ConvertFloatToUint64<XMMRegister, true>(this, dst, src, success);
1031 }
1032 
1033 void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* success) {
1034  ConvertFloatToUint64<Operand, false>(this, dst, src, success);
1035 }
1036 
1037 void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* success) {
1038  ConvertFloatToUint64<XMMRegister, false>(this, dst, src, success);
1039 }
1040 
1041 void MacroAssembler::Load(Register dst, Operand src, Representation r) {
1042  DCHECK(!r.IsDouble());
1043  if (r.IsInteger8()) {
1044  movsxbq(dst, src);
1045  } else if (r.IsUInteger8()) {
1046  movzxbl(dst, src);
1047  } else if (r.IsInteger16()) {
1048  movsxwq(dst, src);
1049  } else if (r.IsUInteger16()) {
1050  movzxwl(dst, src);
1051  } else if (r.IsInteger32()) {
1052  movl(dst, src);
1053  } else {
1054  movp(dst, src);
1055  }
1056 }
1057 
1058 void MacroAssembler::Store(Operand dst, Register src, Representation r) {
1059  DCHECK(!r.IsDouble());
1060  if (r.IsInteger8() || r.IsUInteger8()) {
1061  movb(dst, src);
1062  } else if (r.IsInteger16() || r.IsUInteger16()) {
1063  movw(dst, src);
1064  } else if (r.IsInteger32()) {
1065  movl(dst, src);
1066  } else {
1067  if (r.IsHeapObject()) {
1068  AssertNotSmi(src);
1069  } else if (r.IsSmi()) {
1070  AssertSmi(src);
1071  }
1072  movp(dst, src);
1073  }
1074 }
1075 
1076 void TurboAssembler::Set(Register dst, int64_t x) {
1077  if (x == 0) {
1078  xorl(dst, dst);
1079  } else if (is_uint32(x)) {
1080  movl(dst, Immediate(static_cast<uint32_t>(x)));
1081  } else if (is_int32(x)) {
1082  movq(dst, Immediate(static_cast<int32_t>(x)));
1083  } else {
1084  movq(dst, x);
1085  }
1086 }
1087 
1088 void TurboAssembler::Set(Operand dst, intptr_t x) {
1089  if (kPointerSize == kInt64Size) {
1090  if (is_int32(x)) {
1091  movp(dst, Immediate(static_cast<int32_t>(x)));
1092  } else {
1093  Set(kScratchRegister, x);
1094  movp(dst, kScratchRegister);
1095  }
1096  } else {
1097  movp(dst, Immediate(static_cast<int32_t>(x)));
1098  }
1099 }
1100 
1101 
1102 // ----------------------------------------------------------------------------
1103 // Smi tagging, untagging and tag detection.
1104 
1105 Register TurboAssembler::GetSmiConstant(Smi source) {
1106  STATIC_ASSERT(kSmiTag == 0);
1107  int value = source->value();
1108  if (value == 0) {
1109  xorl(kScratchRegister, kScratchRegister);
1110  return kScratchRegister;
1111  }
1112  Move(kScratchRegister, source);
1113  return kScratchRegister;
1114 }
1115 
1116 void TurboAssembler::Move(Register dst, Smi source) {
1117  STATIC_ASSERT(kSmiTag == 0);
1118  int value = source->value();
1119  if (value == 0) {
1120  xorl(dst, dst);
1121  } else {
1122  Move(dst, source.ptr(), RelocInfo::NONE);
1123  }
1124 }
1125 
1126 void TurboAssembler::Move(Register dst, ExternalReference ext) {
1127  if (FLAG_embedded_builtins) {
1128  if (root_array_available_ && options().isolate_independent_code) {
1129  IndirectLoadExternalReference(dst, ext);
1130  return;
1131  }
1132  }
1133  movp(dst, ext.address(), RelocInfo::EXTERNAL_REFERENCE);
1134 }
1135 
1136 void MacroAssembler::SmiTag(Register dst, Register src) {
1137  STATIC_ASSERT(kSmiTag == 0);
1138  if (dst != src) {
1139  movp(dst, src);
1140  }
1141  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1142  shlp(dst, Immediate(kSmiShift));
1143 }
1144 
1145 void TurboAssembler::SmiUntag(Register dst, Register src) {
1146  STATIC_ASSERT(kSmiTag == 0);
1147  if (dst != src) {
1148  movp(dst, src);
1149  }
1150  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1151  sarp(dst, Immediate(kSmiShift));
1152 }
1153 
1154 void TurboAssembler::SmiUntag(Register dst, Operand src) {
1155  if (SmiValuesAre32Bits()) {
1156  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1157  // Sign extend to 64-bit.
1158  movsxlq(dst, dst);
1159  } else {
1160  DCHECK(SmiValuesAre31Bits());
1161  movp(dst, src);
1162  sarp(dst, Immediate(kSmiShift));
1163  }
1164 }
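
With the default x64 configuration of this build, smis carry a 32-bit payload in the upper half of the word (SmiValuesAre32Bits(), kSmiTag == 0, kSmiShift == 32). A plain C++ sketch of the tag/untag pair under those assumptions:

    #include <cstdint>

    int64_t SmiTag(int32_t value) {
      // shlp dst, Immediate(kSmiShift); written as a multiply so the sketch
      // stays well-defined for negative values.
      return static_cast<int64_t>(value) * (int64_t{1} << 32);
    }

    int32_t SmiUntag(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);   // sarp dst, Immediate(kSmiShift)
    }
    // The Operand overload of SmiUntag exploits the same layout: on
    // little-endian x64 the payload is just the 32-bit word at byte offset
    // kSmiShift / kBitsPerByte == 4.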
1165 
1166 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1167  AssertSmi(smi1);
1168  AssertSmi(smi2);
1169  cmpp(smi1, smi2);
1170 }
1171 
1172 void MacroAssembler::SmiCompare(Register dst, Smi src) {
1173  AssertSmi(dst);
1174  Cmp(dst, src);
1175 }
1176 
1177 void MacroAssembler::Cmp(Register dst, Smi src) {
1178  DCHECK_NE(dst, kScratchRegister);
1179  if (src->value() == 0) {
1180  testp(dst, dst);
1181  } else {
1182  Register constant_reg = GetSmiConstant(src);
1183  cmpp(dst, constant_reg);
1184  }
1185 }
1186 
1187 void MacroAssembler::SmiCompare(Register dst, Operand src) {
1188  AssertSmi(dst);
1189  AssertSmi(src);
1190  cmpp(dst, src);
1191 }
1192 
1193 void MacroAssembler::SmiCompare(Operand dst, Register src) {
1194  AssertSmi(dst);
1195  AssertSmi(src);
1196  cmpp(dst, src);
1197 }
1198 
1199 void MacroAssembler::SmiCompare(Operand dst, Smi src) {
1200  AssertSmi(dst);
1201  if (SmiValuesAre32Bits()) {
1202  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1203  } else {
1204  DCHECK(SmiValuesAre31Bits());
1205  cmpl(dst, Immediate(src));
1206  }
1207 }
1208 
1209 void MacroAssembler::Cmp(Operand dst, Smi src) {
1210  // The Operand cannot use the smi register.
1211  Register smi_reg = GetSmiConstant(src);
1212  DCHECK(!dst.AddressUsesRegister(smi_reg));
1213  cmpp(dst, smi_reg);
1214 }
1215 
1216 
1217 Condition TurboAssembler::CheckSmi(Register src) {
1218  STATIC_ASSERT(kSmiTag == 0);
1219  testb(src, Immediate(kSmiTagMask));
1220  return zero;
1221 }
1222 
1223 Condition TurboAssembler::CheckSmi(Operand src) {
1224  STATIC_ASSERT(kSmiTag == 0);
1225  testb(src, Immediate(kSmiTagMask));
1226  return zero;
1227 }
1228 
1229 void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
1230  Label::Distance near_jump) {
1231  Condition smi = CheckSmi(src);
1232  j(smi, on_smi, near_jump);
1233 }
1234 
1235 void MacroAssembler::JumpIfNotSmi(Register src,
1236  Label* on_not_smi,
1237  Label::Distance near_jump) {
1238  Condition smi = CheckSmi(src);
1239  j(NegateCondition(smi), on_not_smi, near_jump);
1240 }
1241 
1242 void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
1243  Label::Distance near_jump) {
1244  Condition smi = CheckSmi(src);
1245  j(NegateCondition(smi), on_not_smi, near_jump);
1246 }
1247 
1248 void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
1249  if (constant->value() != 0) {
1250  if (SmiValuesAre32Bits()) {
1251  addl(Operand(dst, kSmiShift / kBitsPerByte),
1252  Immediate(constant->value()));
1253  } else {
1254  DCHECK(SmiValuesAre31Bits());
1255  if (kPointerSize == kInt64Size) {
1256  // Sign-extend value after addition
1257  movl(kScratchRegister, dst);
1258  addl(kScratchRegister, Immediate(constant));
1259  movsxlq(kScratchRegister, kScratchRegister);
1260  movq(dst, kScratchRegister);
1261  } else {
1262  DCHECK_EQ(kSmiShiftSize, 32);
1263  addp(dst, Immediate(constant));
1264  }
1265  }
1266  }
1267 }
1268 
1269 SmiIndex MacroAssembler::SmiToIndex(Register dst,
1270  Register src,
1271  int shift) {
1272  if (SmiValuesAre32Bits()) {
1273  DCHECK(is_uint6(shift));
1274  // There is a possible optimization if shift is in the range 60-63, but that
1275  // will (and must) never happen.
1276  if (dst != src) {
1277  movp(dst, src);
1278  }
1279  if (shift < kSmiShift) {
1280  sarp(dst, Immediate(kSmiShift - shift));
1281  } else {
1282  shlp(dst, Immediate(shift - kSmiShift));
1283  }
1284  return SmiIndex(dst, times_1);
1285  } else {
1286  DCHECK(SmiValuesAre31Bits());
1287  if (dst != src) {
1288  movp(dst, src);
1289  }
1290  // We have to sign extend the index register to 64-bit as the SMI might
1291  // be negative.
1292  movsxlq(dst, dst);
1293  if (shift < kSmiShift) {
1294  sarq(dst, Immediate(kSmiShift - shift));
1295  } else if (shift != kSmiShift) {
1296  if (shift - kSmiShift <= static_cast<int>(times_8)) {
1297  return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
1298  }
1299  shlq(dst, Immediate(shift - kSmiShift));
1300  }
1301  return SmiIndex(dst, times_1);
1302  }
1303 }
1304 
1305 void TurboAssembler::Push(Smi source) {
1306  intptr_t smi = static_cast<intptr_t>(source.ptr());
1307  if (is_int32(smi)) {
1308  Push(Immediate(static_cast<int32_t>(smi)));
1309  return;
1310  }
1311  int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
1312  int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
1313  if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) {
1314  // This sequence has only 7 bytes, compared to the 12 bytes below.
1315  Push(Immediate(0));
1316  movb(Operand(rsp, first_byte_set),
1317  Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
1318  return;
1319  }
1320  Register constant = GetSmiConstant(source);
1321  Push(constant);
1322 }
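
A worked example of the short encoding above, assuming the 32-bit-payload smi layout: the smi for the value 256 has the raw pattern 0x0000010000000000, whose single non-zero byte is byte 5, so Push emits a two-byte push of zero followed by a five-byte movb into that slot, seven bytes in total. A sketch of the byte-index computation in plain C++ (CountTrailingZeros64/CountLeadingZeros64 are approximated here with GCC/Clang builtins):

    #include <cstdint>

    int main() {
      int64_t raw = static_cast<int64_t>(256) * (int64_t{1} << 32);  // 0x0000010000000000
      int first_byte_set = __builtin_ctzll(raw) / 8;                 // 5
      int last_byte_set = (63 - __builtin_clzll(raw)) / 8;           // 5
      // first == last, so: Push(Immediate(0)) then movb(Operand(rsp, 5), 0x01).
      return first_byte_set == last_byte_set ? 0 : 1;
    }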
1323 
1324 // ----------------------------------------------------------------------------
1325 
1326 void TurboAssembler::Move(Register dst, Register src) {
1327  if (dst != src) {
1328  movp(dst, src);
1329  }
1330 }
1331 
1332 void TurboAssembler::MoveNumber(Register dst, double value) {
1333  int32_t smi;
1334  if (DoubleToSmiInteger(value, &smi)) {
1335  Move(dst, Smi::FromInt(smi));
1336  } else {
1337  movp_heap_number(dst, value);
1338  }
1339 }
1340 
1341 void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
1342  if (src == 0) {
1343  Xorps(dst, dst);
1344  } else {
1345  unsigned nlz = base::bits::CountLeadingZeros(src);
1346  unsigned ntz = base::bits::CountTrailingZeros(src);
1347  unsigned pop = base::bits::CountPopulation(src);
1348  DCHECK_NE(0u, pop);
1349  if (pop + ntz + nlz == 32) {
1350  Pcmpeqd(dst, dst);
1351  if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
1352  if (nlz) Psrld(dst, static_cast<byte>(nlz));
1353  } else {
1354  movl(kScratchRegister, Immediate(src));
1355  Movd(dst, kScratchRegister);
1356  }
1357  }
1358 }
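
When the set bits of the constant are contiguous (pop + ntz + nlz == 32), the code above materializes it from all ones plus two shifts instead of routing it through a general-purpose register. A plain C++ sketch of the identity being used:

    #include <cstdint>

    uint32_t ContiguousMask(unsigned nlz, unsigned ntz) {
      uint32_t bits = 0xFFFFFFFFu;   // Pcmpeqd dst, dst: all ones
      bits <<= (ntz + nlz);          // Pslld: clear the low ntz + nlz bits
      bits >>= nlz;                  // Psrld: slide the run down into place
      return bits;                   // == the original constant
    }
    // Example: 0x00FFFF00 has nlz == 8, ntz == 8, pop == 16, and
    // ContiguousMask(8, 8) reproduces it exactly.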
1359 
1360 void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1361  if (src == 0) {
1362  Xorpd(dst, dst);
1363  } else {
1364  unsigned nlz = base::bits::CountLeadingZeros(src);
1365  unsigned ntz = base::bits::CountTrailingZeros(src);
1366  unsigned pop = base::bits::CountPopulation(src);
1367  DCHECK_NE(0u, pop);
1368  if (pop + ntz + nlz == 64) {
1369  Pcmpeqd(dst, dst);
1370  if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
1371  if (nlz) Psrlq(dst, static_cast<byte>(nlz));
1372  } else {
1373  uint32_t lower = static_cast<uint32_t>(src);
1374  uint32_t upper = static_cast<uint32_t>(src >> 32);
1375  if (upper == 0) {
1376  Move(dst, lower);
1377  } else {
1378  movq(kScratchRegister, src);
1379  Movq(dst, kScratchRegister);
1380  }
1381  }
1382  }
1383 }
1384 
1385 // ----------------------------------------------------------------------------
1386 
1387 void MacroAssembler::Absps(XMMRegister dst) {
1388  Andps(dst, ExternalReferenceAsOperand(
1389  ExternalReference::address_of_float_abs_constant()));
1390 }
1391 
1392 void MacroAssembler::Negps(XMMRegister dst) {
1393  Xorps(dst, ExternalReferenceAsOperand(
1394  ExternalReference::address_of_float_neg_constant()));
1395 }
1396 
1397 void MacroAssembler::Abspd(XMMRegister dst) {
1398  Andps(dst, ExternalReferenceAsOperand(
1399  ExternalReference::address_of_double_abs_constant()));
1400 }
1401 
1402 void MacroAssembler::Negpd(XMMRegister dst) {
1403  Xorps(dst, ExternalReferenceAsOperand(
1404  ExternalReference::address_of_double_neg_constant()));
1405 }
1406 
1407 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1408  AllowDeferredHandleDereference smi_check;
1409  if (source->IsSmi()) {
1410  Cmp(dst, Smi::cast(*source));
1411  } else {
1412  Move(kScratchRegister, Handle<HeapObject>::cast(source));
1413  cmpp(dst, kScratchRegister);
1414  }
1415 }
1416 
1417 void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
1418  AllowDeferredHandleDereference smi_check;
1419  if (source->IsSmi()) {
1420  Cmp(dst, Smi::cast(*source));
1421  } else {
1422  Move(kScratchRegister, Handle<HeapObject>::cast(source));
1423  cmpp(dst, kScratchRegister);
1424  }
1425 }
1426 
1427 void TurboAssembler::Push(Handle<HeapObject> source) {
1428  Move(kScratchRegister, source);
1429  Push(kScratchRegister);
1430 }
1431 
1432 void TurboAssembler::Move(Register result, Handle<HeapObject> object,
1433  RelocInfo::Mode rmode) {
1434  if (FLAG_embedded_builtins) {
1435  if (root_array_available_ && options().isolate_independent_code) {
1436  IndirectLoadConstant(result, object);
1437  return;
1438  }
1439  }
1440  movp(result, object.address(), rmode);
1441 }
1442 
1443 void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
1444  RelocInfo::Mode rmode) {
1445  Move(kScratchRegister, object, rmode);
1446  movp(dst, kScratchRegister);
1447 }
1448 
1449 void TurboAssembler::MoveStringConstant(Register result,
1450  const StringConstantBase* string,
1451  RelocInfo::Mode rmode) {
1452  movp_string(result, string);
1453 }
1454 
1455 void MacroAssembler::Drop(int stack_elements) {
1456  if (stack_elements > 0) {
1457  addp(rsp, Immediate(stack_elements * kPointerSize));
1458  }
1459 }
1460 
1461 
1462 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
1463  Register scratch) {
1464  DCHECK_GT(stack_elements, 0);
1465  if (kPointerSize == kInt64Size && stack_elements == 1) {
1466  popq(MemOperand(rsp, 0));
1467  return;
1468  }
1469 
1470  PopReturnAddressTo(scratch);
1471  Drop(stack_elements);
1472  PushReturnAddressFrom(scratch);
1473 }
1474 
1475 void TurboAssembler::Push(Register src) {
1476  if (kPointerSize == kInt64Size) {
1477  pushq(src);
1478  } else {
1479  // x32 uses 64-bit push for rbp in the prologue.
1480  DCHECK(src.code() != rbp.code());
1481  leal(rsp, Operand(rsp, -4));
1482  movp(Operand(rsp, 0), src);
1483  }
1484 }
1485 
1486 void TurboAssembler::Push(Operand src) {
1487  if (kPointerSize == kInt64Size) {
1488  pushq(src);
1489  } else {
1490  movp(kScratchRegister, src);
1491  leal(rsp, Operand(rsp, -4));
1492  movp(Operand(rsp, 0), kScratchRegister);
1493  }
1494 }
1495 
1496 void MacroAssembler::PushQuad(Operand src) {
1497  if (kPointerSize == kInt64Size) {
1498  pushq(src);
1499  } else {
1500  movp(kScratchRegister, src);
1501  pushq(kScratchRegister);
1502  }
1503 }
1504 
1505 void TurboAssembler::Push(Immediate value) {
1506  if (kPointerSize == kInt64Size) {
1507  pushq(value);
1508  } else {
1509  leal(rsp, Operand(rsp, -4));
1510  movp(Operand(rsp, 0), value);
1511  }
1512 }
1513 
1514 
1515 void MacroAssembler::PushImm32(int32_t imm32) {
1516  if (kPointerSize == kInt64Size) {
1517  pushq_imm32(imm32);
1518  } else {
1519  leal(rsp, Operand(rsp, -4));
1520  movp(Operand(rsp, 0), Immediate(imm32));
1521  }
1522 }
1523 
1524 
1525 void MacroAssembler::Pop(Register dst) {
1526  if (kPointerSize == kInt64Size) {
1527  popq(dst);
1528  } else {
1529  // x32 uses 64-bit pop for rbp in the epilogue.
1530  DCHECK(dst.code() != rbp.code());
1531  movp(dst, Operand(rsp, 0));
1532  leal(rsp, Operand(rsp, 4));
1533  }
1534 }
1535 
1536 void MacroAssembler::Pop(Operand dst) {
1537  if (kPointerSize == kInt64Size) {
1538  popq(dst);
1539  } else {
1540  Register scratch = dst.AddressUsesRegister(kScratchRegister)
1541  ? kRootRegister : kScratchRegister;
1542  movp(scratch, Operand(rsp, 0));
1543  movp(dst, scratch);
1544  leal(rsp, Operand(rsp, 4));
1545  if (scratch == kRootRegister) {
1546  // Restore kRootRegister.
1547  InitializeRootRegister();
1548  }
1549  }
1550 }
1551 
1552 void MacroAssembler::PopQuad(Operand dst) {
1553  if (kPointerSize == kInt64Size) {
1554  popq(dst);
1555  } else {
1556  popq(kScratchRegister);
1557  movp(dst, kScratchRegister);
1558  }
1559 }
1560 
1561 void TurboAssembler::Jump(ExternalReference ext) {
1562  LoadAddress(kScratchRegister, ext);
1563  jmp(kScratchRegister);
1564 }
1565 
1566 void TurboAssembler::Jump(Operand op) {
1567  if (kPointerSize == kInt64Size) {
1568  jmp(op);
1569  } else {
1570  movp(kScratchRegister, op);
1571  jmp(kScratchRegister);
1572  }
1573 }
1574 
1575 void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1576  Move(kScratchRegister, destination, rmode);
1577  jmp(kScratchRegister);
1578 }
1579 
1580 void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
1581  Condition cc) {
1582  // TODO(X64): Inline this
1583  if (FLAG_embedded_builtins) {
1584  if (root_array_available_ && options().isolate_independent_code &&
1585  !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
1586  // Calls to embedded targets are initially generated as standard
1587  // pc-relative calls below. When creating the embedded blob, call offsets
1588  // are patched up to point directly to the off-heap instruction start.
1589  // Note: It is safe to dereference code_object above since code generation
1590  // for builtins and code stubs happens on the main thread.
1591  Label skip;
1592  if (cc != always) {
1593  if (cc == never) return;
1594  j(NegateCondition(cc), &skip, Label::kNear);
1595  }
1596  IndirectLoadConstant(kScratchRegister, code_object);
1597  leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
1598  jmp(kScratchRegister);
1599  bind(&skip);
1600  return;
1601  } else if (options().inline_offheap_trampolines) {
1602  int builtin_index = Builtins::kNoBuiltinId;
1603  if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
1604  Builtins::IsIsolateIndependent(builtin_index)) {
1605  // Inline the trampoline.
1606  RecordCommentForOffHeapTrampoline(builtin_index);
1607  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1608  EmbeddedData d = EmbeddedData::FromBlob();
1609  Address entry = d.InstructionStartOfBuiltin(builtin_index);
1610  Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1611  jmp(kScratchRegister);
1612  return;
1613  }
1614  }
1615  }
1616  j(cc, code_object, rmode);
1617 }
1618 
1619 void MacroAssembler::JumpToInstructionStream(Address entry) {
1620  Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1621  jmp(kOffHeapTrampolineRegister);
1622 }
1623 
1624 void TurboAssembler::Call(ExternalReference ext) {
1625  LoadAddress(kScratchRegister, ext);
1626  call(kScratchRegister);
1627 }
1628 
1629 void TurboAssembler::Call(Operand op) {
1630  if (kPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
1631  call(op);
1632  } else {
1633  movp(kScratchRegister, op);
1634  call(kScratchRegister);
1635  }
1636 }
1637 
1638 void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1639  Move(kScratchRegister, destination, rmode);
1640  call(kScratchRegister);
1641 }
1642 
1643 void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1644  if (FLAG_embedded_builtins) {
1645  if (root_array_available_ && options().isolate_independent_code &&
1646  !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
1647  // Calls to embedded targets are initially generated as standard
1648  // pc-relative calls below. When creating the embedded blob, call offsets
1649  // are patched up to point directly to the off-heap instruction start.
1650  // Note: It is safe to dereference code_object above since code generation
1651  // for builtins and code stubs happens on the main thread.
1652  IndirectLoadConstant(kScratchRegister, code_object);
1653  leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
1654  call(kScratchRegister);
1655  return;
1656  } else if (options().inline_offheap_trampolines) {
1657  int builtin_index = Builtins::kNoBuiltinId;
1658  if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
1659  Builtins::IsIsolateIndependent(builtin_index)) {
1660  // Inline the trampoline.
1661  RecordCommentForOffHeapTrampoline(builtin_index);
1662  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1663  EmbeddedData d = EmbeddedData::FromBlob();
1664  Address entry = d.InstructionStartOfBuiltin(builtin_index);
1665  Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1666  call(kScratchRegister);
1667  return;
1668  }
1669  }
1670  }
1671  DCHECK(RelocInfo::IsCodeTarget(rmode));
1672  call(code_object, rmode);
1673 }
1674 
1675 void TurboAssembler::RetpolineCall(Register reg) {
1676  Label setup_return, setup_target, inner_indirect_branch, capture_spec;
1677 
1678  jmp(&setup_return); // Jump past the entire retpoline below.
1679 
1680  bind(&inner_indirect_branch);
1681  call(&setup_target);
1682 
1683  bind(&capture_spec);
1684  pause();
1685  jmp(&capture_spec);
1686 
1687  bind(&setup_target);
1688  movq(Operand(rsp, 0), reg);
1689  ret(0);
1690 
1691  bind(&setup_return);
1692  call(&inner_indirect_branch); // Callee will return after this instruction.
1693 }
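
On the architectural (non-speculative) path this is a standard retpoline: the call at setup_return pushes the real return address and enters inner_indirect_branch, whose call pushes the address of capture_spec and lands on setup_target; setup_target then overwrites that just-pushed slot with the real target in reg, so the ret transfers control to reg. The return-stack predictor, however, still predicts that the ret goes back to capture_spec, so speculative execution spins harmlessly in the pause/jmp loop instead of consulting the attacker-trainable indirect branch predictor.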
1694 
1695 void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
1696  Move(kScratchRegister, destination, rmode);
1697  RetpolineCall(kScratchRegister);
1698 }
1699 
1700 void TurboAssembler::RetpolineJump(Register reg) {
1701  Label setup_target, capture_spec;
1702 
1703  call(&setup_target);
1704 
1705  bind(&capture_spec);
1706  pause();
1707  jmp(&capture_spec);
1708 
1709  bind(&setup_target);
1710  movq(Operand(rsp, 0), reg);
1711  ret(0);
1712 }
1713 
1714 void TurboAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
1715  if (imm8 == 0) {
1716  Movd(dst, src);
1717  return;
1718  }
1719  if (CpuFeatures::IsSupported(SSE4_1)) {
1720  CpuFeatureScope sse_scope(this, SSE4_1);
1721  pextrd(dst, src, imm8);
1722  return;
1723  }
1724  DCHECK_EQ(1, imm8);
1725  movq(dst, src);
1726  shrq(dst, Immediate(32));
1727 }
1728 
1729 void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
1730  if (CpuFeatures::IsSupported(SSE4_1)) {
1731  CpuFeatureScope sse_scope(this, SSE4_1);
1732  pinsrd(dst, src, imm8);
1733  return;
1734  }
1735  Movd(kScratchDoubleReg, src);
1736  if (imm8 == 1) {
1737  punpckldq(dst, kScratchDoubleReg);
1738  } else {
1739  DCHECK_EQ(0, imm8);
1740  Movss(dst, kScratchDoubleReg);
1741  }
1742 }
1743 
1744 void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
1745  if (CpuFeatures::IsSupported(SSE4_1)) {
1746  CpuFeatureScope sse_scope(this, SSE4_1);
1747  pinsrd(dst, src, imm8);
1748  return;
1749  }
1750  Movd(kScratchDoubleReg, src);
1751  if (imm8 == 1) {
1752  punpckldq(dst, kScratchDoubleReg);
1753  } else {
1754  DCHECK_EQ(0, imm8);
1755  Movss(dst, kScratchDoubleReg);
1756  }
1757 }
1758 
1759 void TurboAssembler::Lzcntl(Register dst, Register src) {
1760  if (CpuFeatures::IsSupported(LZCNT)) {
1761  CpuFeatureScope scope(this, LZCNT);
1762  lzcntl(dst, src);
1763  return;
1764  }
1765  Label not_zero_src;
1766  bsrl(dst, src);
1767  j(not_zero, &not_zero_src, Label::kNear);
1768  Set(dst, 63); // 63^31 == 32
1769  bind(&not_zero_src);
1770  xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
1771 }
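
// --- Illustrative aside (not part of the original source) ---------------
// The BSR-based fallback above, expressed in plain C++. bsrl yields the
// index of the highest set bit; xor-ing an index in [0, 31] with 31 gives
// 31 - index, which is the leading-zero count. Pre-seeding the destination
// with 63 makes the zero case come out as 63 ^ 31 == 32, matching lzcnt's
// defined result for 0. The helper name is illustrative only.
static uint32_t EmulatedLzcnt32(uint32_t value) {
  uint32_t bit_index = 63;                       // Set(dst, 63) path
  for (int i = 31; i >= 0; --i) {                // what bsrl computes
    if (value & (1u << i)) {
      bit_index = static_cast<uint32_t>(i);
      break;
    }
  }
  return bit_index ^ 31u;                        // 31 - index when value != 0
}
// -------------------------------------------------------------------------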
1772 
1773 void TurboAssembler::Lzcntl(Register dst, Operand src) {
1774  if (CpuFeatures::IsSupported(LZCNT)) {
1775  CpuFeatureScope scope(this, LZCNT);
1776  lzcntl(dst, src);
1777  return;
1778  }
1779  Label not_zero_src;
1780  bsrl(dst, src);
1781  j(not_zero, &not_zero_src, Label::kNear);
1782  Set(dst, 63); // 63^31 == 32
1783  bind(&not_zero_src);
1784  xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
1785 }
1786 
1787 void TurboAssembler::Lzcntq(Register dst, Register src) {
1788  if (CpuFeatures::IsSupported(LZCNT)) {
1789  CpuFeatureScope scope(this, LZCNT);
1790  lzcntq(dst, src);
1791  return;
1792  }
1793  Label not_zero_src;
1794  bsrq(dst, src);
1795  j(not_zero, &not_zero_src, Label::kNear);
1796  Set(dst, 127); // 127^63 == 64
1797  bind(&not_zero_src);
1798  xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
1799 }
1800 
1801 void TurboAssembler::Lzcntq(Register dst, Operand src) {
1802  if (CpuFeatures::IsSupported(LZCNT)) {
1803  CpuFeatureScope scope(this, LZCNT);
1804  lzcntq(dst, src);
1805  return;
1806  }
1807  Label not_zero_src;
1808  bsrq(dst, src);
1809  j(not_zero, &not_zero_src, Label::kNear);
1810  Set(dst, 127); // 127^63 == 64
1811  bind(&not_zero_src);
1812  xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
1813 }
1814 
1815 void TurboAssembler::Tzcntq(Register dst, Register src) {
1816  if (CpuFeatures::IsSupported(BMI1)) {
1817  CpuFeatureScope scope(this, BMI1);
1818  tzcntq(dst, src);
1819  return;
1820  }
1821  Label not_zero_src;
1822  bsfq(dst, src);
1823  j(not_zero, &not_zero_src, Label::kNear);
1824  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
1825  Set(dst, 64);
1826  bind(&not_zero_src);
1827 }
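
// --- Illustrative aside (not part of the original source) ---------------
// The BSF-based fallback above, in plain C++: bsf reports the index of the
// lowest set bit, which is exactly the trailing-zero count for non-zero
// inputs; the zero case is handled explicitly because bsf(0) leaves its
// destination undefined. The helper name is illustrative only.
static uint32_t EmulatedTzcnt64(uint64_t value) {
  if (value == 0) return 64;                             // Set(dst, 64) path
  uint32_t index = 0;
  while (!(value & (uint64_t{1} << index))) ++index;     // what bsfq computes
  return index;
}
// -------------------------------------------------------------------------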
1828 
1829 void TurboAssembler::Tzcntq(Register dst, Operand src) {
1830  if (CpuFeatures::IsSupported(BMI1)) {
1831  CpuFeatureScope scope(this, BMI1);
1832  tzcntq(dst, src);
1833  return;
1834  }
1835  Label not_zero_src;
1836  bsfq(dst, src);
1837  j(not_zero, &not_zero_src, Label::kNear);
1838  // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
1839  Set(dst, 64);
1840  bind(&not_zero_src);
1841 }
1842 
1843 void TurboAssembler::Tzcntl(Register dst, Register src) {
1844  if (CpuFeatures::IsSupported(BMI1)) {
1845  CpuFeatureScope scope(this, BMI1);
1846  tzcntl(dst, src);
1847  return;
1848  }
1849  Label not_zero_src;
1850  bsfl(dst, src);
1851  j(not_zero, &not_zero_src, Label::kNear);
1852  Set(dst, 32); // The result of tzcnt is 32 if src = 0.
1853  bind(&not_zero_src);
1854 }
1855 
1856 void TurboAssembler::Tzcntl(Register dst, Operand src) {
1857  if (CpuFeatures::IsSupported(BMI1)) {
1858  CpuFeatureScope scope(this, BMI1);
1859  tzcntl(dst, src);
1860  return;
1861  }
1862  Label not_zero_src;
1863  bsfl(dst, src);
1864  j(not_zero, &not_zero_src, Label::kNear);
1865  Set(dst, 32); // The result of tzcnt is 32 if src = 0.
1866  bind(&not_zero_src);
1867 }
1868 
1869 void TurboAssembler::Popcntl(Register dst, Register src) {
1870  if (CpuFeatures::IsSupported(POPCNT)) {
1871  CpuFeatureScope scope(this, POPCNT);
1872  popcntl(dst, src);
1873  return;
1874  }
1875  UNREACHABLE();
1876 }
1877 
1878 void TurboAssembler::Popcntl(Register dst, Operand src) {
1879  if (CpuFeatures::IsSupported(POPCNT)) {
1880  CpuFeatureScope scope(this, POPCNT);
1881  popcntl(dst, src);
1882  return;
1883  }
1884  UNREACHABLE();
1885 }
1886 
1887 void TurboAssembler::Popcntq(Register dst, Register src) {
1888  if (CpuFeatures::IsSupported(POPCNT)) {
1889  CpuFeatureScope scope(this, POPCNT);
1890  popcntq(dst, src);
1891  return;
1892  }
1893  UNREACHABLE();
1894 }
1895 
1896 void TurboAssembler::Popcntq(Register dst, Operand src) {
1897  if (CpuFeatures::IsSupported(POPCNT)) {
1898  CpuFeatureScope scope(this, POPCNT);
1899  popcntq(dst, src);
1900  return;
1901  }
1902  UNREACHABLE();
1903 }
1904 
1905 
1906 void MacroAssembler::Pushad() {
1907  Push(rax);
1908  Push(rcx);
1909  Push(rdx);
1910  Push(rbx);
1911  // Not pushing rsp or rbp.
1912  Push(rsi);
1913  Push(rdi);
1914  Push(r8);
1915  Push(r9);
1916  // r10 is kScratchRegister.
1917  Push(r11);
1918  Push(r12);
1919  // r13 is kRootRegister.
1920  Push(r14);
1921  Push(r15);
1922  STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
1923  // Use lea for symmetry with Popad.
1924  int sp_delta =
1925  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
1926  leap(rsp, Operand(rsp, -sp_delta));
1927 }
1928 
1929 
1930 void MacroAssembler::Popad() {
1931  // Popad must not change the flags, so use lea instead of addq.
1932  int sp_delta =
1933  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
1934  leap(rsp, Operand(rsp, sp_delta));
1935  Pop(r15);
1936  Pop(r14);
1937  Pop(r12);
1938  Pop(r11);
1939  Pop(r9);
1940  Pop(r8);
1941  Pop(rdi);
1942  Pop(rsi);
1943  Pop(rbx);
1944  Pop(rdx);
1945  Pop(rcx);
1946  Pop(rax);
1947 }
1948 
1949 
1950 // Order general registers are pushed by Pushad:
1951 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
1952 const int
1953 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
1954  0,
1955  1,
1956  2,
1957  3,
1958  -1,
1959  -1,
1960  4,
1961  5,
1962  6,
1963  7,
1964  -1,
1965  8,
1966  9,
1967  -1,
1968  10,
1969  11
1970 };
1971 
1972 void MacroAssembler::PushStackHandler() {
1973  // Adjust this code if the stack handler layout asserted below changes.
1974  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1975  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1976 
1977  Push(Immediate(0)); // Padding.
1978 
1979  // Link the current handler as the next handler.
1980  ExternalReference handler_address =
1981  ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
1982  Push(ExternalReferenceAsOperand(handler_address));
1983 
1984  // Set this new handler as the current one.
1985  movp(ExternalReferenceAsOperand(handler_address), rsp);
1986 }
1987 
1988 
1989 void MacroAssembler::PopStackHandler() {
1990  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1991  ExternalReference handler_address =
1992  ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
1993  Pop(ExternalReferenceAsOperand(handler_address));
1994  addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
1995 }
1996 
1997 void TurboAssembler::Ret() { ret(0); }
1998 
1999 void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
2000  if (is_uint16(bytes_dropped)) {
2001  ret(bytes_dropped);
2002  } else {
2003  PopReturnAddressTo(scratch);
2004  addp(rsp, Immediate(bytes_dropped));
2005  PushReturnAddressFrom(scratch);
2006  ret(0);
2007  }
2008 }
2009 
2010 void MacroAssembler::CmpObjectType(Register heap_object,
2011  InstanceType type,
2012  Register map) {
2013  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2014  CmpInstanceType(map, type);
2015 }
2016 
2017 
2018 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2019  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
2020 }
2021 
2022 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
2023  XMMRegister scratch, Label* lost_precision,
2024  Label* is_nan, Label::Distance dst) {
2025  Cvttsd2si(result_reg, input_reg);
2026  Cvtlsi2sd(kScratchDoubleReg, result_reg);
2027  Ucomisd(kScratchDoubleReg, input_reg);
2028  j(not_equal, lost_precision, dst);
2029  j(parity_even, is_nan, dst); // NaN.
2030 }
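
// --- Illustrative aside (not part of the original source) ---------------
// The check performed above, in scalar form: truncate, convert back and
// compare. If the round trip changes the value, precision was lost; NaN
// compares unordered, which the parity_even jump catches. Note that a C++
// cast of an out-of-range or NaN double is undefined behaviour, whereas
// cvttsd2si produces the "integer indefinite" value, so this sketch assumes
// in-range inputs apart from the explicit NaN check. Name is illustrative.
static bool TryDoubleToInt32(double input, int32_t* result) {
  if (input != input) return false;                  // NaN (parity_even path)
  int32_t truncated = static_cast<int32_t>(input);   // Cvttsd2si
  if (static_cast<double>(truncated) != input) return false;  // lost precision
  *result = truncated;
  return true;
}
// -------------------------------------------------------------------------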
2031 
2032 
2033 void MacroAssembler::AssertNotSmi(Register object) {
2034  if (emit_debug_code()) {
2035  Condition is_smi = CheckSmi(object);
2036  Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
2037  }
2038 }
2039 
2040 
2041 void MacroAssembler::AssertSmi(Register object) {
2042  if (emit_debug_code()) {
2043  Condition is_smi = CheckSmi(object);
2044  Check(is_smi, AbortReason::kOperandIsNotASmi);
2045  }
2046 }
2047 
2048 void MacroAssembler::AssertSmi(Operand object) {
2049  if (emit_debug_code()) {
2050  Condition is_smi = CheckSmi(object);
2051  Check(is_smi, AbortReason::kOperandIsNotASmi);
2052  }
2053 }
2054 
2055 void TurboAssembler::AssertZeroExtended(Register int32_register) {
2056  if (emit_debug_code()) {
2057  DCHECK_NE(int32_register, kScratchRegister);
2058  movq(kScratchRegister, int64_t{0x0000000100000000});
2059  cmpq(kScratchRegister, int32_register);
2060  Check(above_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
2061  }
2062 }
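
// --- Illustrative aside (not part of the original source) ---------------
// The property asserted above: a correctly zero-extended 32-bit value,
// viewed as a 64-bit unsigned integer, is strictly below 2^32 (the constant
// loaded into kScratchRegister). The helper name is illustrative only.
static bool IsZeroExtended32Bit(uint64_t register_value) {
  return register_value < (uint64_t{1} << 32);
}
// -------------------------------------------------------------------------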
2063 
2064 void MacroAssembler::AssertConstructor(Register object) {
2065  if (emit_debug_code()) {
2066  testb(object, Immediate(kSmiTagMask));
2067  Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
2068  Push(object);
2069  movq(object, FieldOperand(object, HeapObject::kMapOffset));
2070  testb(FieldOperand(object, Map::kBitFieldOffset),
2071  Immediate(Map::IsConstructorBit::kMask));
2072  Pop(object);
2073  Check(not_zero, AbortReason::kOperandIsNotAConstructor);
2074  }
2075 }
2076 
2077 void MacroAssembler::AssertFunction(Register object) {
2078  if (emit_debug_code()) {
2079  testb(object, Immediate(kSmiTagMask));
2080  Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
2081  Push(object);
2082  CmpObjectType(object, JS_FUNCTION_TYPE, object);
2083  Pop(object);
2084  Check(equal, AbortReason::kOperandIsNotAFunction);
2085  }
2086 }
2087 
2088 
2089 void MacroAssembler::AssertBoundFunction(Register object) {
2090  if (emit_debug_code()) {
2091  testb(object, Immediate(kSmiTagMask));
2092  Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
2093  Push(object);
2094  CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
2095  Pop(object);
2096  Check(equal, AbortReason::kOperandIsNotABoundFunction);
2097  }
2098 }
2099 
2100 void MacroAssembler::AssertGeneratorObject(Register object) {
2101  if (!emit_debug_code()) return;
2102  testb(object, Immediate(kSmiTagMask));
2103  Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2104 
2105  // Load map
2106  Register map = object;
2107  Push(object);
2108  movp(map, FieldOperand(object, HeapObject::kMapOffset));
2109 
2110  Label do_check;
2111  // Check if JSGeneratorObject
2112  CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
2113  j(equal, &do_check);
2114 
2115  // Check if JSAsyncFunctionObject
2116  CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
2117  j(equal, &do_check);
2118 
2119  // Check if JSAsyncGeneratorObject
2120  CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
2121 
2122  bind(&do_check);
2123  // Restore generator object to register and perform assertion
2124  Pop(object);
2125  Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
2126 }
2127 
2128 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
2129  if (emit_debug_code()) {
2130  Label done_checking;
2131  AssertNotSmi(object);
2132  Cmp(object, isolate()->factory()->undefined_value());
2133  j(equal, &done_checking);
2134  Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
2135  Assert(equal, AbortReason::kExpectedUndefinedOrCell);
2136  bind(&done_checking);
2137  }
2138 }
2139 
2140 void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
2141  cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
2142  j(equal, target_if_cleared);
2143 
2144  andp(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
2145 }
2146 
2147 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2148  DCHECK_GT(value, 0);
2149  if (FLAG_native_code_counters && counter->Enabled()) {
2150  Operand counter_operand =
2151  ExternalReferenceAsOperand(ExternalReference::Create(counter));
2152  if (value == 1) {
2153  incl(counter_operand);
2154  } else {
2155  addl(counter_operand, Immediate(value));
2156  }
2157  }
2158 }
2159 
2160 
2161 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2162  DCHECK_GT(value, 0);
2163  if (FLAG_native_code_counters && counter->Enabled()) {
2164  Operand counter_operand =
2165  ExternalReferenceAsOperand(ExternalReference::Create(counter));
2166  if (value == 1) {
2167  decl(counter_operand);
2168  } else {
2169  subl(counter_operand, Immediate(value));
2170  }
2171  }
2172 }
2173 
2174 void MacroAssembler::MaybeDropFrames() {
2175  // Check whether we need to drop frames to restart a function on the stack.
2176  ExternalReference restart_fp =
2177  ExternalReference::debug_restart_fp_address(isolate());
2178  Load(rbx, restart_fp);
2179  testp(rbx, rbx);
2180 
2181  Label dont_drop;
2182  j(zero, &dont_drop, Label::kNear);
2183  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
2184 
2185  bind(&dont_drop);
2186 }
2187 
2188 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2189  Register caller_args_count_reg,
2190  Register scratch0, Register scratch1) {
2191 #if DEBUG
2192  if (callee_args_count.is_reg()) {
2193  DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2194  scratch1));
2195  } else {
2196  DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2197  }
2198 #endif
2199 
2200  // Calculate the destination address where we will put the return address
2201  // after we drop the current frame.
2202  Register new_sp_reg = scratch0;
2203  if (callee_args_count.is_reg()) {
2204  subp(caller_args_count_reg, callee_args_count.reg());
2205  leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
2206  StandardFrameConstants::kCallerPCOffset));
2207  } else {
2208  leap(new_sp_reg, Operand(rbp, caller_args_count_reg, times_pointer_size,
2209  StandardFrameConstants::kCallerPCOffset -
2210  callee_args_count.immediate() * kPointerSize));
2211  }
2212 
2213  if (FLAG_debug_code) {
2214  cmpp(rsp, new_sp_reg);
2215  Check(below, AbortReason::kStackAccessBelowStackPointer);
2216  }
2217 
2218  // Copy return address from caller's frame to current frame's return address
2219  // slot so it is not trashed, and let the copy loop below move it to the
2220  // right place.
2221  Register tmp_reg = scratch1;
2222  movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
2223  movp(Operand(rsp, 0), tmp_reg);
2224 
2225  // Restore caller's frame pointer now as it could be overwritten by
2226  // the copying loop.
2227  movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2228 
2229  // +2 here is to copy both receiver and return address.
2230  Register count_reg = caller_args_count_reg;
2231  if (callee_args_count.is_reg()) {
2232  leap(count_reg, Operand(callee_args_count.reg(), 2));
2233  } else {
2234  movp(count_reg, Immediate(callee_args_count.immediate() + 2));
2235  // TODO(ishell): Unroll copying loop for small immediate values.
2236  }
2237 
2238  // Now copy the callee's arguments to the caller frame, going backwards to
2239  // avoid corrupting them (the source and destination areas can overlap).
2240  Label loop, entry;
2241  jmp(&entry, Label::kNear);
2242  bind(&loop);
2243  decp(count_reg);
2244  movp(tmp_reg, Operand(rsp, count_reg, times_pointer_size, 0));
2245  movp(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
2246  bind(&entry);
2247  cmpp(count_reg, Immediate(0));
2248  j(not_equal, &loop, Label::kNear);
2249 
2250  // Leave current frame.
2251  movp(rsp, new_sp_reg);
2252 }
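
// --- Illustrative aside (not part of the original source) ---------------
// The copy loop above, in plain C++ with word-sized slots: the callee's
// arguments plus the receiver and the return address (hence the "+ 2") are
// copied onto the caller's argument area from the highest slot downwards,
// because the two areas may overlap and the destination lies at higher
// addresses. The helper name is illustrative only.
static void CopyForTailCall(uint64_t* current_sp, uint64_t* new_sp,
                            int callee_arg_count) {
  int count = callee_arg_count + 2;  // arguments + receiver + return address
  for (int slot = count - 1; slot >= 0; --slot) {
    new_sp[slot] = current_sp[slot];  // backwards copy, as in the loop above
  }
}
// -------------------------------------------------------------------------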
2253 
2254 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2255  const ParameterCount& actual,
2256  InvokeFlag flag) {
2257  movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2258  movzxwq(rbx,
2259  FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
2260 
2261  ParameterCount expected(rbx);
2262  InvokeFunction(function, new_target, expected, actual, flag);
2263 }
2264 
2265 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2266  const ParameterCount& expected,
2267  const ParameterCount& actual,
2268  InvokeFlag flag) {
2269  DCHECK(function == rdi);
2270  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
2271  InvokeFunctionCode(rdi, new_target, expected, actual, flag);
2272 }
2273 
2274 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2275  const ParameterCount& expected,
2276  const ParameterCount& actual,
2277  InvokeFlag flag) {
2278  // You can't call a function without a valid frame.
2279  DCHECK(flag == JUMP_FUNCTION || has_frame());
2280  DCHECK(function == rdi);
2281  DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
2282 
2283  // On function call, call into the debugger if necessary.
2284  CheckDebugHook(function, new_target, expected, actual);
2285 
2286  // Clear the new.target register if not given.
2287  if (!new_target.is_valid()) {
2288  LoadRoot(rdx, RootIndex::kUndefinedValue);
2289  }
2290 
2291  Label done;
2292  bool definitely_mismatches = false;
2293  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
2294  Label::kNear);
2295  if (!definitely_mismatches) {
2296  // We call indirectly through the code field in the function to
2297  // allow recompilation to take effect without changing any of the
2298  // call sites.
2299  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
2300  movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
2301  addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
2302  if (flag == CALL_FUNCTION) {
2303  call(rcx);
2304  } else {
2305  DCHECK(flag == JUMP_FUNCTION);
2306  jmp(rcx);
2307  }
2308  bind(&done);
2309  }
2310 }
2311 
2312 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2313  const ParameterCount& actual, Label* done,
2314  bool* definitely_mismatches,
2315  InvokeFlag flag,
2316  Label::Distance near_jump) {
2317  bool definitely_matches = false;
2318  *definitely_mismatches = false;
2319  Label invoke;
2320  if (expected.is_immediate()) {
2321  DCHECK(actual.is_immediate());
2322  Set(rax, actual.immediate());
2323  if (expected.immediate() == actual.immediate()) {
2324  definitely_matches = true;
2325  } else {
2326  if (expected.immediate() ==
2327  SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2328  // Don't worry about adapting arguments for built-ins that
2329  // don't want that done. Skip the adaptation code by making it look
2330  // like we have a match between expected and actual number of
2331  // arguments.
2332  definitely_matches = true;
2333  } else {
2334  *definitely_mismatches = true;
2335  Set(rbx, expected.immediate());
2336  }
2337  }
2338  } else {
2339  if (actual.is_immediate()) {
2340  // Expected is in register, actual is immediate. This is the
2341  // case when we invoke function values without going through the
2342  // IC mechanism.
2343  Set(rax, actual.immediate());
2344  cmpp(expected.reg(), Immediate(actual.immediate()));
2345  j(equal, &invoke, Label::kNear);
2346  DCHECK(expected.reg() == rbx);
2347  } else if (expected.reg() != actual.reg()) {
2348  // Both expected and actual are in (different) registers. This
2349  // is the case when we invoke functions using call and apply.
2350  cmpp(expected.reg(), actual.reg());
2351  j(equal, &invoke, Label::kNear);
2352  DCHECK(actual.reg() == rax);
2353  DCHECK(expected.reg() == rbx);
2354  } else {
2355  definitely_matches = true;
2356  Move(rax, actual.reg());
2357  }
2358  }
2359 
2360  if (!definitely_matches) {
2361  Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
2362  if (flag == CALL_FUNCTION) {
2363  Call(adaptor, RelocInfo::CODE_TARGET);
2364  if (!*definitely_mismatches) {
2365  jmp(done, near_jump);
2366  }
2367  } else {
2368  Jump(adaptor, RelocInfo::CODE_TARGET);
2369  }
2370  bind(&invoke);
2371  }
2372 }
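
// --- Illustrative aside (not part of the original source) ---------------
// A hedged sketch of the decision made above for the immediate/immediate
// case: matching counts, or an expected count equal to the don't-adapt
// sentinel, invoke the code directly; any other mismatch goes through the
// ArgumentsAdaptorTrampoline. The helper name is illustrative only.
static bool NeedsArgumentsAdaptor(int expected_count, int actual_count,
                                  int dont_adapt_sentinel) {
  if (expected_count == actual_count) return false;
  if (expected_count == dont_adapt_sentinel) return false;
  return true;
}
// -------------------------------------------------------------------------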
2373 
2374 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
2375  const ParameterCount& expected,
2376  const ParameterCount& actual) {
2377  Label skip_hook;
2378  ExternalReference debug_hook_active =
2379  ExternalReference::debug_hook_on_function_call_address(isolate());
2380  Operand debug_hook_active_operand =
2381  ExternalReferenceAsOperand(debug_hook_active);
2382  cmpb(debug_hook_active_operand, Immediate(0));
2383  j(equal, &skip_hook);
2384 
2385  {
2386  FrameScope frame(this,
2387  has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2388  if (expected.is_reg()) {
2389  SmiTag(expected.reg(), expected.reg());
2390  Push(expected.reg());
2391  }
2392  if (actual.is_reg()) {
2393  SmiTag(actual.reg(), actual.reg());
2394  Push(actual.reg());
2395  SmiUntag(actual.reg(), actual.reg());
2396  }
2397  if (new_target.is_valid()) {
2398  Push(new_target);
2399  }
2400  Push(fun);
2401  Push(fun);
2402  Push(StackArgumentsAccessor(rbp, actual).GetReceiverOperand());
2403  CallRuntime(Runtime::kDebugOnFunctionCall);
2404  Pop(fun);
2405  if (new_target.is_valid()) {
2406  Pop(new_target);
2407  }
2408  if (actual.is_reg()) {
2409  Pop(actual.reg());
2410  SmiUntag(actual.reg(), actual.reg());
2411  }
2412  if (expected.is_reg()) {
2413  Pop(expected.reg());
2414  SmiUntag(expected.reg(), expected.reg());
2415  }
2416  }
2417  bind(&skip_hook);
2418 }
2419 
2420 void TurboAssembler::StubPrologue(StackFrame::Type type) {
2421  pushq(rbp); // Caller's frame pointer.
2422  movp(rbp, rsp);
2423  Push(Immediate(StackFrame::TypeToMarker(type)));
2424 }
2425 
2426 void TurboAssembler::Prologue() {
2427  pushq(rbp); // Caller's frame pointer.
2428  movp(rbp, rsp);
2429  Push(rsi); // Callee's context.
2430  Push(rdi); // Callee's JS function.
2431 }
2432 
2433 void TurboAssembler::EnterFrame(StackFrame::Type type) {
2434  pushq(rbp);
2435  movp(rbp, rsp);
2436  Push(Immediate(StackFrame::TypeToMarker(type)));
2437 }
2438 
2439 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2440  if (emit_debug_code()) {
2441  cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
2442  Immediate(StackFrame::TypeToMarker(type)));
2443  Check(equal, AbortReason::kStackFrameTypesMustMatch);
2444  }
2445  movp(rsp, rbp);
2446  popq(rbp);
2447 }
2448 
2449 void MacroAssembler::EnterExitFramePrologue(bool save_rax,
2450  StackFrame::Type frame_type) {
2451  DCHECK(frame_type == StackFrame::EXIT ||
2452  frame_type == StackFrame::BUILTIN_EXIT);
2453 
2454  // Set up the frame structure on the stack.
2455  // All constants are relative to the frame pointer of the exit frame.
2456  DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
2457  ExitFrameConstants::kCallerSPDisplacement);
2458  DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
2459  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
2460  pushq(rbp);
2461  movp(rbp, rsp);
2462 
2463  // Reserve room for entry stack pointer and push the code object.
2464  Push(Immediate(StackFrame::TypeToMarker(frame_type)));
2465  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
2466  Push(Immediate(0)); // Saved entry sp, patched before call.
2467  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
2468  Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
2469 
2470  // Save the frame pointer and the context in top.
2471  if (save_rax) {
2472  movp(r14, rax); // Backup rax in callee-save register.
2473  }
2474 
2475  Store(
2476  ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
2477  rbp);
2478  Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
2479  rsi);
2480  Store(
2481  ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
2482  rbx);
2483 }
2484 
2485 
2486 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
2487  bool save_doubles) {
2488 #ifdef _WIN64
2489  const int kShadowSpace = 4;
2490  arg_stack_space += kShadowSpace;
2491 #endif
2492  // Optionally save all XMM registers.
2493  if (save_doubles) {
2494  int space = XMMRegister::kNumRegisters * kDoubleSize +
2495  arg_stack_space * kRegisterSize;
2496  subp(rsp, Immediate(space));
2497  int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2498  const RegisterConfiguration* config = RegisterConfiguration::Default();
2499  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2500  DoubleRegister reg =
2501  DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2502  Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
2503  }
2504  } else if (arg_stack_space > 0) {
2505  subp(rsp, Immediate(arg_stack_space * kRegisterSize));
2506  }
2507 
2508  // Get the required frame alignment for the OS.
2509  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
2510  if (kFrameAlignment > 0) {
2511  DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
2512  DCHECK(is_int8(kFrameAlignment));
2513  andp(rsp, Immediate(-kFrameAlignment));
2514  }
2515 
2516  // Patch the saved entry sp.
2517  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2518 }
2519 
2520 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
2521  StackFrame::Type frame_type) {
2522  EnterExitFramePrologue(true, frame_type);
2523 
2524  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
2525  // so it must be retained across the C-call.
2526  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
2527  leap(r15, Operand(rbp, r14, times_pointer_size, offset));
2528 
2529  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
2530 }
2531 
2532 
2533 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
2534  EnterExitFramePrologue(false, StackFrame::EXIT);
2535  EnterExitFrameEpilogue(arg_stack_space, false);
2536 }
2537 
2538 
2539 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
2540  // Registers:
2541  // r15 : argv
2542  if (save_doubles) {
2543  int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2544  const RegisterConfiguration* config = RegisterConfiguration::Default();
2545  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2546  DoubleRegister reg =
2547  DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2548  Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
2549  }
2550  }
2551 
2552  if (pop_arguments) {
2553  // Get the return address from the stack and restore the frame pointer.
2554  movp(rcx, Operand(rbp, kFPOnStackSize));
2555  movp(rbp, Operand(rbp, 0 * kPointerSize));
2556 
2557  // Drop everything up to and including the arguments and the receiver
2558  // from the caller stack.
2559  leap(rsp, Operand(r15, 1 * kPointerSize));
2560 
2561  PushReturnAddressFrom(rcx);
2562  } else {
2563  // Otherwise just leave the exit frame.
2564  leave();
2565  }
2566 
2567  LeaveExitFrameEpilogue();
2568 }
2569 
2570 void MacroAssembler::LeaveApiExitFrame() {
2571  movp(rsp, rbp);
2572  popq(rbp);
2573 
2574  LeaveExitFrameEpilogue();
2575 }
2576 
2577 void MacroAssembler::LeaveExitFrameEpilogue() {
2578  // Restore current context from top and clear it in debug mode.
2579  ExternalReference context_address =
2580  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
2581  Operand context_operand = ExternalReferenceAsOperand(context_address);
2582  movp(rsi, context_operand);
2583 #ifdef DEBUG
2584  movp(context_operand, Immediate(Context::kInvalidContext));
2585 #endif
2586 
2587  // Clear the top frame.
2588  ExternalReference c_entry_fp_address =
2589  ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
2590  Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
2591  movp(c_entry_fp_operand, Immediate(0));
2592 }
2593 
2594 
2595 #ifdef _WIN64
2596 static const int kRegisterPassedArguments = 4;
2597 #else
2598 static const int kRegisterPassedArguments = 6;
2599 #endif
2600 
2601 
2602 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
2603  movp(dst, NativeContextOperand());
2604  movp(dst, ContextOperand(dst, index));
2605 }
2606 
2607 
2608 int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2609  // On Windows 64 stack slots are reserved by the caller for all arguments
2610  // including the ones passed in registers, and space is always allocated for
2611  // the four register arguments even if the function takes fewer than four
2612  // arguments.
2613  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2614  // and the caller does not reserve stack slots for them.
2615  DCHECK_GE(num_arguments, 0);
2616 #ifdef _WIN64
2617  const int kMinimumStackSlots = kRegisterPassedArguments;
2618  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2619  return num_arguments;
2620 #else
2621  if (num_arguments < kRegisterPassedArguments) return 0;
2622  return num_arguments - kRegisterPassedArguments;
2623 #endif
2624 }
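
// --- Illustrative aside (not part of the original source) ---------------
// A worked sketch of the rule above: on Windows x64 the caller always
// reserves at least four slots (the shadow space), on System V only the
// arguments that do not fit in the six argument registers get stack slots.
// The helper name is illustrative only.
static int StackSlotsForCCall(int num_arguments, bool is_win64) {
  const int register_args = is_win64 ? 4 : 6;
  if (is_win64) {
    return num_arguments < register_args ? register_args : num_arguments;
  }
  return num_arguments < register_args ? 0 : num_arguments - register_args;
}
// e.g. StackSlotsForCCall(3, true) == 4, StackSlotsForCCall(3, false) == 0,
//      StackSlotsForCCall(7, true) == 7, StackSlotsForCCall(7, false) == 1.
// -------------------------------------------------------------------------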
2625 
2626 void TurboAssembler::PrepareCallCFunction(int num_arguments) {
2627  int frame_alignment = base::OS::ActivationFrameAlignment();
2628  DCHECK_NE(frame_alignment, 0);
2629  DCHECK_GE(num_arguments, 0);
2630 
2631  // Make stack end at alignment and allocate space for arguments and old rsp.
2632  movp(kScratchRegister, rsp);
2633  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2634  int argument_slots_on_stack =
2635  ArgumentStackSlotsForCFunctionCall(num_arguments);
2636  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
2637  andp(rsp, Immediate(-frame_alignment));
2638  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
2639 }
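
// --- Illustrative aside (not part of the original source) ---------------
// The stack arithmetic above, assuming kRegisterSize == 8 and a 16-byte
// activation frame alignment: one extra slot is reserved so the pre-call
// rsp can be stashed above the argument slots and restored by CallCFunction
// with a single load. The helper name is illustrative only.
static uint64_t AlignedSpForCCall(uint64_t old_rsp, int argument_slots) {
  uint64_t sp = old_rsp - (argument_slots + 1) * 8;  // subp(rsp, ...)
  sp &= ~uint64_t{15};                               // andp(rsp, -16)
  // The original rsp is then stored at sp + argument_slots * 8.
  return sp;
}
// -------------------------------------------------------------------------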
2640 
2641 void TurboAssembler::CallCFunction(ExternalReference function,
2642  int num_arguments) {
2643  LoadAddress(rax, function);
2644  CallCFunction(rax, num_arguments);
2645 }
2646 
2647 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2648  DCHECK_LE(num_arguments, kMaxCParameters);
2649  DCHECK(has_frame());
2650  // Check stack alignment.
2651  if (emit_debug_code()) {
2652  CheckStackAlignment();
2653  }
2654 
2655  call(function);
2656  DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
2657  DCHECK_GE(num_arguments, 0);
2658  int argument_slots_on_stack =
2659  ArgumentStackSlotsForCFunctionCall(num_arguments);
2660  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
2661 }
2662 
2663 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
2664  Condition cc, Label* condition_met,
2665  Label::Distance condition_met_distance) {
2666  DCHECK(cc == zero || cc == not_zero);
2667  if (scratch == object) {
2668  andp(scratch, Immediate(~kPageAlignmentMask));
2669  } else {
2670  movp(scratch, Immediate(~kPageAlignmentMask));
2671  andp(scratch, object);
2672  }
2673  if (mask < (1 << kBitsPerByte)) {
2674  testb(Operand(scratch, MemoryChunk::kFlagsOffset),
2675  Immediate(static_cast<uint8_t>(mask)));
2676  } else {
2677  testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
2678  }
2679  j(cc, condition_met, condition_met_distance);
2680 }
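
// --- Illustrative aside (not part of the original source) ---------------
// The masking above, in plain C++: clearing the page-offset bits of any
// pointer into a MemoryChunk yields the chunk's base address, where the
// flags word at MemoryChunk::kFlagsOffset can then be tested. The helper
// name is illustrative only.
static uint64_t MemoryChunkBase(uint64_t address_in_chunk,
                                uint64_t page_alignment_mask) {
  return address_in_chunk & ~page_alignment_mask;  // andp(scratch, ~mask)
}
// -------------------------------------------------------------------------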
2681 
2682 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
2683  Label current;
2684  bind(&current);
2685  int pc = pc_offset();
2686  // Load effective address to get the address of the current instruction.
2687  leaq(dst, Operand(&current, -pc));
2688 }
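
// --- Illustrative aside (not part of the original source) ---------------
// The arithmetic above: if the label is bound at byte offset `pc` from the
// start of the generated code, subtracting that offset from the label's
// runtime address yields the code start. The helper name is illustrative.
static uint64_t CodeStartFromLabel(uint64_t label_address, int label_offset) {
  return label_address - static_cast<uint64_t>(label_offset);
}
// -------------------------------------------------------------------------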
2689 
2690 void TurboAssembler::ResetSpeculationPoisonRegister() {
2691  // TODO(tebbi): Perhaps we want to put an lfence here.
2692  Set(kSpeculationPoisonRegister, -1);
2693 }
2694 
2695 } // namespace internal
2696 } // namespace v8
2697 
2698 #endif // V8_TARGET_ARCH_X64