V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
macro-assembler-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_IA32
6 
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/base/utils/random-number-generator.h"
10 #include "src/bootstrapper.h"
11 #include "src/callable.h"
12 #include "src/code-factory.h"
13 #include "src/code-stubs.h"
14 #include "src/counters.h"
15 #include "src/debug/debug.h"
16 #include "src/external-reference-table.h"
17 #include "src/frame-constants.h"
18 #include "src/frames-inl.h"
19 #include "src/ia32/assembler-ia32-inl.h"
20 #include "src/macro-assembler.h"
21 #include "src/runtime/runtime.h"
22 #include "src/snapshot/embedded-data.h"
23 #include "src/snapshot/snapshot.h"
24 
25 // Satisfy cpplint check, but don't include platform-specific header. It is
26 // included recursively via macro-assembler.h.
27 #if 0
28 #include "src/ia32/macro-assembler-ia32.h"
29 #endif
30 
31 namespace v8 {
32 namespace internal {
33 
34 // -------------------------------------------------------------------------
35 // MacroAssembler implementation.
36 
37 MacroAssembler::MacroAssembler(Isolate* isolate,
38  const AssemblerOptions& options, void* buffer,
39  int size, CodeObjectRequired create_code_object)
40  : TurboAssembler(isolate, options, buffer, size, create_code_object) {
41  if (create_code_object == CodeObjectRequired::kYes) {
42  // Unlike TurboAssembler, which can be used off the main thread and may not
43  // allocate, the macro assembler creates its own copy of the self-reference
44  // marker in order to disambiguate between self-references during nested
45  // code generation (e.g.: codegen of the current object triggers stub
46  // compilation through CodeStub::GetCode()).
47  code_object_ = Handle<HeapObject>::New(
48  *isolate->factory()->NewSelfReferenceMarker(), isolate);
49  }
50 }
51 
52 void TurboAssembler::InitializeRootRegister() {
53  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
54  Move(kRootRegister, Immediate(isolate_root));
55 }
56 
57 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
58  if (root_array_available()) {
59  mov(destination,
60  Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
61  return;
62  }
63 
64  if (RootsTable::IsImmortalImmovable(index)) {
65  Handle<Object> object = isolate()->root_handle(index);
66  if (object->IsSmi()) {
67  mov(destination, Immediate(Smi::cast(*object)));
68  return;
69  } else {
70  DCHECK(object->IsHeapObject());
71  mov(destination, Handle<HeapObject>::cast(object));
72  return;
73  }
74  }
75 
76  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
77  lea(destination,
78  Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
79  mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
80 }
81 
82 void TurboAssembler::CompareRoot(Register with, Register scratch,
83  RootIndex index) {
84  if (root_array_available()) {
85  CompareRoot(with, index);
86  } else {
87  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
88  lea(scratch,
89  Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
90  cmp(with, Operand(scratch, RootRegisterOffsetForRootIndex(index)));
91  }
92 }
93 
94 void TurboAssembler::CompareRoot(Register with, RootIndex index) {
95  if (root_array_available()) {
96  cmp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
97  return;
98  }
99 
100  DCHECK(RootsTable::IsImmortalImmovable(index));
101  Handle<Object> object = isolate()->root_handle(index);
102  if (object->IsHeapObject()) {
103  cmp(with, Handle<HeapObject>::cast(object));
104  } else {
105  cmp(with, Immediate(Smi::cast(*object)));
106  }
107 }
108 
109 void TurboAssembler::CompareStackLimit(Register with) {
110  if (root_array_available()) {
111  CompareRoot(with, RootIndex::kStackLimit);
112  } else {
113  DCHECK(!options().isolate_independent_code);
114  ExternalReference ref =
115  ExternalReference::address_of_stack_limit(isolate());
116  cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
117  }
118 }
119 
120 void TurboAssembler::CompareRealStackLimit(Register with) {
121  if (root_array_available()) {
122  CompareRoot(with, RootIndex::kRealStackLimit);
123  } else {
124  DCHECK(!options().isolate_independent_code);
125  ExternalReference ref =
126  ExternalReference::address_of_real_stack_limit(isolate());
127  cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
128  }
129 }
130 
131 void MacroAssembler::PushRoot(RootIndex index) {
132  if (root_array_available()) {
133  DCHECK(RootsTable::IsImmortalImmovable(index));
134  push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
135  return;
136  }
137 
138  // TODO(v8:6666): Add a scratch register or remove all uses.
139  DCHECK(RootsTable::IsImmortalImmovable(index));
140  Handle<Object> object = isolate()->root_handle(index);
141  if (object->IsHeapObject()) {
142  Push(Handle<HeapObject>::cast(object));
143  } else {
144  Push(Smi::cast(*object));
145  }
146 }
147 
148 Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
149  Register scratch) {
150  // TODO(jgruber): Add support for enable_root_array_delta_access.
151  if (root_array_available() && options().isolate_independent_code) {
152  if (IsAddressableThroughRootRegister(isolate(), reference)) {
153  // Some external references can be efficiently loaded as an offset from
154  // kRootRegister.
155  intptr_t offset =
156  RootRegisterOffsetForExternalReference(isolate(), reference);
157  return Operand(kRootRegister, offset);
158  } else {
159  // Otherwise, do a memory load from the external reference table.
160  mov(scratch, Operand(kRootRegister,
161  RootRegisterOffsetForExternalReferenceTableEntry(
162  isolate(), reference)));
163  return Operand(scratch, 0);
164  }
165  }
166  Move(scratch, Immediate(reference));
167  return Operand(scratch, 0);
168 }
169 
170 // TODO(v8:6666): If possible, refactor into a platform-independent function in
171 // TurboAssembler.
172 Operand TurboAssembler::ExternalReferenceAddressAsOperand(
173  ExternalReference reference) {
174  DCHECK(FLAG_embedded_builtins);
175  DCHECK(root_array_available());
176  DCHECK(options().isolate_independent_code);
177  return Operand(
178  kRootRegister,
179  RootRegisterOffsetForExternalReferenceTableEntry(isolate(), reference));
180 }
181 
182 // TODO(v8:6666): If possible, refactor into a platform-independent function in
183 // TurboAssembler.
184 Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
185  DCHECK(FLAG_embedded_builtins);
186  DCHECK(root_array_available());
187 
188  int builtin_index;
189  RootIndex root_index;
190  if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
191  return Operand(kRootRegister, RootRegisterOffsetForRootIndex(root_index));
192  } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
193  return Operand(kRootRegister,
194  RootRegisterOffsetForBuiltinIndex(builtin_index));
195  } else if (object.is_identical_to(code_object_) &&
196  Builtins::IsBuiltinId(maybe_builtin_index_)) {
197  return Operand(kRootRegister,
198  RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
199  } else {
200  // Objects in the constants table need an additional indirection, which
201  // cannot be represented as a single Operand.
202  UNREACHABLE();
203  }
204 }
205 
206 void TurboAssembler::LoadFromConstantsTable(Register destination,
207  int constant_index) {
208  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
209  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
210  mov(destination,
211  FieldOperand(destination,
212  FixedArray::kHeaderSize + constant_index * kPointerSize));
213 }
214 
215 void TurboAssembler::LoadRootRegisterOffset(Register destination,
216  intptr_t offset) {
217  DCHECK(is_int32(offset));
218  DCHECK(root_array_available());
219  if (offset == 0) {
220  mov(destination, kRootRegister);
221  } else {
222  lea(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
223  }
224 }
225 
226 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
227  DCHECK(root_array_available());
228  mov(destination, Operand(kRootRegister, offset));
229 }
230 
231 void TurboAssembler::LoadAddress(Register destination,
232  ExternalReference source) {
233  // TODO(jgruber): Add support for enable_root_array_delta_access.
234  if (root_array_available() && options().isolate_independent_code) {
235  IndirectLoadExternalReference(destination, source);
236  return;
237  }
238  mov(destination, Immediate(source));
239 }
240 
241 static constexpr Register saved_regs[] = {eax, ecx, edx};
242 
243 static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
244 
245 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
246  Register exclusion1,
247  Register exclusion2,
248  Register exclusion3) const {
249  int bytes = 0;
250  for (int i = 0; i < kNumberOfSavedRegs; i++) {
251  Register reg = saved_regs[i];
252  if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
253  bytes += kPointerSize;
254  }
255  }
256 
257  if (fp_mode == kSaveFPRegs) {
258  // Count all XMM registers except XMM0.
259  bytes += kDoubleSize * (XMMRegister::kNumRegisters - 1);
260  }
261 
262  return bytes;
263 }
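// Note: with no exclusions this is 3 * kPointerSize = 12 bytes for eax, ecx
// and edx; with kSaveFPRegs it grows by 7 * kDoubleSize = 56 bytes for
// xmm1..xmm7 (assuming XMMRegister::kNumRegisters == 8 on ia32), i.e. 68
// bytes in total.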
264 
265 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
266  Register exclusion2, Register exclusion3) {
267  // We don't allow a GC during a store buffer overflow so there is no need to
268  // store the registers in any particular way, but we do have to store and
269  // restore them.
270  int bytes = 0;
271  for (int i = 0; i < kNumberOfSavedRegs; i++) {
272  Register reg = saved_regs[i];
273  if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
274  push(reg);
275  bytes += kPointerSize;
276  }
277  }
278 
279  if (fp_mode == kSaveFPRegs) {
280  // Save all XMM registers except XMM0.
281  int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
282  sub(esp, Immediate(delta));
283  for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
284  XMMRegister reg = XMMRegister::from_code(i);
285  movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
286  }
287  bytes += delta;
288  }
289 
290  return bytes;
291 }
292 
293 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
294  Register exclusion2, Register exclusion3) {
295  int bytes = 0;
296  if (fp_mode == kSaveFPRegs) {
297  // Restore all XMM registers except XMM0.
298  int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
299  for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
300  XMMRegister reg = XMMRegister::from_code(i);
301  movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
302  }
303  add(esp, Immediate(delta));
304  bytes += delta;
305  }
306 
307  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
308  Register reg = saved_regs[i];
309  if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
310  pop(reg);
311  bytes += kPointerSize;
312  }
313  }
314 
315  return bytes;
316 }
317 
318 void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
319  XMMRegister scratch, Label* lost_precision,
320  Label* is_nan, Label::Distance dst) {
321  DCHECK(input_reg != scratch);
322  cvttsd2si(result_reg, Operand(input_reg));
323  Cvtsi2sd(scratch, Operand(result_reg));
324  ucomisd(scratch, input_reg);
325  j(not_equal, lost_precision, dst);
326  j(parity_even, is_nan, dst);
327 }
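// Note: cvttsd2si truncates the double toward zero; converting the result
// back with Cvtsi2sd and comparing with ucomisd detects any input that was
// not exactly representable as an int32 (not_equal -> lost_precision), while
// parity_even flags an unordered comparison, i.e. a NaN input.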
328 
329 void MacroAssembler::RecordWriteField(Register object, int offset,
330  Register value, Register dst,
331  SaveFPRegsMode save_fp,
332  RememberedSetAction remembered_set_action,
333  SmiCheck smi_check) {
334  // First, check if a write barrier is even needed. The tests below
335  // catch stores of Smis.
336  Label done;
337 
338  // Skip barrier if writing a smi.
339  if (smi_check == INLINE_SMI_CHECK) {
340  JumpIfSmi(value, &done);
341  }
342 
343  // Although the object register is tagged, the offset is relative to the start
344  // of the object, so the offset must be a multiple of kPointerSize.
345  DCHECK(IsAligned(offset, kPointerSize));
346 
347  lea(dst, FieldOperand(object, offset));
348  if (emit_debug_code()) {
349  Label ok;
350  test_b(dst, Immediate(kPointerSize - 1));
351  j(zero, &ok, Label::kNear);
352  int3();
353  bind(&ok);
354  }
355 
356  RecordWrite(object, dst, value, save_fp, remembered_set_action,
357  OMIT_SMI_CHECK);
358 
359  bind(&done);
360 
361  // Clobber clobbered input registers when running with the debug-code flag
362  // turned on to provoke errors.
363  if (emit_debug_code()) {
364  mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
365  mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
366  }
367 }
368 
369 void TurboAssembler::SaveRegisters(RegList registers) {
370  DCHECK_GT(NumRegs(registers), 0);
371  for (int i = 0; i < Register::kNumRegisters; ++i) {
372  if ((registers >> i) & 1u) {
373  push(Register::from_code(i));
374  }
375  }
376 }
377 
378 void TurboAssembler::RestoreRegisters(RegList registers) {
379  DCHECK_GT(NumRegs(registers), 0);
380  for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
381  if ((registers >> i) & 1u) {
382  pop(Register::from_code(i));
383  }
384  }
385 }
386 
387 void TurboAssembler::CallRecordWriteStub(
388  Register object, Register address,
389  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
390  CallRecordWriteStub(
391  object, address, remembered_set_action, fp_mode,
392  isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
393  kNullAddress);
394 }
395 
396 void TurboAssembler::CallRecordWriteStub(
397  Register object, Register address,
398  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
399  Address wasm_target) {
400  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
401  Handle<Code>::null(), wasm_target);
402 }
403 
404 void TurboAssembler::CallRecordWriteStub(
405  Register object, Register address,
406  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
407  Handle<Code> code_target, Address wasm_target) {
408  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
409  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
410  // i.e. always emit the remembered set and save FP registers in RecordWriteStub. If
411  // large performance regression is observed, we should use these values to
412  // avoid unnecessary work.
413 
414  RecordWriteDescriptor descriptor;
415  RegList registers = descriptor.allocatable_registers();
416 
417  SaveRegisters(registers);
418 
419  Register object_parameter(
420  descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
421  Register slot_parameter(
422  descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
423  Register remembered_set_parameter(
424  descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
425  Register fp_mode_parameter(
426  descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
427 
428  push(object);
429  push(address);
430 
431  pop(slot_parameter);
432  pop(object_parameter);
433 
434  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
435  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
436  if (code_target.is_null()) {
437  // Use {wasm_call} for direct Wasm call within a module.
438  wasm_call(wasm_target, RelocInfo::WASM_STUB_CALL);
439  } else {
440  Call(code_target, RelocInfo::CODE_TARGET);
441  }
442 
443  RestoreRegisters(registers);
444 }
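// Note: the push/pop pair above routes {object} and {address} into the
// descriptor's parameter registers through the stack, which works even when
// the source registers overlap the destination registers and avoids the need
// for an extra scratch register.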
445 
446 void MacroAssembler::RecordWrite(Register object, Register address,
447  Register value, SaveFPRegsMode fp_mode,
448  RememberedSetAction remembered_set_action,
449  SmiCheck smi_check) {
450  DCHECK(object != value);
451  DCHECK(object != address);
452  DCHECK(value != address);
453  AssertNotSmi(object);
454 
455  if (remembered_set_action == OMIT_REMEMBERED_SET &&
456  !FLAG_incremental_marking) {
457  return;
458  }
459 
460  if (emit_debug_code()) {
461  Label ok;
462  cmp(value, Operand(address, 0));
463  j(equal, &ok, Label::kNear);
464  int3();
465  bind(&ok);
466  }
467 
468  // First, check if a write barrier is even needed. The tests below
469  // catch stores of Smis and stores into young gen.
470  Label done;
471 
472  if (smi_check == INLINE_SMI_CHECK) {
473  // Skip barrier if writing a smi.
474  JumpIfSmi(value, &done, Label::kNear);
475  }
476 
477  CheckPageFlag(value,
478  value, // Used as scratch.
479  MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
480  Label::kNear);
481  CheckPageFlag(object,
482  value, // Used as scratch.
483  MemoryChunk::kPointersFromHereAreInterestingMask,
484  zero,
485  &done,
486  Label::kNear);
487 
488  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
489 
490  bind(&done);
491 
492  // Count number of write barriers in generated code.
493  isolate()->counters()->write_barriers_static()->Increment();
494  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, value);
495 
496  // Clobber clobbered registers when running with the debug-code flag
497  // turned on to provoke errors.
498  if (emit_debug_code()) {
499  mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
500  mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
501  }
502 }
503 
504 void MacroAssembler::MaybeDropFrames() {
505  // Check whether we need to drop frames to restart a function on the stack.
506  Label dont_drop;
507  ExternalReference restart_fp =
508  ExternalReference::debug_restart_fp_address(isolate());
509  mov(eax, ExternalReferenceAsOperand(restart_fp, eax));
510  test(eax, eax);
511  j(zero, &dont_drop, Label::kNear);
512 
513  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
514  bind(&dont_drop);
515 }
516 
517 void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
518  xorps(dst, dst);
519  cvtsi2ss(dst, src);
520 }
521 
522 void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
523  xorpd(dst, dst);
524  cvtsi2sd(dst, src);
525 }
526 
527 void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
528  Label done;
529  Register src_reg = src.is_reg_only() ? src.reg() : tmp;
530  if (src_reg == tmp) mov(tmp, src);
531  cvtsi2ss(dst, src_reg);
532  test(src_reg, src_reg);
533  j(positive, &done, Label::kNear);
534 
535  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
536  if (src_reg != tmp) mov(tmp, src_reg);
537  shr(tmp, 1);
538  // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
539  Label msb_not_set;
540  j(not_carry, &msb_not_set, Label::kNear);
541  or_(tmp, Immediate(1));
542  bind(&msb_not_set);
543  cvtsi2ss(dst, tmp);
544  addss(dst, dst);
545  bind(&done);
546 }
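// Note: for inputs >= 2^31 the signed cvtsi2ss would see a negative value, so
// the value is halved with a logical shift, the shifted-out LSB is OR'ed back
// in so the final rounding matches a direct conversion, and the float result
// is doubled with addss(dst, dst).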
547 
548 void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
549  Label done;
550  cvttss2si(dst, src);
551  test(dst, dst);
552  j(positive, &done);
553  Move(tmp, static_cast<float>(INT32_MIN));
554  addss(tmp, src);
555  cvttss2si(dst, tmp);
556  or_(dst, Immediate(0x80000000));
557  bind(&done);
558 }
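// Note: cvttss2si only covers [0, 2^31); a negative result therefore means the
// input was at least 2^31, so 2^31 is subtracted in the float domain (by
// adding INT32_MIN), the value is converted, and bit 31 is OR'ed back in to
// restore the unsigned result.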
559 
560 void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
561  Label done;
562  cmp(src, Immediate(0));
563  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
564  Cvtsi2sd(dst, src);
565  j(not_sign, &done, Label::kNear);
566  addsd(dst, ExternalReferenceAsOperand(uint32_bias, scratch));
567  bind(&done);
568 }
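// Note: cvtsi2sd treats the source as signed, so inputs with the top bit set
// come out as value - 2^32; the not_sign check skips the fixup, otherwise the
// uint32_bias constant (2^32 as a double, per the external reference name) is
// added to correct the result.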
569 
570 void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
571  Move(tmp, -2147483648.0);
572  addsd(tmp, src);
573  cvttsd2si(dst, tmp);
574  add(dst, Immediate(0x80000000));
575 }
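// Note: adding -2^31 first shifts the full uint32 range [0, 2^32) into the
// signed range accepted by cvttsd2si; the final add of 0x80000000 undoes the
// bias in integer arithmetic.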
576 
577 void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
578  if (shift >= 32) {
579  mov(high, low);
580  shl(high, shift - 32);
581  xor_(low, low);
582  } else {
583  shld(high, low, shift);
584  shl(low, shift);
585  }
586 }
587 
588 void TurboAssembler::ShlPair_cl(Register high, Register low) {
589  shld_cl(high, low);
590  shl_cl(low);
591  Label done;
592  test(ecx, Immediate(0x20));
593  j(equal, &done, Label::kNear);
594  mov(high, low);
595  xor_(low, low);
596  bind(&done);
597 }
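// Note: shld_cl/shl_cl only use the low five bits of ecx, so a shift count of
// 32 or more would otherwise be applied modulo 32; the test against 0x20
// detects that case and completes the shift by moving {low} into {high} and
// zeroing {low}. ShrPair_cl and SarPair_cl below apply the same fixup.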
598 
599 void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
600  if (shift >= 32) {
601  mov(low, high);
602  shr(low, shift - 32);
603  xor_(high, high);
604  } else {
605  shrd(high, low, shift);
606  shr(high, shift);
607  }
608 }
609 
610 void TurboAssembler::ShrPair_cl(Register high, Register low) {
611  shrd_cl(low, high);
612  shr_cl(high);
613  Label done;
614  test(ecx, Immediate(0x20));
615  j(equal, &done, Label::kNear);
616  mov(low, high);
617  xor_(high, high);
618  bind(&done);
619 }
620 
621 void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
622  if (shift >= 32) {
623  mov(low, high);
624  sar(low, shift - 32);
625  sar(high, 31);
626  } else {
627  shrd(high, low, shift);
628  sar(high, shift);
629  }
630 }
631 
632 void TurboAssembler::SarPair_cl(Register high, Register low) {
633  shrd_cl(low, high);
634  sar_cl(high);
635  Label done;
636  test(ecx, Immediate(0x20));
637  j(equal, &done, Label::kNear);
638  mov(low, high);
639  sar(high, 31);
640  bind(&done);
641 }
642 
643 void MacroAssembler::CmpObjectType(Register heap_object,
644  InstanceType type,
645  Register map) {
646  mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
647  CmpInstanceType(map, type);
648 }
649 
650 
651 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
652  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
653 }
654 
655 void MacroAssembler::AssertSmi(Register object) {
656  if (emit_debug_code()) {
657  test(object, Immediate(kSmiTagMask));
658  Check(equal, AbortReason::kOperandIsNotASmi);
659  }
660 }
661 
662 void MacroAssembler::AssertConstructor(Register object) {
663  if (emit_debug_code()) {
664  test(object, Immediate(kSmiTagMask));
665  Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
666  Push(object);
667  mov(object, FieldOperand(object, HeapObject::kMapOffset));
668  test_b(FieldOperand(object, Map::kBitFieldOffset),
669  Immediate(Map::IsConstructorBit::kMask));
670  Pop(object);
671  Check(not_zero, AbortReason::kOperandIsNotAConstructor);
672  }
673 }
674 
675 void MacroAssembler::AssertFunction(Register object) {
676  if (emit_debug_code()) {
677  test(object, Immediate(kSmiTagMask));
678  Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
679  Push(object);
680  CmpObjectType(object, JS_FUNCTION_TYPE, object);
681  Pop(object);
682  Check(equal, AbortReason::kOperandIsNotAFunction);
683  }
684 }
685 
686 
687 void MacroAssembler::AssertBoundFunction(Register object) {
688  if (emit_debug_code()) {
689  test(object, Immediate(kSmiTagMask));
690  Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
691  Push(object);
692  CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
693  Pop(object);
694  Check(equal, AbortReason::kOperandIsNotABoundFunction);
695  }
696 }
697 
698 void MacroAssembler::AssertGeneratorObject(Register object) {
699  if (!emit_debug_code()) return;
700 
701  test(object, Immediate(kSmiTagMask));
702  Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
703 
704  {
705  Push(object);
706  Register map = object;
707 
708  // Load map
709  mov(map, FieldOperand(object, HeapObject::kMapOffset));
710 
711  Label do_check;
712  // Check if JSGeneratorObject
713  CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
714  j(equal, &do_check, Label::kNear);
715 
716  // Check if JSAsyncFunctionObject.
717  CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
718  j(equal, &do_check, Label::kNear);
719 
720  // Check if JSAsyncGeneratorObject
721  CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
722 
723  bind(&do_check);
724  Pop(object);
725  }
726 
727  Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
728 }
729 
730 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
731  Register scratch) {
732  if (emit_debug_code()) {
733  Label done_checking;
734  AssertNotSmi(object);
735  CompareRoot(object, scratch, RootIndex::kUndefinedValue);
736  j(equal, &done_checking);
737  LoadRoot(scratch, RootIndex::kAllocationSiteWithWeakNextMap);
738  cmp(FieldOperand(object, 0), scratch);
739  Assert(equal, AbortReason::kExpectedUndefinedOrCell);
740  bind(&done_checking);
741  }
742 }
743 
744 
745 void MacroAssembler::AssertNotSmi(Register object) {
746  if (emit_debug_code()) {
747  test(object, Immediate(kSmiTagMask));
748  Check(not_equal, AbortReason::kOperandIsASmi);
749  }
750 }
751 
752 void TurboAssembler::StubPrologue(StackFrame::Type type) {
753  push(ebp); // Caller's frame pointer.
754  mov(ebp, esp);
755  push(Immediate(StackFrame::TypeToMarker(type)));
756 }
757 
758 void TurboAssembler::Prologue() {
759  push(ebp); // Caller's frame pointer.
760  mov(ebp, esp);
761  push(esi); // Callee's context.
762  push(edi); // Callee's JS function.
763 }
764 
765 void TurboAssembler::EnterFrame(StackFrame::Type type) {
766  push(ebp);
767  mov(ebp, esp);
768  push(Immediate(StackFrame::TypeToMarker(type)));
769 }
770 
771 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
772  if (emit_debug_code()) {
773  cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
774  Immediate(StackFrame::TypeToMarker(type)));
775  Check(equal, AbortReason::kStackFrameTypesMustMatch);
776  }
777  leave();
778 }
779 
780 #ifdef V8_OS_WIN
781 void TurboAssembler::AllocateStackFrame(Register bytes_scratch) {
782  // On Windows, we cannot increment the stack size by more than one page
783  // (minimum page size is 4KB) without accessing at least one byte on the
784  // page. Check this:
785  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
786  constexpr int kPageSize = 4 * 1024;
787  Label check_offset;
788  Label touch_next_page;
789  jmp(&check_offset);
790  bind(&touch_next_page);
791  sub(esp, Immediate(kPageSize));
792  // Just to touch the page, before we increment further.
793  mov(Operand(esp, 0), Immediate(0));
794  sub(bytes_scratch, Immediate(kPageSize));
795 
796  bind(&check_offset);
797  cmp(bytes_scratch, kPageSize);
798  j(greater, &touch_next_page);
799 
800  sub(esp, bytes_scratch);
801 }
802 #endif
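// Note: this is the usual Windows stack-probing pattern: esp is moved down at
// most one 4KB page at a time and each page is touched with a dummy store so
// the guard page is committed before esp advances past it; the remaining
// sub-page amount is subtracted at the end.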
803 
804 void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
805  Register scratch) {
806  DCHECK(frame_type == StackFrame::EXIT ||
807  frame_type == StackFrame::BUILTIN_EXIT);
808 
809  // Set up the frame structure on the stack.
810  DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
811  DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
812  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
813  push(ebp);
814  mov(ebp, esp);
815 
816  // Reserve room for entry stack pointer and push the code object.
817  push(Immediate(StackFrame::TypeToMarker(frame_type)));
818  DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
819  push(Immediate(0)); // Saved entry sp, patched before call.
820  DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
821  Move(scratch, CodeObject());
822  push(scratch); // Accessed from ExitFrame::code_slot.
823 
824  STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
825  STATIC_ASSERT(esi == kContextRegister);
826 
827  // Save the frame pointer and the context in top.
828  ExternalReference c_entry_fp_address =
829  ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
830  ExternalReference context_address =
831  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
832  ExternalReference c_function_address =
833  ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate());
834 
835  DCHECK(!AreAliased(scratch, ebp, esi, edx));
836  mov(ExternalReferenceAsOperand(c_entry_fp_address, scratch), ebp);
837  mov(ExternalReferenceAsOperand(context_address, scratch), esi);
838  mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
839 }
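// Note: per the DCHECKed ExitFrameConstants above, the exit frame now looks
// like this relative to ebp (kPointerSize == 4 on ia32): caller SP at ebp+8,
// return address at ebp+4, saved caller ebp at ebp+0, frame-type marker at
// ebp-4, saved entry sp slot (patched later) at ebp-8, and the code object at
// ebp-12.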
840 
841 
842 void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
843  // Optionally save all XMM registers.
844  if (save_doubles) {
845  int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
846  sub(esp, Immediate(space));
847  const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
848  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
849  XMMRegister reg = XMMRegister::from_code(i);
850  movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
851  }
852  } else {
853  sub(esp, Immediate(argc * kPointerSize));
854  }
855 
856  // Get the required frame alignment for the OS.
857  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
858  if (kFrameAlignment > 0) {
859  DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
860  and_(esp, -kFrameAlignment);
861  }
862 
863  // Patch the saved entry sp.
864  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
865 }
866 
867 void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
868  StackFrame::Type frame_type) {
869  EnterExitFramePrologue(frame_type, edi);
870 
871  // Set up argc and argv in callee-saved registers.
872  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
873  mov(edi, eax);
874  lea(esi, Operand(ebp, eax, times_4, offset));
875 
876  // Reserve space for argc, argv and isolate.
877  EnterExitFrameEpilogue(argc, save_doubles);
878 }
879 
880 void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
881  EnterExitFramePrologue(StackFrame::EXIT, scratch);
882  EnterExitFrameEpilogue(argc, false);
883 }
884 
885 
886 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
887  // Optionally restore all XMM registers.
888  if (save_doubles) {
889  const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
890  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
891  XMMRegister reg = XMMRegister::from_code(i);
892  movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
893  }
894  }
895 
896  if (pop_arguments) {
897  // Get the return address from the stack and restore the frame pointer.
898  mov(ecx, Operand(ebp, 1 * kPointerSize));
899  mov(ebp, Operand(ebp, 0 * kPointerSize));
900 
901  // Pop the arguments and the receiver from the caller stack.
902  lea(esp, Operand(esi, 1 * kPointerSize));
903 
904  // Push the return address to get ready to return.
905  push(ecx);
906  } else {
907  // Otherwise just leave the exit frame.
908  leave();
909  }
910 
911  LeaveExitFrameEpilogue();
912 }
913 
914 void MacroAssembler::LeaveExitFrameEpilogue() {
915  // Clear the top frame.
916  ExternalReference c_entry_fp_address =
917  ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
918  mov(ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));
919 
920  // Restore current context from top and clear it in debug mode.
921  ExternalReference context_address =
922  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
923  mov(esi, ExternalReferenceAsOperand(context_address, esi));
924 #ifdef DEBUG
925  push(eax);
926  mov(ExternalReferenceAsOperand(context_address, eax),
927  Immediate(Context::kInvalidContext));
928  pop(eax);
929 #endif
930 }
931 
932 void MacroAssembler::LeaveApiExitFrame() {
933  mov(esp, ebp);
934  pop(ebp);
935 
936  LeaveExitFrameEpilogue();
937 }
938 
939 void MacroAssembler::PushStackHandler(Register scratch) {
940  // Adjust this code if not the case.
941  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
942  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
943 
944  push(Immediate(0)); // Padding.
945 
946  // Link the current handler as the next handler.
947  ExternalReference handler_address =
948  ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
949  push(ExternalReferenceAsOperand(handler_address, scratch));
950 
951  // Set this new handler as the current one.
952  mov(ExternalReferenceAsOperand(handler_address, scratch), esp);
953 }
954 
955 void MacroAssembler::PopStackHandler(Register scratch) {
956  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
957  ExternalReference handler_address =
958  ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
959  pop(ExternalReferenceAsOperand(handler_address, scratch));
960  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
961 }
962 
963 
964 void MacroAssembler::CallStub(CodeStub* stub) {
965  DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
966  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
967 }
968 
969 void MacroAssembler::TailCallStub(CodeStub* stub) {
970  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
971 }
972 
973 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
974  return has_frame() || !stub->SometimesSetsUpAFrame();
975 }
976 
977 void MacroAssembler::CallRuntime(const Runtime::Function* f,
978  int num_arguments,
979  SaveFPRegsMode save_doubles) {
980  // If the expected number of arguments of the runtime function is
981  // constant, we check that the actual number of arguments matches the
982  // expectation.
983  CHECK(f->nargs < 0 || f->nargs == num_arguments);
984 
985  // TODO(1236192): Most runtime routines don't need the number of
986  // arguments passed in because it is constant. At some point we
987  // should remove this need and make the runtime routine entry code
988  // smarter.
989  Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
990  Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
991  Handle<Code> code =
992  CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
993  Call(code, RelocInfo::CODE_TARGET);
994 }
995 
996 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
997  Register centry) {
998  const Runtime::Function* f = Runtime::FunctionForId(fid);
999  // TODO(1236192): Most runtime routines don't need the number of
1000  // arguments passed in because it is constant. At some point we
1001  // should remove this need and make the runtime routine entry code
1002  // smarter.
1003  Move(kRuntimeCallArgCountRegister, Immediate(f->nargs));
1004  Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
1005  DCHECK(!AreAliased(centry, kRuntimeCallArgCountRegister,
1006  kRuntimeCallFunctionRegister));
1007  add(centry, Immediate(Code::kHeaderSize - kHeapObjectTag));
1008  Call(centry);
1009 }
1010 
1011 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1012  // ----------- S t a t e -------------
1013  // -- esp[0] : return address
1014  // -- esp[4] : argument num_arguments - 1
1015  // ...
1016  // -- esp[4 * num_arguments] : argument 0 (receiver)
1017  //
1018  // For runtime functions with variable arguments:
1019  // -- eax : number of arguments
1020  // -----------------------------------
1021 
1022  const Runtime::Function* function = Runtime::FunctionForId(fid);
1023  DCHECK_EQ(1, function->result_size);
1024  if (function->nargs >= 0) {
1025  // TODO(1236192): Most runtime routines don't need the number of
1026  // arguments passed in because it is constant. At some point we
1027  // should remove this need and make the runtime routine entry code
1028  // smarter.
1029  Move(kRuntimeCallArgCountRegister, Immediate(function->nargs));
1030  }
1031  JumpToExternalReference(ExternalReference::Create(fid));
1032 }
1033 
1034 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
1035  bool builtin_exit_frame) {
1036  // Set the entry point and jump to the C entry runtime stub.
1037  Move(kRuntimeCallFunctionRegister, Immediate(ext));
1038  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1039  kArgvOnStack, builtin_exit_frame);
1040  Jump(code, RelocInfo::CODE_TARGET);
1041 }
1042 
1043 void MacroAssembler::JumpToInstructionStream(Address entry) {
1044  jmp(entry, RelocInfo::OFF_HEAP_TARGET);
1045 }
1046 
1047 void TurboAssembler::PrepareForTailCall(
1048  const ParameterCount& callee_args_count, Register caller_args_count_reg,
1049  Register scratch0, Register scratch1,
1050  int number_of_temp_values_after_return_address) {
1051 #if DEBUG
1052  if (callee_args_count.is_reg()) {
1053  DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1054  scratch1));
1055  } else {
1056  DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1057  }
1058 #endif
1059 
1060  // Calculate the destination address where we will put the return address
1061  // after we drop current frame.
1062  Register new_sp_reg = scratch0;
1063  if (callee_args_count.is_reg()) {
1064  sub(caller_args_count_reg, callee_args_count.reg());
1065  lea(new_sp_reg,
1066  Operand(ebp, caller_args_count_reg, times_pointer_size,
1067  StandardFrameConstants::kCallerPCOffset -
1068  number_of_temp_values_after_return_address * kPointerSize));
1069  } else {
1070  lea(new_sp_reg, Operand(ebp, caller_args_count_reg, times_pointer_size,
1071  StandardFrameConstants::kCallerPCOffset -
1072  (callee_args_count.immediate() +
1073  number_of_temp_values_after_return_address) *
1074  kPointerSize));
1075  }
1076 
1077  if (FLAG_debug_code) {
1078  cmp(esp, new_sp_reg);
1079  Check(below, AbortReason::kStackAccessBelowStackPointer);
1080  }
1081 
1082  // Copy return address from caller's frame to current frame's return address
1083  // so it is not trashed, and let the following loop copy it to the right
1084  // place.
1085  Register tmp_reg = scratch1;
1086  mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
1087  mov(Operand(esp, number_of_temp_values_after_return_address * kPointerSize),
1088  tmp_reg);
1089 
1090  // Restore caller's frame pointer now as it could be overwritten by
1091  // the copying loop.
1092  mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1093 
1094  // +2 here is to copy both receiver and return address.
1095  Register count_reg = caller_args_count_reg;
1096  if (callee_args_count.is_reg()) {
1097  lea(count_reg, Operand(callee_args_count.reg(),
1098  2 + number_of_temp_values_after_return_address));
1099  } else {
1100  mov(count_reg, Immediate(callee_args_count.immediate() + 2 +
1101  number_of_temp_values_after_return_address));
1102  // TODO(ishell): Unroll copying loop for small immediate values.
1103  }
1104 
1105  // Now copy callee arguments to the caller frame going backwards to avoid
1106  // callee arguments corruption (source and destination areas could overlap).
1107  Label loop, entry;
1108  jmp(&entry, Label::kNear);
1109  bind(&loop);
1110  dec(count_reg);
1111  mov(tmp_reg, Operand(esp, count_reg, times_pointer_size, 0));
1112  mov(Operand(new_sp_reg, count_reg, times_pointer_size, 0), tmp_reg);
1113  bind(&entry);
1114  cmp(count_reg, Immediate(0));
1115  j(not_equal, &loop, Label::kNear);
1116 
1117  // Leave current frame.
1118  mov(esp, new_sp_reg);
1119 }
1120 
1121 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1122  const ParameterCount& actual, Label* done,
1123  bool* definitely_mismatches,
1124  InvokeFlag flag,
1125  Label::Distance done_near) {
1126  DCHECK_IMPLIES(expected.is_reg(), expected.reg() == ecx);
1127  DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax);
1128 
1129  bool definitely_matches = false;
1130  *definitely_mismatches = false;
1131  Label invoke;
1132  if (expected.is_immediate()) {
1133  DCHECK(actual.is_immediate());
1134  mov(eax, actual.immediate());
1135  if (expected.immediate() == actual.immediate()) {
1136  definitely_matches = true;
1137  } else {
1138  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1139  if (expected.immediate() == sentinel) {
1140  // Don't worry about adapting arguments for builtins that
1141  // don't want that done. Skip adaptation code by making it look
1142  // like we have a match between expected and actual number of
1143  // arguments.
1144  definitely_matches = true;
1145  } else {
1146  *definitely_mismatches = true;
1147  mov(ecx, expected.immediate());
1148  }
1149  }
1150  } else {
1151  if (actual.is_immediate()) {
1152  // Expected is in register, actual is immediate. This is the
1153  // case when we invoke function values without going through the
1154  // IC mechanism.
1155  mov(eax, actual.immediate());
1156  cmp(expected.reg(), actual.immediate());
1157  j(equal, &invoke);
1158  DCHECK(expected.reg() == ecx);
1159  } else if (expected.reg() != actual.reg()) {
1160  // Both expected and actual are in (different) registers. This
1161  // is the case when we invoke functions using call and apply.
1162  cmp(expected.reg(), actual.reg());
1163  j(equal, &invoke);
1164  DCHECK(actual.reg() == eax);
1165  DCHECK(expected.reg() == ecx);
1166  } else {
1167  definitely_matches = true;
1168  Move(eax, actual.reg());
1169  }
1170  }
1171 
1172  if (!definitely_matches) {
1173  Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1174  if (flag == CALL_FUNCTION) {
1175  Call(adaptor, RelocInfo::CODE_TARGET);
1176  if (!*definitely_mismatches) {
1177  jmp(done, done_near);
1178  }
1179  } else {
1180  Jump(adaptor, RelocInfo::CODE_TARGET);
1181  }
1182  bind(&invoke);
1183  }
1184 }
1185 
1186 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1187  const ParameterCount& expected,
1188  const ParameterCount& actual) {
1189  Label skip_hook;
1190 
1191  ExternalReference debug_hook_active =
1192  ExternalReference::debug_hook_on_function_call_address(isolate());
1193  push(eax);
1194  cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
1195  pop(eax);
1196  j(equal, &skip_hook);
1197 
1198  {
1199  FrameScope frame(this,
1200  has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1201  if (expected.is_reg()) {
1202  SmiTag(expected.reg());
1203  Push(expected.reg());
1204  }
1205  if (actual.is_reg()) {
1206  SmiTag(actual.reg());
1207  Push(actual.reg());
1208  SmiUntag(actual.reg());
1209  }
1210  if (new_target.is_valid()) {
1211  Push(new_target);
1212  }
1213  Push(fun);
1214  Push(fun);
1215  Operand receiver_op =
1216  actual.is_reg()
1217  ? Operand(ebp, actual.reg(), times_pointer_size, kPointerSize * 2)
1218  : Operand(ebp, actual.immediate() * times_pointer_size +
1219  kPointerSize * 2);
1220  Push(receiver_op);
1221  CallRuntime(Runtime::kDebugOnFunctionCall);
1222  Pop(fun);
1223  if (new_target.is_valid()) {
1224  Pop(new_target);
1225  }
1226  if (actual.is_reg()) {
1227  Pop(actual.reg());
1228  SmiUntag(actual.reg());
1229  }
1230  if (expected.is_reg()) {
1231  Pop(expected.reg());
1232  SmiUntag(expected.reg());
1233  }
1234  }
1235  bind(&skip_hook);
1236 }
1237 
1238 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1239  const ParameterCount& expected,
1240  const ParameterCount& actual,
1241  InvokeFlag flag) {
1242  // You can't call a function without a valid frame.
1243  DCHECK(flag == JUMP_FUNCTION || has_frame());
1244  DCHECK(function == edi);
1245  DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
1246  DCHECK_IMPLIES(expected.is_reg(), expected.reg() == ecx);
1247  DCHECK_IMPLIES(actual.is_reg(), actual.reg() == eax);
1248 
1249  // On function call, call into the debugger if necessary.
1250  CheckDebugHook(function, new_target, expected, actual);
1251 
1252  // Clear the new.target register if not given.
1253  if (!new_target.is_valid()) {
1254  Move(edx, isolate()->factory()->undefined_value());
1255  }
1256 
1257  Label done;
1258  bool definitely_mismatches = false;
1259  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
1260  Label::kNear);
1261  if (!definitely_mismatches) {
1262  // We call indirectly through the code field in the function to
1263  // allow recompilation to take effect without changing any of the
1264  // call sites.
1265  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
1266  mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
1267  add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
1268  if (flag == CALL_FUNCTION) {
1269  call(ecx);
1270  } else {
1271  DCHECK(flag == JUMP_FUNCTION);
1272  jmp(ecx);
1273  }
1274  bind(&done);
1275  }
1276 }
1277 
1278 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1279  const ParameterCount& actual,
1280  InvokeFlag flag) {
1281  // You can't call a function without a valid frame.
1282  DCHECK(flag == JUMP_FUNCTION || has_frame());
1283 
1284  DCHECK(fun == edi);
1285  mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
1286  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
1287  movzx_w(ecx,
1288  FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
1289 
1290  ParameterCount expected(ecx);
1291  InvokeFunctionCode(edi, new_target, expected, actual, flag);
1292 }
1293 
1294 void MacroAssembler::LoadGlobalProxy(Register dst) {
1295  mov(dst, NativeContextOperand());
1296  mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
1297 }
1298 
1299 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
1300  // Load the native context from the current context.
1301  mov(function, NativeContextOperand());
1302  // Load the function from the native context.
1303  mov(function, ContextOperand(function, index));
1304 }
1305 
1306 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
1307  // The registers are pushed starting with the lowest encoding,
1308  // which means that lowest encodings are furthest away from
1309  // the stack pointer.
1310  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
1311  return kNumSafepointRegisters - reg_code - 1;
1312 }
1313 
1314 void TurboAssembler::Ret() { ret(0); }
1315 
1316 void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
1317  if (is_uint16(bytes_dropped)) {
1318  ret(bytes_dropped);
1319  } else {
1320  pop(scratch);
1321  add(esp, Immediate(bytes_dropped));
1322  push(scratch);
1323  ret(0);
1324  }
1325 }
1326 
1327 void TurboAssembler::Push(Immediate value) {
1328  if (root_array_available() && options().isolate_independent_code) {
1329  if (value.is_embedded_object()) {
1330  Push(HeapObjectAsOperand(value.embedded_object()));
1331  return;
1332  } else if (value.is_external_reference()) {
1333  Push(ExternalReferenceAddressAsOperand(value.external_reference()));
1334  return;
1335  }
1336  }
1337  push(value);
1338 }
1339 
1340 void MacroAssembler::Drop(int stack_elements) {
1341  if (stack_elements > 0) {
1342  add(esp, Immediate(stack_elements * kPointerSize));
1343  }
1344 }
1345 
1346 void TurboAssembler::Move(Register dst, Register src) {
1347  if (dst != src) {
1348  mov(dst, src);
1349  }
1350 }
1351 
1352 void TurboAssembler::Move(Register dst, const Immediate& src) {
1353  if (!src.is_heap_object_request() && src.is_zero()) {
1354  xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
1355  } else if (src.is_external_reference()) {
1356  LoadAddress(dst, src.external_reference());
1357  } else {
1358  mov(dst, src);
1359  }
1360 }
1361 
1362 void TurboAssembler::Move(Operand dst, const Immediate& src) {
1363  // Since there's no scratch register available, take a detour through the
1364  // stack.
1365  if (root_array_available() && options().isolate_independent_code) {
1366  if (src.is_embedded_object() || src.is_external_reference() ||
1367  src.is_heap_object_request()) {
1368  Push(src);
1369  pop(dst);
1370  return;
1371  }
1372  }
1373 
1374  if (src.is_embedded_object()) {
1375  mov(dst, src.embedded_object());
1376  } else {
1377  mov(dst, src);
1378  }
1379 }
1380 
1381 void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
1382  if (root_array_available() && options().isolate_independent_code) {
1383  IndirectLoadConstant(dst, src);
1384  return;
1385  }
1386  mov(dst, src);
1387 }
1388 
1389 void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
1390  if (src == 0) {
1391  pxor(dst, dst);
1392  } else {
1393  unsigned cnt = base::bits::CountPopulation(src);
1394  unsigned nlz = base::bits::CountLeadingZeros32(src);
1395  unsigned ntz = base::bits::CountTrailingZeros32(src);
1396  if (nlz + cnt + ntz == 32) {
1397  pcmpeqd(dst, dst);
1398  if (ntz == 0) {
1399  psrld(dst, 32 - cnt);
1400  } else {
1401  pslld(dst, 32 - cnt);
1402  if (nlz != 0) psrld(dst, nlz);
1403  }
1404  } else {
1405  push(eax);
1406  mov(eax, Immediate(src));
1407  movd(dst, Operand(eax));
1408  pop(eax);
1409  }
1410  }
1411 }
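// Note: the nlz + cnt + ntz == 32 test recognizes a single contiguous run of
// one bits. For example src == 0x00FFFF00 (cnt 16, nlz 8, ntz 8): pcmpeqd
// fills the register with ones, pslld by 16 leaves 0xFFFF0000 in each lane,
// and psrld by 8 yields 0x00FFFF00, avoiding a memory load or a GPR round
// trip.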
1412 
1413 void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1414  if (src == 0) {
1415  pxor(dst, dst);
1416  } else {
1417  uint32_t lower = static_cast<uint32_t>(src);
1418  uint32_t upper = static_cast<uint32_t>(src >> 32);
1419  unsigned cnt = base::bits::CountPopulation(src);
1420  unsigned nlz = base::bits::CountLeadingZeros64(src);
1421  unsigned ntz = base::bits::CountTrailingZeros64(src);
1422  if (nlz + cnt + ntz == 64) {
1423  pcmpeqd(dst, dst);
1424  if (ntz == 0) {
1425  psrlq(dst, 64 - cnt);
1426  } else {
1427  psllq(dst, 64 - cnt);
1428  if (nlz != 0) psrlq(dst, nlz);
1429  }
1430  } else if (lower == 0) {
1431  Move(dst, upper);
1432  psllq(dst, 32);
1433  } else if (CpuFeatures::IsSupported(SSE4_1)) {
1434  CpuFeatureScope scope(this, SSE4_1);
1435  push(eax);
1436  Move(eax, Immediate(lower));
1437  movd(dst, Operand(eax));
1438  if (upper != lower) {
1439  Move(eax, Immediate(upper));
1440  }
1441  pinsrd(dst, Operand(eax), 1);
1442  pop(eax);
1443  } else {
1444  push(Immediate(upper));
1445  push(Immediate(lower));
1446  movsd(dst, Operand(esp, 0));
1447  add(esp, Immediate(kDoubleSize));
1448  }
1449  }
1450 }
1451 
1452 void TurboAssembler::Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
1453  if (CpuFeatures::IsSupported(AVX)) {
1454  CpuFeatureScope scope(this, AVX);
1455  vpshufhw(dst, src, shuffle);
1456  } else {
1457  pshufhw(dst, src, shuffle);
1458  }
1459 }
1460 
1461 void TurboAssembler::Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
1462  if (CpuFeatures::IsSupported(AVX)) {
1463  CpuFeatureScope scope(this, AVX);
1464  vpshuflw(dst, src, shuffle);
1465  } else {
1466  pshuflw(dst, src, shuffle);
1467  }
1468 }
1469 
1470 void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
1471  if (CpuFeatures::IsSupported(AVX)) {
1472  CpuFeatureScope scope(this, AVX);
1473  vpshufd(dst, src, shuffle);
1474  } else {
1475  pshufd(dst, src, shuffle);
1476  }
1477 }
1478 
1479 void TurboAssembler::Psraw(XMMRegister dst, uint8_t shift) {
1480  if (CpuFeatures::IsSupported(AVX)) {
1481  CpuFeatureScope scope(this, AVX);
1482  vpsraw(dst, dst, shift);
1483  } else {
1484  psraw(dst, shift);
1485  }
1486 }
1487 
1488 void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) {
1489  if (CpuFeatures::IsSupported(AVX)) {
1490  CpuFeatureScope scope(this, AVX);
1491  vpsrlw(dst, dst, shift);
1492  } else {
1493  psrlw(dst, shift);
1494  }
1495 }
1496 
1497 void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
1498  if (CpuFeatures::IsSupported(AVX)) {
1499  CpuFeatureScope scope(this, AVX);
1500  vpsignb(dst, dst, src);
1501  return;
1502  }
1503  if (CpuFeatures::IsSupported(SSSE3)) {
1504  CpuFeatureScope sse_scope(this, SSSE3);
1505  psignb(dst, src);
1506  return;
1507  }
1508  FATAL("no AVX or SSE3 support");
1509 }
1510 
1511 void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
1512  if (CpuFeatures::IsSupported(AVX)) {
1513  CpuFeatureScope scope(this, AVX);
1514  vpsignw(dst, dst, src);
1515  return;
1516  }
1517  if (CpuFeatures::IsSupported(SSSE3)) {
1518  CpuFeatureScope sse_scope(this, SSSE3);
1519  psignw(dst, src);
1520  return;
1521  }
1522  FATAL("no AVX or SSE3 support");
1523 }
1524 
1525 void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
1526  if (CpuFeatures::IsSupported(AVX)) {
1527  CpuFeatureScope scope(this, AVX);
1528  vpsignd(dst, dst, src);
1529  return;
1530  }
1531  if (CpuFeatures::IsSupported(SSSE3)) {
1532  CpuFeatureScope sse_scope(this, SSSE3);
1533  psignd(dst, src);
1534  return;
1535  }
1536  FATAL("no AVX or SSE3 support");
1537 }
1538 
1539 void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
1540  if (CpuFeatures::IsSupported(AVX)) {
1541  CpuFeatureScope scope(this, AVX);
1542  vpshufb(dst, dst, src);
1543  return;
1544  }
1545  if (CpuFeatures::IsSupported(SSSE3)) {
1546  CpuFeatureScope sse_scope(this, SSSE3);
1547  pshufb(dst, src);
1548  return;
1549  }
1550  FATAL("no AVX or SSE3 support");
1551 }
1552 
1553 void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
1554  if (CpuFeatures::IsSupported(AVX)) {
1555  CpuFeatureScope scope(this, AVX);
1556  vpblendw(dst, dst, src, imm8);
1557  return;
1558  }
1559  if (CpuFeatures::IsSupported(SSE4_1)) {
1560  CpuFeatureScope sse_scope(this, SSE4_1);
1561  pblendw(dst, src, imm8);
1562  return;
1563  }
1564  FATAL("no AVX or SSE4.1 support");
1565 }
1566 
1567 void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
1568  if (CpuFeatures::IsSupported(AVX)) {
1569  CpuFeatureScope scope(this, AVX);
1570  vpalignr(dst, dst, src, imm8);
1571  return;
1572  }
1573  if (CpuFeatures::IsSupported(SSSE3)) {
1574  CpuFeatureScope sse_scope(this, SSSE3);
1575  palignr(dst, src, imm8);
1576  return;
1577  }
1578  FATAL("no AVX or SSE3 support");
1579 }
1580 
1581 void TurboAssembler::Pextrb(Register dst, XMMRegister src, uint8_t imm8) {
1582  if (CpuFeatures::IsSupported(AVX)) {
1583  CpuFeatureScope scope(this, AVX);
1584  vpextrb(dst, src, imm8);
1585  return;
1586  }
1587  if (CpuFeatures::IsSupported(SSE4_1)) {
1588  CpuFeatureScope sse_scope(this, SSE4_1);
1589  pextrb(dst, src, imm8);
1590  return;
1591  }
1592  FATAL("no AVX or SSE4.1 support");
1593 }
1594 
1595 void TurboAssembler::Pextrw(Register dst, XMMRegister src, uint8_t imm8) {
1596  if (CpuFeatures::IsSupported(AVX)) {
1597  CpuFeatureScope scope(this, AVX);
1598  vpextrw(dst, src, imm8);
1599  return;
1600  }
1601  if (CpuFeatures::IsSupported(SSE4_1)) {
1602  CpuFeatureScope sse_scope(this, SSE4_1);
1603  pextrw(dst, src, imm8);
1604  return;
1605  }
1606  FATAL("no AVX or SSE4.1 support");
1607 }
1608 
1609 void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
1610  if (imm8 == 0) {
1611  Movd(dst, src);
1612  return;
1613  }
1614  if (CpuFeatures::IsSupported(AVX)) {
1615  CpuFeatureScope scope(this, AVX);
1616  vpextrd(dst, src, imm8);
1617  return;
1618  }
1619  if (CpuFeatures::IsSupported(SSE4_1)) {
1620  CpuFeatureScope sse_scope(this, SSE4_1);
1621  pextrd(dst, src, imm8);
1622  return;
1623  }
1624  // Without AVX or SSE, we can only have 64-bit values in xmm registers.
1625  // We don't have an xmm scratch register, so move the data via the stack. This
1626  // path is rarely required, so it's acceptable to be slow.
1627  DCHECK_LT(imm8, 2);
1628  sub(esp, Immediate(kDoubleSize));
1629  movsd(Operand(esp, 0), src);
1630  mov(dst, Operand(esp, imm8 * kUInt32Size));
1631  add(esp, Immediate(kDoubleSize));
1632 }
1633 
1634 void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
1635  if (CpuFeatures::IsSupported(AVX)) {
1636  CpuFeatureScope scope(this, AVX);
1637  vpinsrd(dst, dst, src, imm8);
1638  return;
1639  }
1640  if (CpuFeatures::IsSupported(SSE4_1)) {
1641  CpuFeatureScope sse_scope(this, SSE4_1);
1642  pinsrd(dst, src, imm8);
1643  return;
1644  }
1645  // Without AVX or SSE, we can only have 64-bit values in xmm registers.
1646  // We don't have an xmm scratch register, so move the data via the stack. This
1647  // path is rarely required, so it's acceptable to be slow.
1648  DCHECK_LT(imm8, 2);
1649  sub(esp, Immediate(kDoubleSize));
1650  // Write original content of {dst} to the stack.
1651  movsd(Operand(esp, 0), dst);
1652  // Overwrite the portion specified in {imm8}.
1653  if (src.is_reg_only()) {
1654  mov(Operand(esp, imm8 * kUInt32Size), src.reg());
1655  } else {
1656  movss(dst, src);
1657  movss(Operand(esp, imm8 * kUInt32Size), dst);
1658  }
1659  // Load back the full value into {dst}.
1660  movsd(dst, Operand(esp, 0));
1661  add(esp, Immediate(kDoubleSize));
1662 }
1663 
1664 void TurboAssembler::Lzcnt(Register dst, Operand src) {
1665  if (CpuFeatures::IsSupported(LZCNT)) {
1666  CpuFeatureScope scope(this, LZCNT);
1667  lzcnt(dst, src);
1668  return;
1669  }
1670  Label not_zero_src;
1671  bsr(dst, src);
1672  j(not_zero, &not_zero_src, Label::kNear);
1673  Move(dst, Immediate(63)); // 63^31 == 32
1674  bind(&not_zero_src);
1675  xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
1676 }
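// Editor's illustration (not part of the original file): the bsr/xor fallback
// above relies on 31 ^ msb_index == 31 - msb_index for msb_index in [0, 31],
// and on preloading 63 so that the final xor yields 32 for a zero input.
// Reference semantics in portable C++:
static int LzcntReferenceSketch(unsigned x) {
  if (x == 0) return 32;          // 63 ^ 31 == 32, the defined lzcnt result
  int msb = 0;                    // bsr: index of the most significant set bit
  while (x >>= 1) msb++;
  return 31 ^ msb;                // equals 31 - msb, the leading-zero count
}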
1677 
1678 void TurboAssembler::Tzcnt(Register dst, Operand src) {
1679  if (CpuFeatures::IsSupported(BMI1)) {
1680  CpuFeatureScope scope(this, BMI1);
1681  tzcnt(dst, src);
1682  return;
1683  }
1684  Label not_zero_src;
1685  bsf(dst, src);
1686  j(not_zero, &not_zero_src, Label::kNear);
1687  Move(dst, Immediate(32)); // The result of tzcnt is 32 if src = 0.
1688  bind(&not_zero_src);
1689 }
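// Editor's illustration (not part of the original file): the bsf fallback
// above jumps over the Move when the source is non-zero, so only a zero input
// receives the defined tzcnt result of 32. Reference semantics in C++:
static int TzcntReferenceSketch(unsigned x) {
  if (x == 0) return 32;          // bsf sets ZF for a zero input; dst gets 32
  int lsb = 0;                    // bsf: index of the least significant set bit
  while ((x & 1u) == 0) { x >>= 1; lsb++; }
  return lsb;
}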
1690 
1691 void TurboAssembler::Popcnt(Register dst, Operand src) {
1692  if (CpuFeatures::IsSupported(POPCNT)) {
1693  CpuFeatureScope scope(this, POPCNT);
1694  popcnt(dst, src);
1695  return;
1696  }
1697  FATAL("no POPCNT support");
1698 }
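// Editor's note (not part of the original file): there is no software fallback
// here; callers must guard on CpuFeatures::IsSupported(POPCNT). For reference
// only, the operation the popcnt instruction performs:
static int PopcntReferenceSketch(unsigned x) {
  int count = 0;
  while (x != 0) {
    x &= x - 1;                   // clear the lowest set bit
    count++;
  }
  return count;
}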
1699 
1700 void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
1701  cmp(in_out, Immediate(kClearedWeakHeapObjectLower32));
1702  j(equal, target_if_cleared);
1703 
1704  and_(in_out, Immediate(~kWeakHeapObjectMask));
1705 }
1706 
1707 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1708  Register scratch) {
1709  DCHECK_GT(value, 0);
1710  if (FLAG_native_code_counters && counter->Enabled()) {
1711  Operand operand =
1712  ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
1713  if (value == 1) {
1714  inc(operand);
1715  } else {
1716  add(operand, Immediate(value));
1717  }
1718  }
1719 }
1720 
1721 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1722  Register scratch) {
1723  DCHECK_GT(value, 0);
1724  if (FLAG_native_code_counters && counter->Enabled()) {
1725  Operand operand =
1726  ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
1727  if (value == 1) {
1728  dec(operand);
1729  } else {
1730  sub(operand, Immediate(value));
1731  }
1732  }
1733 }
1734 
1735 void TurboAssembler::Assert(Condition cc, AbortReason reason) {
1736  if (emit_debug_code()) Check(cc, reason);
1737 }
1738 
1739 void TurboAssembler::AssertUnreachable(AbortReason reason) {
1740  if (emit_debug_code()) Abort(reason);
1741 }
1742 
1743 void TurboAssembler::Check(Condition cc, AbortReason reason) {
1744  Label L;
1745  j(cc, &L);
1746  Abort(reason);
1747  // will not return here
1748  bind(&L);
1749 }
1750 
1751 void TurboAssembler::CheckStackAlignment() {
1752  int frame_alignment = base::OS::ActivationFrameAlignment();
1753  int frame_alignment_mask = frame_alignment - 1;
1754  if (frame_alignment > kPointerSize) {
1755  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1756  Label alignment_as_expected;
1757  test(esp, Immediate(frame_alignment_mask));
1758  j(zero, &alignment_as_expected);
1759  // Abort if stack is not aligned.
1760  int3();
1761  bind(&alignment_as_expected);
1762  }
1763 }
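// Editor's illustration (not part of the original file): because the frame
// alignment is a power of two, testing the low bits of esp against
// (frame_alignment - 1) is equivalent to checking esp % frame_alignment == 0:
static bool IsFrameAlignedSketch(unsigned esp, unsigned frame_alignment) {
  return (esp & (frame_alignment - 1)) == 0;   // mirrors test(esp, Immediate(mask))
}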
1764 
1765 void TurboAssembler::Abort(AbortReason reason) {
1766 #ifdef DEBUG
1767  const char* msg = GetAbortReason(reason);
1768  RecordComment("Abort message: ");
1769  RecordComment(msg);
1770 #endif
1771 
1772  // Avoid emitting call to builtin if requested.
1773  if (trap_on_abort()) {
1774  int3();
1775  return;
1776  }
1777 
1778  if (should_abort_hard()) {
1779  // We don't care if we constructed a frame. Just pretend we did.
1780  FrameScope assume_frame(this, StackFrame::NONE);
1781  PrepareCallCFunction(1, eax);
1782  mov(Operand(esp, 0), Immediate(static_cast<int>(reason)));
1783  CallCFunction(ExternalReference::abort_with_reason(), 1);
1784  return;
1785  }
1786 
1787  Move(edx, Smi::FromInt(static_cast<int>(reason)));
1788 
1789  // Disable stub call restrictions to always allow calls to abort.
1790  if (!has_frame()) {
1791  // We don't actually want to generate a pile of code for this, so just
1792  // claim there is a stack frame, without generating one.
1793  FrameScope scope(this, StackFrame::NONE);
1794  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1795  } else {
1796  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1797  }
1798  // will not return here
1799  int3();
1800 }
1801 
1802 
1803 void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
1804  int frame_alignment = base::OS::ActivationFrameAlignment();
1805  if (frame_alignment != 0) {
1806  // Make stack end at alignment and make room for num_arguments words
1807  // and the original value of esp.
1808  mov(scratch, esp);
1809  sub(esp, Immediate((num_arguments + 1) * kPointerSize));
1810  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1811  and_(esp, -frame_alignment);
1812  mov(Operand(esp, num_arguments * kPointerSize), scratch);
1813  } else {
1814  sub(esp, Immediate(num_arguments * kPointerSize));
1815  }
1816 }
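// Editor's illustration (not part of the original file): the effect of the
// realignment on esp, assuming ia32's kPointerSize == 4. For example, with
// esp == 0xFF8, num_arguments == 2 and frame_alignment == 16, esp becomes
// 0xFF8 - 12 = 0xFEC, then 0xFEC & ~15 = 0xFE0, and the original 0xFF8 is
// stored at [0xFE0 + 2 * 4].
static unsigned EspAfterPrepareSketch(unsigned esp, int num_arguments,
                                      unsigned frame_alignment) {
  esp -= (num_arguments + 1) * 4;    // room for the arguments and the saved esp
  esp &= ~(frame_alignment - 1);     // and_(esp, -frame_alignment) rounds down
  return esp;                        // old esp lands at [esp + num_arguments * 4]
}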
1817 
1818 void TurboAssembler::CallCFunction(ExternalReference function,
1819  int num_arguments) {
1820  // Trashing eax is ok as it will be the return value.
1821  Move(eax, Immediate(function));
1822  CallCFunction(eax, num_arguments);
1823 }
1824 
1825 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
1826  DCHECK_LE(num_arguments, kMaxCParameters);
1827  DCHECK(has_frame());
1828  // Check stack alignment.
1829  if (emit_debug_code()) {
1830  CheckStackAlignment();
1831  }
1832 
1833  call(function);
1834  if (base::OS::ActivationFrameAlignment() != 0) {
1835  mov(esp, Operand(esp, num_arguments * kPointerSize));
1836  } else {
1837  add(esp, Immediate(num_arguments * kPointerSize));
1838  }
1839 }
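// Editor's note (not part of the original file): when PrepareCallCFunction
// realigned the stack, the pre-call esp was stored at
// [esp + num_arguments * kPointerSize], so the single mov above restores it in
// one step; otherwise the add simply pops the argument slots.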
1840 
1841 void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1842  if (FLAG_embedded_builtins) {
1843  if (root_array_available() && options().isolate_independent_code &&
1844  !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
1845  // All call targets are expected to be isolate-independent builtins.
1846  // If this assumption is ever violated, we could add back support for
1847  // calls through a virtual target register.
1848  UNREACHABLE();
1849  } else if (options().inline_offheap_trampolines) {
1850  int builtin_index = Builtins::kNoBuiltinId;
1851  if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
1852  Builtins::IsIsolateIndependent(builtin_index)) {
1853  // Inline the trampoline.
1854  RecordCommentForOffHeapTrampoline(builtin_index);
1855  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1856  EmbeddedData d = EmbeddedData::FromBlob();
1857  Address entry = d.InstructionStartOfBuiltin(builtin_index);
1858  call(entry, RelocInfo::OFF_HEAP_TARGET);
1859  return;
1860  }
1861  }
1862  }
1863  DCHECK(RelocInfo::IsCodeTarget(rmode));
1864  call(code_object, rmode);
1865 }
1866 
1867 void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
1868  if (FLAG_embedded_builtins) {
1869  if (root_array_available() && options().isolate_independent_code &&
1870  !Builtins::IsIsolateIndependentBuiltin(*code_object)) {
 1871  // All jump targets are expected to be isolate-independent builtins.
 1872  // If this assumption is ever violated, we could add back support for
 1873  // jumps through a virtual target register.
1874  UNREACHABLE();
1875  } else if (options().inline_offheap_trampolines) {
1876  int builtin_index = Builtins::kNoBuiltinId;
1877  if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
1878  Builtins::IsIsolateIndependent(builtin_index)) {
1879  // Inline the trampoline.
1880  RecordCommentForOffHeapTrampoline(builtin_index);
1881  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1882  EmbeddedData d = EmbeddedData::FromBlob();
1883  Address entry = d.InstructionStartOfBuiltin(builtin_index);
1884  jmp(entry, RelocInfo::OFF_HEAP_TARGET);
1885  return;
1886  }
1887  }
1888  }
1889  DCHECK(RelocInfo::IsCodeTarget(rmode));
1890  jmp(code_object, rmode);
1891 }
1892 
1893 void TurboAssembler::RetpolineCall(Register reg) {
1894  Label setup_return, setup_target, inner_indirect_branch, capture_spec;
1895 
1896  jmp(&setup_return); // Jump past the entire retpoline below.
1897 
1898  bind(&inner_indirect_branch);
1899  call(&setup_target);
1900 
1901  bind(&capture_spec);
1902  pause();
1903  jmp(&capture_spec);
1904 
1905  bind(&setup_target);
1906  mov(Operand(esp, 0), reg);
1907  ret(0);
1908 
1909  bind(&setup_return);
1910  call(&inner_indirect_branch); // Callee will return after this instruction.
1911 }
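// Editor's note (not part of the original file): how the retpoline above works.
//  1. jmp(&setup_return) skips the retpoline body on the normal path.
//  2. The call at setup_return pushes its own return address (where the callee
//     will eventually return) and enters inner_indirect_branch.
//  3. inner_indirect_branch calls setup_target, pushing the address of the
//     capture_spec pause/jmp loop onto the stack.
//  4. setup_target overwrites that top-of-stack slot with the real target via
//     mov(Operand(esp, 0), reg), and ret(0) "returns" into it, so the indirect
//     branch is performed through a return instruction.
//  5. Speculative execution of that ret follows the return-stack prediction to
//     capture_spec, where the pause/jmp loop safely contains it.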
1912 
1913 void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
1914  Label setup_return, setup_target, inner_indirect_branch, capture_spec;
1915 
1916  jmp(&setup_return); // Jump past the entire retpoline below.
1917 
1918  bind(&inner_indirect_branch);
1919  call(&setup_target);
1920 
1921  bind(&capture_spec);
1922  pause();
1923  jmp(&capture_spec);
1924 
1925  bind(&setup_target);
1926  mov(Operand(esp, 0), destination, rmode);
1927  ret(0);
1928 
1929  bind(&setup_return);
1930  call(&inner_indirect_branch); // Callee will return after this instruction.
1931 }
1932 
1933 void TurboAssembler::RetpolineJump(Register reg) {
1934  Label setup_target, capture_spec;
1935 
1936  call(&setup_target);
1937 
1938  bind(&capture_spec);
1939  pause();
1940  jmp(&capture_spec);
1941 
1942  bind(&setup_target);
1943  mov(Operand(esp, 0), reg);
1944  ret(0);
1945 }
1946 
1947 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
1948  Condition cc, Label* condition_met,
1949  Label::Distance condition_met_distance) {
1950  DCHECK(cc == zero || cc == not_zero);
1951  if (scratch == object) {
1952  and_(scratch, Immediate(~kPageAlignmentMask));
1953  } else {
1954  mov(scratch, Immediate(~kPageAlignmentMask));
1955  and_(scratch, object);
1956  }
1957  if (mask < (1 << kBitsPerByte)) {
1958  test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
1959  } else {
1960  test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
1961  }
1962  j(cc, condition_met, condition_met_distance);
1963 }
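// Editor's illustration (not part of the original file): masking off the low,
// in-page bits of the object address yields the page-aligned MemoryChunk
// header, whose flags word is then tested at kFlagsOffset:
static unsigned ChunkHeaderFromObjectSketch(unsigned object_address,
                                            unsigned page_alignment_mask) {
  return object_address & ~page_alignment_mask;  // and_(scratch, ~kPageAlignmentMask)
}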
1964 
1965 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
1966  // In order to get the address of the current instruction, we first need
1967  // to use a call and then use a pop, thus pushing the return address to
1968  // the stack and then popping it into the register.
1969  Label current;
1970  call(&current);
1971  int pc = pc_offset();
1972  bind(&current);
1973  pop(dst);
1974  if (pc != 0) {
1975  sub(dst, Immediate(pc));
1976  }
1977 }
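// Editor's note (not part of the original file): pop(dst) retrieves the return
// address pushed by call(&current), i.e. the absolute address of the
// instruction at `current`. Because `pc` is that instruction's offset from the
// start of the code object, subtracting it leaves the code start address in dst.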
1978 
1979 } // namespace internal
1980 } // namespace v8
1981 
1982 #endif // V8_TARGET_ARCH_IA32