V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
macro-assembler-ppc.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include <assert.h> // For assert
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7 
8 #if V8_TARGET_ARCH_PPC
9 
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/bootstrapper.h"
13 #include "src/callable.h"
14 #include "src/code-factory.h"
15 #include "src/code-stubs.h"
16 #include "src/counters.h"
17 #include "src/debug/debug.h"
18 #include "src/external-reference-table.h"
19 #include "src/frames-inl.h"
20 #include "src/macro-assembler.h"
21 #include "src/register-configuration.h"
22 #include "src/runtime/runtime.h"
23 #include "src/snapshot/embedded-data.h"
24 #include "src/snapshot/snapshot.h"
25 #include "src/wasm/wasm-code-manager.h"
26 
27 // Satisfy cpplint check, but don't include platform-specific header. It is
28 // included recursively via macro-assembler.h.
29 #if 0
30 #include "src/ppc/macro-assembler-ppc.h"
31 #endif
32 
33 namespace v8 {
34 namespace internal {
35 
36 MacroAssembler::MacroAssembler(Isolate* isolate,
37  const AssemblerOptions& options, void* buffer,
38  int size, CodeObjectRequired create_code_object)
39  : TurboAssembler(isolate, options, buffer, size, create_code_object) {
40  if (create_code_object == CodeObjectRequired::kYes) {
41  // Unlike TurboAssembler, which can be used off the main thread and may not
42  // allocate, macro assembler creates its own copy of the self-reference
43  // marker in order to disambiguate between self-references during nested
44  // code generation (e.g.: codegen of the current object triggers stub
45  // compilation through CodeStub::GetCode()).
46  code_object_ = Handle<HeapObject>::New(
47  *isolate->factory()->NewSelfReferenceMarker(), isolate);
48  }
49 }
50 
51 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
52  Register exclusion1,
53  Register exclusion2,
54  Register exclusion3) const {
55  int bytes = 0;
56  RegList exclusions = 0;
57  if (exclusion1 != no_reg) {
58  exclusions |= exclusion1.bit();
59  if (exclusion2 != no_reg) {
60  exclusions |= exclusion2.bit();
61  if (exclusion3 != no_reg) {
62  exclusions |= exclusion3.bit();
63  }
64  }
65  }
66 
67  RegList list = kJSCallerSaved & ~exclusions;
68  bytes += NumRegs(list) * kPointerSize;
69 
70  if (fp_mode == kSaveFPRegs) {
71  bytes += kNumCallerSavedDoubles * kDoubleSize;
72  }
73 
74  return bytes;
75 }
76 
77 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
78  Register exclusion2, Register exclusion3) {
79  int bytes = 0;
80  RegList exclusions = 0;
81  if (exclusion1 != no_reg) {
82  exclusions |= exclusion1.bit();
83  if (exclusion2 != no_reg) {
84  exclusions |= exclusion2.bit();
85  if (exclusion3 != no_reg) {
86  exclusions |= exclusion3.bit();
87  }
88  }
89  }
90 
91  RegList list = kJSCallerSaved & ~exclusions;
92  MultiPush(list);
93  bytes += NumRegs(list) * kPointerSize;
94 
95  if (fp_mode == kSaveFPRegs) {
96  MultiPushDoubles(kCallerSavedDoubles);
97  bytes += kNumCallerSavedDoubles * kDoubleSize;
98  }
99 
100  return bytes;
101 }
102 
103 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
104  Register exclusion2, Register exclusion3) {
105  int bytes = 0;
106  if (fp_mode == kSaveFPRegs) {
107  MultiPopDoubles(kCallerSavedDoubles);
108  bytes += kNumCallerSavedDoubles * kDoubleSize;
109  }
110 
111  RegList exclusions = 0;
112  if (exclusion1 != no_reg) {
113  exclusions |= exclusion1.bit();
114  if (exclusion2 != no_reg) {
115  exclusions |= exclusion2.bit();
116  if (exclusion3 != no_reg) {
117  exclusions |= exclusion3.bit();
118  }
119  }
120  }
121 
122  RegList list = kJSCallerSaved & ~exclusions;
123  MultiPop(list);
124  bytes += NumRegs(list) * kPointerSize;
125 
126  return bytes;
127 }
128 
129 void TurboAssembler::Jump(Register target) {
130  mtctr(target);
131  bctr();
132 }
133 
134 void TurboAssembler::LoadFromConstantsTable(Register destination,
135  int constant_index) {
136  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
137 
138  const uint32_t offset =
139  FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
140 
141  CHECK(is_uint19(offset));
142  DCHECK_NE(destination, r0);
143  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
144  LoadP(destination, MemOperand(destination, offset), r0);
145 }
146 
147 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
148  LoadP(destination, MemOperand(kRootRegister, offset), r0);
149 }
150 
151 void TurboAssembler::LoadRootRegisterOffset(Register destination,
152  intptr_t offset) {
153  if (offset == 0) {
154  mr(destination, kRootRegister);
155  } else {
156  addi(destination, kRootRegister, Operand(offset));
157  }
158 }
159 
160 void MacroAssembler::JumpToJSEntry(Register target) {
161  Move(ip, target);
162  Jump(ip);
163 }
164 
165 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
166  Condition cond, CRegister cr) {
167  Label skip;
168 
169  if (cond != al) b(NegateCondition(cond), &skip, cr);
170 
171  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
172 
173  mov(ip, Operand(target, rmode));
174  mtctr(ip);
175  bctr();
176 
177  bind(&skip);
178 }
179 
180 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
181  CRegister cr) {
182  DCHECK(!RelocInfo::IsCodeTarget(rmode));
183  Jump(static_cast<intptr_t>(target), rmode, cond, cr);
184 }
185 
186 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
187  Condition cond, CRegister cr) {
188  DCHECK(RelocInfo::IsCodeTarget(rmode));
189  // 'code' is always generated PPC code.
190  if (FLAG_embedded_builtins) {
191  if (root_array_available_ && options().isolate_independent_code) {
192  Register scratch = ip;
193  IndirectLoadConstant(scratch, code);
194  addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
195  Label skip;
196  if (cond != al) b(NegateCondition(cond), &skip, cr);
197  Jump(scratch);
198  bind(&skip);
199  return;
200  } else if (options().inline_offheap_trampolines) {
201  int builtin_index = Builtins::kNoBuiltinId;
202  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
203  Builtins::IsIsolateIndependent(builtin_index)) {
204  // Inline the trampoline.
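  // The builtin's code lives off-heap in the embedded blob, so rather than
  // branching through its on-heap trampoline we load the instruction start
  // directly and branch there.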
205  RecordCommentForOffHeapTrampoline(builtin_index);
206  EmbeddedData d = EmbeddedData::FromBlob();
207  Address entry = d.InstructionStartOfBuiltin(builtin_index);
208  // Use ip directly instead of using UseScratchRegisterScope, as we do
209  // not preserve scratch registers across calls.
210  mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
211  Label skip;
212  if (cond != al) b(NegateCondition(cond), &skip, cr);
213  Jump(ip);
214  bind(&skip);
215  return;
216  }
217  }
218  }
219  Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
220 }
221 
222 void TurboAssembler::Call(Register target) {
223  BlockTrampolinePoolScope block_trampoline_pool(this);
224  // Branch via CTR and set the LK bit so the return address is saved in LR.
225  mtctr(target);
226  bctrl();
227 }
228 
229 void MacroAssembler::CallJSEntry(Register target) {
230  CHECK(target == r5);
231  Call(target);
232 }
233 
234 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
235  RelocInfo::Mode rmode,
236  Condition cond) {
237  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
238 }
239 
240 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
241  Condition cond) {
242  BlockTrampolinePoolScope block_trampoline_pool(this);
243  DCHECK(cond == al);
244 
245  // This can likely be optimized to make use of bc() with a 24-bit relative offset.
246  //
247  // RecordRelocInfo(x.rmode_, x.immediate);
248  // bc( BA, .... offset, LKset);
249  //
250 
251  mov(ip, Operand(target, rmode));
252  mtctr(ip);
253  bctrl();
254 }
255 
256 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
257  Condition cond) {
258  BlockTrampolinePoolScope block_trampoline_pool(this);
259  DCHECK(RelocInfo::IsCodeTarget(rmode));
260 
261  if (FLAG_embedded_builtins) {
262  if (root_array_available_ && options().isolate_independent_code) {
263  // Use ip directly instead of using UseScratchRegisterScope, as we do not
264  // preserve scratch registers across calls.
265  IndirectLoadConstant(ip, code);
266  addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
267  Label skip;
268  if (cond != al) b(NegateCondition(cond), &skip);
269  Call(ip);
270  bind(&skip);
271  return;
272  } else if (options().inline_offheap_trampolines) {
273  int builtin_index = Builtins::kNoBuiltinId;
274  if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
275  Builtins::IsIsolateIndependent(builtin_index)) {
276  // Inline the trampoline.
277  RecordCommentForOffHeapTrampoline(builtin_index);
278  DCHECK(Builtins::IsBuiltinId(builtin_index));
279  EmbeddedData d = EmbeddedData::FromBlob();
280  Address entry = d.InstructionStartOfBuiltin(builtin_index);
281  // Use ip directly instead of using UseScratchRegisterScope, as we do
282  // not preserve scratch registers across calls.
283  mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
284  Label skip;
285  if (cond != al) b(NegateCondition(cond), &skip);
286  Call(ip);
287  bind(&skip);
288  return;
289  }
290  }
291  }
292  Call(code.address(), rmode, cond);
293 }
294 
295 void TurboAssembler::Drop(int count) {
296  if (count > 0) {
297  Add(sp, sp, count * kPointerSize, r0);
298  }
299 }
300 
301 void TurboAssembler::Drop(Register count, Register scratch) {
302  ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
303  add(sp, sp, scratch);
304 }
305 
306 void TurboAssembler::Call(Label* target) { b(target, SetLK); }
307 
308 void TurboAssembler::Push(Handle<HeapObject> handle) {
309  mov(r0, Operand(handle));
310  push(r0);
311 }
312 
313 void TurboAssembler::Push(Smi smi) {
314  mov(r0, Operand(smi));
315  push(r0);
316 }
317 
318 void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
319  if (FLAG_embedded_builtins) {
320  if (root_array_available_ && options().isolate_independent_code) {
321  IndirectLoadConstant(dst, value);
322  return;
323  }
324  }
325  mov(dst, Operand(value));
326 }
327 
328 void TurboAssembler::Move(Register dst, ExternalReference reference) {
329  if (FLAG_embedded_builtins) {
330  if (root_array_available_ && options().isolate_independent_code) {
331  IndirectLoadExternalReference(dst, reference);
332  return;
333  }
334  }
335  mov(dst, Operand(reference));
336 }
337 
338 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
339  DCHECK(cond == al);
340  if (dst != src) {
341  mr(dst, src);
342  }
343 }
344 
345 void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
346  if (dst != src) {
347  fmr(dst, src);
348  }
349 }
350 
351 void TurboAssembler::MultiPush(RegList regs, Register location) {
352  int16_t num_to_push = base::bits::CountPopulation(regs);
353  int16_t stack_offset = num_to_push * kPointerSize;
354 
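  // Registers are stored from the highest register code downwards, so the
  // lowest-numbered register in 'regs' ends up closest to the new stack
  // pointer.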
355  subi(location, location, Operand(stack_offset));
356  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
357  if ((regs & (1 << i)) != 0) {
358  stack_offset -= kPointerSize;
359  StoreP(ToRegister(i), MemOperand(location, stack_offset));
360  }
361  }
362 }
363 
364 void TurboAssembler::MultiPop(RegList regs, Register location) {
365  int16_t stack_offset = 0;
366 
367  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
368  if ((regs & (1 << i)) != 0) {
369  LoadP(ToRegister(i), MemOperand(location, stack_offset));
370  stack_offset += kPointerSize;
371  }
372  }
373  addi(location, location, Operand(stack_offset));
374 }
375 
376 void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
377  int16_t num_to_push = base::bits::CountPopulation(dregs);
378  int16_t stack_offset = num_to_push * kDoubleSize;
379 
380  subi(location, location, Operand(stack_offset));
381  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
382  if ((dregs & (1 << i)) != 0) {
383  DoubleRegister dreg = DoubleRegister::from_code(i);
384  stack_offset -= kDoubleSize;
385  stfd(dreg, MemOperand(location, stack_offset));
386  }
387  }
388 }
389 
390 void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
391  int16_t stack_offset = 0;
392 
393  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
394  if ((dregs & (1 << i)) != 0) {
395  DoubleRegister dreg = DoubleRegister::from_code(i);
396  lfd(dreg, MemOperand(location, stack_offset));
397  stack_offset += kDoubleSize;
398  }
399  }
400  addi(location, location, Operand(stack_offset));
401 }
402 
403 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
404  Condition cond) {
405  DCHECK(cond == al);
406  LoadP(destination,
407  MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
408 }
409 
410 void MacroAssembler::RecordWriteField(Register object, int offset,
411  Register value, Register dst,
412  LinkRegisterStatus lr_status,
413  SaveFPRegsMode save_fp,
414  RememberedSetAction remembered_set_action,
415  SmiCheck smi_check) {
416  // First, check if a write barrier is even needed. The tests below
417  // catch stores of Smis.
418  Label done;
419 
420  // Skip barrier if writing a smi.
421  if (smi_check == INLINE_SMI_CHECK) {
422  JumpIfSmi(value, &done);
423  }
424 
425  // Although the object register is tagged, the offset is relative to the start
426  // of the object, so the offset must be a multiple of kPointerSize.
427  DCHECK(IsAligned(offset, kPointerSize));
428 
429  Add(dst, object, offset - kHeapObjectTag, r0);
430  if (emit_debug_code()) {
431  Label ok;
432  andi(r0, dst, Operand(kPointerSize - 1));
433  beq(&ok, cr0);
434  stop("Unaligned cell in write barrier");
435  bind(&ok);
436  }
437 
438  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
439  OMIT_SMI_CHECK);
440 
441  bind(&done);
442 
443  // Clobber clobbered input registers when running with the debug-code flag
444  // turned on to provoke errors.
445  if (emit_debug_code()) {
446  mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
447  mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
448  }
449 }
450 
451 void TurboAssembler::SaveRegisters(RegList registers) {
452  DCHECK_GT(NumRegs(registers), 0);
453  RegList regs = 0;
454  for (int i = 0; i < Register::kNumRegisters; ++i) {
455  if ((registers >> i) & 1u) {
456  regs |= Register::from_code(i).bit();
457  }
458  }
459 
460  MultiPush(regs);
461 }
462 
463 void TurboAssembler::RestoreRegisters(RegList registers) {
464  DCHECK_GT(NumRegs(registers), 0);
465  RegList regs = 0;
466  for (int i = 0; i < Register::kNumRegisters; ++i) {
467  if ((registers >> i) & 1u) {
468  regs |= Register::from_code(i).bit();
469  }
470  }
471  MultiPop(regs);
472 }
473 
474 void TurboAssembler::CallRecordWriteStub(
475  Register object, Register address,
476  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
477  CallRecordWriteStub(
478  object, address, remembered_set_action, fp_mode,
479  isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
480  kNullAddress);
481 }
482 
483 void TurboAssembler::CallRecordWriteStub(
484  Register object, Register address,
485  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
486  Address wasm_target) {
487  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
488  Handle<Code>::null(), wasm_target);
489 }
490 
491 void TurboAssembler::CallRecordWriteStub(
492  Register object, Register address,
493  RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
494  Handle<Code> code_target, Address wasm_target) {
495  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
496  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
497  // i.e. always emit the remembered set and save FP registers in RecordWriteStub.
498  // If a large performance regression is observed, we should use these values to
499  // avoid unnecessary work.
500 
501  RecordWriteDescriptor descriptor;
502  RegList registers = descriptor.allocatable_registers();
503 
504  SaveRegisters(registers);
505 
506  Register object_parameter(
507  descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
508  Register slot_parameter(
509  descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
510  Register remembered_set_parameter(
511  descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
512  Register fp_mode_parameter(
513  descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
514 
515  push(object);
516  push(address);
517 
518  pop(slot_parameter);
519  pop(object_parameter);
520 
521  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
522  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
523  if (code_target.is_null()) {
524  Call(wasm_target, RelocInfo::WASM_STUB_CALL);
525  } else {
526  Call(code_target, RelocInfo::CODE_TARGET);
527  }
528 
529  RestoreRegisters(registers);
530 }
531 
532 // Will clobber 4 registers: object, address, value, ip. The
533 // register 'object' contains a heap object pointer. The heap object
534 // tag is shifted away.
535 void MacroAssembler::RecordWrite(Register object, Register address,
536  Register value, LinkRegisterStatus lr_status,
537  SaveFPRegsMode fp_mode,
538  RememberedSetAction remembered_set_action,
539  SmiCheck smi_check) {
540  DCHECK(object != value);
541  if (emit_debug_code()) {
542  LoadP(r0, MemOperand(address));
543  cmp(r0, value);
544  Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
545  }
546 
547  if (remembered_set_action == OMIT_REMEMBERED_SET &&
548  !FLAG_incremental_marking) {
549  return;
550  }
551 
552  // First, check if a write barrier is even needed. The tests below
553  // catch stores of smis and stores into the young generation.
554  Label done;
555 
556  if (smi_check == INLINE_SMI_CHECK) {
557  JumpIfSmi(value, &done);
558  }
559 
560  CheckPageFlag(value,
561  value, // Used as scratch.
562  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
563  CheckPageFlag(object,
564  value, // Used as scratch.
565  MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
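  // We only fall through to the record-write call below when both checks
  // pass, i.e. the value's page has "pointers to here are interesting" set
  // and the object's page has "pointers from here are interesting" set;
  // otherwise we already branched to 'done' above.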
566 
567  // Record the actual write.
568  if (lr_status == kLRHasNotBeenSaved) {
569  mflr(r0);
570  push(r0);
571  }
572  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
573  if (lr_status == kLRHasNotBeenSaved) {
574  pop(r0);
575  mtlr(r0);
576  }
577 
578  bind(&done);
579 
580  // Count number of write barriers in generated code.
581  isolate()->counters()->write_barriers_static()->Increment();
582  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
583  value);
584 
585  // Clobber clobbered registers when running with the debug-code flag
586  // turned on to provoke errors.
587  if (emit_debug_code()) {
588  mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
589  mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
590  }
591 }
592 
593 void TurboAssembler::PushCommonFrame(Register marker_reg) {
594  int fp_delta = 0;
595  mflr(r0);
596  if (FLAG_enable_embedded_constant_pool) {
597  if (marker_reg.is_valid()) {
598  Push(r0, fp, kConstantPoolRegister, marker_reg);
599  fp_delta = 2;
600  } else {
601  Push(r0, fp, kConstantPoolRegister);
602  fp_delta = 1;
603  }
604  } else {
605  if (marker_reg.is_valid()) {
606  Push(r0, fp, marker_reg);
607  fp_delta = 1;
608  } else {
609  Push(r0, fp);
610  fp_delta = 0;
611  }
612  }
613  addi(fp, sp, Operand(fp_delta * kPointerSize));
614 }
615 
616 void TurboAssembler::PushStandardFrame(Register function_reg) {
617  int fp_delta = 0;
618  mflr(r0);
619  if (FLAG_enable_embedded_constant_pool) {
620  if (function_reg.is_valid()) {
621  Push(r0, fp, kConstantPoolRegister, cp, function_reg);
622  fp_delta = 3;
623  } else {
624  Push(r0, fp, kConstantPoolRegister, cp);
625  fp_delta = 2;
626  }
627  } else {
628  if (function_reg.is_valid()) {
629  Push(r0, fp, cp, function_reg);
630  fp_delta = 2;
631  } else {
632  Push(r0, fp, cp);
633  fp_delta = 1;
634  }
635  }
636  addi(fp, sp, Operand(fp_delta * kPointerSize));
637 }
638 
639 void TurboAssembler::RestoreFrameStateForTailCall() {
640  if (FLAG_enable_embedded_constant_pool) {
641  LoadP(kConstantPoolRegister,
642  MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
643  set_constant_pool_available(false);
644  }
645  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
646  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
647  mtlr(r0);
648 }
649 
650 // Push and pop all registers that can hold pointers.
651 void MacroAssembler::PushSafepointRegisters() {
652  // Safepoints expect a block of kNumSafepointRegisters values on the
653  // stack, so adjust the stack for unsaved registers.
654  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
655  DCHECK_GE(num_unsaved, 0);
656  if (num_unsaved > 0) {
657  subi(sp, sp, Operand(num_unsaved * kPointerSize));
658  }
659  MultiPush(kSafepointSavedRegisters);
660 }
661 
662 
663 void MacroAssembler::PopSafepointRegisters() {
664  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
665  MultiPop(kSafepointSavedRegisters);
666  if (num_unsaved > 0) {
667  addi(sp, sp, Operand(num_unsaved * kPointerSize));
668  }
669 }
670 
671 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
672  // The registers are pushed starting with the highest encoding,
673  // which means that lowest encodings are closest to the stack pointer.
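  // Illustrative example (the exact register set is platform-defined): if
  // kSafepointSavedRegisters contained only r3, r4 and r6, then reg_code 6
  // would see two lower set bits (r3, r4) and map to stack index 2.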
674  RegList regs = kSafepointSavedRegisters;
675  int index = 0;
676 
677  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
678 
679  for (int16_t i = 0; i < reg_code; i++) {
680  if ((regs & (1 << i)) != 0) {
681  index++;
682  }
683  }
684 
685  return index;
686 }
687 
688 
689 void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
690  const DoubleRegister src) {
691  // Turn potential sNaN into qNaN.
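  // (Any arithmetic operation on a signaling NaN yields the corresponding
  // quiet NaN per IEEE 754, while subtracting zero leaves ordinary values,
  // including zeroes and infinities, unchanged.)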
692  fsub(dst, src, kDoubleRegZero);
693 }
694 
695 void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
696  MovIntToDouble(dst, src, r0);
697  fcfid(dst, dst);
698 }
699 
700 void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
701  DoubleRegister dst) {
702  MovUnsignedIntToDouble(dst, src, r0);
703  fcfid(dst, dst);
704 }
705 
706 void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
707  MovIntToDouble(dst, src, r0);
708  fcfids(dst, dst);
709 }
710 
711 void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
712  DoubleRegister dst) {
713  MovUnsignedIntToDouble(dst, src, r0);
714  fcfids(dst, dst);
715 }
716 
717 #if V8_TARGET_ARCH_PPC64
718 void TurboAssembler::ConvertInt64ToDouble(Register src,
719  DoubleRegister double_dst) {
720  MovInt64ToDouble(double_dst, src);
721  fcfid(double_dst, double_dst);
722 }
723 
724 void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
725  DoubleRegister double_dst) {
726  MovInt64ToDouble(double_dst, src);
727  fcfidus(double_dst, double_dst);
728 }
729 
730 void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
731  DoubleRegister double_dst) {
732  MovInt64ToDouble(double_dst, src);
733  fcfidu(double_dst, double_dst);
734 }
735 
736 void TurboAssembler::ConvertInt64ToFloat(Register src,
737  DoubleRegister double_dst) {
738  MovInt64ToDouble(double_dst, src);
739  fcfids(double_dst, double_dst);
740 }
741 #endif
742 
743 void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
744 #if !V8_TARGET_ARCH_PPC64
745  const Register dst_hi,
746 #endif
747  const Register dst,
748  const DoubleRegister double_dst,
749  FPRoundingMode rounding_mode) {
750  if (rounding_mode == kRoundToZero) {
751  fctidz(double_dst, double_input);
752  } else {
753  SetRoundingMode(rounding_mode);
754  fctid(double_dst, double_input);
755  ResetRoundingMode();
756  }
757 
758  MovDoubleToInt64(
759 #if !V8_TARGET_ARCH_PPC64
760  dst_hi,
761 #endif
762  dst, double_dst);
763 }
764 
765 #if V8_TARGET_ARCH_PPC64
766 void TurboAssembler::ConvertDoubleToUnsignedInt64(
767  const DoubleRegister double_input, const Register dst,
768  const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
769  if (rounding_mode == kRoundToZero) {
770  fctiduz(double_dst, double_input);
771  } else {
772  SetRoundingMode(rounding_mode);
773  fctidu(double_dst, double_input);
774  ResetRoundingMode();
775  }
776 
777  MovDoubleToInt64(dst, double_dst);
778 }
779 #endif
780 
781 #if !V8_TARGET_ARCH_PPC64
782 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
783  Register src_low, Register src_high,
784  Register scratch, Register shift) {
785  DCHECK(!AreAliased(dst_low, src_high));
786  DCHECK(!AreAliased(dst_high, src_low));
787  DCHECK(!AreAliased(dst_low, dst_high, shift));
788  Label less_than_32;
789  Label done;
790  cmpi(shift, Operand(32));
791  blt(&less_than_32);
792  // If shift >= 32
793  andi(scratch, shift, Operand(0x1F));
794  slw(dst_high, src_low, scratch);
795  li(dst_low, Operand::Zero());
796  b(&done);
797  bind(&less_than_32);
798  // If shift < 32
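  // In effect: dst_high = (src_high << shift) | (src_low >> (32 - shift));
  //            dst_low  = src_low << shift;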
799  subfic(scratch, shift, Operand(32));
800  slw(dst_high, src_high, shift);
801  srw(scratch, src_low, scratch);
802  orx(dst_high, dst_high, scratch);
803  slw(dst_low, src_low, shift);
804  bind(&done);
805 }
806 
807 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
808  Register src_low, Register src_high,
809  uint32_t shift) {
810  DCHECK(!AreAliased(dst_low, src_high));
811  DCHECK(!AreAliased(dst_high, src_low));
812  if (shift == 32) {
813  Move(dst_high, src_low);
814  li(dst_low, Operand::Zero());
815  } else if (shift > 32) {
816  shift &= 0x1F;
817  slwi(dst_high, src_low, Operand(shift));
818  li(dst_low, Operand::Zero());
819  } else if (shift == 0) {
820  Move(dst_low, src_low);
821  Move(dst_high, src_high);
822  } else {
823  slwi(dst_high, src_high, Operand(shift));
824  rlwimi(dst_high, src_low, shift, 32 - shift, 31);
825  slwi(dst_low, src_low, Operand(shift));
826  }
827 }
828 
829 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
830  Register src_low, Register src_high,
831  Register scratch, Register shift) {
832  DCHECK(!AreAliased(dst_low, src_high));
833  DCHECK(!AreAliased(dst_high, src_low));
834  DCHECK(!AreAliased(dst_low, dst_high, shift));
835  Label less_than_32;
836  Label done;
837  cmpi(shift, Operand(32));
838  blt(&less_than_32);
839  // If shift >= 32
840  andi(scratch, shift, Operand(0x1F));
841  srw(dst_low, src_high, scratch);
842  li(dst_high, Operand::Zero());
843  b(&done);
844  bind(&less_than_32);
845  // If shift < 32
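  // In effect: dst_low  = (src_low >> shift) | (src_high << (32 - shift));
  //            dst_high = src_high >> shift;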
846  subfic(scratch, shift, Operand(32));
847  srw(dst_low, src_low, shift);
848  slw(scratch, src_high, scratch);
849  orx(dst_low, dst_low, scratch);
850  srw(dst_high, src_high, shift);
851  bind(&done);
852 }
853 
854 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
855  Register src_low, Register src_high,
856  uint32_t shift) {
857  DCHECK(!AreAliased(dst_low, src_high));
858  DCHECK(!AreAliased(dst_high, src_low));
859  if (shift == 32) {
860  Move(dst_low, src_high);
861  li(dst_high, Operand::Zero());
862  } else if (shift > 32) {
863  shift &= 0x1F;
864  srwi(dst_low, src_high, Operand(shift));
865  li(dst_high, Operand::Zero());
866  } else if (shift == 0) {
867  Move(dst_low, src_low);
868  Move(dst_high, src_high);
869  } else {
870  srwi(dst_low, src_low, Operand(shift));
871  rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
872  srwi(dst_high, src_high, Operand(shift));
873  }
874 }
875 
876 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
877  Register src_low, Register src_high,
878  Register scratch, Register shift) {
879  DCHECK(!AreAliased(dst_low, src_high, shift));
880  DCHECK(!AreAliased(dst_high, src_low, shift));
881  Label less_than_32;
882  Label done;
883  cmpi(shift, Operand(32));
884  blt(&less_than_32);
885  // If shift >= 32
886  andi(scratch, shift, Operand(0x1F));
887  sraw(dst_low, src_high, scratch);
888  srawi(dst_high, src_high, 31);
889  b(&done);
890  bind(&less_than_32);
891  // If shift < 32
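  // In effect: dst_low  = (src_low >> shift) | (src_high << (32 - shift));
  //            dst_high = src_high >> shift (arithmetic, sign-preserving);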
892  subfic(scratch, shift, Operand(32));
893  srw(dst_low, src_low, shift);
894  slw(scratch, src_high, scratch);
895  orx(dst_low, dst_low, scratch);
896  sraw(dst_high, src_high, shift);
897  bind(&done);
898 }
899 
900 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
901  Register src_low, Register src_high,
902  uint32_t shift) {
903  DCHECK(!AreAliased(dst_low, src_high));
904  DCHECK(!AreAliased(dst_high, src_low));
905  if (shift == 32) {
906  Move(dst_low, src_high);
907  srawi(dst_high, src_high, 31);
908  } else if (shift > 32) {
909  shift &= 0x1F;
910  srawi(dst_low, src_high, shift);
911  srawi(dst_high, src_high, 31);
912  } else if (shift == 0) {
913  Move(dst_low, src_low);
914  Move(dst_high, src_high);
915  } else {
916  srwi(dst_low, src_low, Operand(shift));
917  rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
918  srawi(dst_high, src_high, shift);
919  }
920 }
921 #endif
922 
923 void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
924  Register code_target_address) {
925  lwz(kConstantPoolRegister,
926  MemOperand(code_target_address,
927  Code::kConstantPoolOffset - Code::kHeaderSize));
928  add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
929 }
930 
931 void TurboAssembler::LoadPC(Register dst) {
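  // Branch-and-link to the next instruction: LR then holds the address of
  // the mflr below (the address of the b plus 4), which is copied into dst.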
932  b(4, SetLK);
933  mflr(dst);
934 }
935 
936 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
937  mflr(r0);
938  LoadPC(dst);
939  subi(dst, dst, Operand(pc_offset() - kInstrSize));
940  mtlr(r0);
941 }
942 
943 void TurboAssembler::LoadConstantPoolPointerRegister() {
944  LoadPC(kConstantPoolRegister);
945  int32_t delta = -pc_offset() + 4;
946  add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
947  ConstantPoolPosition(), delta);
948 }
949 
950 void TurboAssembler::StubPrologue(StackFrame::Type type) {
951  {
952  ConstantPoolUnavailableScope constant_pool_unavailable(this);
953  mov(r11, Operand(StackFrame::TypeToMarker(type)));
954  PushCommonFrame(r11);
955  }
956  if (FLAG_enable_embedded_constant_pool) {
957  LoadConstantPoolPointerRegister();
958  set_constant_pool_available(true);
959  }
960 }
961 
962 void TurboAssembler::Prologue() {
963  PushStandardFrame(r4);
964  if (FLAG_enable_embedded_constant_pool) {
965  // base contains prologue address
966  LoadConstantPoolPointerRegister();
967  set_constant_pool_available(true);
968  }
969 }
970 
971 void TurboAssembler::EnterFrame(StackFrame::Type type,
972  bool load_constant_pool_pointer_reg) {
973  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
974  // Push type explicitly so we can leverage the constant pool.
975  // This path cannot rely on ip containing code entry.
976  PushCommonFrame();
977  LoadConstantPoolPointerRegister();
978  mov(ip, Operand(StackFrame::TypeToMarker(type)));
979  push(ip);
980  } else {
981  mov(ip, Operand(StackFrame::TypeToMarker(type)));
982  PushCommonFrame(ip);
983  }
984 }
985 
986 int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
987  ConstantPoolUnavailableScope constant_pool_unavailable(this);
988  // r3: preserved
989  // r4: preserved
990  // r5: preserved
991 
992  // Drop the execution stack down to the frame pointer and restore
993  // the caller's state.
994  int frame_ends;
995  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
996  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
997  if (FLAG_enable_embedded_constant_pool) {
998  LoadP(kConstantPoolRegister,
999  MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
1000  }
1001  mtlr(r0);
1002  frame_ends = pc_offset();
1003  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
1004  mr(fp, ip);
1005  return frame_ends;
1006 }
1007 
1008 // ExitFrame layout (probably somewhat out of date; needs updating)
1009 //
1010 // SP -> previousSP
1011 // LK reserved
1012 // code
1013 // sp_on_exit (for debug?)
1014 // oldSP->prev SP
1015 // LK
1016 // <parameters on stack>
1017 
1018 // Prior to calling EnterExitFrame, we have a number of parameters
1019 // on the stack that we need to wrap a real frame around, so first
1020 // we reserve a slot for LK and push the previous SP, which is captured
1021 // in the fp register (r31).
1022 // Then we allocate a new frame.
1023 
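// (The concrete slot offsets are defined by ExitFrameConstants; the code
// below stores the code object into the kCodeOffset slot and the exit sp
// into the kSPOffset slot of this frame.)
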
1024 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1025  StackFrame::Type frame_type) {
1026  DCHECK(frame_type == StackFrame::EXIT ||
1027  frame_type == StackFrame::BUILTIN_EXIT);
1028  // Set up the frame structure on the stack.
1029  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1030  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1031  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1032  DCHECK_GT(stack_space, 0);
1033 
1034  // This is an opportunity to build a frame to wrap
1035  // all of the pushes that have happened inside of V8
1036  // since we were called from C code
1037 
1038  mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
1039  PushCommonFrame(ip);
1040  // Reserve room for saved entry sp and code object.
1041  subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1042 
1043  if (emit_debug_code()) {
1044  li(r8, Operand::Zero());
1045  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1046  }
1047  if (FLAG_enable_embedded_constant_pool) {
1048  StoreP(kConstantPoolRegister,
1049  MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1050  }
1051  Move(r8, CodeObject());
1052  StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
1053 
1054  // Save the frame pointer and the context in top.
1055  Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1056  isolate()));
1057  StoreP(fp, MemOperand(r8));
1058  Move(r8,
1059  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1060  StoreP(cp, MemOperand(r8));
1061 
1062  // Optionally save all volatile double registers.
1063  if (save_doubles) {
1064  MultiPushDoubles(kCallerSavedDoubles);
1065  // Note that d0 will be accessible at
1066  // fp - ExitFrameConstants::kFrameSize -
1067  // kNumCallerSavedDoubles * kDoubleSize,
1068  // since the sp slot and code slot were pushed after the fp.
1069  }
1070 
1071  addi(sp, sp, Operand(-stack_space * kPointerSize));
1072 
1073  // Allocate and align the frame preparing for calling the runtime
1074  // function.
1075  const int frame_alignment = ActivationFrameAlignment();
1076  if (frame_alignment > kPointerSize) {
1077  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1078  ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1079  }
1080  li(r0, Operand::Zero());
1081  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
1082 
1083  // Set the exit frame sp value to point just before the return address
1084  // location.
1085  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
1086  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1087 }
1088 
1089 int TurboAssembler::ActivationFrameAlignment() {
1090 #if !defined(USE_SIMULATOR)
1091  // Running on the real platform. Use the alignment as mandated by the local
1092  // environment.
1093  // Note: This will break if we ever start generating snapshots on one PPC
1094  // platform for another PPC platform with a different alignment.
1095  return base::OS::ActivationFrameAlignment();
1096 #else // Simulated
1097  // If we are using the simulator then we should always align to the expected
1098  // alignment. As the simulator is used to generate snapshots we do not know
1099  // if the target platform will need alignment, so this is controlled from a
1100  // flag.
1101  return FLAG_sim_stack_alignment;
1102 #endif
1103 }
1104 
1105 
1106 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1107  bool argument_count_is_length) {
1108  ConstantPoolUnavailableScope constant_pool_unavailable(this);
1109  // Optionally restore all double registers.
1110  if (save_doubles) {
1111  // Calculate the stack location of the saved doubles and restore them.
1112  const int kNumRegs = kNumCallerSavedDoubles;
1113  const int offset =
1114  (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
1115  addi(r6, fp, Operand(-offset));
1116  MultiPopDoubles(kCallerSavedDoubles, r6);
1117  }
1118 
1119  // Clear top frame.
1120  li(r6, Operand::Zero());
1121  Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1122  isolate()));
1123  StoreP(r6, MemOperand(ip));
1124 
1125  // Restore current context from top and clear it in debug mode.
1126  Move(ip,
1127  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1128  LoadP(cp, MemOperand(ip));
1129 
1130 #ifdef DEBUG
1131  mov(r6, Operand(Context::kInvalidContext));
1132  Move(ip,
1133  ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1134  StoreP(r6, MemOperand(ip));
1135 #endif
1136 
1137  // Tear down the exit frame, pop the arguments, and return.
1138  LeaveFrame(StackFrame::EXIT);
1139 
1140  if (argument_count.is_valid()) {
1141  if (!argument_count_is_length) {
1142  ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
1143  }
1144  add(sp, sp, argument_count);
1145  }
1146 }
1147 
1148 void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
1149  Move(dst, d1);
1150 }
1151 
1152 void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1153  Move(dst, d1);
1154 }
1155 
1156 void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
1157  Register caller_args_count_reg,
1158  Register scratch0, Register scratch1) {
1159 #if DEBUG
1160  if (callee_args_count.is_reg()) {
1161  DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
1162  scratch1));
1163  } else {
1164  DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
1165  }
1166 #endif
1167 
1168  // Calculate the end of destination area where we will put the arguments
1169  // after we drop the current frame. We add kPointerSize to count the receiver
1170  // argument, which is not included in the formal parameter count.
1171  Register dst_reg = scratch0;
1172  ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
1173  add(dst_reg, fp, dst_reg);
1174  addi(dst_reg, dst_reg,
1175  Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
1176 
1177  Register src_reg = caller_args_count_reg;
1178  // Calculate the end of source area. +kPointerSize is for the receiver.
1179  if (callee_args_count.is_reg()) {
1180  ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
1181  add(src_reg, sp, src_reg);
1182  addi(src_reg, src_reg, Operand(kPointerSize));
1183  } else {
1184  Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
1185  }
1186 
1187  if (FLAG_debug_code) {
1188  cmpl(src_reg, dst_reg);
1189  Check(lt, AbortReason::kStackAccessBelowStackPointer);
1190  }
1191 
1192  // Restore caller's frame pointer and return address now as they will be
1193  // overwritten by the copying loop.
1194  RestoreFrameStateForTailCall();
1195 
1196  // Now copy callee arguments to the caller frame going backwards to avoid
1197  // callee arguments corruption (source and destination areas could overlap).
1198 
1199  // Both src_reg and dst_reg are pointing to the word after the one to copy,
1200  // so they must be pre-decremented in the loop.
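  // E.g. (illustrative): with two callee arguments the loop below runs
  // three times (arguments plus receiver), copying one word per iteration
  // from the current frame's argument area into the caller's frame.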
1201  Register tmp_reg = scratch1;
1202  Label loop;
1203  if (callee_args_count.is_reg()) {
1204  addi(tmp_reg, callee_args_count.reg(), Operand(1)); // +1 for receiver
1205  } else {
1206  mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
1207  }
1208  mtctr(tmp_reg);
1209  bind(&loop);
1210  LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
1211  StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
1212  bdnz(&loop);
1213 
1214  // Leave current frame.
1215  mr(sp, dst_reg);
1216 }
1217 
1218 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1219  const ParameterCount& actual, Label* done,
1220  bool* definitely_mismatches,
1221  InvokeFlag flag) {
1222  bool definitely_matches = false;
1223  *definitely_mismatches = false;
1224  Label regular_invoke;
1225 
1226  // Check whether the expected and actual argument counts match. If not,
1227  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
1228  // r3: actual arguments count
1229  // r4: function (passed through to callee)
1230  // r5: expected arguments count
1231 
1232  // The code below is made a lot easier because the calling code already sets
1233  // up actual and expected registers according to the contract if values are
1234  // passed in registers.
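  // (In other words, on a mismatch ArgumentsAdaptorTrampoline is entered
  // with the actual count in r3, the function in r4 and the expected count
  // in r5, and it adapts the argument list before invoking the callee.)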
1235 
1236  // ARM has some sanity checks like the ones below; consider adding them for PPC:
1237  // DCHECK(actual.is_immediate() || actual.reg() == r3);
1238  // DCHECK(expected.is_immediate() || expected.reg() == r5);
1239 
1240  if (expected.is_immediate()) {
1241  DCHECK(actual.is_immediate());
1242  mov(r3, Operand(actual.immediate()));
1243  if (expected.immediate() == actual.immediate()) {
1244  definitely_matches = true;
1245  } else {
1246  const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1247  if (expected.immediate() == sentinel) {
1248  // Don't worry about adapting arguments for builtins that
1249  // don't want that done. Skip the adaptation code by making it look
1250  // like we have a match between expected and actual number of
1251  // arguments.
1252  definitely_matches = true;
1253  } else {
1254  *definitely_mismatches = true;
1255  mov(r5, Operand(expected.immediate()));
1256  }
1257  }
1258  } else {
1259  if (actual.is_immediate()) {
1260  mov(r3, Operand(actual.immediate()));
1261  cmpi(expected.reg(), Operand(actual.immediate()));
1262  beq(&regular_invoke);
1263  } else {
1264  cmp(expected.reg(), actual.reg());
1265  beq(&regular_invoke);
1266  }
1267  }
1268 
1269  if (!definitely_matches) {
1270  Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1271  if (flag == CALL_FUNCTION) {
1272  Call(adaptor);
1273  if (!*definitely_mismatches) {
1274  b(done);
1275  }
1276  } else {
1277  Jump(adaptor, RelocInfo::CODE_TARGET);
1278  }
1279  bind(&regular_invoke);
1280  }
1281 }
1282 
1283 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1284  const ParameterCount& expected,
1285  const ParameterCount& actual) {
1286  Label skip_hook;
1287 
1288  ExternalReference debug_hook_active =
1289  ExternalReference::debug_hook_on_function_call_address(isolate());
1290  Move(r7, debug_hook_active);
1291  LoadByte(r7, MemOperand(r7), r0);
1292  extsb(r7, r7);
1293  CmpSmiLiteral(r7, Smi::zero(), r0);
1294  beq(&skip_hook);
1295 
1296  {
1297  // Load receiver to pass it later to DebugOnFunctionCall hook.
1298  if (actual.is_reg()) {
1299  mr(r7, actual.reg());
1300  } else {
1301  mov(r7, Operand(actual.immediate()));
1302  }
1303  ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
1304  LoadPX(r7, MemOperand(sp, r7));
1305  FrameScope frame(this,
1306  has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1307  if (expected.is_reg()) {
1308  SmiTag(expected.reg());
1309  Push(expected.reg());
1310  }
1311  if (actual.is_reg()) {
1312  SmiTag(actual.reg());
1313  Push(actual.reg());
1314  }
1315  if (new_target.is_valid()) {
1316  Push(new_target);
1317  }
1318  Push(fun, fun, r7);
1319  CallRuntime(Runtime::kDebugOnFunctionCall);
1320  Pop(fun);
1321  if (new_target.is_valid()) {
1322  Pop(new_target);
1323  }
1324  if (actual.is_reg()) {
1325  Pop(actual.reg());
1326  SmiUntag(actual.reg());
1327  }
1328  if (expected.is_reg()) {
1329  Pop(expected.reg());
1330  SmiUntag(expected.reg());
1331  }
1332  }
1333  bind(&skip_hook);
1334 }
1335 
1336 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1337  const ParameterCount& expected,
1338  const ParameterCount& actual,
1339  InvokeFlag flag) {
1340  // You can't call a function without a valid frame.
1341  DCHECK(flag == JUMP_FUNCTION || has_frame());
1342  DCHECK(function == r4);
1343  DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1344 
1345  // On function call, call into the debugger if necessary.
1346  CheckDebugHook(function, new_target, expected, actual);
1347 
1348  // Clear the new.target register if not given.
1349  if (!new_target.is_valid()) {
1350  LoadRoot(r6, RootIndex::kUndefinedValue);
1351  }
1352 
1353  Label done;
1354  bool definitely_mismatches = false;
1355  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
1356  if (!definitely_mismatches) {
1357  // We call indirectly through the code field in the function to
1358  // allow recompilation to take effect without changing any of the
1359  // call sites.
1360  Register code = kJavaScriptCallCodeStartRegister;
1361  LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1362  addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1363  if (flag == CALL_FUNCTION) {
1364  CallJSEntry(code);
1365  } else {
1366  DCHECK(flag == JUMP_FUNCTION);
1367  JumpToJSEntry(code);
1368  }
1369 
1370  // Continue here if InvokePrologue does handle the invocation due to
1371  // mismatched parameter counts.
1372  bind(&done);
1373  }
1374 }
1375 
1376 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1377  const ParameterCount& actual,
1378  InvokeFlag flag) {
1379  // You can't call a function without a valid frame.
1380  DCHECK(flag == JUMP_FUNCTION || has_frame());
1381 
1382  // Contract with called JS functions requires that function is passed in r4.
1383  DCHECK(fun == r4);
1384 
1385  Register expected_reg = r5;
1386  Register temp_reg = r7;
1387 
1388  LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1389  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1390  LoadHalfWord(expected_reg,
1391  FieldMemOperand(
1392  temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1393 
1394  ParameterCount expected(expected_reg);
1395  InvokeFunctionCode(fun, new_target, expected, actual, flag);
1396 }
1397 
1398 void MacroAssembler::InvokeFunction(Register function,
1399  const ParameterCount& expected,
1400  const ParameterCount& actual,
1401  InvokeFlag flag) {
1402  // You can't call a function without a valid frame.
1403  DCHECK(flag == JUMP_FUNCTION || has_frame());
1404 
1405  // Contract with called JS functions requires that function is passed in r4.
1406  DCHECK(function == r4);
1407 
1408  // Get the function and setup the context.
1409  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1410 
1411  InvokeFunctionCode(r4, no_reg, expected, actual, flag);
1412 }
1413 
1414 void MacroAssembler::MaybeDropFrames() {
1415  // Check whether we need to drop frames to restart a function on the stack.
1416  ExternalReference restart_fp =
1417  ExternalReference::debug_restart_fp_address(isolate());
1418  Move(r4, restart_fp);
1419  LoadP(r4, MemOperand(r4));
1420  cmpi(r4, Operand::Zero());
1421  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1422  ne);
1423 }
1424 
1425 void MacroAssembler::PushStackHandler() {
1426  // Adjust this code if not the case.
1427  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1428  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1429 
1430  Push(Smi::zero()); // Padding.
1431 
1432  // Link the current handler as the next handler.
1433  // Preserve r3-r7.
1434  mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1435  isolate())));
1436  LoadP(r0, MemOperand(r8));
1437  push(r0);
1438 
1439  // Set this new handler as the current one.
1440  StoreP(sp, MemOperand(r8));
1441 }
1442 
1443 
1444 void MacroAssembler::PopStackHandler() {
1445  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1446  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1447 
1448  pop(r4);
1449  mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1450  isolate())));
1451  StoreP(r4, MemOperand(ip));
1452 
1453  Drop(1); // Drop padding.
1454 }
1455 
1456 
1457 void MacroAssembler::CompareObjectType(Register object, Register map,
1458  Register type_reg, InstanceType type) {
1459  const Register temp = type_reg == no_reg ? r0 : type_reg;
1460 
1461  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1462  CompareInstanceType(map, temp, type);
1463 }
1464 
1465 
1466 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1467  InstanceType type) {
1468  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1469  STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1470  lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1471  cmpi(type_reg, Operand(type));
1472 }
1473 
1474 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1475  DCHECK(obj != r0);
1476  LoadRoot(r0, index);
1477  cmp(obj, r0);
1478 }
1479 
1480 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1481  Register right,
1482  Register overflow_dst,
1483  Register scratch) {
1484  DCHECK(dst != overflow_dst);
1485  DCHECK(dst != scratch);
1486  DCHECK(overflow_dst != scratch);
1487  DCHECK(overflow_dst != left);
1488  DCHECK(overflow_dst != right);
1489 
1490  bool left_is_right = left == right;
1491  RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1492 
1493  // C = A+B; C overflows if A/B have same sign and C has diff sign than A
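  // Worked example (illustrative, 8-bit for brevity): 0x70 + 0x70 = 0xE0;
  // both operands are positive but the result is negative, so the sign bit
  // of (C ^ A) & (C ^ B), computed below, is set and overflow is detected.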
1494  if (dst == left) {
1495  mr(scratch, left); // Preserve left.
1496  add(dst, left, right); // Left is overwritten.
1497  xor_(overflow_dst, dst, scratch, xorRC); // Original left.
1498  if (!left_is_right) xor_(scratch, dst, right);
1499  } else if (dst == right) {
1500  mr(scratch, right); // Preserve right.
1501  add(dst, left, right); // Right is overwritten.
1502  xor_(overflow_dst, dst, left, xorRC);
1503  if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
1504  } else {
1505  add(dst, left, right);
1506  xor_(overflow_dst, dst, left, xorRC);
1507  if (!left_is_right) xor_(scratch, dst, right);
1508  }
1509  if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1510 }
1511 
1512 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1513  intptr_t right,
1514  Register overflow_dst,
1515  Register scratch) {
1516  Register original_left = left;
1517  DCHECK(dst != overflow_dst);
1518  DCHECK(dst != scratch);
1519  DCHECK(overflow_dst != scratch);
1520  DCHECK(overflow_dst != left);
1521 
1522  // C = A+B; C overflows if A/B have same sign and C has diff sign than A
1523  if (dst == left) {
1524  // Preserve left.
1525  original_left = overflow_dst;
1526  mr(original_left, left);
1527  }
1528  Add(dst, left, right, scratch);
1529  xor_(overflow_dst, dst, original_left);
1530  if (right >= 0) {
1531  and_(overflow_dst, overflow_dst, dst, SetRC);
1532  } else {
1533  andc(overflow_dst, overflow_dst, dst, SetRC);
1534  }
1535 }
1536 
1537 void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
1538  Register right,
1539  Register overflow_dst,
1540  Register scratch) {
1541  DCHECK(dst != overflow_dst);
1542  DCHECK(dst != scratch);
1543  DCHECK(overflow_dst != scratch);
1544  DCHECK(overflow_dst != left);
1545  DCHECK(overflow_dst != right);
1546 
1547  // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
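  // Worked example (illustrative, 8-bit): 0x80 - 0x01 = 0x7F; the operands
  // have different signs and the result's sign differs from A's, so
  // (C ^ A) & (A ^ B) has its sign bit set and overflow is flagged.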
1548  if (dst == left) {
1549  mr(scratch, left); // Preserve left.
1550  sub(dst, left, right); // Left is overwritten.
1551  xor_(overflow_dst, dst, scratch);
1552  xor_(scratch, scratch, right);
1553  and_(overflow_dst, overflow_dst, scratch, SetRC);
1554  } else if (dst == right) {
1555  mr(scratch, right); // Preserve right.
1556  sub(dst, left, right); // Right is overwritten.
1557  xor_(overflow_dst, dst, left);
1558  xor_(scratch, left, scratch);
1559  and_(overflow_dst, overflow_dst, scratch, SetRC);
1560  } else {
1561  sub(dst, left, right);
1562  xor_(overflow_dst, dst, left);
1563  xor_(scratch, left, right);
1564  and_(overflow_dst, scratch, overflow_dst, SetRC);
1565  }
1566 }
1567 
1568 
1569 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1570  DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1571  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1572 }
1573 
1574 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1575  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1576 }
1577 
1578 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
1579  return has_frame_ || !stub->SometimesSetsUpAFrame();
1580 }
1581 
1582 void MacroAssembler::TryDoubleToInt32Exact(Register result,
1583  DoubleRegister double_input,
1584  Register scratch,
1585  DoubleRegister double_scratch) {
1586  Label done;
1587  DCHECK(double_input != double_scratch);
1588 
1589  ConvertDoubleToInt64(double_input,
1590 #if !V8_TARGET_ARCH_PPC64
1591  scratch,
1592 #endif
1593  result, double_scratch);
1594 
1595 #if V8_TARGET_ARCH_PPC64
1596  TestIfInt32(result, r0);
1597 #else
1598  TestIfInt32(scratch, result, r0);
1599 #endif
1600  bne(&done);
1601 
1602  // convert back and compare
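  // (If the round-tripped value differs from the original input, the
  // conversion was not exact; callers inspect the resulting FP condition
  // flags to decide.)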
1603  fcfid(double_scratch, double_scratch);
1604  fcmpu(double_scratch, double_input);
1605  bind(&done);
1606 }
1607 
1608 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1609  Register result,
1610  DoubleRegister double_input,
1611  StubCallMode stub_mode) {
1612  Label done;
1613 
1614  TryInlineTruncateDoubleToI(result, double_input, &done);
1615 
1616  // If we fell through then the inline version didn't succeed; call the stub instead.
1617  mflr(r0);
1618  push(r0);
1619  // Put input on stack.
1620  stfdu(double_input, MemOperand(sp, -kDoubleSize));
1621 
1622  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1623  Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1624  } else {
1625  Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1626  }
1627 
1628  LoadP(result, MemOperand(sp));
1629  addi(sp, sp, Operand(kDoubleSize));
1630  pop(r0);
1631  mtlr(r0);
1632 
1633  bind(&done);
1634 }
1635 
1636 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1637  DoubleRegister double_input,
1638  Label* done) {
1639  DoubleRegister double_scratch = kScratchDoubleReg;
1640 #if !V8_TARGET_ARCH_PPC64
1641  Register scratch = ip;
1642 #endif
1643 
1644  ConvertDoubleToInt64(double_input,
1645 #if !V8_TARGET_ARCH_PPC64
1646  scratch,
1647 #endif
1648  result, double_scratch);
1649 
1650 // Test for overflow
1651 #if V8_TARGET_ARCH_PPC64
1652  TestIfInt32(result, r0);
1653 #else
1654  TestIfInt32(scratch, result, r0);
1655 #endif
1656  beq(done);
1657 }
1658 
1659 void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
1660  Register centry) {
1661  const Runtime::Function* f = Runtime::FunctionForId(fid);
1662  // TODO(1236192): Most runtime routines don't need the number of
1663  // arguments passed in because it is constant. At some point we
1664  // should remove this need and make the runtime routine entry code
1665  // smarter.
1666  mov(r3, Operand(f->nargs));
1667  Move(r4, ExternalReference::Create(f));
1668  DCHECK(!AreAliased(centry, r3, r4));
1669  addi(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
1670  Call(centry);
1671 }
1672 
1673 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1674  SaveFPRegsMode save_doubles) {
1675  // All parameters are on the stack. r3 has the return value after call.
1676 
1677  // If the expected number of arguments of the runtime function is
1678  // constant, we check that the actual number of arguments match the
1679  // expectation.
1680  CHECK(f->nargs < 0 || f->nargs == num_arguments);
1681 
1682  // TODO(1236192): Most runtime routines don't need the number of
1683  // arguments passed in because it is constant. At some point we
1684  // should remove this need and make the runtime routine entry code
1685  // smarter.
1686  mov(r3, Operand(num_arguments));
1687  Move(r4, ExternalReference::Create(f));
1688 #if V8_TARGET_ARCH_PPC64
1689  Handle<Code> code =
1690  CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1691 #else
1692  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1693 #endif
1694  Call(code, RelocInfo::CODE_TARGET);
1695 }
1696 
1697 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1698  const Runtime::Function* function = Runtime::FunctionForId(fid);
1699  DCHECK_EQ(1, function->result_size);
1700  if (function->nargs >= 0) {
1701  mov(r3, Operand(function->nargs));
1702  }
1703  JumpToExternalReference(ExternalReference::Create(fid));
1704 }
1705 
1706 
1707 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1708  bool builtin_exit_frame) {
1709  Move(r4, builtin);
1710  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1711  kArgvOnStack, builtin_exit_frame);
1712  Jump(code, RelocInfo::CODE_TARGET);
1713 }
1714 
1715 void MacroAssembler::JumpToInstructionStream(Address entry) {
1716  mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1717  Jump(kOffHeapTrampolineRegister);
1718 }
1719 
1720 void MacroAssembler::LoadWeakValue(Register out, Register in,
1721  Label* target_if_cleared) {
1722  cmpi(in, Operand(kClearedWeakHeapObjectLower32));
1723  beq(target_if_cleared);
1724 
1725  mov(r0, Operand(~kWeakHeapObjectMask));
1726  and_(out, in, r0);
1727 }
1728 
1729 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1730  Register scratch1, Register scratch2) {
1731  DCHECK_GT(value, 0);
1732  if (FLAG_native_code_counters && counter->Enabled()) {
1733  Move(scratch2, ExternalReference::Create(counter));
1734  lwz(scratch1, MemOperand(scratch2));
1735  addi(scratch1, scratch1, Operand(value));
1736  stw(scratch1, MemOperand(scratch2));
1737  }
1738 }
1739 
1740 
1741 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1742  Register scratch1, Register scratch2) {
1743  DCHECK_GT(value, 0);
1744  if (FLAG_native_code_counters && counter->Enabled()) {
1745  Move(scratch2, ExternalReference::Create(counter));
1746  lwz(scratch1, MemOperand(scratch2));
1747  subi(scratch1, scratch1, Operand(value));
1748  stw(scratch1, MemOperand(scratch2));
1749  }
1750 }
1751 
1752 void TurboAssembler::Assert(Condition cond, AbortReason reason,
1753  CRegister cr) {
1754  if (emit_debug_code()) Check(cond, reason, cr);
1755 }
1756 
1757 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1758  Label L;
1759  b(cond, &L, cr);
1760  Abort(reason);
1761  // will not return here
1762  bind(&L);
1763 }
1764 
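// Abort emits one of three sequences: a trap instruction when trap_on_abort()
// is set, a direct C call to abort_with_reason() when should_abort_hard() is
// set, or a call to the Abort builtin with the reason passed as a Smi in r4.
// A dummy frame scope is used when no frame has been constructed.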
1765 void TurboAssembler::Abort(AbortReason reason) {
1766  Label abort_start;
1767  bind(&abort_start);
1768  const char* msg = GetAbortReason(reason);
1769 #ifdef DEBUG
1770  RecordComment("Abort message: ");
1771  RecordComment(msg);
1772 #endif
1773 
1774  // Avoid emitting call to builtin if requested.
1775  if (trap_on_abort()) {
1776  stop(msg);
1777  return;
1778  }
1779 
1780  if (should_abort_hard()) {
1781  // We don't care if we constructed a frame. Just pretend we did.
1782  FrameScope assume_frame(this, StackFrame::NONE);
1783  mov(r3, Operand(static_cast<int>(reason)));
1784  PrepareCallCFunction(1, r4);
1785  CallCFunction(ExternalReference::abort_with_reason(), 1);
1786  return;
1787  }
1788 
1789  LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
1790 
1791  // Disable stub call restrictions to always allow calls to abort.
1792  if (!has_frame_) {
1793  // We don't actually want to generate a pile of code for this, so just
1794  // claim there is a stack frame, without generating one.
1795  FrameScope scope(this, StackFrame::NONE);
1796  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1797  } else {
1798  Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1799  }
1800  // will not return here
1801 }
1802 
1803 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1804  LoadP(dst, NativeContextMemOperand());
1805  LoadP(dst, ContextMemOperand(dst, index));
1806 }
1807 
1808 
1809 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
1810  Label* smi_case) {
1811  STATIC_ASSERT(kSmiTag == 0);
1812  TestBitRange(src, kSmiTagSize - 1, 0, r0);
1813  SmiUntag(dst, src);
1814  beq(smi_case, cr0);
1815 }
1816 
1817 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
1818  Label* on_either_smi) {
1819  STATIC_ASSERT(kSmiTag == 0);
1820  JumpIfSmi(reg1, on_either_smi);
1821  JumpIfSmi(reg2, on_either_smi);
1822 }
1823 
1824 void MacroAssembler::AssertNotSmi(Register object) {
1825  if (emit_debug_code()) {
1826  STATIC_ASSERT(kSmiTag == 0);
1827  TestIfSmi(object, r0);
1828  Check(ne, AbortReason::kOperandIsASmi, cr0);
1829  }
1830 }
1831 
1832 
1833 void MacroAssembler::AssertSmi(Register object) {
1834  if (emit_debug_code()) {
1835  STATIC_ASSERT(kSmiTag == 0);
1836  TestIfSmi(object, r0);
1837  Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1838  }
1839 }
1840 
1841 void MacroAssembler::AssertConstructor(Register object) {
1842  if (emit_debug_code()) {
1843  STATIC_ASSERT(kSmiTag == 0);
1844  TestIfSmi(object, r0);
1845  Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
1846  push(object);
1847  LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
1848  lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
1849  andi(object, object, Operand(Map::IsConstructorBit::kMask));
1850  pop(object);
1851  Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
1852  }
1853 }
1854 
1855 void MacroAssembler::AssertFunction(Register object) {
1856  if (emit_debug_code()) {
1857  STATIC_ASSERT(kSmiTag == 0);
1858  TestIfSmi(object, r0);
1859  Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1860  push(object);
1861  CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
1862  pop(object);
1863  Check(eq, AbortReason::kOperandIsNotAFunction);
1864  }
1865 }
1866 
1867 
1868 void MacroAssembler::AssertBoundFunction(Register object) {
1869  if (emit_debug_code()) {
1870  STATIC_ASSERT(kSmiTag == 0);
1871  TestIfSmi(object, r0);
1872  Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1873  push(object);
1874  CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
1875  pop(object);
1876  Check(eq, AbortReason::kOperandIsNotABoundFunction);
1877  }
1878 }
1879 
1880 void MacroAssembler::AssertGeneratorObject(Register object) {
1881  if (!emit_debug_code()) return;
1882  TestIfSmi(object, r0);
1883  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1884 
1885  // Load map
1886  Register map = object;
1887  push(object);
1888  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1889 
1890  // Check if JSGeneratorObject
1891  Label do_check;
1892  Register instance_type = object;
1893  CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1894  beq(&do_check);
1895 
1896  // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
1897  cmpi(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
1898  beq(&do_check);
1899 
1900  // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
1901  cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1902 
1903  bind(&do_check);
1904  // Restore generator object to register and perform assertion
1905  pop(object);
1906  Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1907 }
1908 
1909 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1910  Register scratch) {
1911  if (emit_debug_code()) {
1912  Label done_checking;
1913  AssertNotSmi(object);
1914  CompareRoot(object, RootIndex::kUndefinedValue);
1915  beq(&done_checking);
1916  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1917  CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1918  Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1919  bind(&done_checking);
1920  }
1921 }
1922 
1923 
1924 static const int kRegisterPassedArguments = 8;
1925 
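// Computes how many words of C arguments spill to the stack: general-purpose
// arguments beyond the eight register-passed ones (r3..r10) take one word
// each, and double arguments beyond the available FP registers take two
// words each.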
1926 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
1927  int num_double_arguments) {
1928  int stack_passed_words = 0;
1929  if (num_double_arguments > DoubleRegister::kNumRegisters) {
1930  stack_passed_words +=
1931  2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1932  }
1933  // Up to 8 simple arguments are passed in registers r3..r10.
1934  if (num_reg_arguments > kRegisterPassedArguments) {
1935  stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1936  }
1937  return stack_passed_words;
1938 }
1939 
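// Reserves the ABI-required stack frame slots plus room for stack-passed
// arguments before a C call, aligning sp when the activation frame alignment
// exceeds the pointer size and preserving the caller's sp in the frame.
// Abort() above shows the typical pairing with CallCFunction().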
1940 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1941  int num_double_arguments,
1942  Register scratch) {
1943  int frame_alignment = ActivationFrameAlignment();
1944  int stack_passed_arguments =
1945  CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1946  int stack_space = kNumRequiredStackFrameSlots;
1947 
1948  if (frame_alignment > kPointerSize) {
1949  // Make stack end at alignment and make room for stack arguments
1950  // -- preserving original value of sp.
1951  mr(scratch, sp);
1952  addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
1953  DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1954  ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1955  StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
1956  } else {
1957  // Make room for stack arguments
1958  stack_space += stack_passed_arguments;
1959  }
1960 
1961  // Allocate frame with required slots to make ABI work.
1962  li(r0, Operand::Zero());
1963  StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
1964 }
1965 
1966 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1967  Register scratch) {
1968  PrepareCallCFunction(num_reg_arguments, 0, scratch);
1969 }
1970 
1971 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
1972 
1973 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
1974 
1975 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
1976  DoubleRegister src2) {
1977  if (src2 == d1) {
1978  DCHECK(src1 != d2);
1979  Move(d2, src2);
1980  Move(d1, src1);
1981  } else {
1982  Move(d1, src1);
1983  Move(d2, src2);
1984  }
1985 }
1986 
1987 void TurboAssembler::CallCFunction(ExternalReference function,
1988  int num_reg_arguments,
1989  int num_double_arguments) {
1990  Move(ip, function);
1991  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
1992 }
1993 
1994 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
1995  int num_double_arguments) {
1996  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
1997 }
1998 
1999 void TurboAssembler::CallCFunction(ExternalReference function,
2000  int num_arguments) {
2001  CallCFunction(function, num_arguments, 0);
2002 }
2003 
2004 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2005  CallCFunction(function, num_arguments, 0);
2006 }
2007 
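// Performs the actual C call. On ABIs that use function descriptors
// (AIX/PPC64BE Linux) the TOC register and entry point are loaded from the
// descriptor; otherwise the call may be routed through ip. Afterwards the
// frame set up by PrepareCallCFunction is removed, either by reloading the
// saved sp or by popping the fixed-size frame.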
2008 void TurboAssembler::CallCFunctionHelper(Register function,
2009  int num_reg_arguments,
2010  int num_double_arguments) {
2011  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2012  DCHECK(has_frame());
2013 
2014  // Just call directly. The function called cannot cause a GC, or
2015  // allow preemption, so the return address in the link register
2016  // stays correct.
2017  Register dest = function;
2018  if (ABI_USES_FUNCTION_DESCRIPTORS) {
2019  // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
2020  // aware of this descriptor and pick up values from it.
2021  LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
2022  LoadP(ip, MemOperand(function, 0));
2023  dest = ip;
2024  } else if (ABI_CALL_VIA_IP) {
2025  Move(ip, function);
2026  dest = ip;
2027  }
2028 
2029  Call(dest);
2030 
2031  // Remove the frame created by PrepareCallCFunction.
2032  int stack_passed_arguments =
2033  CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2034  int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2035  if (ActivationFrameAlignment() > kPointerSize) {
2036  LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
2037  } else {
2038  addi(sp, sp, Operand(stack_space * kPointerSize));
2039  }
2040 }
2041 
2042 
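// Tests the MemoryChunk flags of the page containing |object|: the address is
// masked down to the page start (kPageSizeBits), the flags word is loaded
// from MemoryChunk::kFlagsOffset, and control branches to |condition_met|
// when the masked bits compare as requested (eq or ne).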
2043 void TurboAssembler::CheckPageFlag(
2044  Register object,
2045  Register scratch, // scratch may be same register as object
2046  int mask, Condition cc, Label* condition_met) {
2047  DCHECK(cc == ne || cc == eq);
2048  ClearRightImm(scratch, object, Operand(kPageSizeBits));
2049  LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2050 
2051  mov(r0, Operand(mask));
2052  and_(r0, scratch, r0, SetRC);
2053 
2054  if (cc == ne) {
2055  bne(condition_met, cr0);
2056  }
2057  if (cc == eq) {
2058  beq(condition_met, cr0);
2059  }
2060 }
2061 
2062 void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
2063 
2064 void TurboAssembler::ResetRoundingMode() {
2065  mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
2066 }
2067 
2068 
2070 //
2071 // New MacroAssembler Interfaces added for PPC
2072 //
2074 void TurboAssembler::LoadIntLiteral(Register dst, int value) {
2075  mov(dst, Operand(value));
2076 }
2077 
2078 void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
2079  mov(dst, Operand(smi));
2080 }
2081 
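// Materializes a double constant. The embedded constant pool is used when
// available; otherwise, on PPC64 with FPR<->GPR moves, the bits are moved
// directly via mtfprd, and as a last resort the value is stored to the stack
// and reloaded with lfd.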
2082 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
2083  Register scratch) {
2084  if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
2085  !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
2086  ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
2087  if (access == ConstantPoolEntry::OVERFLOWED) {
2088  addis(scratch, kConstantPoolRegister, Operand::Zero());
2089  lfd(result, MemOperand(scratch, 0));
2090  } else {
2091  lfd(result, MemOperand(kConstantPoolRegister, 0));
2092  }
2093  return;
2094  }
2095 
2096  // avoid gcc strict aliasing error using union cast
2097  union {
2098  uint64_t dval;
2099 #if V8_TARGET_ARCH_PPC64
2100  intptr_t ival;
2101 #else
2102  intptr_t ival[2];
2103 #endif
2104  } litVal;
2105 
2106  litVal.dval = value.AsUint64();
2107 
2108 #if V8_TARGET_ARCH_PPC64
2109  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2110  mov(scratch, Operand(litVal.ival));
2111  mtfprd(result, scratch);
2112  return;
2113  }
2114 #endif
2115 
2116  addi(sp, sp, Operand(-kDoubleSize));
2117 #if V8_TARGET_ARCH_PPC64
2118  mov(scratch, Operand(litVal.ival));
2119  std(scratch, MemOperand(sp));
2120 #else
2121  LoadIntLiteral(scratch, litVal.ival[0]);
2122  stw(scratch, MemOperand(sp, 0));
2123  LoadIntLiteral(scratch, litVal.ival[1]);
2124  stw(scratch, MemOperand(sp, 4));
2125 #endif
2126  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2127  lfd(result, MemOperand(sp, 0));
2128  addi(sp, sp, Operand(kDoubleSize));
2129 }
2130 
2131 void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
2132  Register scratch) {
2133 // sign-extend src to 64-bit
2134 #if V8_TARGET_ARCH_PPC64
2135  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2136  mtfprwa(dst, src);
2137  return;
2138  }
2139 #endif
2140 
2141  DCHECK(src != scratch);
2142  subi(sp, sp, Operand(kDoubleSize));
2143 #if V8_TARGET_ARCH_PPC64
2144  extsw(scratch, src);
2145  std(scratch, MemOperand(sp, 0));
2146 #else
2147  srawi(scratch, src, 31);
2148  stw(scratch, MemOperand(sp, Register::kExponentOffset));
2149  stw(src, MemOperand(sp, Register::kMantissaOffset));
2150 #endif
2151  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2152  lfd(dst, MemOperand(sp, 0));
2153  addi(sp, sp, Operand(kDoubleSize));
2154 }
2155 
2156 void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
2157  Register scratch) {
2158 // zero-extend src to 64-bit
2159 #if V8_TARGET_ARCH_PPC64
2160  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2161  mtfprwz(dst, src);
2162  return;
2163  }
2164 #endif
2165 
2166  DCHECK(src != scratch);
2167  subi(sp, sp, Operand(kDoubleSize));
2168 #if V8_TARGET_ARCH_PPC64
2169  clrldi(scratch, src, Operand(32));
2170  std(scratch, MemOperand(sp, 0));
2171 #else
2172  li(scratch, Operand::Zero());
2173  stw(scratch, MemOperand(sp, Register::kExponentOffset));
2174  stw(src, MemOperand(sp, Register::kMantissaOffset));
2175 #endif
2176  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2177  lfd(dst, MemOperand(sp, 0));
2178  addi(sp, sp, Operand(kDoubleSize));
2179 }
2180 
2181 void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
2182 #if !V8_TARGET_ARCH_PPC64
2183  Register src_hi,
2184 #endif
2185  Register src) {
2186 #if V8_TARGET_ARCH_PPC64
2187  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2188  mtfprd(dst, src);
2189  return;
2190  }
2191 #endif
2192 
2193  subi(sp, sp, Operand(kDoubleSize));
2194 #if V8_TARGET_ARCH_PPC64
2195  std(src, MemOperand(sp, 0));
2196 #else
2197  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2198  stw(src, MemOperand(sp, Register::kMantissaOffset));
2199 #endif
2200  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2201  lfd(dst, MemOperand(sp, 0));
2202  addi(sp, sp, Operand(kDoubleSize));
2203 }
2204 
2205 
2206 #if V8_TARGET_ARCH_PPC64
2207 void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
2208  Register src_hi,
2209  Register src_lo,
2210  Register scratch) {
2211  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2212  sldi(scratch, src_hi, Operand(32));
2213  rldimi(scratch, src_lo, 0, 32);
2214  mtfprd(dst, scratch);
2215  return;
2216  }
2217 
2218  subi(sp, sp, Operand(kDoubleSize));
2219  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2220  stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
2221  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2222  lfd(dst, MemOperand(sp));
2223  addi(sp, sp, Operand(kDoubleSize));
2224 }
2225 #endif
2226 
2227 void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
2228  Register scratch) {
2229 #if V8_TARGET_ARCH_PPC64
2230  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2231  mffprd(scratch, dst);
2232  rldimi(scratch, src, 0, 32);
2233  mtfprd(dst, scratch);
2234  return;
2235  }
2236 #endif
2237 
2238  subi(sp, sp, Operand(kDoubleSize));
2239  stfd(dst, MemOperand(sp));
2240  stw(src, MemOperand(sp, Register::kMantissaOffset));
2241  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2242  lfd(dst, MemOperand(sp));
2243  addi(sp, sp, Operand(kDoubleSize));
2244 }
2245 
2246 void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
2247  Register scratch) {
2248 #if V8_TARGET_ARCH_PPC64
2249  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2250  mffprd(scratch, dst);
2251  rldimi(scratch, src, 32, 0);
2252  mtfprd(dst, scratch);
2253  return;
2254  }
2255 #endif
2256 
2257  subi(sp, sp, Operand(kDoubleSize));
2258  stfd(dst, MemOperand(sp));
2259  stw(src, MemOperand(sp, Register::kExponentOffset));
2260  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2261  lfd(dst, MemOperand(sp));
2262  addi(sp, sp, Operand(kDoubleSize));
2263 }
2264 
2265 void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
2266 #if V8_TARGET_ARCH_PPC64
2267  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2268  mffprwz(dst, src);
2269  return;
2270  }
2271 #endif
2272 
2273  subi(sp, sp, Operand(kDoubleSize));
2274  stfd(src, MemOperand(sp));
2275  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2276  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2277  addi(sp, sp, Operand(kDoubleSize));
2278 }
2279 
2280 void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
2281 #if V8_TARGET_ARCH_PPC64
2282  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2283  mffprd(dst, src);
2284  srdi(dst, dst, Operand(32));
2285  return;
2286  }
2287 #endif
2288 
2289  subi(sp, sp, Operand(kDoubleSize));
2290  stfd(src, MemOperand(sp));
2291  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2292  lwz(dst, MemOperand(sp, Register::kExponentOffset));
2293  addi(sp, sp, Operand(kDoubleSize));
2294 }
2295 
2296 void TurboAssembler::MovDoubleToInt64(
2297 #if !V8_TARGET_ARCH_PPC64
2298  Register dst_hi,
2299 #endif
2300  Register dst, DoubleRegister src) {
2301 #if V8_TARGET_ARCH_PPC64
2302  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2303  mffprd(dst, src);
2304  return;
2305  }
2306 #endif
2307 
2308  subi(sp, sp, Operand(kDoubleSize));
2309  stfd(src, MemOperand(sp));
2310  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2311 #if V8_TARGET_ARCH_PPC64
2312  ld(dst, MemOperand(sp, 0));
2313 #else
2314  lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
2315  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2316 #endif
2317  addi(sp, sp, Operand(kDoubleSize));
2318 }
2319 
2320 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2321  subi(sp, sp, Operand(kFloatSize));
2322  stw(src, MemOperand(sp, 0));
2323  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2324  lfs(dst, MemOperand(sp, 0));
2325  addi(sp, sp, Operand(kFloatSize));
2326 }
2327 
2328 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2329  subi(sp, sp, Operand(kFloatSize));
2330  stfs(src, MemOperand(sp, 0));
2331  nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2332  lwz(dst, MemOperand(sp, 0));
2333  addi(sp, sp, Operand(kFloatSize));
2334 }
2335 
2336 void TurboAssembler::Add(Register dst, Register src, intptr_t value,
2337  Register scratch) {
2338  if (is_int16(value)) {
2339  addi(dst, src, Operand(value));
2340  } else {
2341  mov(scratch, Operand(value));
2342  add(dst, src, scratch);
2343  }
2344 }
2345 
2346 
2347 void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
2348  CRegister cr) {
2349  intptr_t value = src2.immediate();
2350  if (is_int16(value)) {
2351  cmpi(src1, src2, cr);
2352  } else {
2353  mov(scratch, src2);
2354  cmp(src1, scratch, cr);
2355  }
2356 }
2357 
2358 void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
2359  CRegister cr) {
2360  intptr_t value = src2.immediate();
2361  if (is_uint16(value)) {
2362  cmpli(src1, src2, cr);
2363  } else {
2364  mov(scratch, src2);
2365  cmpl(src1, scratch, cr);
2366  }
2367 }
2368 
2369 void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
2370  CRegister cr) {
2371  intptr_t value = src2.immediate();
2372  if (is_int16(value)) {
2373  cmpwi(src1, src2, cr);
2374  } else {
2375  mov(scratch, src2);
2376  cmpw(src1, scratch, cr);
2377  }
2378 }
2379 
2380 
2381 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
2382  Register scratch, CRegister cr) {
2383  intptr_t value = src2.immediate();
2384  if (is_uint16(value)) {
2385  cmplwi(src1, src2, cr);
2386  } else {
2387  mov(scratch, src2);
2388  cmplw(src1, scratch, cr);
2389  }
2390 }
2391 
2392 
2393 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
2394  RCBit rc) {
2395  if (rb.is_reg()) {
2396  and_(ra, rs, rb.rm(), rc);
2397  } else {
2398  if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2399  rc == SetRC) {
2400  andi(ra, rs, rb);
2401  } else {
2402  // mov handles the relocation.
2403  DCHECK(rs != r0);
2404  mov(r0, rb);
2405  and_(ra, rs, r0, rc);
2406  }
2407  }
2408 }
2409 
2410 
2411 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
2412  if (rb.is_reg()) {
2413  orx(ra, rs, rb.rm(), rc);
2414  } else {
2415  if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2416  rc == LeaveRC) {
2417  ori(ra, rs, rb);
2418  } else {
2419  // mov handles the relocation.
2420  DCHECK(rs != r0);
2421  mov(r0, rb);
2422  orx(ra, rs, r0, rc);
2423  }
2424  }
2425 }
2426 
2427 
2428 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
2429  RCBit rc) {
2430  if (rb.is_reg()) {
2431  xor_(ra, rs, rb.rm(), rc);
2432  } else {
2433  if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2434  rc == LeaveRC) {
2435  xori(ra, rs, rb);
2436  } else {
2437  // mov handles the relocation.
2438  DCHECK(rs != r0);
2439  mov(r0, rb);
2440  xor_(ra, rs, r0, rc);
2441  }
2442  }
2443 }
2444 
2445 void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
2446  CRegister cr) {
2447 #if V8_TARGET_ARCH_PPC64
2448  LoadSmiLiteral(scratch, smi);
2449  cmp(src1, scratch, cr);
2450 #else
2451  Cmpi(src1, Operand(smi), scratch, cr);
2452 #endif
2453 }
2454 
2455 void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
2456  CRegister cr) {
2457 #if V8_TARGET_ARCH_PPC64
2458  LoadSmiLiteral(scratch, smi);
2459  cmpl(src1, scratch, cr);
2460 #else
2461  Cmpli(src1, Operand(smi), scratch, cr);
2462 #endif
2463 }
2464 
2465 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
2466  Register scratch) {
2467 #if V8_TARGET_ARCH_PPC64
2468  LoadSmiLiteral(scratch, smi);
2469  add(dst, src, scratch);
2470 #else
2471  Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
2472 #endif
2473 }
2474 
2475 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
2476  Register scratch) {
2477 #if V8_TARGET_ARCH_PPC64
2478  LoadSmiLiteral(scratch, smi);
2479  sub(dst, src, scratch);
2480 #else
2481  Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
2482 #endif
2483 }
2484 
2485 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
2486  Register scratch, RCBit rc) {
2487 #if V8_TARGET_ARCH_PPC64
2488  LoadSmiLiteral(scratch, smi);
2489  and_(dst, src, scratch, rc);
2490 #else
2491  And(dst, src, Operand(smi), rc);
2492 #endif
2493 }
2494 
2495 
2496 // Load a "pointer" sized value from the memory location
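// On PPC64 the ld instruction uses a DS-form displacement, which must be a
// multiple of 4; for misaligned offsets the base register is adjusted first.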
2497 void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
2498  Register scratch) {
2499  DCHECK_EQ(mem.rb(), no_reg);
2500  int offset = mem.offset();
2501 
2502  if (!is_int16(offset)) {
2503  /* cannot use d-form */
2504  DCHECK_NE(scratch, no_reg);
2505  mov(scratch, Operand(offset));
2506  LoadPX(dst, MemOperand(mem.ra(), scratch));
2507  } else {
2508 #if V8_TARGET_ARCH_PPC64
2509  int misaligned = (offset & 3);
2510  if (misaligned) {
2511  // adjust base to conform to offset alignment requirements
2512  // Todo: enhance to use scratch if dst is unsuitable
2513  DCHECK(dst != r0);
2514  addi(dst, mem.ra(), Operand((offset & 3) - 4));
2515  ld(dst, MemOperand(dst, (offset & ~3) + 4));
2516  } else {
2517  ld(dst, mem);
2518  }
2519 #else
2520  lwz(dst, mem);
2521 #endif
2522  }
2523 }
2524 
2525 void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
2526  Register scratch) {
2527  int offset = mem.offset();
2528 
2529  if (!is_int16(offset)) {
2530  /* cannot use d-form */
2531  DCHECK(scratch != no_reg);
2532  mov(scratch, Operand(offset));
2533  LoadPUX(dst, MemOperand(mem.ra(), scratch));
2534  } else {
2535 #if V8_TARGET_ARCH_PPC64
2536  ldu(dst, mem);
2537 #else
2538  lwzu(dst, mem);
2539 #endif
2540  }
2541 }
2542 
2543 // Store a "pointer" sized value to the memory location
2544 void TurboAssembler::StoreP(Register src, const MemOperand& mem,
2545  Register scratch) {
2546  int offset = mem.offset();
2547 
2548  if (!is_int16(offset)) {
2549  /* cannot use d-form */
2550  DCHECK(scratch != no_reg);
2551  mov(scratch, Operand(offset));
2552  StorePX(src, MemOperand(mem.ra(), scratch));
2553  } else {
2554 #if V8_TARGET_ARCH_PPC64
2555  int misaligned = (offset & 3);
2556  if (misaligned) {
2557  // adjust base to conform to offset alignment requirements
2558  // a suitable scratch is required here
2559  DCHECK(scratch != no_reg);
2560  if (scratch == r0) {
2561  LoadIntLiteral(scratch, offset);
2562  stdx(src, MemOperand(mem.ra(), scratch));
2563  } else {
2564  addi(scratch, mem.ra(), Operand((offset & 3) - 4));
2565  std(src, MemOperand(scratch, (offset & ~3) + 4));
2566  }
2567  } else {
2568  std(src, mem);
2569  }
2570 #else
2571  stw(src, mem);
2572 #endif
2573  }
2574 }
2575 
2576 void TurboAssembler::StorePU(Register src, const MemOperand& mem,
2577  Register scratch) {
2578  int offset = mem.offset();
2579 
2580  if (!is_int16(offset)) {
2581  /* cannot use d-form */
2582  DCHECK(scratch != no_reg);
2583  mov(scratch, Operand(offset));
2584  StorePUX(src, MemOperand(mem.ra(), scratch));
2585  } else {
2586 #if V8_TARGET_ARCH_PPC64
2587  stdu(src, mem);
2588 #else
2589  stwu(src, mem);
2590 #endif
2591  }
2592 }
2593 
2594 void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
2595  Register scratch) {
2596  int offset = mem.offset();
2597 
2598  if (!is_int16(offset)) {
2599  DCHECK(scratch != no_reg);
2600  mov(scratch, Operand(offset));
2601  lwax(dst, MemOperand(mem.ra(), scratch));
2602  } else {
2603 #if V8_TARGET_ARCH_PPC64
2604  int misaligned = (offset & 3);
2605  if (misaligned) {
2606  // adjust base to conform to offset alignment requirements
2607  // Todo: enhance to use scratch if dst is unsuitable
2608  DCHECK(dst != r0);
2609  addi(dst, mem.ra(), Operand((offset & 3) - 4));
2610  lwa(dst, MemOperand(dst, (offset & ~3) + 4));
2611  } else {
2612  lwa(dst, mem);
2613  }
2614 #else
2615  lwz(dst, mem);
2616 #endif
2617  }
2618 }
2619 
2620 
2621 // Variable length depending on whether offset fits into immediate field
2622 // MemOperand currently only supports d-form
2623 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
2624  Register scratch) {
2625  Register base = mem.ra();
2626  int offset = mem.offset();
2627 
2628  if (!is_int16(offset)) {
2629  LoadIntLiteral(scratch, offset);
2630  lwzx(dst, MemOperand(base, scratch));
2631  } else {
2632  lwz(dst, mem);
2633  }
2634 }
2635 
2636 
2637 // Variable length depending on whether offset fits into immediate field
2638 // MemOperand currently only supports d-form
2639 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
2640  Register scratch) {
2641  Register base = mem.ra();
2642  int offset = mem.offset();
2643 
2644  if (!is_int16(offset)) {
2645  LoadIntLiteral(scratch, offset);
2646  stwx(src, MemOperand(base, scratch));
2647  } else {
2648  stw(src, mem);
2649  }
2650 }
2651 
2652 
2653 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
2654  Register scratch) {
2655  int offset = mem.offset();
2656 
2657  if (!is_int16(offset)) {
2658  DCHECK(scratch != no_reg);
2659  mov(scratch, Operand(offset));
2660  lhax(dst, MemOperand(mem.ra(), scratch));
2661  } else {
2662  lha(dst, mem);
2663  }
2664 }
2665 
2666 
2667 // Variable length depending on whether offset fits into immediate field
2668 // MemOperand currently only supports d-form
2669 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
2670  Register scratch) {
2671  Register base = mem.ra();
2672  int offset = mem.offset();
2673 
2674  if (!is_int16(offset)) {
2675  DCHECK_NE(scratch, no_reg);
2676  LoadIntLiteral(scratch, offset);
2677  lhzx(dst, MemOperand(base, scratch));
2678  } else {
2679  lhz(dst, mem);
2680  }
2681 }
2682 
2683 
2684 // Variable length depending on whether offset fits into immediate field
2685 // MemOperand currently only supports d-form
2686 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
2687  Register scratch) {
2688  Register base = mem.ra();
2689  int offset = mem.offset();
2690 
2691  if (!is_int16(offset)) {
2692  LoadIntLiteral(scratch, offset);
2693  sthx(src, MemOperand(base, scratch));
2694  } else {
2695  sth(src, mem);
2696  }
2697 }
2698 
2699 
2700 // Variable length depending on whether offset fits into immediate field
2701 // MemOperand currently only supports d-form
2702 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
2703  Register scratch) {
2704  Register base = mem.ra();
2705  int offset = mem.offset();
2706 
2707  if (!is_int16(offset)) {
2708  LoadIntLiteral(scratch, offset);
2709  lbzx(dst, MemOperand(base, scratch));
2710  } else {
2711  lbz(dst, mem);
2712  }
2713 }
2714 
2715 
2716 // Variable length depending on whether offset fits into immediate field
2717 // MemOperand currently only supports d-form
2718 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
2719  Register scratch) {
2720  Register base = mem.ra();
2721  int offset = mem.offset();
2722 
2723  if (!is_int16(offset)) {
2724  LoadIntLiteral(scratch, offset);
2725  stbx(src, MemOperand(base, scratch));
2726  } else {
2727  stb(src, mem);
2728  }
2729 }
2730 
2731 
2732 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
2733  Representation r, Register scratch) {
2734  DCHECK(!r.IsDouble());
2735  if (r.IsInteger8()) {
2736  LoadByte(dst, mem, scratch);
2737  extsb(dst, dst);
2738  } else if (r.IsUInteger8()) {
2739  LoadByte(dst, mem, scratch);
2740  } else if (r.IsInteger16()) {
2741  LoadHalfWordArith(dst, mem, scratch);
2742  } else if (r.IsUInteger16()) {
2743  LoadHalfWord(dst, mem, scratch);
2744 #if V8_TARGET_ARCH_PPC64
2745  } else if (r.IsInteger32()) {
2746  LoadWordArith(dst, mem, scratch);
2747 #endif
2748  } else {
2749  LoadP(dst, mem, scratch);
2750  }
2751 }
2752 
2753 
2754 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
2755  Representation r, Register scratch) {
2756  DCHECK(!r.IsDouble());
2757  if (r.IsInteger8() || r.IsUInteger8()) {
2758  StoreByte(src, mem, scratch);
2759  } else if (r.IsInteger16() || r.IsUInteger16()) {
2760  StoreHalfWord(src, mem, scratch);
2761 #if V8_TARGET_ARCH_PPC64
2762  } else if (r.IsInteger32()) {
2763  StoreWord(src, mem, scratch);
2764 #endif
2765  } else {
2766  if (r.IsHeapObject()) {
2767  AssertNotSmi(src);
2768  } else if (r.IsSmi()) {
2769  AssertSmi(src);
2770  }
2771  StoreP(src, mem, scratch);
2772  }
2773 }
2774 
2775 void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
2776  Register scratch) {
2777  Register base = mem.ra();
2778  int offset = mem.offset();
2779 
2780  if (!is_int16(offset)) {
2781  mov(scratch, Operand(offset));
2782  lfdx(dst, MemOperand(base, scratch));
2783  } else {
2784  lfd(dst, mem);
2785  }
2786 }
2787 
2788 void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
2789  Register scratch) {
2790  Register base = mem.ra();
2791  int offset = mem.offset();
2792 
2793  if (!is_int16(offset)) {
2794  mov(scratch, Operand(offset));
2795  lfdux(dst, MemOperand(base, scratch));
2796  } else {
2797  lfdu(dst, mem);
2798  }
2799 }
2800 
2801 void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
2802  Register scratch) {
2803  Register base = mem.ra();
2804  int offset = mem.offset();
2805 
2806  if (!is_int16(offset)) {
2807  mov(scratch, Operand(offset));
2808  lfsx(dst, MemOperand(base, scratch));
2809  } else {
2810  lfs(dst, mem);
2811  }
2812 }
2813 
2814 void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
2815  Register scratch) {
2816  Register base = mem.ra();
2817  int offset = mem.offset();
2818 
2819  if (!is_int16(offset)) {
2820  mov(scratch, Operand(offset));
2821  lfsux(dst, MemOperand(base, scratch));
2822  } else {
2823  lfsu(dst, mem);
2824  }
2825 }
2826 
2827 void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
2828  Register scratch) {
2829  Register base = mem.ra();
2830  int offset = mem.offset();
2831 
2832  if (!is_int16(offset)) {
2833  mov(scratch, Operand(offset));
2834  stfdx(src, MemOperand(base, scratch));
2835  } else {
2836  stfd(src, mem);
2837  }
2838 }
2839 
2840 void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
2841  Register scratch) {
2842  Register base = mem.ra();
2843  int offset = mem.offset();
2844 
2845  if (!is_int16(offset)) {
2846  mov(scratch, Operand(offset));
2847  stfdux(src, MemOperand(base, scratch));
2848  } else {
2849  stfdu(src, mem);
2850  }
2851 }
2852 
2853 void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
2854  Register scratch) {
2855  Register base = mem.ra();
2856  int offset = mem.offset();
2857 
2858  if (!is_int16(offset)) {
2859  mov(scratch, Operand(offset));
2860  stfsx(src, MemOperand(base, scratch));
2861  } else {
2862  stfs(src, mem);
2863  }
2864 }
2865 
2866 void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
2867  Register scratch) {
2868  Register base = mem.ra();
2869  int offset = mem.offset();
2870 
2871  if (!is_int16(offset)) {
2872  mov(scratch, Operand(offset));
2873  stfsux(src, MemOperand(base, scratch));
2874  } else {
2875  stfsu(src, mem);
2876  }
2877 }
2878 
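// Returns an allocatable general-purpose register that is not one of the
// (up to six) given registers, by scanning the default register
// configuration for the first candidate outside the exclusion set.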
2879 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2880  Register reg4, Register reg5,
2881  Register reg6) {
2882  RegList regs = 0;
2883  if (reg1.is_valid()) regs |= reg1.bit();
2884  if (reg2.is_valid()) regs |= reg2.bit();
2885  if (reg3.is_valid()) regs |= reg3.bit();
2886  if (reg4.is_valid()) regs |= reg4.bit();
2887  if (reg5.is_valid()) regs |= reg5.bit();
2888  if (reg6.is_valid()) regs |= reg6.bit();
2889 
2890  const RegisterConfiguration* config = RegisterConfiguration::Default();
2891  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2892  int code = config->GetAllocatableGeneralCode(i);
2893  Register candidate = Register::from_code(code);
2894  if (regs & candidate.bit()) continue;
2895  return candidate;
2896  }
2897  UNREACHABLE();
2898 }
2899 
2900 void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
2901  if (src == dst) return;
2902  DCHECK(!AreAliased(src, dst, scratch));
2903  mr(scratch, src);
2904  mr(src, dst);
2905  mr(dst, scratch);
2906 }
2907 
2908 void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
2909  if (dst.ra() != r0 && dst.ra().is_valid())
2910  DCHECK(!AreAliased(src, dst.ra(), scratch));
2911  if (dst.rb() != r0 && dst.rb().is_valid())
2912  DCHECK(!AreAliased(src, dst.rb(), scratch));
2913  DCHECK(!AreAliased(src, scratch));
2914  mr(scratch, src);
2915  LoadP(src, dst, r0);
2916  StoreP(scratch, dst, r0);
2917 }
2918 
2919 void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
2920  Register scratch_1) {
2921  if (src.ra() != r0 && src.ra().is_valid())
2922  DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
2923  if (src.rb() != r0 && src.rb().is_valid())
2924  DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
2925  if (dst.ra() != r0 && dst.ra().is_valid())
2926  DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
2927  if (dst.rb() != r0 && dst.rb().is_valid())
2928  DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
2929  DCHECK(!AreAliased(scratch_0, scratch_1));
2930  if (is_int16(src.offset()) || is_int16(dst.offset())) {
2931  if (!is_int16(src.offset())) {
2932  // swap operand
2933  MemOperand temp = src;
2934  src = dst;
2935  dst = temp;
2936  }
2937  LoadP(scratch_1, dst, scratch_0);
2938  LoadP(scratch_0, src);
2939  StoreP(scratch_1, src);
2940  StoreP(scratch_0, dst, scratch_1);
2941  } else {
2942  LoadP(scratch_1, dst, scratch_0);
2943  push(scratch_1);
2944  LoadP(scratch_0, src, scratch_1);
2945  StoreP(scratch_0, dst, scratch_1);
2946  pop(scratch_1);
2947  StoreP(scratch_1, src, scratch_0);
2948  }
2949 }
2950 
2951 void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
2952  DoubleRegister scratch) {
2953  if (src == dst) return;
2954  DCHECK(!AreAliased(src, dst, scratch));
2955  fmr(scratch, src);
2956  fmr(src, dst);
2957  fmr(dst, scratch);
2958 }
2959 
2960 void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
2961  DoubleRegister scratch) {
2962  DCHECK(!AreAliased(src, scratch));
2963  fmr(scratch, src);
2964  LoadSingle(src, dst, r0);
2965  StoreSingle(scratch, dst, r0);
2966 }
2967 
2968 void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
2969  DoubleRegister scratch_0,
2970  DoubleRegister scratch_1) {
2971  DCHECK(!AreAliased(scratch_0, scratch_1));
2972  LoadSingle(scratch_0, src, r0);
2973  LoadSingle(scratch_1, dst, r0);
2974  StoreSingle(scratch_0, dst, r0);
2975  StoreSingle(scratch_1, src, r0);
2976 }
2977 
2978 void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
2979  DoubleRegister scratch) {
2980  if (src == dst) return;
2981  DCHECK(!AreAliased(src, dst, scratch));
2982  fmr(scratch, src);
2983  fmr(src, dst);
2984  fmr(dst, scratch);
2985 }
2986 
2987 void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
2988  DoubleRegister scratch) {
2989  DCHECK(!AreAliased(src, scratch));
2990  fmr(scratch, src);
2991  LoadDouble(src, dst, r0);
2992  StoreDouble(scratch, dst, r0);
2993 }
2994 
2995 void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
2996  DoubleRegister scratch_0,
2997  DoubleRegister scratch_1) {
2998  DCHECK(!AreAliased(scratch_0, scratch_1));
2999  LoadDouble(scratch_0, src, r0);
3000  LoadDouble(scratch_1, dst, r0);
3001  StoreDouble(scratch_0, dst, r0);
3002  StoreDouble(scratch_1, src, r0);
3003 }
3004 
3005 void TurboAssembler::ResetSpeculationPoisonRegister() {
3006  mov(kSpeculationPoisonRegister, Operand(-1));
3007 }
3008 
3009 void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
3010  Cmpi(x, Operand(y), r0);
3011  beq(dest);
3012 }
3013 
3014 void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
3015  Cmpi(x, Operand(y), r0);
3016  blt(dest);
3017 }
3018 
3019 } // namespace internal
3020 } // namespace v8
3021 
3022 #endif // V8_TARGET_ARCH_PPC