V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
builtins-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_ARM64
6 
7 #include "src/arm64/macro-assembler-arm64-inl.h"
8 #include "src/code-factory.h"
9 #include "src/code-stubs.h"
10 #include "src/counters.h"
11 #include "src/debug/debug.h"
12 #include "src/deoptimizer.h"
13 #include "src/frame-constants.h"
14 #include "src/frames.h"
15 #include "src/objects-inl.h"
16 #include "src/objects/js-generator.h"
17 #include "src/objects/smi.h"
18 #include "src/register-configuration.h"
19 #include "src/runtime/runtime.h"
20 #include "src/wasm/wasm-objects.h"
21 
22 namespace v8 {
23 namespace internal {
24 
25 #define __ ACCESS_MASM(masm)
26 
27 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
28  ExitFrameType exit_frame_type) {
29  __ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
30  if (exit_frame_type == BUILTIN_EXIT) {
31  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
32  RelocInfo::CODE_TARGET);
33  } else {
34  DCHECK(exit_frame_type == EXIT);
35  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
36  RelocInfo::CODE_TARGET);
37  }
38 }
39 
40 void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
41  // ----------- S t a t e -------------
42  // -- x0 : number of arguments
43  // -- lr : return address
44  // -- sp[...]: constructor arguments
45  // -----------------------------------
46  ASM_LOCATION("Builtins::Generate_InternalArrayConstructor");
47  Label generic_array_code;
48 
49  if (FLAG_debug_code) {
50  // The initial map for the builtin InternalArray function should be a Map.
51  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
52  __ Tst(x10, kSmiTagMask);
53  __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
54  __ CompareObjectType(x10, x11, x12, MAP_TYPE);
55  __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
56  }
57 
58  // Run the native code for the InternalArray function called as a normal
59  // function.
60  __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
61  RelocInfo::CODE_TARGET);
62 }
63 
64 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
65  Runtime::FunctionId function_id) {
66  // ----------- S t a t e -------------
67  // -- x0 : argument count (preserved for callee)
68  // -- x1 : target function (preserved for callee)
69  // -- x3 : new target (preserved for callee)
70  // -----------------------------------
71  {
72  FrameScope scope(masm, StackFrame::INTERNAL);
73  // Push a copy of the target function and the new target.
74  // Push another copy as a parameter to the runtime call.
75  __ SmiTag(x0);
76  __ Push(x0, x1, x3, padreg);
77  __ PushArgument(x1);
78 
79  __ CallRuntime(function_id, 1);
80  __ Mov(x2, x0);
81 
82  // Restore target function and new target.
83  __ Pop(padreg, x3, x1, x0);
84  __ SmiUntag(x0);
85  }
86 
87  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
88  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
89  __ Br(x2);
90 }
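
The SmiTag/SmiUntag pair above keeps the argument count alive across the runtime call as a tagged value. A minimal C++ sketch of the encoding, assuming V8's default 64-bit layout where the 32-bit payload lives in the upper word (kSmiShift = 32 is an assumption, not read from this file):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShift = 32;  // Assumed: kSmiTagSize (1) + kSmiShiftSize (31).

    uint64_t SmiTag(int64_t value) { return static_cast<uint64_t>(value) << kSmiShift; }
    int64_t SmiUntag(uint64_t smi) { return static_cast<int64_t>(smi) >> kSmiShift; }

    int main() {
      assert(SmiUntag(SmiTag(42)) == 42);
      assert(SmiUntag(SmiTag(-7)) == -7);
      assert((SmiTag(42) & 1) == 0);  // Tag bit 0 stays clear, so a smi is never mistaken for a heap pointer.
    }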
91 
92 namespace {
93 
94 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
95  Label post_instantiation_deopt_entry;
96 
97  // ----------- S t a t e -------------
98  // -- x0 : number of arguments
99  // -- x1 : constructor function
100  // -- x3 : new target
101  // -- cp : context
102  // -- lr : return address
103  // -- sp[...]: constructor arguments
104  // -----------------------------------
105 
106  ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
107 
108  // Enter a construct frame.
109  {
110  FrameScope scope(masm, StackFrame::CONSTRUCT);
111  Label already_aligned;
112  Register argc = x0;
113 
114  if (__ emit_debug_code()) {
115  // Check that FrameScope pushed the context onto the stack already.
116  __ Peek(x2, 0);
117  __ Cmp(x2, cp);
118  __ Check(eq, AbortReason::kUnexpectedValue);
119  }
120 
121  // Push number of arguments.
122  __ SmiTag(x11, argc);
123  __ Push(x11, padreg);
124 
125  // Add a slot for the receiver, and round up to maintain alignment.
126  Register slot_count = x2;
127  Register slot_count_without_rounding = x12;
128  __ Add(slot_count_without_rounding, argc, 2);
129  __ Bic(slot_count, slot_count_without_rounding, 1);
130  __ Claim(slot_count);
131 
132  // Preserve the incoming parameters on the stack.
133  __ LoadRoot(x10, RootIndex::kTheHoleValue);
134 
135  // Compute a pointer to the slot immediately above the location on the
136  // stack to which the arguments will later be copied.
137  __ SlotAddress(x2, argc);
138 
139  // Poke the hole (receiver) in the highest slot.
140  __ Str(x10, MemOperand(x2));
141  __ Tbnz(slot_count_without_rounding, 0, &already_aligned);
142 
143  // Store padding, if needed.
144  __ Str(padreg, MemOperand(x2, 1 * kPointerSize));
145  __ Bind(&already_aligned);
146 
147  // Copy arguments to the expression stack.
148  {
149  Register count = x2;
150  Register dst = x10;
151  Register src = x11;
152  __ Mov(count, argc);
153  __ SlotAddress(dst, 0);
154  __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
155  __ CopyDoubleWords(dst, src, count);
156  }
157 
158  // ----------- S t a t e -------------
159  // -- x0: number of arguments (untagged)
160  // -- x1: constructor function
161  // -- x3: new target
162  // If argc is odd:
163  // -- sp[0*kPointerSize]: argument n - 1
164  // -- ...
165  // -- sp[(n-1)*kPointerSize]: argument 0
166  // -- sp[(n+0)*kPointerSize]: the hole (receiver)
167  // -- sp[(n+1)*kPointerSize]: padding
168  // -- sp[(n+2)*kPointerSize]: number of arguments (tagged)
169  // -- sp[(n+3)*kPointerSize]: context (pushed by FrameScope)
170  // If argc is even:
171  // -- sp[0*kPointerSize]: argument n - 1
172  // -- ...
173  // -- sp[(n-1)*kPointerSize]: argument 0
174  // -- sp[(n+0)*kPointerSize]: the hole (receiver)
175  // -- sp[(n+1)*kPointerSize]: padding
176  // -- sp[(n+2)*kPointerSize]: padding
177  // -- sp[(n+3)*kPointerSize]: number of arguments (tagged)
178  // -- sp[(n+4)*kPointerSize]: context (pushed by FrameScope)
179  // -----------------------------------
180 
181  // Call the function.
182  ParameterCount actual(argc);
183  __ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
184 
185  // Restore the context from the frame.
186  __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
187  // Restore the smi-tagged arguments count from the frame. Use fp-relative
188  // addressing to avoid the circular dependency between padding existence and
189  // argc parity.
190  __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
191  // Leave construct frame.
192  }
193 
194  // Remove caller arguments from the stack and return.
195  __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
196  __ Ret();
197 }
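
The Add/Bic pair above is the whole alignment story: (argc + 2) & ~1 claims the argc argument slots plus the receiver hole, rounded up to an even count so sp stays 16-byte aligned. An illustrative sketch of the arithmetic (not V8 API):

    #include <cstdio>

    // Mirrors Add(x, argc, 2) followed by Bic(x, x, 1); Bic clears bit 0,
    // rounding (argc + 2) down to an even slot count.
    unsigned ClaimedSlots(unsigned argc) { return (argc + 2) & ~1u; }

    int main() {
      printf("argc=3 -> %u slots\n", ClaimedSlots(3));  // 4: three args + the hole, no padding.
      printf("argc=4 -> %u slots\n", ClaimedSlots(4));  // 6: four args + the hole + one padding slot.
    }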
198 
199 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
200  Label* stack_overflow) {
201  UseScratchRegisterScope temps(masm);
202  Register scratch = temps.AcquireX();
203 
204  // Check the stack for overflow.
205  // We are not trying to catch interruptions (e.g. debug break and
206  // preemption) here, so the "real stack limit" is checked.
207  Label enough_stack_space;
208  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
209  // Make scratch the space we have left. The stack might already be overflowed
210  // here which will cause scratch to become negative.
211  __ Sub(scratch, sp, scratch);
212  // Check if the arguments will overflow the stack.
213  __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
214  __ B(le, stack_overflow);
215 
216 #if defined(V8_OS_WIN)
217  // Simulate _chkstk to extend stack guard page on Windows ARM64.
218  const int kPageSize = 4096;
219  Label chkstk, chkstk_done;
220  Register probe = temps.AcquireX();
221 
222  __ Sub(scratch, sp, Operand(num_args, LSL, kPointerSizeLog2));
223  __ Mov(probe, sp);
224 
225  // Loop start of stack probe.
226  __ Bind(&chkstk);
227  __ Sub(probe, probe, kPageSize);
228  __ Cmp(probe, scratch);
229  __ B(lo, &chkstk_done);
230  __ Ldrb(xzr, MemOperand(probe));
231  __ B(&chkstk);
232 
233  __ Bind(&chkstk_done);
234 #endif
235 }
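
In C++ terms, the check above measures the headroom between sp and the real stack limit and signals overflow if the requested slots meet or exceed it; because the comparison is signed, a stack that is already past the limit is caught too. On Windows the extra block then touches one byte per 4 KiB page so the guard page grows in order. A sketch under those assumptions:

    #include <cstdint>

    constexpr unsigned kPointerSizeLog2 = 3;  // 8-byte stack slots on arm64.

    bool WouldOverflow(uint64_t sp, uint64_t real_stack_limit, uint64_t num_args) {
      // Sub(scratch, sp, limit) may wrap if sp is already below the limit;
      // B(le, ...) interprets the result as signed, so that case reports
      // overflow as well.
      int64_t space_left = static_cast<int64_t>(sp - real_stack_limit);
      return space_left <= static_cast<int64_t>(num_args << kPointerSizeLog2);
    }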
236 
237 } // namespace
238 
239 // The construct stub for ES5 constructor functions and ES6 class constructors.
240 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
241  // ----------- S t a t e -------------
242  // -- x0 : number of arguments
243  // -- x1 : constructor function
244  // -- x3 : new target
245  // -- lr : return address
246  // -- cp : context pointer
247  // -- sp[...]: constructor arguments
248  // -----------------------------------
249 
250  ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");
251 
252  // Enter a construct frame.
253  {
254  FrameScope scope(masm, StackFrame::CONSTRUCT);
255  Label post_instantiation_deopt_entry, not_create_implicit_receiver;
256 
257  if (__ emit_debug_code()) {
258  // Check that FrameScope pushed the context onto the stack already.
259  __ Peek(x2, 0);
260  __ Cmp(x2, cp);
261  __ Check(eq, AbortReason::kUnexpectedValue);
262  }
263 
264  // Preserve the incoming parameters on the stack.
265  __ SmiTag(x0);
266  __ Push(x0, x1, padreg, x3);
267 
268  // ----------- S t a t e -------------
269  // -- sp[0*kPointerSize]: new target
270  // -- sp[1*kPointerSize]: padding
271  // -- x1 and sp[2*kPointerSize]: constructor function
272  // -- sp[3*kPointerSize]: number of arguments (tagged)
273  // -- sp[4*kPointerSize]: context (pushed by FrameScope)
274  // -----------------------------------
275 
276  __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
277  __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
278  __ TestAndBranchIfAnySet(w4,
279  SharedFunctionInfo::IsDerivedConstructorBit::kMask,
280  &not_create_implicit_receiver);
281 
282  // If not derived class constructor: Allocate the new receiver object.
283  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
284  x4, x5);
285  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
286  RelocInfo::CODE_TARGET);
287  __ B(&post_instantiation_deopt_entry);
288 
289  // Else: use TheHoleValue as receiver for constructor call
290  __ Bind(&not_create_implicit_receiver);
291  __ LoadRoot(x0, RootIndex::kTheHoleValue);
292 
293  // ----------- S t a t e -------------
294  // -- x0: receiver
295  // -- Slot 4 / sp[0*kPointerSize]: new target
296  // -- Slot 3 / sp[1*kPointerSize]: padding
297  // -- Slot 2 / sp[2*kPointerSize]: constructor function
298  // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
299  // -- Slot 0 / sp[4*kPointerSize]: context
300  // -----------------------------------
301  // Deoptimizer enters here.
302  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
303  masm->pc_offset());
304 
305  __ Bind(&post_instantiation_deopt_entry);
306 
307  // Restore new target from the top of the stack.
308  __ Peek(x3, 0 * kPointerSize);
309 
310  // Restore constructor function and argument count.
311  __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
312  __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
313 
314  // Copy arguments to the expression stack. The called function pops the
315  // receiver along with its arguments, so we need an extra receiver on the
316  // stack, in case we have to return it later.
317 
318  // Overwrite the new target with a receiver.
319  __ Poke(x0, 0);
320 
321  // Push two further copies of the receiver. One will be popped by the called
322  // function. The second acts as padding if the number of arguments plus the
323  // receiver is odd; pushing the receiver twice avoids branching. It also means
324  // that we don't have to handle the even and odd cases specially on
325  // InvokeFunction's return, as the top of stack will be the receiver in either
326  // case.
327  __ Push(x0, x0);
328 
329  // ----------- S t a t e -------------
330  // -- x3: new target
331  // -- x12: number of arguments (untagged)
332  // -- sp[0*kPointerSize]: implicit receiver (overwritten if argc is odd)
333  // -- sp[1*kPointerSize]: implicit receiver
334  // -- sp[2*kPointerSize]: implicit receiver
335  // -- sp[3*kPointerSize]: padding
336  // -- x1 and sp[4*kPointerSize]: constructor function
337  // -- sp[5*kPointerSize]: number of arguments (tagged)
338  // -- sp[6*kPointerSize]: context
339  // -----------------------------------
340 
341  // Round the number of arguments down to the next even number, and claim
342  // slots for the arguments. If the number of arguments was odd, the last
343  // argument will overwrite one of the receivers pushed above.
344  __ Bic(x10, x12, 1);
345 
346  // Check if we have enough stack space to push all arguments.
347  Label enough_stack_space, stack_overflow;
348  Generate_StackOverflowCheck(masm, x10, &stack_overflow);
349  __ B(&enough_stack_space);
350 
351  __ Bind(&stack_overflow);
352  // Restore the context from the frame.
353  __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
354  __ CallRuntime(Runtime::kThrowStackOverflow);
355  __ Unreachable();
356 
357  __ Bind(&enough_stack_space);
358  __ Claim(x10);
359 
360  // Copy the arguments.
361  {
362  Register count = x2;
363  Register dst = x10;
364  Register src = x11;
365  __ Mov(count, x12);
366  __ SlotAddress(dst, 0);
367  __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
368  __ CopyDoubleWords(dst, src, count);
369  }
370 
371  // Call the function.
372  __ Mov(x0, x12);
373  ParameterCount actual(x0);
374  __ InvokeFunction(x1, x3, actual, CALL_FUNCTION);
375 
376  // ----------- S t a t e -------------
377  // -- sp[0*kPointerSize]: implicit receiver
378  // -- sp[1*kPointerSize]: padding
379  // -- sp[2*kPointerSize]: constructor function
380  // -- sp[3*kPointerSize]: number of arguments
381  // -- sp[4*kPointerSize]: context
382  // -----------------------------------
383 
384  // Store offset of return address for deoptimizer.
385  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
386  masm->pc_offset());
387 
388  // Restore the context from the frame.
389  __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
390 
391  // If the result is an object (in the ECMA sense), we should get rid
392  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
393  // on page 74.
394  Label use_receiver, do_throw, leave_frame;
395 
396  // If the result is undefined, we jump out to using the implicit receiver.
397  __ CompareRoot(x0, RootIndex::kUndefinedValue);
398  __ B(eq, &use_receiver);
399 
400  // Otherwise we do a smi check and fall through to check if the return value
401  // is a valid receiver.
402 
403  // If the result is a smi, it is *not* an object in the ECMA sense.
404  __ JumpIfSmi(x0, &use_receiver);
405 
406  // If the type of the result (stored in its map) is less than
407  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
408  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
409  __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
410  __ B(&use_receiver);
411 
412  __ Bind(&do_throw);
413  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
414 
415  // Throw away the result of the constructor invocation and use the
416  // on-stack receiver as the result.
417  __ Bind(&use_receiver);
418  __ Peek(x0, 0 * kPointerSize);
419  __ CompareRoot(x0, RootIndex::kTheHoleValue);
420  __ B(eq, &do_throw);
421 
422  __ Bind(&leave_frame);
423  // Restore smi-tagged arguments count from the frame.
424  __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
425  // Leave construct frame.
426  }
427  // Remove caller arguments from the stack and return.
428  __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
429  __ Ret();
430 }
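
The tail of this stub implements the ES [[Construct]] result rule: keep the constructor's return value only if it is a JS receiver, otherwise fall back to the implicit receiver, and throw if a derived constructor left the hole in the receiver slot. A runnable sketch with stand-in types (Kind and ConstructResult are illustrative, not V8 API):

    enum class Kind { kUndefined, kSmi, kJSReceiver, kOtherHeapObject };

    Kind ConstructResult(Kind returned, Kind receiver) {
      if (returned == Kind::kUndefined) return receiver;   // use_receiver path.
      if (returned == Kind::kSmi) return receiver;         // A smi is never a JS receiver.
      if (returned == Kind::kJSReceiver) return returned;  // leave_frame path.
      return receiver;  // Strings, heap numbers, etc.: keep the implicit receiver.
    }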
431 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
432  Generate_JSBuiltinsConstructStubHelper(masm);
433 }
434 
435 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
436  FrameScope scope(masm, StackFrame::INTERNAL);
437  __ PushArgument(x1);
438  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
439 }
440 
441 // static
442 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
443  // ----------- S t a t e -------------
444  // -- x0 : the value to pass to the generator
445  // -- x1 : the JSGeneratorObject to resume
446  // -- lr : return address
447  // -----------------------------------
448  __ AssertGeneratorObject(x1);
449 
450  // Store input value into generator object.
451  __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
452  __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
453  kLRHasNotBeenSaved, kDontSaveFPRegs);
454 
455  // Load suspended function and context.
456  __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
457  __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
458 
459  // Flood function if we are stepping.
460  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
461  Label stepping_prepared;
462  ExternalReference debug_hook =
463  ExternalReference::debug_hook_on_function_call_address(masm->isolate());
464  __ Mov(x10, debug_hook);
465  __ Ldrsb(x10, MemOperand(x10));
466  __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);
467 
468  // Flood function if we need to continue stepping in the suspended generator.
469  ExternalReference debug_suspended_generator =
470  ExternalReference::debug_suspended_generator_address(masm->isolate());
471  __ Mov(x10, debug_suspended_generator);
472  __ Ldr(x10, MemOperand(x10));
473  __ CompareAndBranch(x10, Operand(x1), eq,
474  &prepare_step_in_suspended_generator);
475  __ Bind(&stepping_prepared);
476 
477  // Check the stack for overflow. We are not trying to catch interruptions
478  // (i.e. debug break and preemption) here, so check the "real stack limit".
479  Label stack_overflow;
480  __ CompareRoot(sp, RootIndex::kRealStackLimit);
481  __ B(lo, &stack_overflow);
482 
483  // Get number of arguments for generator function.
484  __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
485  __ Ldrh(w10, FieldMemOperand(
486  x10, SharedFunctionInfo::kFormalParameterCountOffset));
487 
488  // Claim slots for arguments and receiver (rounded up to a multiple of two).
489  __ Add(x11, x10, 2);
490  __ Bic(x11, x11, 1);
491  __ Claim(x11);
492 
493  // Store padding (which might be replaced by the receiver).
494  __ Sub(x11, x11, 1);
495  __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));
496 
497  // Poke receiver into highest claimed slot.
498  __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
499  __ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));
500 
501  // ----------- S t a t e -------------
502  // -- x1 : the JSGeneratorObject to resume
503  // -- x4 : generator function
504  // -- x10 : argument count
505  // -- cp : generator context
506  // -- lr : return address
507  // -- sp[arg count] : generator receiver
508  // -- sp[0 .. arg count - 1] : claimed for args
509  // -----------------------------------
510 
511  // Copy the function arguments from the generator object's register file.
512 
513  __ Ldr(x5,
514  FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
515  {
516  Label loop, done;
517  __ Cbz(x10, &done);
518  __ Mov(x12, 0);
519 
520  __ Bind(&loop);
521  __ Sub(x10, x10, 1);
522  __ Add(x11, x5, Operand(x12, LSL, kPointerSizeLog2));
523  __ Ldr(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
524  __ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
525  __ Add(x12, x12, 1);
526  __ Cbnz(x10, &loop);
527  __ Bind(&done);
528  }
529 
530  // Underlying function needs to have bytecode available.
531  if (FLAG_debug_code) {
532  Label check_has_bytecode_array;
533  __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
534  __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
535  __ CompareObjectType(x3, x0, x0, INTERPRETER_DATA_TYPE);
536  __ B(ne, &check_has_bytecode_array);
537  __ Ldr(x3, FieldMemOperand(x3, InterpreterData::kBytecodeArrayOffset));
538  __ Bind(&check_has_bytecode_array);
539  __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
540  __ Assert(eq, AbortReason::kMissingBytecodeArray);
541  }
542 
543  // Resume (Ignition/TurboFan) generator object.
544  {
545  __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
546  __ Ldrh(w0, FieldMemOperand(
547  x0, SharedFunctionInfo::kFormalParameterCountOffset));
548  // We abuse new.target both to indicate that this is a resume call and to
549  // pass in the generator object. In ordinary calls, new.target is always
550  // undefined because generator functions are non-constructable.
551  __ Mov(x3, x1);
552  __ Mov(x1, x4);
553  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
554  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
555  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
556  __ Jump(x2);
557  }
558 
559  __ Bind(&prepare_step_in_if_stepping);
560  {
561  FrameScope scope(masm, StackFrame::INTERNAL);
562  // Push hole as receiver since we do not use it for stepping.
563  __ LoadRoot(x5, RootIndex::kTheHoleValue);
564  __ Push(x1, padreg, x4, x5);
565  __ CallRuntime(Runtime::kDebugOnFunctionCall);
566  __ Pop(padreg, x1);
567  __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
568  }
569  __ B(&stepping_prepared);
570 
571  __ Bind(&prepare_step_in_suspended_generator);
572  {
573  FrameScope scope(masm, StackFrame::INTERNAL);
574  __ Push(x1, padreg);
575  __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
576  __ Pop(padreg, x1);
577  __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
578  }
579  __ B(&stepping_prepared);
580 
581  __ bind(&stack_overflow);
582  {
583  FrameScope scope(masm, StackFrame::INTERNAL);
584  __ CallRuntime(Runtime::kThrowStackOverflow);
585  __ Unreachable(); // This should be unreachable.
586  }
587 }
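
The copy loop above walks the generator's parameters-and-registers FixedArray forwards while poking stack slots backwards, so parameter 0 lands in the highest argument slot, just below the receiver. The same indexing in a sketch (std::vector stands in for the FixedArray):

    #include <vector>

    std::vector<int> CopyGeneratorArgs(const std::vector<int>& register_file,
                                       int param_count) {
      std::vector<int> stack_slots(param_count);
      for (int src = 0, remaining = param_count; remaining > 0; ++src) {
        --remaining;                                  // Sub(x10, x10, 1).
        stack_slots[remaining] = register_file[src];  // Poke at x10 << kPointerSizeLog2.
      }
      return stack_slots;
    }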
588 
589 
590 // Input:
591 // x0: new.target.
592 // x1: function.
593 // x2: receiver.
594 // x3: argc.
595 // x4: argv.
596 // Output:
597 // x0: result.
598 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
599  bool is_construct) {
600  Register new_target = x0;
601  Register function = x1;
602  Register receiver = x2;
603  Register argc = x3;
604  Register argv = x4;
605  Register scratch = x10;
606  Register slots_to_claim = x11;
607 
608  {
609  // Enter an internal frame.
610  FrameScope scope(masm, StackFrame::INTERNAL);
611 
612  // Setup the context (we need to use the caller context from the isolate).
613  __ Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
614  masm->isolate()));
615  __ Ldr(cp, MemOperand(scratch));
616 
617  // Claim enough space for the arguments, the receiver and the function,
618  // including an optional slot of padding.
619  __ Add(slots_to_claim, argc, 3);
620  __ Bic(slots_to_claim, slots_to_claim, 1);
621 
622  // Check if we have enough stack space to push all arguments.
623  Label enough_stack_space, stack_overflow;
624  Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
625  __ B(&enough_stack_space);
626 
627  __ Bind(&stack_overflow);
628  __ CallRuntime(Runtime::kThrowStackOverflow);
629  __ Unreachable();
630 
631  __ Bind(&enough_stack_space);
632  __ Claim(slots_to_claim);
633 
634  // Store padding (which might be overwritten).
635  __ SlotAddress(scratch, slots_to_claim);
636  __ Str(padreg, MemOperand(scratch, -kPointerSize));
637 
638  // Store receiver and function on the stack.
639  __ SlotAddress(scratch, argc);
640  __ Stp(receiver, function, MemOperand(scratch));
641 
642  // Copy arguments to the stack in a loop, in reverse order.
643  // x3: argc.
644  // x4: argv.
645  Label loop, done;
646 
647  // Skip the argument set up if we have no arguments.
648  __ Cbz(argc, &done);
649 
650  // scratch has been set to point to the location of the receiver, which
651  // marks the end of the argument copy.
652 
653  __ Bind(&loop);
654  // Load the handle.
655  __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
656  // Dereference the handle.
657  __ Ldr(x11, MemOperand(x11));
658  // Poke the result into the stack.
659  __ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
660  // Loop if we've not reached the end of copy marker.
661  __ Cmp(sp, scratch);
662  __ B(lt, &loop);
663 
664  __ Bind(&done);
665 
666  __ Mov(scratch, argc);
667  __ Mov(argc, new_target);
668  __ Mov(new_target, scratch);
669  // x0: argc.
670  // x3: new.target.
671 
672  // Initialize all JavaScript callee-saved registers, since they will be seen
673  // by the garbage collector as part of handlers.
674  // The original values have been saved in JSEntryStub::GenerateBody().
675  __ LoadRoot(x19, RootIndex::kUndefinedValue);
676  __ Mov(x20, x19);
677  __ Mov(x21, x19);
678  __ Mov(x22, x19);
679  __ Mov(x23, x19);
680  __ Mov(x24, x19);
681  __ Mov(x25, x19);
682  __ Mov(x28, x19);
683  // Don't initialize the reserved registers.
684  // x26 : root register (kRootRegister).
685  // x27 : context pointer (cp).
686  // x29 : frame pointer (fp).
687 
688  Handle<Code> builtin = is_construct
689  ? BUILTIN_CODE(masm->isolate(), Construct)
690  : masm->isolate()->builtins()->Call();
691  __ Call(builtin, RelocInfo::CODE_TARGET);
692 
693  // Exit the JS internal frame and remove the parameters (except function),
694  // and return.
695  }
696 
697  // Result is in x0. Return.
698  __ Ret();
699 }
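
Note the double load in the copy loop: each argv entry is a handle, that is, a pointer to a slot holding the actual object pointer, so the code loads the handle and then dereferences it before poking the value. A sketch with raw pointers (Object is a stand-in type):

    #include <cstddef>

    using Object = void*;

    // dst_top starts at the receiver's slot; arguments fill the slots below it,
    // mirroring the PreIndex stores against a PostIndex walk of argv.
    void CopyArguments(Object** argv, size_t argc, Object* dst_top) {
      for (size_t i = 0; i < argc; ++i) {
        *--dst_top = *argv[i];  // Load the handle, then dereference it.
      }
    }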
700 
701 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
702  Generate_JSEntryTrampolineHelper(masm, false);
703 }
704 
705 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
706  Generate_JSEntryTrampolineHelper(masm, true);
707 }
708 
709 static void ReplaceClosureCodeWithOptimizedCode(
710  MacroAssembler* masm, Register optimized_code, Register closure,
711  Register scratch1, Register scratch2, Register scratch3) {
712  // Store code entry in the closure.
713  __ Str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
714  __ Mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
715  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
716  kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
717  OMIT_SMI_CHECK);
718 }
719 
720 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
721  Register args_size = scratch;
722 
723  // Get the arguments + receiver count.
724  __ Ldr(args_size,
725  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
726  __ Ldr(args_size.W(),
727  FieldMemOperand(args_size, BytecodeArray::kParameterSizeOffset));
728 
729  // Leave the frame (also dropping the register file).
730  __ LeaveFrame(StackFrame::INTERPRETED);
731 
732  // Drop receiver + arguments.
733  if (__ emit_debug_code()) {
734  __ Tst(args_size, kPointerSize - 1);
735  __ Check(eq, AbortReason::kUnexpectedValue);
736  }
737  __ Lsr(args_size, args_size, kPointerSizeLog2);
738  __ DropArguments(args_size);
739 }
740 
741 // Tail-call |function_id| if |smi_entry| == |marker|
742 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
743  Register smi_entry,
744  OptimizationMarker marker,
745  Runtime::FunctionId function_id) {
746  Label no_match;
747  __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
748  GenerateTailCallToReturnedCode(masm, function_id);
749  __ bind(&no_match);
750 }
751 
752 static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
753  Register feedback_vector,
754  Register scratch1, Register scratch2,
755  Register scratch3) {
756  // ----------- S t a t e -------------
757  // -- x0 : argument count (preserved for callee if needed, and caller)
758  // -- x3 : new target (preserved for callee if needed, and caller)
759  // -- x1 : target function (preserved for callee if needed, and caller)
760  // -- feedback vector (preserved for caller if needed)
761  // -----------------------------------
762  DCHECK(
763  !AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
764 
765  Label optimized_code_slot_is_weak_ref, fallthrough;
766 
767  Register closure = x1;
768  Register optimized_code_entry = scratch1;
769 
770  __ Ldr(
771  optimized_code_entry,
772  FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
773 
774  // Check if the code entry is a Smi. If yes, we interpret it as an
775  // optimization marker. Otherwise, we interpret it as a weak reference to a
776  // code object.
777  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
778 
779  {
780  // Optimized code slot is a Smi optimization marker.
781 
782  // Fall through if no optimization trigger.
783  __ CompareAndBranch(optimized_code_entry,
784  Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
785  &fallthrough);
786 
787  // TODO(v8:8394): The logging of first execution will break if
788  // feedback vectors are not allocated. We need to find a different way of
789  // logging these events if required.
790  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
791  OptimizationMarker::kLogFirstExecution,
792  Runtime::kFunctionFirstExecution);
793  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
794  OptimizationMarker::kCompileOptimized,
795  Runtime::kCompileOptimized_NotConcurrent);
796  TailCallRuntimeIfMarkerEquals(
797  masm, optimized_code_entry,
798  OptimizationMarker::kCompileOptimizedConcurrent,
799  Runtime::kCompileOptimized_Concurrent);
800 
801  {
802  // Otherwise, the marker is InOptimizationQueue, so fall through hoping
803  // that an interrupt will eventually update the slot with optimized code.
804  if (FLAG_debug_code) {
805  __ Cmp(
806  optimized_code_entry,
807  Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
808  __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
809  }
810  __ B(&fallthrough);
811  }
812  }
813 
814  {
815  // Optimized code slot is a weak reference.
816  __ bind(&optimized_code_slot_is_weak_ref);
817 
818  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
819 
820  // Check if the optimized code is marked for deopt. If it is, call the
821  // runtime to clear it.
822  Label found_deoptimized_code;
823  __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
824  Code::kCodeDataContainerOffset));
825  __ Ldr(
826  scratch2,
827  FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
828  __ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
829  &found_deoptimized_code);
830 
831  // Optimized code is good, get it into the closure and link the closure into
832  // the optimized functions list, then tail call the optimized code.
833  // The feedback vector is no longer used, so re-use it as a scratch
834  // register.
835  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
836  scratch2, scratch3, feedback_vector);
837  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
838  __ Add(x2, optimized_code_entry,
839  Operand(Code::kHeaderSize - kHeapObjectTag));
840  __ Jump(x2);
841 
842  // Optimized code slot contains deoptimized code, evict it and re-enter the
843  // closure's code.
844  __ bind(&found_deoptimized_code);
845  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
846  }
847 
848  // Fall-through if the optimized code cell is clear and there is no
849  // optimization marker.
850  __ bind(&fallthrough);
851 }
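
When the feedback slot holds a Smi, the code above is a compare-and-tail-call chain over the optimization marker. The same decision tree as a C++ switch (the enumerator-to-runtime mapping follows the calls above; the types are illustrative):

    enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                        kCompileOptimizedConcurrent, kInOptimizationQueue };
    enum class Action { kFallthrough, kFunctionFirstExecution,
                        kCompileNotConcurrent, kCompileConcurrent };

    Action DispatchOnMarker(Marker m) {
      switch (m) {
        case Marker::kLogFirstExecution:          return Action::kFunctionFirstExecution;
        case Marker::kCompileOptimized:           return Action::kCompileNotConcurrent;
        case Marker::kCompileOptimizedConcurrent: return Action::kCompileConcurrent;
        case Marker::kNone:
        case Marker::kInOptimizationQueue:        return Action::kFallthrough;
      }
      return Action::kFallthrough;  // Unreachable; keeps compilers happy.
    }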
852 
853 // Advance the current bytecode offset. This simulates what all bytecode
854 // handlers do upon completion of the underlying operation. Will bail out to a
855 // label if the bytecode (without prefix) is a return bytecode.
856 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
857  Register bytecode_array,
858  Register bytecode_offset,
859  Register bytecode, Register scratch1,
860  Label* if_return) {
861  Register bytecode_size_table = scratch1;
862  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
863  bytecode));
864 
865  __ Mov(bytecode_size_table, ExternalReference::bytecode_size_table_address());
866 
867  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
868  Label process_bytecode, extra_wide;
869  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
870  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
871  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
872  STATIC_ASSERT(3 ==
873  static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
874  __ Cmp(bytecode, Operand(0x3));
875  __ B(hi, &process_bytecode);
876  __ Tst(bytecode, Operand(0x1));
877  __ B(ne, &extra_wide);
878 
879  // Load the next bytecode and update table to the wide scaled table.
880  __ Add(bytecode_offset, bytecode_offset, Operand(1));
881  __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
882  __ Add(bytecode_size_table, bytecode_size_table,
883  Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
884  __ B(&process_bytecode);
885 
886  __ Bind(&extra_wide);
887  // Load the next bytecode and update table to the extra wide scaled table.
888  __ Add(bytecode_offset, bytecode_offset, Operand(1));
889  __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
890  __ Add(bytecode_size_table, bytecode_size_table,
891  Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
892 
893  __ Bind(&process_bytecode);
894 
895 // Bailout to the return label if this is a return bytecode.
896 #define JUMP_IF_EQUAL(NAME) \
897  __ Cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
898  __ B(if_return, eq);
899  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
900 #undef JUMP_IF_EQUAL
901 
902  // Otherwise, load the size of the current bytecode and advance the offset.
903  __ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
904  __ Add(bytecode_offset, bytecode_offset, scratch1);
905 }
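
The prefix handling above relies on Wide/ExtraWide (and their DebugBreak variants) occupying bytecode values 0 through 3, with odd values being the extra-wide forms, and on the size table being three consecutive blocks of kBytecodeCount int entries (single, wide, extra-wide). A sketch of the advance logic with the return-bytecode bailout elided:

    #include <cstdint>

    int AdvanceOffset(const uint8_t* bytecodes, const int32_t* size_table,
                      int bytecode_count, int offset) {
      int block = 0;  // Which scaling block of the size table to index.
      uint8_t bytecode = bytecodes[offset];
      if (bytecode <= 3) {               // Wide or ExtraWide prefix.
        block = (bytecode & 1) ? 2 : 1;  // Odd prefixes are the extra-wide forms.
        bytecode = bytecodes[++offset];  // Re-load the bytecode behind the prefix.
      }
      return offset + size_table[block * bytecode_count + bytecode];
    }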
906 
907 // Generate code for entering a JS function with the interpreter.
908 // On entry to the function the receiver and arguments have been pushed on the
909 // stack left to right. The actual argument count matches the formal parameter
910 // count expected by the function.
911 //
912 // The live registers are:
913 // - x1: the JS function object being called.
914 // - x3: the incoming new target or generator object
915 // - cp: our context.
916 // - fp: our caller's frame pointer.
917 // - lr: return address.
918 //
919 // The function builds an interpreter frame. See InterpreterFrameConstants in
920 // frames.h for its layout.
921 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
922  Register closure = x1;
923  Register feedback_vector = x2;
924 
925  // Load the feedback vector from the closure.
926  __ Ldr(feedback_vector,
927  FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
928  __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
929 
930  Label push_stack_frame;
931  // Check if feedback vector is valid. If valid, check for optimized code
932  // and update the invocation count. Otherwise, set up the stack frame.
933  __ CompareRoot(feedback_vector, RootIndex::kUndefinedValue);
934  __ B(eq, &push_stack_frame);
935 
936  // Read off the optimized code slot in the feedback vector, and if there
937  // is optimized code or an optimization marker, call that instead.
938  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
939 
940  // Increment invocation count for the function.
941  // MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so it is safe to reuse it here.
942  __ Ldr(w10, FieldMemOperand(feedback_vector,
943  FeedbackVector::kInvocationCountOffset));
944  __ Add(w10, w10, Operand(1));
945  __ Str(w10, FieldMemOperand(feedback_vector,
946  FeedbackVector::kInvocationCountOffset));
947 
948  // Open a frame scope to indicate that there is a frame on the stack. The
949  // MANUAL indicates that the scope shouldn't actually generate code to set up
950  // the frame (that is done below).
951  __ Bind(&push_stack_frame);
952  FrameScope frame_scope(masm, StackFrame::MANUAL);
953  __ Push(lr, fp, cp, closure);
954  __ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
955 
956  // Get the bytecode array from the function object and load it into
957  // kInterpreterBytecodeArrayRegister.
958  Label has_bytecode_array;
959  __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
960  __ Ldr(kInterpreterBytecodeArrayRegister,
961  FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
962  __ CompareObjectType(kInterpreterBytecodeArrayRegister, x11, x11,
963  INTERPRETER_DATA_TYPE);
964  __ B(ne, &has_bytecode_array);
965  __ Ldr(kInterpreterBytecodeArrayRegister,
966  FieldMemOperand(kInterpreterBytecodeArrayRegister,
967  InterpreterData::kBytecodeArrayOffset));
968  __ Bind(&has_bytecode_array);
969 
970  // Check function data field is actually a BytecodeArray object.
971  if (FLAG_debug_code) {
972  __ AssertNotSmi(
973  kInterpreterBytecodeArrayRegister,
974  AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
975  __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
976  BYTECODE_ARRAY_TYPE);
977  __ Assert(
978  eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
979  }
980 
981  // Reset code age.
982  __ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
983  __ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
984  BytecodeArray::kBytecodeAgeOffset));
985 
986  // Load the initial bytecode offset.
987  __ Mov(kInterpreterBytecodeOffsetRegister,
988  Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
989 
990  // Push bytecode array and Smi tagged bytecode array offset.
991  __ SmiTag(x0, kInterpreterBytecodeOffsetRegister);
992  __ Push(kInterpreterBytecodeArrayRegister, x0);
993 
994  // Allocate the local and temporary register file on the stack.
995  {
996  // Load frame size from the BytecodeArray object.
997  __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
998  BytecodeArray::kFrameSizeOffset));
999 
1000  // Do a stack check to ensure we don't go over the limit.
1001  Label ok;
1002  __ Sub(x10, sp, Operand(x11));
1003  __ CompareRoot(x10, RootIndex::kRealStackLimit);
1004  __ B(hs, &ok);
1005  __ CallRuntime(Runtime::kThrowStackOverflow);
1006  __ Bind(&ok);
1007 
1008  // If ok, push undefined as the initial value for all register file entries.
1009  // Note: there should always be at least one stack slot for the return
1010  // register in the register file.
1011  Label loop_header;
1012  __ LoadRoot(x10, RootIndex::kUndefinedValue);
1013  __ Lsr(x11, x11, kPointerSizeLog2);
1014  // Round up the number of registers to a multiple of 2, to align the stack
1015  // to 16 bytes.
1016  __ Add(x11, x11, 1);
1017  __ Bic(x11, x11, 1);
1018  __ PushMultipleTimes(x10, x11);
1019  __ Bind(&loop_header);
1020  }
1021 
1022  // If the bytecode array has a valid incoming new target or generator object
1023  // register, initialize it with incoming value which was passed in x3.
1024  Label no_incoming_new_target_or_generator_register;
1025  __ Ldrsw(x10,
1026  FieldMemOperand(
1027  kInterpreterBytecodeArrayRegister,
1028  BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1029  __ Cbz(x10, &no_incoming_new_target_or_generator_register);
1030  __ Str(x3, MemOperand(fp, x10, LSL, kPointerSizeLog2));
1031  __ Bind(&no_incoming_new_target_or_generator_register);
1032 
1033  // Load accumulator with undefined.
1034  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1035 
1036  // Load the dispatch table into a register and dispatch to the bytecode
1037  // handler at the current bytecode offset.
1038  Label do_dispatch;
1039  __ bind(&do_dispatch);
1040  __ Mov(
1041  kInterpreterDispatchTableRegister,
1042  ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1043 #if defined(V8_OS_WIN)
1044  __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
1045  kInterpreterBytecodeOffsetRegister));
1046  __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
1047 #else
1048  __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
1049  kInterpreterBytecodeOffsetRegister));
1050  __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
1051 #endif
1052  __ Ldr(kJavaScriptCallCodeStartRegister,
1053  MemOperand(kInterpreterDispatchTableRegister, x1));
1054  __ Call(kJavaScriptCallCodeStartRegister);
1055  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1056 
1057  // Any returns to the entry trampoline are either due to the return bytecode
1058  // or the interpreter tail calling a builtin and then a dispatch.
1059 
1060  // Get bytecode array and bytecode offset from the stack frame.
1061  __ Ldr(kInterpreterBytecodeArrayRegister,
1062  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1063  __ Ldr(kInterpreterBytecodeOffsetRegister,
1064  MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1065  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1066 
1067  // Either return, or advance to the next bytecode and dispatch.
1068  Label do_return;
1069  __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
1070  kInterpreterBytecodeOffsetRegister));
1071  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1072  kInterpreterBytecodeOffsetRegister, x1, x2,
1073  &do_return);
1074  __ B(&do_dispatch);
1075 
1076  __ bind(&do_return);
1077  // The return value is in x0.
1078  LeaveInterpreterFrame(masm, x2);
1079  __ Ret();
1080 }
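
Dispatch itself is a scaled table load: the bytecode value indexes an array of handler entry points. In miniature (function pointers stand in for the Code objects in the dispatch table):

    #include <cstdint>

    using Handler = void (*)();

    void Dispatch(const uint8_t* bytecode_array, int offset,
                  Handler* dispatch_table) {
      uint8_t bytecode = bytecode_array[offset];  // Ldrb from array + offset.
      dispatch_table[bytecode]();  // Ldr at table + (bytecode << kPointerSizeLog2), then Call.
    }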
1081 
1082 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
1083  Register num_args,
1084  Register first_arg_index,
1085  Register spread_arg_out,
1086  ConvertReceiverMode receiver_mode,
1087  InterpreterPushArgsMode mode) {
1088  Register last_arg_addr = x10;
1089  Register stack_addr = x11;
1090  Register slots_to_claim = x12;
1091  Register slots_to_copy = x13; // May include receiver, unlike num_args.
1092 
1093  DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
1094  slots_to_claim, slots_to_copy));
1095  // spread_arg_out may alias with the first_arg_index input.
1096  DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
1097  slots_to_copy));
1098 
1099  // Add one slot for the receiver.
1100  __ Add(slots_to_claim, num_args, 1);
1101 
1102  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1103  // Exclude final spread from slots to claim and the number of arguments.
1104  __ Sub(slots_to_claim, slots_to_claim, 1);
1105  __ Sub(num_args, num_args, 1);
1106  }
1107 
1108  // Add a stack check before pushing arguments.
1109  Label stack_overflow, done;
1110  Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
1111  __ B(&done);
1112  __ Bind(&stack_overflow);
1113  __ TailCallRuntime(Runtime::kThrowStackOverflow);
1114  __ Unreachable();
1115  __ Bind(&done);
1116 
1117  // Round up to an even number of slots and claim them.
1118  __ Add(slots_to_claim, slots_to_claim, 1);
1119  __ Bic(slots_to_claim, slots_to_claim, 1);
1120  __ Claim(slots_to_claim);
1121 
1122  {
1123  // Store padding, which may be overwritten.
1124  UseScratchRegisterScope temps(masm);
1125  Register scratch = temps.AcquireX();
1126  __ Sub(scratch, slots_to_claim, 1);
1127  __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
1128  }
1129 
1130  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1131  // Store "undefined" as the receiver arg if we need to.
1132  Register receiver = x14;
1133  __ LoadRoot(receiver, RootIndex::kUndefinedValue);
1134  __ SlotAddress(stack_addr, num_args);
1135  __ Str(receiver, MemOperand(stack_addr));
1136  __ Mov(slots_to_copy, num_args);
1137  } else {
1138  // If we're not given an explicit receiver to store, we'll need to copy it
1139  // together with the rest of the arguments.
1140  __ Add(slots_to_copy, num_args, 1);
1141  }
1142 
1143  __ Sub(last_arg_addr, first_arg_index,
1144  Operand(slots_to_copy, LSL, kPointerSizeLog2));
1145  __ Add(last_arg_addr, last_arg_addr, kPointerSize);
1146 
1147  // Load the final spread argument into spread_arg_out, if necessary.
1148  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1149  __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kPointerSize));
1150  }
1151 
1152  // Copy the rest of the arguments.
1153  __ SlotAddress(stack_addr, 0);
1154  __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
1155 }
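
The slot bookkeeping above in plain arithmetic: one slot is added for the receiver, a trailing spread is excluded from both the claim and the argument count, and the claim is rounded up to an even number of slots. An illustrative sketch:

    struct Claim { int num_args; int slots_to_claim; };

    Claim ComputeClaim(int num_args, bool with_final_spread) {
      int slots = num_args + 1;                        // Receiver.
      if (with_final_spread) { --slots; --num_args; }  // Spread is handled separately.
      slots = (slots + 1) & ~1;                        // Round up to an even count.
      return {num_args, slots};
    }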
1156 
1157 // static
1158 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1159  MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1160  InterpreterPushArgsMode mode) {
1161  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1162  // ----------- S t a t e -------------
1163  // -- x0 : the number of arguments (not including the receiver)
1164  // -- x2 : the address of the first argument to be pushed. Subsequent
1165  // arguments should be consecutive above this, in the same order as
1166  // they are to be pushed onto the stack.
1167  // -- x1 : the target to call (can be any Object).
1168  // -----------------------------------
1169 
1170  // Push the arguments. num_args may be updated according to mode.
1171  // spread_arg_out will be updated to contain the last spread argument, when
1172  // mode == InterpreterPushArgsMode::kWithFinalSpread.
1173  Register num_args = x0;
1174  Register first_arg_index = x2;
1175  Register spread_arg_out =
1176  (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
1177  Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
1178  receiver_mode, mode);
1179 
1180  // Call the target.
1181  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1182  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1183  RelocInfo::CODE_TARGET);
1184  } else {
1185  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1186  RelocInfo::CODE_TARGET);
1187  }
1188 }
1189 
1190 // static
1191 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1192  MacroAssembler* masm, InterpreterPushArgsMode mode) {
1193  // ----------- S t a t e -------------
1194  // -- x0 : argument count (not including receiver)
1195  // -- x3 : new target
1196  // -- x1 : constructor to call
1197  // -- x2 : allocation site feedback if available, undefined otherwise
1198  // -- x4 : address of the first argument
1199  // -----------------------------------
1200  __ AssertUndefinedOrAllocationSite(x2);
1201 
1202  // Push the arguments. num_args may be updated according to mode.
1203  // spread_arg_out will be updated to contain the last spread argument, when
1204  // mode == InterpreterPushArgsMode::kWithFinalSpread.
1205  Register num_args = x0;
1206  Register first_arg_index = x4;
1207  Register spread_arg_out =
1208  (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
1209  Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
1210  ConvertReceiverMode::kNullOrUndefined, mode);
1211 
1212  if (mode == InterpreterPushArgsMode::kArrayFunction) {
1213  __ AssertFunction(x1);
1214 
1215  // Tail call to the array construct stub (still in the caller
1216  // context at this point).
1217  Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1218  __ Jump(code, RelocInfo::CODE_TARGET);
1219  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1220  // Call the constructor with x0, x1, and x3 unmodified.
1221  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1222  RelocInfo::CODE_TARGET);
1223  } else {
1224  DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1225  // Call the constructor with x0, x1, and x3 unmodified.
1226  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1227  }
1228 }
1229 
1230 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1231  // Set the return address to the correct point in the interpreter entry
1232  // trampoline.
1233  Label builtin_trampoline, trampoline_loaded;
1234  Smi interpreter_entry_return_pc_offset(
1235  masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1236  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
1237 
1238  // If the SFI function_data is an InterpreterData, the function will have a
1239  // custom copy of the interpreter entry trampoline for profiling. If so,
1240  // get the custom trampoline, otherwise grab the entry address of the global
1241  // trampoline.
1242  __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1243  __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
1244  __ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
1245  __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
1246  kInterpreterDispatchTableRegister,
1247  INTERPRETER_DATA_TYPE);
1248  __ B(ne, &builtin_trampoline);
1249 
1250  __ Ldr(x1,
1251  FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
1252  __ Add(x1, x1, Operand(Code::kHeaderSize - kHeapObjectTag));
1253  __ B(&trampoline_loaded);
1254 
1255  __ Bind(&builtin_trampoline);
1256  __ Mov(x1, ExternalReference::
1257  address_of_interpreter_entry_trampoline_instruction_start(
1258  masm->isolate()));
1259  __ Ldr(x1, MemOperand(x1));
1260 
1261  __ Bind(&trampoline_loaded);
1262  __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value()));
1263 
1264  // Initialize the dispatch table register.
1265  __ Mov(
1266  kInterpreterDispatchTableRegister,
1267  ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1268 
1269  // Get the bytecode array pointer from the frame.
1270  __ Ldr(kInterpreterBytecodeArrayRegister,
1271  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1272 
1273  if (FLAG_debug_code) {
1274  // Check function data field is actually a BytecodeArray object.
1275  __ AssertNotSmi(
1276  kInterpreterBytecodeArrayRegister,
1277  AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1278  __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
1279  BYTECODE_ARRAY_TYPE);
1280  __ Assert(
1281  eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1282  }
1283 
1284  // Get the target bytecode offset from the frame.
1285  __ Ldr(kInterpreterBytecodeOffsetRegister,
1286  MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1287  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1288 
1289  // Dispatch to the target bytecode.
1290 #if defined(V8_OS_WIN)
1291  __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
1292  kInterpreterBytecodeOffsetRegister));
1293  __ Mov(x1, Operand(x23, LSL, kPointerSizeLog2));
1294 #else
1295  __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
1296  kInterpreterBytecodeOffsetRegister));
1297  __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
1298 #endif
1299  __ Ldr(kJavaScriptCallCodeStartRegister,
1300  MemOperand(kInterpreterDispatchTableRegister, x1));
1301  __ Jump(kJavaScriptCallCodeStartRegister);
1302 }
1303 
1304 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1305  // Get bytecode array and bytecode offset from the stack frame.
1306  __ ldr(kInterpreterBytecodeArrayRegister,
1307  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1308  __ ldr(kInterpreterBytecodeOffsetRegister,
1309  MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1310  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1311 
1312  // Load the current bytecode.
1313  __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
1314  kInterpreterBytecodeOffsetRegister));
1315 
1316  // Advance to the next bytecode.
1317  Label if_return;
1318  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1319  kInterpreterBytecodeOffsetRegister, x1, x2,
1320  &if_return);
1321 
1322  // Convert new bytecode offset to a Smi and save in the stackframe.
1323  __ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
1324  __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1325 
1326  Generate_InterpreterEnterBytecode(masm);
1327 
1328  // We should never take the if_return path.
1329  __ bind(&if_return);
1330  __ Abort(AbortReason::kInvalidBytecodeAdvance);
1331 }
1332 
1333 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1334  Generate_InterpreterEnterBytecode(masm);
1335 }
1336 
1337 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
1338  // ----------- S t a t e -------------
1339  // -- x0 : argument count (preserved for callee)
1340  // -- x1 : target function (preserved for callee)
1341  // -- x3 : new target (preserved for callee)
1342  // -----------------------------------
1343  Register argc = x0;
1344  Register target = x1;
1345  Register new_target = x3;
1346 
1347  Label failed;
1348  {
1349  FrameScope scope(masm, StackFrame::INTERNAL);
1350 
1351  // Push argument count, a copy of the target function and the new target,
1352  // together with some padding to maintain 16-byte alignment.
1353  __ SmiTag(argc);
1354  __ Push(argc, target, new_target, padreg);
1355 
1356  // Push another copy of the target function as a parameter to the runtime call
1357  // and copy the rest of the arguments from the caller (stdlib, foreign, heap).
1358  Label args_done;
1359  Register undef = x10;
1360  Register scratch1 = x12;
1361  Register scratch2 = x13;
1362  Register scratch3 = x14;
1363  __ LoadRoot(undef, RootIndex::kUndefinedValue);
1364 
1365  Label at_least_one_arg;
1366  Label three_args;
1367  DCHECK_NULL(Smi::kZero);
1368  __ Cbnz(argc, &at_least_one_arg);
1369 
1370  // No arguments.
1371  __ Push(target, undef, undef, undef);
1372  __ B(&args_done);
1373 
1374  __ Bind(&at_least_one_arg);
1375  // Load two arguments, though we may only use one (for the one arg case).
1376  __ Ldp(scratch2, scratch1,
1377  MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
1378 
1379  // Set flags for determining the value of smi-tagged argc.
1380  // lt => 1, eq => 2, gt => 3.
1381  __ Cmp(argc, Smi::FromInt(2));
1382  __ B(gt, &three_args);
1383 
1384  // One or two arguments.
1385  // If there is one argument (flags are lt), scratch2 contains that argument,
1386  // and scratch1 must be undefined.
1387  __ CmovX(scratch1, scratch2, lt);
1388  __ CmovX(scratch2, undef, lt);
1389  __ Push(target, scratch1, scratch2, undef);
1390  __ B(&args_done);
1391 
1392  // Three arguments.
1393  __ Bind(&three_args);
1394  __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
1395  2 * kPointerSize));
1396  __ Push(target, scratch3, scratch1, scratch2);
1397 
1398  __ Bind(&args_done);
1399 
1400  // Call the runtime; on success, unwind this frame and the parent frame.
1401  __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
1402 
1403  // A smi 0 is returned on failure, an object on success.
1404  __ JumpIfSmi(x0, &failed);
1405 
1406  // Peek the argument count from the stack, untagging at the same time.
1407  __ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
1408  __ Drop(4);
1409  scope.GenerateLeaveFrame();
1410 
1411  // Drop arguments and receiver.
1412  __ DropArguments(x4, TurboAssembler::kCountExcludesReceiver);
1413  __ Ret();
1414 
1415  __ Bind(&failed);
1416  // Restore target function and new target.
1417  __ Pop(padreg, new_target, target, argc);
1418  __ SmiUntag(argc);
1419  }
1420  // On failure, tail call back to regular JS by re-calling the function,
1421  // which has been reset to the compile-lazy builtin.
1422  __ Ldr(x4, FieldMemOperand(target, JSFunction::kCodeOffset));
1423  __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
1424  __ Jump(x4);
1425 }
1426 
1427 namespace {
1428 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1429  bool java_script_builtin,
1430  bool with_result) {
1431  const RegisterConfiguration* config(RegisterConfiguration::Default());
1432  int allocatable_register_count = config->num_allocatable_general_registers();
1433  int frame_size = BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp +
1434  (allocatable_register_count +
1435  BuiltinContinuationFrameConstants::PaddingSlotCount(
1436  allocatable_register_count)) *
1437  kPointerSize;
1438 
1439  // Set up frame pointer.
1440  __ Add(fp, sp, frame_size);
1441 
1442  if (with_result) {
1443  // Overwrite the hole inserted by the deoptimizer with the return value from
1444  // the LAZY deopt point.
1445  __ Str(x0,
1446  MemOperand(fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
1447  }
1448 
1449  // Restore registers in pairs.
1450  int offset = -BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
1451  allocatable_register_count * kPointerSize;
1452  for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
1453  int code1 = config->GetAllocatableGeneralCode(i);
1454  int code2 = config->GetAllocatableGeneralCode(i - 1);
1455  Register reg1 = Register::from_code(code1);
1456  Register reg2 = Register::from_code(code2);
1457  __ Ldp(reg1, reg2, MemOperand(fp, offset));
1458  offset += 2 * kPointerSize;
1459  }
1460 
1461  // Restore first register separately, if number of registers is odd.
1462  if (allocatable_register_count % 2 != 0) {
1463  int code = config->GetAllocatableGeneralCode(0);
1464  __ Ldr(Register::from_code(code), MemOperand(fp, offset));
1465  }
1466 
1467  if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
1468 
1469  // Load builtin object.
1470  UseScratchRegisterScope temps(masm);
1471  Register builtin = temps.AcquireX();
1472  __ Ldr(builtin,
1473  MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
1474 
1475  // Restore fp, lr.
1476  __ Mov(sp, fp);
1477  __ Pop(fp, lr);
1478 
1479  // Call builtin.
1480  __ Add(builtin, builtin, Code::kHeaderSize - kHeapObjectTag);
1481  __ Br(builtin);
1482 }
1483 } // namespace
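
// A minimal sketch of the continuation frame consumed above (layout assumed
// from BuiltinContinuationFrameConstants; offsets are indicative only):
//   fp[kCallerSPOffset] : result slot; the deoptimizer stores a hole here,
//                         which is replaced by x0 when with_result is true.
//   fp[kBuiltinOffset]  : Code object of the builtin to continue into.
//   below fp            : the saved allocatable general registers, plus one
//                         padding slot when their count is odd, so the
//                         Ldp-pair restore loop stays 16-byte aligned.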
1484 
1485 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1486  Generate_ContinueToBuiltinHelper(masm, false, false);
1487 }
1488 
1489 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1490  MacroAssembler* masm) {
1491  Generate_ContinueToBuiltinHelper(masm, false, true);
1492 }
1493 
1494 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1495  Generate_ContinueToBuiltinHelper(masm, true, false);
1496 }
1497 
1498 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1499  MacroAssembler* masm) {
1500  Generate_ContinueToBuiltinHelper(masm, true, true);
1501 }
1502 
1503 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1504  {
1505  FrameScope scope(masm, StackFrame::INTERNAL);
1506  __ CallRuntime(Runtime::kNotifyDeoptimized);
1507  }
1508 
1509  // Pop TOS register and padding.
1510  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
1511  __ Pop(x0, padreg);
1512  __ Ret();
1513 }
1514 
1515 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1516  // Lookup the function in the JavaScript frame.
1517  __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1518  __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
1519 
1520  {
1521  FrameScope scope(masm, StackFrame::INTERNAL);
1522  // Pass function as argument.
1523  __ PushArgument(x0);
1524  __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1525  }
1526 
1527  // If the code object is null, just return to the caller.
1528  Label skip;
1529  __ CompareAndBranch(x0, Smi::zero(), ne, &skip);
1530  __ Ret();
1531 
1532  __ Bind(&skip);
1533 
1534  // Drop the handler frame that may be sitting on top of the actual
1535  // JavaScript frame. This is the case when OSR is triggered from bytecode.
1536  __ LeaveFrame(StackFrame::STUB);
1537 
1538  // Load deoptimization data from the code object.
1539  // <deopt_data> = <code>[#deoptimization_data_offset]
1540  __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
1541 
1542  // Load the OSR entrypoint offset from the deoptimization data.
1543  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1544  __ SmiUntag(x1,
1545  FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
1546  DeoptimizationData::kOsrPcOffsetIndex)));
1547 
1548  // Compute the target address = code_obj + header_size + osr_offset
1549  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1550  __ Add(x0, x0, x1);
1551  __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
1552 
1553  // And "return" to the OSR entry point of the function.
1554  __ Ret();
1555 }
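
// The entry computation above, written as plain arithmetic (a sketch; the
// names mirror the comments rather than real variables):
//   entry = code_obj + Code::kHeaderSize - kHeapObjectTag + osr_pc_offset
// i.e. the untagged start of the code object's instruction area plus the
// offset recorded in the deoptimization data; it is placed in lr so that
// Ret() "returns" straight into the optimized code.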
1556 
1557 // static
1558 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1559  // ----------- S t a t e -------------
1560  // -- x0 : argc
1561  // -- sp[0] : argArray (if argc == 2)
1562  // -- sp[8] : thisArg (if argc >= 1)
1563  // -- sp[16] : receiver
1564  // -----------------------------------
1565  ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
1566 
1567  Register argc = x0;
1568  Register arg_array = x2;
1569  Register receiver = x1;
1570  Register this_arg = x0;
1571  Register undefined_value = x3;
1572  Register null_value = x4;
1573 
1574  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1575  __ LoadRoot(null_value, RootIndex::kNullValue);
1576 
1577  // 1. Load receiver into x1, argArray into x2 (if present), remove all
1578  // arguments from the stack (including the receiver), and push thisArg (if
1579  // present) instead.
1580  {
1581  Register saved_argc = x10;
1582  Register scratch = x11;
1583 
1584  // Push two undefined values on the stack, to put it in a consistent state
1585  // so that we can always read three arguments from it.
1586  __ Push(undefined_value, undefined_value);
1587 
1588  // The state of the stack (with arrows pointing to the slots we will read)
1589  // is as follows:
1590  //
1591  // argc = 0 argc = 1 argc = 2
1592  // -> sp[16]: receiver -> sp[24]: receiver -> sp[32]: receiver
1593  // -> sp[8]: undefined -> sp[16]: this_arg -> sp[24]: this_arg
1594  // -> sp[0]: undefined -> sp[8]: undefined -> sp[16]: arg_array
1595  // sp[0]: undefined sp[8]: undefined
1596  // sp[0]: undefined
1597  //
1598  // There are now always three arguments to read, in the slots starting from
1599  // slot argc.
1600  __ SlotAddress(scratch, argc);
1601 
1602  __ Mov(saved_argc, argc);
1603  __ Ldp(arg_array, this_arg, MemOperand(scratch)); // Overwrites argc.
1604  __ Ldr(receiver, MemOperand(scratch, 2 * kPointerSize));
1605 
1606  __ Drop(2); // Drop the undefined values we pushed above.
1607  __ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
1608 
1609  __ PushArgument(this_arg);
1610  }
1611 
1612  // ----------- S t a t e -------------
1613  // -- x2 : argArray
1614  // -- x1 : receiver
1615  // -- sp[0] : thisArg
1616  // -----------------------------------
1617 
1618  // 2. We don't need to check explicitly for callable receiver here,
1619  // since that's the first thing the Call/CallWithArrayLike builtins
1620  // will do.
1621 
1622  // 3. Tail call with no arguments if argArray is null or undefined.
1623  Label no_arguments;
1624  __ Cmp(arg_array, null_value);
1625  __ Ccmp(arg_array, undefined_value, ZFlag, ne);
1626  __ B(eq, &no_arguments);
1627 
1628  // 4a. Apply the receiver to the given argArray.
1629  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1630  RelocInfo::CODE_TARGET);
1631 
1632  // 4b. The argArray is either null or undefined, so we tail call without any
1633  // arguments to the receiver.
1634  __ Bind(&no_arguments);
1635  {
1636  __ Mov(x0, 0);
1637  DCHECK(receiver.Is(x1));
1638  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1639  }
1640 }
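
// End-to-end sketch (illustrative): a call like f.apply(thisArg, [a, b])
// arrives here with argc == 2 and the stack <argArray, thisArg, f>. The
// block above collapses this to a single pushed thisArg, with f in x1 and
// argArray in x2, and defers to CallWithArrayLike, which spreads [a, b]
// onto the stack and performs the equivalent of f.call(thisArg, a, b).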
1641 
1642 // static
1643 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1644  Register argc = x0;
1645  Register function = x1;
1646 
1647  ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
1648 
1649  // 1. Get the callable to call (passed as receiver) from the stack.
1650  __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
1651 
1652  // 2. Handle case with no arguments.
1653  {
1654  Label non_zero;
1655  Register scratch = x10;
1656  __ Cbnz(argc, &non_zero);
1657  __ LoadRoot(scratch, RootIndex::kUndefinedValue);
1658  // Overwrite receiver with undefined, which will be the new receiver.
1659  // We do not need to overwrite the padding slot above it with anything.
1660  __ Poke(scratch, 0);
1661  // Call function. The argument count is already zero.
1662  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1663  __ Bind(&non_zero);
1664  }
1665 
1666  // 3. Overwrite the receiver with padding. If argc is odd, this is all we
1667  // need to do.
1668  Label arguments_ready;
1669  __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
1670  __ Tbnz(argc, 0, &arguments_ready);
1671 
1672  // 4. If argc is even:
1673  // Copy arguments two slots higher in memory, overwriting the original
1674  // receiver and padding.
1675  {
1676  Label loop;
1677  Register copy_from = x10;
1678  Register copy_to = x11;
1679  Register count = x12;
1680  Register last_arg_slot = x13;
1681  __ Mov(count, argc);
1682  __ Sub(last_arg_slot, argc, 1);
1683  __ SlotAddress(copy_from, last_arg_slot);
1684  __ Add(copy_to, copy_from, 2 * kPointerSize);
1685  __ CopyDoubleWords(copy_to, copy_from, count,
1686  TurboAssembler::kSrcLessThanDst);
1687  // Drop two slots. These are copies of the last two arguments.
1688  __ Drop(2);
1689  }
1690 
1691  // 5. Adjust argument count to make the original first argument the new
1692  // receiver and call the callable.
1693  __ Bind(&arguments_ready);
1694  __ Sub(argc, argc, 1);
1695  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1696 }
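
// Worked example of the shuffle above (illustrative): for f.call(a, b),
// argc == 2 (even) and the stack is <b, a, f, padding>. Step 3 overwrites
// f's slot with padding, step 4 copies <b, a> two slots up over the old
// receiver and padding slots and drops the two vacated slots, and step 5
// decrements argc, leaving <b, a> with a as the new receiver -- i.e. f(b)
// with this == a. For odd argc the copy is skipped, since replacing the
// receiver with padding already leaves the stack correctly aligned.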
1697 
1698 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1699  // ----------- S t a t e -------------
1700  // -- x0 : argc
1701  // -- sp[0] : argumentsList (if argc == 3)
1702  // -- sp[8] : thisArgument (if argc >= 2)
1703  // -- sp[16] : target (if argc >= 1)
1704  // -- sp[24] : receiver
1705  // -----------------------------------
1706  ASM_LOCATION("Builtins::Generate_ReflectApply");
1707 
1708  Register argc = x0;
1709  Register arguments_list = x2;
1710  Register target = x1;
1711  Register this_argument = x4;
1712  Register undefined_value = x3;
1713 
1714  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1715 
1716  // 1. Load target into x1 (if present), argumentsList into x2 (if present),
1717  // remove all arguments from the stack (including the receiver), and push
1718  // thisArgument (if present) instead.
1719  {
1720  // Push four undefined values on the stack, to put it in a consistent state
1721  // so that we can always read the three arguments we need from it. The
1722  // fourth value is used for stack alignment.
1723  __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
1724 
1725  // The state of the stack (with arrows pointing to the slots we will read)
1726  // is as follows:
1727  //
1728  // argc = 0 argc = 1 argc = 2
1729  // sp[32]: receiver sp[40]: receiver sp[48]: receiver
1730  // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
1731  // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: this_argument
1732  // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
1733  // sp[0]: undefined sp[8]: undefined sp[16]: undefined
1734  // sp[0]: undefined sp[8]: undefined
1735  // sp[0]: undefined
1736  // argc = 3
1737  // sp[56]: receiver
1738  // -> sp[48]: target
1739  // -> sp[40]: this_argument
1740  // -> sp[32]: arguments_list
1741  // sp[24]: undefined
1742  // sp[16]: undefined
1743  // sp[8]: undefined
1744  // sp[0]: undefined
1745  //
1746  // There are now always three arguments to read, in the slots starting from
1747  // slot (argc + 1).
1748  Register scratch = x10;
1749  __ SlotAddress(scratch, argc);
1750  __ Ldp(arguments_list, this_argument,
1751  MemOperand(scratch, 1 * kPointerSize));
1752  __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
1753 
1754  __ Drop(4); // Drop the undefined values we pushed above.
1755  __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
1756 
1757  __ PushArgument(this_argument);
1758  }
1759 
1760  // ----------- S t a t e -------------
1761  // -- x2 : argumentsList
1762  // -- x1 : target
1763  // -- sp[0] : thisArgument
1764  // -----------------------------------
1765 
1766  // 2. We don't need to check explicitly for callable target here,
1767  // since that's the first thing the Call/CallWithArrayLike builtins
1768  // will do.
1769 
1770  // 3. Apply the target to the given argumentsList.
1771  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1772  RelocInfo::CODE_TARGET);
1773 }
1774 
1775 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1776  // ----------- S t a t e -------------
1777  // -- x0 : argc
1778  // -- sp[0] : new.target (optional)
1779  // -- sp[8] : argumentsList
1780  // -- sp[16] : target
1781  // -- sp[24] : receiver
1782  // -----------------------------------
1783  ASM_LOCATION("Builtins::Generate_ReflectConstruct");
1784 
1785  Register argc = x0;
1786  Register arguments_list = x2;
1787  Register target = x1;
1788  Register new_target = x3;
1789  Register undefined_value = x4;
1790 
1791  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1792 
1793  // 1. Load target into x1 (if present), argumentsList into x2 (if present),
1794  // new.target into x3 (if present, otherwise use target), remove all
1795  // arguments from the stack (including the receiver), and push thisArgument
1796  // (if present) instead.
1797  {
1798  // Push four undefined values on the stack, to put it in a consistent state
1799  // so that we can always read the three arguments we need from it. The
1800  // fourth value is used for stack alignment.
1801  __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
1802 
1803  // The state of the stack (with arrows pointing to the slots we will read)
1804  // is as follows:
1805  //
1806  // argc = 0 argc = 1 argc = 2
1807  // sp[32]: receiver sp[40]: receiver sp[48]: receiver
1808  // -> sp[24]: undefined -> sp[32]: target -> sp[40]: target
1809  // -> sp[16]: undefined -> sp[24]: undefined -> sp[32]: arguments_list
1810  // -> sp[8]: undefined -> sp[16]: undefined -> sp[24]: undefined
1811  // sp[0]: undefined sp[8]: undefined sp[16]: undefined
1812  // sp[0]: undefined sp[8]: undefined
1813  // sp[0]: undefined
1814  // argc = 3
1815  // sp[56]: receiver
1816  // -> sp[48]: target
1817  // -> sp[40]: arguments_list
1818  // -> sp[32]: new_target
1819  // sp[24]: undefined
1820  // sp[16]: undefined
1821  // sp[8]: undefined
1822  // sp[0]: undefined
1823  //
1824  // There are now always three arguments to read, in the slots starting from
1825  // slot (argc + 1).
1826  Register scratch = x10;
1827  __ SlotAddress(scratch, argc);
1828  __ Ldp(new_target, arguments_list, MemOperand(scratch, 1 * kPointerSize));
1829  __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
1830 
1831  __ Cmp(argc, 2);
1832  __ CmovX(new_target, target, ls); // target if argc <= 2.
1833 
1834  __ Drop(4); // Drop the undefined values we pushed above.
1835  __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
1836 
1837  // Push receiver (undefined).
1838  __ PushArgument(undefined_value);
1839  }
1840 
1841  // ----------- S t a t e -------------
1842  // -- x2 : argumentsList
1843  // -- x1 : target
1844  // -- x3 : new.target
1845  // -- sp[0] : receiver (undefined)
1846  // -----------------------------------
1847 
1848  // 2. We don't need to check explicitly for constructor target here,
1849  // since that's the first thing the Construct/ConstructWithArrayLike
1850  // builtins will do.
1851 
1852  // 3. We don't need to check explicitly for constructor new.target here,
1853  // since that's the second thing the Construct/ConstructWithArrayLike
1854  // builtins will do.
1855 
1856  // 4. Construct the target with the given new.target and argumentsList.
1857  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1858  RelocInfo::CODE_TARGET);
1859 }
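
// Sketch of the new.target selection above (illustrative): after the loads,
//   Cmp(argc, 2); CmovX(new_target, target, ls);
// implements new_target = (argc <= 2) ? target : new_target, matching
// Reflect.construct, whose third parameter defaults to the target itself.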
1860 
1861 namespace {
1862 
1863 void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
1864  __ Push(lr, fp);
1865  __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
1866  __ Push(x11, x1); // x1: function
1867  __ SmiTag(x11, x0); // x0: number of arguments.
1868  __ Push(x11, padreg);
1869  __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
1870 }
1871 
1872 void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1873  // ----------- S t a t e -------------
1874  // -- x0 : result being passed through
1875  // -----------------------------------
1876  // Get the number of arguments passed (as a smi), tear down the frame and
1877  // then drop the parameters and the receiver.
1878  __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1879  __ Mov(sp, fp);
1880  __ Pop(fp, lr);
1881 
1882  // Drop actual parameters and receiver.
1883  __ SmiUntag(x10);
1884  __ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
1885 }
1886 
1887 // Prepares the stack for copying the varargs. First we claim the necessary
1888 // slots, taking care of potential padding. Then we copy the existing arguments
1889 // one slot up or one slot down, as needed.
1890 void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
1891  Register len) {
1892  Label len_odd, exit;
1893  Register slots_to_copy = x10; // If needed.
1894  __ Add(slots_to_copy, argc, 1);
1895  __ Add(argc, argc, len);
1896  __ Tbnz(len, 0, &len_odd);
1897  __ Claim(len);
1898  __ B(&exit);
1899 
1900  __ Bind(&len_odd);
1901  // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
1902  // one extra padding slot. If argc is odd, we know that the original arguments
1903  // will have a padding slot we can reuse (since len is odd), so
1904  // slots_to_claim = len - 1.
1905  {
1906  Register scratch = x11;
1907  Register slots_to_claim = x12;
1908  __ Add(slots_to_claim, len, 1);
1909  __ And(scratch, argc, 1);
1910  __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
1911  __ Claim(slots_to_claim);
1912  }
1913 
1914  Label copy_down;
1915  __ Tbz(slots_to_copy, 0, &copy_down);
1916 
1917  // Copy existing arguments one slot up.
1918  {
1919  Register src = x11;
1920  Register dst = x12;
1921  Register scratch = x13;
1922  __ Sub(scratch, argc, 1);
1923  __ SlotAddress(src, scratch);
1924  __ SlotAddress(dst, argc);
1925  __ CopyDoubleWords(dst, src, slots_to_copy,
1926  TurboAssembler::kSrcLessThanDst);
1927  }
1928  __ B(&exit);
1929 
1930  // Copy existing arguments one slot down and add padding.
1931  __ Bind(&copy_down);
1932  {
1933  Register src = x11;
1934  Register dst = x12;
1935  Register scratch = x13;
1936  __ Add(src, len, 1);
1937  __ Mov(dst, len); // CopySlots will corrupt dst.
1938  __ CopySlots(dst, src, slots_to_copy);
1939  __ Add(scratch, argc, 1);
1940  __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2)); // Store padding.
1941  }
1942 
1943  __ Bind(&exit);
1944 }
1945 
1946 } // namespace
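
// Worked example of the odd-length claim above (illustrative; argc has
// already been updated to original argc + len): with len == 3,
//   slots_to_claim = len + 1 - 2 * (argc & 1)
// yields 4 when the updated argc is even (the original argc was odd, so no
// padding slot existed and one must be added) and 2 when the updated argc
// is odd (the original frame had a padding slot that is simply reused).
// Even len takes the plain Claim(len) path, which preserves 16-byte
// alignment on its own.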
1947 
1948 // static
1949 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1950  Handle<Code> code) {
1951  // ----------- S t a t e -------------
1952  // -- x1 : target
1953  // -- x0 : number of parameters on the stack (not including the receiver)
1954  // -- x2 : arguments list (a FixedArray)
1955  // -- x4 : len (number of elements to push from args)
1956  // -- x3 : new.target (for [[Construct]])
1957  // -----------------------------------
1958  if (masm->emit_debug_code()) {
1959  // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
1960  Label ok, fail;
1961  __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
1962  __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
1963  __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1964  __ Cmp(x13, FIXED_ARRAY_TYPE);
1965  __ B(eq, &ok);
1966  __ Cmp(x13, FIXED_DOUBLE_ARRAY_TYPE);
1967  __ B(ne, &fail);
1968  __ Cmp(x4, 0);
1969  __ B(eq, &ok);
1970  // Fall through.
1971  __ bind(&fail);
1972  __ Abort(AbortReason::kOperandIsNotAFixedArray);
1973 
1974  __ bind(&ok);
1975  }
1976 
1977  Register arguments_list = x2;
1978  Register argc = x0;
1979  Register len = x4;
1980 
1981  Label stack_overflow;
1982  Generate_StackOverflowCheck(masm, len, &stack_overflow);
1983 
1984  // Skip argument setup if we don't need to push any varargs.
1985  Label done;
1986  __ Cbz(len, &done);
1987 
1988  Generate_PrepareForCopyingVarargs(masm, argc, len);
1989 
1990  // Push varargs.
1991  {
1992  Label loop;
1993  Register src = x10;
1994  Register the_hole_value = x11;
1995  Register undefined_value = x12;
1996  Register scratch = x13;
1997  __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
1998  __ LoadRoot(the_hole_value, RootIndex::kTheHoleValue);
1999  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2000  // We do not use the CompareRoot macro as it would do a LoadRoot behind the
2001  // scenes and we want to avoid that in a loop.
2002  // TODO(all): Consider using Ldp and Stp.
2003  __ Bind(&loop);
2004  __ Sub(len, len, 1);
2005  __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
2006  __ Cmp(scratch, the_hole_value);
2007  __ Csel(scratch, scratch, undefined_value, ne);
2008  __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
2009  __ Cbnz(len, &loop);
2010  }
2011  __ Bind(&done);
2012 
2013  // Tail-call to the actual Call or Construct builtin.
2014  __ Jump(code, RelocInfo::CODE_TARGET);
2015 
2016  __ bind(&stack_overflow);
2017  __ TailCallRuntime(Runtime::kThrowStackOverflow);
2018 }
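
// Per-element sketch of the push loop above (illustrative, not emitted):
//   value = args[i];
//   stack[len - 1 - i] = (value == the_hole) ? undefined : value;
// The Csel performs the hole-to-undefined mapping without a branch in the
// loop body.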
2019 
2020 // static
2021 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2022  CallOrConstructMode mode,
2023  Handle<Code> code) {
2024  // ----------- S t a t e -------------
2025  // -- x0 : the number of arguments (not including the receiver)
2026  // -- x3 : the new.target (for [[Construct]] calls)
2027  // -- x1 : the target to call (can be any Object)
2028  // -- x2 : start index (to support rest parameters)
2029  // -----------------------------------
2030 
2031  Register argc = x0;
2032  Register start_index = x2;
2033 
2034  // Check if new.target has a [[Construct]] internal method.
2035  if (mode == CallOrConstructMode::kConstruct) {
2036  Label new_target_constructor, new_target_not_constructor;
2037  __ JumpIfSmi(x3, &new_target_not_constructor);
2038  __ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
2039  __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
2040  __ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
2041  &new_target_constructor);
2042  __ Bind(&new_target_not_constructor);
2043  {
2044  FrameScope scope(masm, StackFrame::MANUAL);
2045  __ EnterFrame(StackFrame::INTERNAL);
2046  __ PushArgument(x3);
2047  __ CallRuntime(Runtime::kThrowNotConstructor);
2048  }
2049  __ Bind(&new_target_constructor);
2050  }
2051 
2052  // Check if we have an arguments adaptor frame below the function frame.
2053  // args_fp will point to the frame that contains the actual arguments, which
2054  // will be the current frame unless we have an arguments adaptor frame, in
2055  // which case args_fp points to the arguments adaptor frame.
2056  Register args_fp = x5;
2057  Register len = x6;
2058  {
2059  Label arguments_adaptor, arguments_done;
2060  Register scratch = x10;
2061  __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2062  __ Ldr(x4, MemOperand(args_fp,
2063  CommonFrameConstants::kContextOrFrameTypeOffset));
2064  __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
2065  __ B(eq, &arguments_adaptor);
2066  {
2067  __ Ldr(scratch,
2068  MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2069  __ Ldr(scratch,
2070  FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
2071  __ Ldrh(len,
2072  FieldMemOperand(scratch,
2073  SharedFunctionInfo::kFormalParameterCountOffset));
2074  __ Mov(args_fp, fp);
2075  }
2076  __ B(&arguments_done);
2077  __ Bind(&arguments_adaptor);
2078  {
2079  // Just load the length from ArgumentsAdaptorFrame.
2080  __ SmiUntag(
2081  len,
2082  MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
2083  }
2084  __ Bind(&arguments_done);
2085  }
2086 
2087  Label stack_done, stack_overflow;
2088  __ Subs(len, len, start_index);
2089  __ B(le, &stack_done);
2090  // Check for stack overflow.
2091  Generate_StackOverflowCheck(masm, x6, &stack_overflow);
2092 
2093  Generate_PrepareForCopyingVarargs(masm, argc, len);
2094 
2095  // Push varargs.
2096  {
2097  Register dst = x13;
2098  __ Add(args_fp, args_fp, 2 * kPointerSize);
2099  __ SlotAddress(dst, 0);
2100  __ CopyDoubleWords(dst, args_fp, len);
2101  }
2102  __ B(&stack_done);
2103 
2104  __ Bind(&stack_overflow);
2105  __ TailCallRuntime(Runtime::kThrowStackOverflow);
2106  __ Bind(&stack_done);
2107 
2108  __ Jump(code, RelocInfo::CODE_TARGET);
2109 }
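
// Sketch of the forwarding length computation above (illustrative): for a
// call shape like
//   function f(a, ...rest) { return g(...rest); }
// len is the number of arguments f was actually called with, taken from
// its formal parameter count or from the arguments adaptor frame, and
// start_index == 1 skips the fixed parameter a. After
// Subs(len, len, start_index), a result <= 0 means there is nothing to
// copy, and the builtin tail-calls the Call/Construct code directly.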
2110 
2111 // static
2112 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2113  ConvertReceiverMode mode) {
2114  ASM_LOCATION("Builtins::Generate_CallFunction");
2115  // ----------- S t a t e -------------
2116  // -- x0 : the number of arguments (not including the receiver)
2117  // -- x1 : the function to call (checked to be a JSFunction)
2118  // -----------------------------------
2119  __ AssertFunction(x1);
2120 
2121  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2122  // Check that function is not a "classConstructor".
2123  Label class_constructor;
2124  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2125  __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
2126  __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
2127  &class_constructor);
2128 
2129  // Enter the context of the function; ToObject has to run in the function
2130  // context, and we also need to take the global proxy from the function
2131  // context in case of conversion.
2132  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2133  // We need to convert the receiver for non-native sloppy mode functions.
2134  Label done_convert;
2135  __ TestAndBranchIfAnySet(w3,
2136  SharedFunctionInfo::IsNativeBit::kMask |
2137  SharedFunctionInfo::IsStrictBit::kMask,
2138  &done_convert);
2139  {
2140  // ----------- S t a t e -------------
2141  // -- x0 : the number of arguments (not including the receiver)
2142  // -- x1 : the function to call (checked to be a JSFunction)
2143  // -- x2 : the shared function info.
2144  // -- cp : the function context.
2145  // -----------------------------------
2146 
2147  if (mode == ConvertReceiverMode::kNullOrUndefined) {
2148  // Patch receiver to global proxy.
2149  __ LoadGlobalProxy(x3);
2150  } else {
2151  Label convert_to_object, convert_receiver;
2152  __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
2153  __ JumpIfSmi(x3, &convert_to_object);
2154  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2155  __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
2156  __ B(hs, &done_convert);
2157  if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2158  Label convert_global_proxy;
2159  __ JumpIfRoot(x3, RootIndex::kUndefinedValue, &convert_global_proxy);
2160  __ JumpIfNotRoot(x3, RootIndex::kNullValue, &convert_to_object);
2161  __ Bind(&convert_global_proxy);
2162  {
2163  // Patch receiver to global proxy.
2164  __ LoadGlobalProxy(x3);
2165  }
2166  __ B(&convert_receiver);
2167  }
2168  __ Bind(&convert_to_object);
2169  {
2170  // Convert receiver using ToObject.
2171  // TODO(bmeurer): Inline the allocation here to avoid building the frame
2172  // in the fast case? (fall back to AllocateInNewSpace?)
2173  FrameScope scope(masm, StackFrame::INTERNAL);
2174  __ SmiTag(x0);
2175  __ Push(padreg, x0, x1, cp);
2176  __ Mov(x0, x3);
2177  __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2178  RelocInfo::CODE_TARGET);
2179  __ Mov(x3, x0);
2180  __ Pop(cp, x1, x0, padreg);
2181  __ SmiUntag(x0);
2182  }
2183  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2184  __ Bind(&convert_receiver);
2185  }
2186  __ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
2187  }
2188  __ Bind(&done_convert);
2189 
2190  // ----------- S t a t e -------------
2191  // -- x0 : the number of arguments (not including the receiver)
2192  // -- x1 : the function to call (checked to be a JSFunction)
2193  // -- x2 : the shared function info.
2194  // -- cp : the function context.
2195  // -----------------------------------
2196 
2197  __ Ldrh(x2,
2198  FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
2199  ParameterCount actual(x0);
2200  ParameterCount expected(x2);
2201  __ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION);
2202 
2203  // The function is a "classConstructor", so we need to raise an exception.
2204  __ Bind(&class_constructor);
2205  {
2206  FrameScope frame(masm, StackFrame::INTERNAL);
2207  __ PushArgument(x1);
2208  __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2209  }
2210 }
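
// Receiver handling above, summarized per ConvertReceiverMode (as read from
// the code paths):
//   kNullOrUndefined    : receiver is patched to the global proxy directly.
//   kNotNullOrUndefined : primitives go through ToObject; JSReceivers are
//                         kept as-is.
//   kAny                : null/undefined become the global proxy, all other
//                         primitives go through ToObject.
// Native and strict-mode functions skip the conversion entirely.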
2211 
2212 namespace {
2213 
2214 void Generate_PushBoundArguments(MacroAssembler* masm) {
2215  // ----------- S t a t e -------------
2216  // -- x0 : the number of arguments (not including the receiver)
2217  // -- x1 : target (checked to be a JSBoundFunction)
2218  // -- x3 : new.target (only in case of [[Construct]])
2219  // -----------------------------------
2220 
2221  Register bound_argc = x4;
2222  Register bound_argv = x2;
2223 
2224  // Load [[BoundArguments]] into x2 and length of that into x4.
2225  Label no_bound_arguments;
2226  __ Ldr(bound_argv,
2227  FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
2228  __ SmiUntag(bound_argc,
2229  FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
2230  __ Cbz(bound_argc, &no_bound_arguments);
2231  {
2232  // ----------- S t a t e -------------
2233  // -- x0 : the number of arguments (not including the receiver)
2234  // -- x1 : target (checked to be a JSBoundFunction)
2235  // -- x2 : the [[BoundArguments]] (implemented as FixedArray)
2236  // -- x3 : new.target (only in case of [[Construct]])
2237  // -- x4 : the number of [[BoundArguments]]
2238  // -----------------------------------
2239 
2240  Register argc = x0;
2241 
2242  // Check for stack overflow.
2243  {
2244  // Check the stack for overflow. We are not trying to catch interruptions
2245  // (i.e. debug break and preemption) here, so check the "real stack
2246  // limit".
2247  Label done;
2248  __ LoadRoot(x10, RootIndex::kRealStackLimit);
2249  // Make x10 the space we have left. The stack might already be overflowed
2250  // here which will cause x10 to become negative.
2251  __ Sub(x10, sp, x10);
2252  // Check if the arguments will overflow the stack.
2253  __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
2254  __ B(hs, &done);
2255  __ TailCallRuntime(Runtime::kThrowStackOverflow);
2256  __ Bind(&done);
2257  }
2258 
2259  // Check if we need padding.
2260  Label copy_args, copy_bound_args;
2261  Register total_argc = x15;
2262  Register slots_to_claim = x12;
2263  __ Add(total_argc, argc, bound_argc);
2264  __ Mov(slots_to_claim, bound_argc);
2265  __ Tbz(bound_argc, 0, &copy_args);
2266 
2267  // Load receiver before we start moving the arguments. We will only
2268  // need this in this path because the bound arguments are odd.
2269  Register receiver = x14;
2270  __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
2271 
2272  // Claim space we need. If argc is even, slots_to_claim = bound_argc + 1,
2273  // as we need one extra padding slot. If argc is odd, we know that the
2274  // original arguments will have a padding slot we can reuse (since
2275  // bound_argc is odd), so slots_to_claim = bound_argc - 1.
2276  {
2277  Register scratch = x11;
2278  __ Add(slots_to_claim, bound_argc, 1);
2279  __ And(scratch, total_argc, 1);
2280  __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
2281  }
2282 
2283  // Copy bound arguments.
2284  __ Bind(&copy_args);
2285  // Skip claim and copy of existing arguments in the special case where we
2286  // do not need to claim any slots (this will be the case when
2287  // bound_argc == 1 and the existing arguments have padding we can reuse).
2288  __ Cbz(slots_to_claim, &copy_bound_args);
2289  __ Claim(slots_to_claim);
2290  {
2291  Register count = x10;
2292  // Relocate arguments to a lower address.
2293  __ Mov(count, argc);
2294  __ CopySlots(0, slots_to_claim, count);
2295 
2296  __ Bind(&copy_bound_args);
2297  // Copy [[BoundArguments]] to the stack (below the arguments). The first
2298  // element of the array is copied to the highest address.
2299  {
2300  Label loop;
2301  Register counter = x10;
2302  Register scratch = x11;
2303  Register copy_to = x12;
2304  __ Add(bound_argv, bound_argv,
2305  FixedArray::kHeaderSize - kHeapObjectTag);
2306  __ SlotAddress(copy_to, argc);
2307  __ Add(argc, argc,
2308  bound_argc); // Update argc to include bound arguments.
2309  __ Lsl(counter, bound_argc, kPointerSizeLog2);
2310  __ Bind(&loop);
2311  __ Sub(counter, counter, kPointerSize);
2312  __ Ldr(scratch, MemOperand(bound_argv, counter));
2313  // Poke into claimed area of stack.
2314  __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
2315  __ Cbnz(counter, &loop);
2316  }
2317 
2318  {
2319  Label done;
2320  Register scratch = x10;
2321  __ Tbz(bound_argc, 0, &done);
2322  // Store receiver.
2323  __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
2324  __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
2325  __ Tbnz(total_argc, 0, &done);
2326  // Store padding.
2327  __ Str(padreg, MemOperand(scratch));
2328  __ Bind(&done);
2329  }
2330  }
2331  }
2332  __ Bind(&no_bound_arguments);
2333 }
2334 
2335 } // namespace
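
// Worked example of the bound-arguments push above (illustrative): for
//   var b = f.bind(thisArg, x, y, z);  // bound_argc == 3 (odd)
//   b(a);                              // argc == 1
// total_argc == 4 (even), so slots_to_claim = bound_argc + 1 = 4. The
// existing argument a moves down four slots, [x, y, z] is copied above it
// (z nearest to a, since the first array element lands at the highest
// address), and the saved receiver plus one padding slot are re-stored on
// top, so the callee sees f(x, y, z, a) with sp still 16-byte aligned.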
2336 
2337 // static
2338 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2339  // ----------- S t a t e -------------
2340  // -- x0 : the number of arguments (not including the receiver)
2341  // -- x1 : the function to call (checked to be a JSBoundFunction)
2342  // -----------------------------------
2343  __ AssertBoundFunction(x1);
2344 
2345  // Patch the receiver to [[BoundThis]].
2346  __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
2347  __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
2348 
2349  // Push the [[BoundArguments]] onto the stack.
2350  Generate_PushBoundArguments(masm);
2351 
2352  // Call the [[BoundTargetFunction]] via the Call builtin.
2353  __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2354  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2355  RelocInfo::CODE_TARGET);
2356 }
2357 
2358 // static
2359 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2360  // ----------- S t a t e -------------
2361  // -- x0 : the number of arguments (not including the receiver)
2362  // -- x1 : the target to call (can be any Object).
2363  // -----------------------------------
2364 
2365  Label non_callable, non_function, non_smi;
2366  __ JumpIfSmi(x1, &non_callable);
2367  __ Bind(&non_smi);
2368  __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
2369  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2370  RelocInfo::CODE_TARGET, eq);
2371  __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
2372  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2373  RelocInfo::CODE_TARGET, eq);
2374 
2375  // Check if target has a [[Call]] internal method.
2376  __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
2377  __ TestAndBranchIfAllClear(x4, Map::IsCallableBit::kMask, &non_callable);
2378 
2379  // Check if the target is a proxy; if so, tail call the CallProxy builtin.
2380  __ Cmp(x5, JS_PROXY_TYPE);
2381  __ B(ne, &non_function);
2382  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
2383 
2384  // 2. Call to something else, which might have a [[Call]] internal method (if
2385  // not we raise an exception).
2386  __ Bind(&non_function);
2387  // Overwrite the original receiver with the (original) target.
2388  __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
2389  // Let the "call_as_function_delegate" take care of the rest.
2390  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
2391  __ Jump(masm->isolate()->builtins()->CallFunction(
2392  ConvertReceiverMode::kNotNullOrUndefined),
2393  RelocInfo::CODE_TARGET);
2394 
2395  // 3. Call to something that is not callable.
2396  __ bind(&non_callable);
2397  {
2398  FrameScope scope(masm, StackFrame::INTERNAL);
2399  __ PushArgument(x1);
2400  __ CallRuntime(Runtime::kThrowCalledNonCallable);
2401  }
2402 }
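
// Dispatch summary for Call (as implemented above): JSFunction ->
// CallFunction; JSBoundFunction -> CallBoundFunction; callable JSProxy ->
// CallProxy; any other object with the IsCallable bit routes through the
// call-as-function delegate with the target patched in as receiver;
// everything else throws via Runtime::kThrowCalledNonCallable.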
2403 
2404 // static
2405 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2406  // ----------- S t a t e -------------
2407  // -- x0 : the number of arguments (not including the receiver)
2408  // -- x1 : the constructor to call (checked to be a JSFunction)
2409  // -- x3 : the new target (checked to be a constructor)
2410  // -----------------------------------
2411  __ AssertConstructor(x1);
2412  __ AssertFunction(x1);
2413 
2414  // The calling convention for function-specific ConstructStubs requires
2415  // x2 to contain either an AllocationSite or undefined.
2416  __ LoadRoot(x2, RootIndex::kUndefinedValue);
2417 
2418  Label call_generic_stub;
2419 
2420  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2421  __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2422  __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
2423  __ TestAndBranchIfAllClear(
2424  w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
2425 
2426  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2427  RelocInfo::CODE_TARGET);
2428 
2429  __ bind(&call_generic_stub);
2430  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2431  RelocInfo::CODE_TARGET);
2432 }
2433 
2434 // static
2435 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2436  // ----------- S t a t e -------------
2437  // -- x0 : the number of arguments (not including the receiver)
2438  // -- x1 : the function to call (checked to be a JSBoundFunction)
2439  // -- x3 : the new target (checked to be a constructor)
2440  // -----------------------------------
2441  __ AssertConstructor(x1);
2442  __ AssertBoundFunction(x1);
2443 
2444  // Push the [[BoundArguments]] onto the stack.
2445  Generate_PushBoundArguments(masm);
2446 
2447  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2448  {
2449  Label done;
2450  __ Cmp(x1, x3);
2451  __ B(ne, &done);
2452  __ Ldr(x3,
2453  FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2454  __ Bind(&done);
2455  }
2456 
2457  // Construct the [[BoundTargetFunction]] via the Construct builtin.
2458  __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2459  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2460 }
2461 
2462 // static
2463 void Builtins::Generate_Construct(MacroAssembler* masm) {
2464  // ----------- S t a t e -------------
2465  // -- x0 : the number of arguments (not including the receiver)
2466  // -- x1 : the constructor to call (can be any Object)
2467  // -- x3 : the new target (either the same as the constructor or
2468  // the JSFunction on which new was invoked initially)
2469  // -----------------------------------
2470 
2471  // Check if target is a Smi.
2472  Label non_constructor, non_proxy;
2473  __ JumpIfSmi(x1, &non_constructor);
2474 
2475  // Check if target has a [[Construct]] internal method.
2476  __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
2477  __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
2478  __ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
2479  &non_constructor);
2480 
2481  // Dispatch based on instance type.
2482  __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
2483  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2484  RelocInfo::CODE_TARGET, eq);
2485 
2486  // Only dispatch to bound functions after checking whether they are
2487  // constructors.
2488  __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
2489  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2490  RelocInfo::CODE_TARGET, eq);
2491 
2492  // Only dispatch to proxies after checking whether they are constructors.
2493  __ Cmp(x5, JS_PROXY_TYPE);
2494  __ B(ne, &non_proxy);
2495  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2496  RelocInfo::CODE_TARGET);
2497 
2498  // Called Construct on an exotic Object with a [[Construct]] internal method.
2499  __ bind(&non_proxy);
2500  {
2501  // Overwrite the original receiver with the (original) target.
2502  __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
2503  // Let the "call_as_constructor_delegate" take care of the rest.
2504  __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
2505  __ Jump(masm->isolate()->builtins()->CallFunction(),
2506  RelocInfo::CODE_TARGET);
2507  }
2508 
2509  // Called Construct on an Object that doesn't have a [[Construct]] internal
2510  // method.
2511  __ bind(&non_constructor);
2512  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2513  RelocInfo::CODE_TARGET);
2514 }
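
// Dispatch summary for Construct (as implemented above): Smis and maps
// without the IsConstructor bit -> ConstructedNonConstructable; JSFunction
// -> ConstructFunction; JSBoundFunction -> ConstructBoundFunction; JSProxy
// -> ConstructProxy; any other constructor is routed through the
// call-as-constructor delegate with the target patched in as receiver.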
2515 
2516 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2517  ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
2518  // ----------- S t a t e -------------
2519  // -- x0 : actual number of arguments
2520  // -- x1 : function (passed through to callee)
2521  // -- x2 : expected number of arguments
2522  // -- x3 : new target (passed through to callee)
2523  // -----------------------------------
2524 
2525  // The frame we are about to construct will look like:
2526  //
2527  // slot Adaptor frame
2528  // +-----------------+--------------------------------
2529  // -n-1 | receiver | ^
2530  // | (parameter 0) | |
2531  // |- - - - - - - - -| |
2532  // -n | | Caller
2533  // ... | ... | frame slots --> actual args
2534  // -2 | parameter n-1 | |
2535  // |- - - - - - - - -| |
2536  // -1 | parameter n | v
2537  // -----+-----------------+--------------------------------
2538  // 0 | return addr | ^
2539  // |- - - - - - - - -| |
2540  // 1 | saved frame ptr | <-- frame ptr |
2541  // |- - - - - - - - -| |
2542  // 2 |Frame Type Marker| |
2543  // |- - - - - - - - -| |
2544  // 3 | function | Callee
2545  // |- - - - - - - - -| frame slots
2546  // 4 | num of | |
2547  // | actual args | |
2548  // |- - - - - - - - -| |
2549  // 5 | padding | |
2550  // |-----------------+---- |
2551  // [6] | [padding] | ^ |
2552  // |- - - - - - - - -| | |
2553  // 6+pad | receiver | | |
2554  // | (parameter 0) | | |
2555  // |- - - - - - - - -| | |
2556  // 7+pad | parameter 1 | | |
2557  // |- - - - - - - - -| Frame slots ----> expected args
2558  // 8+pad | parameter 2 | | |
2559  // |- - - - - - - - -| | |
2560  // | | | |
2561  // ... | ... | | |
2562  // | parameter m | | |
2563  // |- - - - - - - - -| | |
2564  // | [undefined] | | |
2565  // |- - - - - - - - -| | |
2566  // | | | |
2567  // | ... | | |
2568  // | [undefined] | v <-- stack ptr v
2569  // -----+-----------------+---------------------------------
2570  //
2571  // There is an optional slot of padding above the receiver to ensure stack
2572  // alignment of the arguments.
2573  // If the number of expected arguments is larger than the number of actual
2574  // arguments, the remaining expected slots will be filled with undefined.
2575 
2576  Register argc_actual = x0; // Excluding the receiver.
2577  Register argc_expected = x2; // Excluding the receiver.
2578  Register function = x1;
2579 
2580  Label dont_adapt_arguments, stack_overflow;
2581 
2582  Label enough_arguments;
2583  __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
2584  __ B(eq, &dont_adapt_arguments);
2585 
2586  EnterArgumentsAdaptorFrame(masm);
2587 
2588  Register copy_from = x10;
2589  Register copy_end = x11;
2590  Register copy_to = x12;
2591  Register argc_to_copy = x13;
2592  Register argc_unused_actual = x14;
2593  Register scratch1 = x15, scratch2 = x16;
2594 
2595  // We need slots for the expected arguments, with one extra slot for the
2596  // receiver.
2597  __ RecordComment("-- Stack check --");
2598  __ Add(scratch1, argc_expected, 1);
2599  Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
2600 
2601  // Round up number of slots to be even, to maintain stack alignment.
2602  __ RecordComment("-- Allocate callee frame slots --");
2603  __ Add(scratch1, scratch1, 1);
2604  __ Bic(scratch1, scratch1, 1);
2605  __ Claim(scratch1, kPointerSize);
2606 
2607  __ Mov(copy_to, sp);
2608 
2609  // Preparing the expected arguments is done in four steps, the order of
2610  // which is chosen so we can use LDP/STP and avoid conditional branches as
2611  // much as possible.
2612 
2613  // (1) If we don't have enough arguments, fill the remaining expected
2614  // arguments with undefined, otherwise skip this step.
2615  __ Subs(scratch1, argc_actual, argc_expected);
2616  __ Csel(argc_unused_actual, xzr, scratch1, lt);
2617  __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
2618  __ B(ge, &enough_arguments);
2619 
2620  // Fill the remaining expected arguments with undefined.
2621  __ RecordComment("-- Fill slots with undefined --");
2622  __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
2623  __ LoadRoot(scratch1, RootIndex::kUndefinedValue);
2624 
2625  Label fill;
2626  __ Bind(&fill);
2627  __ Stp(scratch1, scratch1, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
2628  // We might write one slot extra, but that is ok because we'll overwrite it
2629  // below.
2630  __ Cmp(copy_end, copy_to);
2631  __ B(hi, &fill);
2632 
2633  // Correct copy_to, for the case where we wrote one additional slot.
2634  __ Mov(copy_to, copy_end);
2635 
2636  __ Bind(&enough_arguments);
2637  // (2) Copy all of the actual arguments, or as many as we need.
2638  Label skip_copy;
2639  __ RecordComment("-- Copy actual arguments --");
2640  __ Cbz(argc_to_copy, &skip_copy);
2641  __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
2642  __ Add(copy_from, fp, 2 * kPointerSize);
2643  // Adjust for difference between actual and expected arguments.
2644  __ Add(copy_from, copy_from,
2645  Operand(argc_unused_actual, LSL, kPointerSizeLog2));
2646 
2647  // Copy arguments. We use load/store pair instructions, so we might overshoot
2648  // by one slot, but since we copy the arguments starting from the last one, if
2649  // we do overshoot, the extra slot will be overwritten later by the receiver.
2650  Label copy_2_by_2;
2651  __ Bind(&copy_2_by_2);
2652  __ Ldp(scratch1, scratch2,
2653  MemOperand(copy_from, 2 * kPointerSize, PostIndex));
2654  __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
2655  __ Cmp(copy_end, copy_to);
2656  __ B(hi, &copy_2_by_2);
2657  __ Bind(&skip_copy);
2658 
2659  // (3) Store padding. If the padding slot turns out not to be needed, it
2660  // will simply be overwritten by the receiver in step (4).
2661  __ RecordComment("-- Store padding --");
2662  __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
2663 
2664  // (4) Store receiver. Calculate target address from the sp to avoid checking
2665  // for padding. Storing the receiver will overwrite either the extra slot
2666  // we copied with the actual arguments, if we did copy one, or the padding we
2667  // stored above.
2668  __ RecordComment("-- Store receiver --");
2669  __ Add(copy_from, fp, 2 * kPointerSize);
2670  __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
2671  __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
2672 
2673  // Arguments have been adapted. Now call the entry point.
2674  __ RecordComment("-- Call entry point --");
2675  __ Mov(argc_actual, argc_expected);
2676  // x0 : expected number of arguments
2677  // x1 : function (passed through to callee)
2678  // x3 : new target (passed through to callee)
2679  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
2680  __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
2681  __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
2682  __ Call(x2);
2683 
2684  // Store offset of return address for deoptimizer.
2685  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2686 
2687  // Exit frame and return.
2688  LeaveArgumentsAdaptorFrame(masm);
2689  __ Ret();
2690 
2691  // Call the entry point without adapting the arguments.
2692  __ RecordComment("-- Call without adapting args --");
2693  __ Bind(&dont_adapt_arguments);
2694  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
2695  __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
2696  __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
2697  __ Jump(x2);
2698 
2699  __ Bind(&stack_overflow);
2700  __ RecordComment("-- Stack overflow --");
2701  {
2702  FrameScope frame(masm, StackFrame::MANUAL);
2703  __ CallRuntime(Runtime::kThrowStackOverflow);
2704  __ Unreachable();
2705  }
2706 }
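
// Worked example of the adaptor above (illustrative): actual == 1,
// expected == 3. The claim of expected + 1 slots rounds to 4 (already
// even); step (1) fills expected - actual == 2 slots with undefined, step
// (2) copies the single actual argument (its Ldp/Stp pair overshoots by one
// slot), step (3) stores padding at fp[-5 * kPointerSize], and step (4)
// re-stores the receiver at sp[expected * kPointerSize], overwriting the
// overshoot. The callee then sees <undefined, undefined, arg0, receiver>
// from sp upwards, with x0 == 3.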
2707 
2708 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2709  // The function index was put in w8 by the jump table trampoline.
2710  // Sign extend and convert to Smi for the runtime call.
2711  __ sxtw(kWasmCompileLazyFuncIndexRegister,
2712  kWasmCompileLazyFuncIndexRegister.W());
2713  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
2714  kWasmCompileLazyFuncIndexRegister);
2715  {
2716  HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2717  FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2718 
2719  // Save all parameter registers (see wasm-linkage.cc). They might be
2720  // overwritten in the runtime call below. We don't have any callee-saved
2721  // registers in wasm, so no need to store anything else.
2722  constexpr RegList gp_regs =
2723  Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
2724  constexpr RegList fp_regs =
2725  Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
2726  __ PushXRegList(gp_regs);
2727  __ PushDRegList(fp_regs);
2728 
2729  // Pass instance and function index as explicit arguments to the runtime
2730  // function.
2731  __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2732  // Load the correct CEntry builtin from the instance object.
2733  __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
2734  WasmInstanceObject::kCEntryStubOffset));
2735  // Initialize the JavaScript context with 0. CEntry will use it to
2736  // set the current context on the isolate.
2737  __ Mov(cp, Smi::zero());
2738  __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
2739  // The entrypoint address is the return value.
2740  __ mov(x8, kReturnRegister0);
2741 
2742  // Restore registers.
2743  __ PopDRegList(fp_regs);
2744  __ PopXRegList(gp_regs);
2745  }
2746  // Finally, jump to the entrypoint.
2747  __ Jump(x8);
2748 }
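
// Flow sketch for lazy Wasm compilation (summarizing the code above): the
// jump-table trampoline leaves the function index in w8; it is sign
// extended, smi-tagged and passed together with the instance to
// Runtime::kWasmCompileLazy via the CEntry stub stored on the instance.
// The runtime returns the compiled code's entry address, all parameter
// registers are restored, and the jump to x8 resumes the original call as
// if the code had always been present.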
2749 
2750 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2751  SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2752  bool builtin_exit_frame) {
2753  // The Abort mechanism relies on CallRuntime, which in turn relies on
2754  // CEntry, so until this stub has been generated, we have to use a
2755  // fall-back Abort mechanism.
2756  //
2757  // Note that this stub must be generated before any use of Abort.
2758  HardAbortScope hard_aborts(masm);
2759 
2760  ASM_LOCATION("CEntry::Generate entry");
2761 
2762  // Register parameters:
2763  // x0: argc (including receiver, untagged)
2764  // x1: target
2765  // If argv_mode == kArgvInRegister:
2766  // x11: argv (pointer to first argument)
2767  //
2768  // The stack on entry holds the arguments and the receiver, with the receiver
2769  // at the highest address:
2770  //
2771  // sp[argc-1]: receiver
2772  // sp[argc-2]: arg[argc-2]
2773  // ... ...
2774  // sp[1]: arg[1]
2775  // sp[0]: arg[0]
2776  //
2777  // The arguments are in reverse order, so that arg[argc-2] is actually the
2778  // first argument to the target function and arg[0] is the last.
2779  const Register& argc_input = x0;
2780  const Register& target_input = x1;
2781 
2782  // Calculate argv, argc and the target address, and store them in
2783  // callee-saved registers so we can retry the call without having to reload
2784  // these arguments.
2785  // TODO(jbramley): If the first call attempt succeeds in the common case (as
2786  // it should), then we might be better off putting these parameters directly
2787  // into their argument registers, rather than using callee-saved registers and
2788  // preserving them on the stack.
2789  const Register& argv = x21;
2790  const Register& argc = x22;
2791  const Register& target = x23;
2792 
2793  // Derive argv from the stack pointer so that it points to the first argument
2794  // (arg[argc-2]), or just below the receiver in case there are no arguments.
2795  // - Adjust for the arg[] array.
2796  Register temp_argv = x11;
2797  if (argv_mode == kArgvOnStack) {
2798  __ SlotAddress(temp_argv, x0);
2799  // - Adjust for the receiver.
2800  __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
2801  }
2802 
2803  // Reserve three slots to preserve x21-x23 callee-saved registers.
2804  int extra_stack_space = 3;
2805  // Enter the exit frame.
2806  FrameScope scope(masm, StackFrame::MANUAL);
2807  __ EnterExitFrame(
2808  save_doubles == kSaveFPRegs, x10, extra_stack_space,
2809  builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2810 
2811  // Poke callee-saved registers into reserved space.
2812  __ Poke(argv, 1 * kPointerSize);
2813  __ Poke(argc, 2 * kPointerSize);
2814  __ Poke(target, 3 * kPointerSize);
2815 
2816  // We normally only keep tagged values in callee-saved registers, as they
2817  // could be pushed onto the stack by called stubs and functions, and on the
2818  // stack they can confuse the GC. However, we're only calling C functions
2819  // which can push arbitrary data onto the stack anyway, and so the GC won't
2820  // examine that part of the stack.
2821  __ Mov(argc, argc_input);
2822  __ Mov(target, target_input);
2823  __ Mov(argv, temp_argv);
2824 
2825  // x21 : argv
2826  // x22 : argc
2827  // x23 : call target
2828  //
2829  // The stack (on entry) holds the arguments and the receiver, with the
2830  // receiver at the highest address:
2831  //
2832  // argv[8]: receiver
2833  // argv -> argv[0]: arg[argc-2]
2834  // ... ...
2835  // argv[...]: arg[1]
2836  // argv[...]: arg[0]
2837  //
2838  // Immediately below (after) this is the exit frame, as constructed by
2839  // EnterExitFrame:
2840  // fp[8]: CallerPC (lr)
2841  // fp -> fp[0]: CallerFP (old fp)
2842  // fp[-8]: Space reserved for SPOffset.
2843  // fp[-16]: CodeObject()
2844  // sp[...]: Saved doubles, if save_doubles is true.
2845  // sp[32]: Alignment padding, if necessary.
2846  // sp[24]: Preserved x23 (used for target).
2847  // sp[16]: Preserved x22 (used for argc).
2848  // sp[8]: Preserved x21 (used for argv).
2849  // sp -> sp[0]: Space reserved for the return address.
2850  //
2851  // After a successful call, the exit frame, preserved registers (x21-x23) and
2852  // the arguments (including the receiver) are dropped or popped as
2853  // appropriate. The stub then returns.
2854  //
2855  // After an unsuccessful call, the exit frame and suchlike are left
2856  // untouched, and the stub throws an exception by jumping to the
2857  // exception_returned label.
2858 
2859  // Prepare AAPCS64 arguments to pass to the builtin.
2860  __ Mov(x0, argc);
2861  __ Mov(x1, argv);
2862  __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
2863 
2864  Label return_location;
2865  __ Adr(x12, &return_location);
2866  __ Poke(x12, 0);
2867 
2868  if (__ emit_debug_code()) {
2869  // Verify that the slot one word below the SP recorded at fp[kSPOffset]
2870  // holds the return location (currently in x12).
2871  UseScratchRegisterScope temps(masm);
2872  Register temp = temps.AcquireX();
2873  __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
2874  __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
2875  __ Cmp(temp, x12);
2876  __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
2877  }
2878 
2879  // Call the builtin.
2880  __ Blr(target);
2881  __ Bind(&return_location);
2882 
2883  // Result returned in x0 or x1:x0 - do not destroy these registers!
2884 
2885  // x0 result0 The return code from the call.
2886  // x1 result1 For calls which return ObjectPair.
2887  // x21 argv
2888  // x22 argc
2889  // x23 target
2890  const Register& result = x0;
2891 
2892  // Check result for exception sentinel.
2893  Label exception_returned;
2894  __ CompareRoot(result, RootIndex::kException);
2895  __ B(eq, &exception_returned);
2896 
2897  // The call succeeded, so unwind the stack and return.
2898 
2899  // Restore callee-saved registers x21-x23.
2900  __ Mov(x11, argc);
2901 
2902  __ Peek(argv, 1 * kPointerSize);
2903  __ Peek(argc, 2 * kPointerSize);
2904  __ Peek(target, 3 * kPointerSize);
2905 
2906  __ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9);
2907  if (argv_mode == kArgvOnStack) {
2908  // Drop the remaining stack slots and return from the stub.
2909  __ DropArguments(x11);
2910  }
2911  __ AssertFPCRState();
2912  __ Ret();
2913 
2914  // Handling of exception.
2915  __ Bind(&exception_returned);
2916 
2917  ExternalReference pending_handler_context_address = ExternalReference::Create(
2918  IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2919  ExternalReference pending_handler_entrypoint_address =
2920  ExternalReference::Create(
2921  IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2922  ExternalReference pending_handler_fp_address = ExternalReference::Create(
2923  IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2924  ExternalReference pending_handler_sp_address = ExternalReference::Create(
2925  IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2926 
2927  // Ask the runtime for help to determine the handler. This will set x0 to
2928  // contain the current pending exception; don't clobber it.
2929  ExternalReference find_handler =
2930  ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2931  {
2932  FrameScope scope(masm, StackFrame::MANUAL);
2933  __ Mov(x0, 0); // argc.
2934  __ Mov(x1, 0); // argv.
2935  __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
2936  __ CallCFunction(find_handler, 3);
2937  }
2938 
2939  // Retrieve the handler context, SP and FP.
2940  __ Mov(cp, pending_handler_context_address);
2941  __ Ldr(cp, MemOperand(cp));
2942  {
2943  UseScratchRegisterScope temps(masm);
2944  Register scratch = temps.AcquireX();
2945  __ Mov(scratch, pending_handler_sp_address);
2946  __ Ldr(scratch, MemOperand(scratch));
2947  __ Mov(sp, scratch);
2948  }
2949  __ Mov(fp, pending_handler_fp_address);
2950  __ Ldr(fp, MemOperand(fp));
2951 
2952  // If the handler is a JS frame, restore the context to the frame. Note that
2953  // the context register (cp) will have been set to zero for non-JS frames.
2954  Label not_js_frame;
2955  __ Cbz(cp, &not_js_frame);
2956  __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2957  __ Bind(&not_js_frame);
2958 
2959  // Reset the masking register. This is done independently of the underlying
2960  // feature flag {FLAG_untrusted_code_mitigations}, so that the snapshot works
2961  // with both configurations. It is safe to always do this, because the
2962  // underlying register is caller-saved and can be arbitrarily clobbered.
2963  __ ResetSpeculationPoisonRegister();
2964 
2965  // Compute the handler entry address and jump to it.
2966  __ Mov(x10, pending_handler_entrypoint_address);
2967  __ Ldr(x10, MemOperand(x10));
2968  __ Br(x10);
2969 }
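// For reference, a sketch of the AAPCS64 call shape used above (the types
// are illustrative, but the register assignment mirrors the Mov sequence
// before the Blr):
//
//   ReturnValue target(int64_t argc, Address* argv, Isolate* isolate);
//
// x0 <- argc, x1 <- argv, x2 <- isolate; the result comes back in x0, plus
// x1 for entries that return an ObjectPair.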
2970 
2971 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2972  Label done;
2973  Register result = x7;
2974 
2975  DCHECK(result.Is64Bits());
2976 
2977  HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2978  UseScratchRegisterScope temps(masm);
2979  Register scratch1 = temps.AcquireX();
2980  Register scratch2 = temps.AcquireX();
2981  DoubleRegister double_scratch = temps.AcquireD();
2982 
2983  // Account for saved regs.
2984  const int kArgumentOffset = 2 * kPointerSize;
2985 
2986  __ Push(result, scratch1); // scratch1 is also pushed to preserve alignment.
2987  __ Peek(double_scratch, kArgumentOffset);
2988 
2989  // Try to convert with a FPU convert instruction. This handles all
2990  // non-saturating cases.
2991  __ TryConvertDoubleToInt64(result, double_scratch, &done);
2992  __ Fmov(result, double_scratch);
2993 
2994  // If we reach here we need to manually convert the input to an int32.
2995 
2996  // Extract the exponent.
2997  Register exponent = scratch1;
2998  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
2999  HeapNumber::kExponentBits);
3000 
3001  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
3002  // the mantissa gets shifted completely out of the int32_t result.
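  // For example, 2.0^100 has a biased exponent of 1123, which is >= 1107
  // (kExponentBias + 84); after truncation the low 32 bits are all zero, so
  // the int32 result is 0.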
3003  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
3004  __ CzeroX(result, ge);
3005  __ B(ge, &done);
3006 
3007  // The Fcvtzs sequence handles all cases except where the conversion causes
3008  // signed overflow in the int64_t target. Since we've already handled
3009  // exponents >= 84, we can guarantee that 63 <= exponent < 84.
3010 
3011  if (masm->emit_debug_code()) {
3012  __ Cmp(exponent, HeapNumber::kExponentBias + 63);
3013  // Exponents less than this should have been handled by the Fcvt case.
3014  __ Check(ge, AbortReason::kUnexpectedValue);
3015  }
3016 
3017  // Isolate the mantissa bits, and set the implicit '1'.
3018  Register mantissa = scratch2;
3019  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
3020  __ Orr(mantissa, mantissa, 1ULL << HeapNumber::kMantissaBits);
3021 
3022  // Negate the mantissa if necessary.
3023  __ Tst(result, kXSignMask);
3024  __ Cneg(mantissa, mantissa, ne);
3025 
3026  // Shift the mantissa bits into the correct place. We know that the shift
3027  // is to the left here, because exponent >= 63 >= kMantissaBits.
3028  __ Sub(exponent, exponent,
3029  HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
3030  __ Lsl(result, mantissa, exponent);
3031 
3032  __ Bind(&done);
3033  __ Poke(result, kArgumentOffset);
3034  __ Pop(scratch1, result);
3035  __ Ret();
3036 }
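// A minimal standalone C++ sketch (illustrative, not part of this builtin)
// of the manual conversion path above. It assumes the IEEE-754 constants
// kExponentBias = 1023 and kMantissaBits = 52, and mirrors the
// Ubfx/Orr/Cneg/Lsl sequence for inputs whose biased exponent is at least
// 1023 + 63, the range the Fcvtzs fast path cannot handle.

#include <cstdint>
#include <cstring>

int64_t TruncateLargeDouble(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));  // The Fmov above.
  uint64_t exponent = (bits >> 52) & 0x7FF;  // Ubfx of the 11 exponent bits.
  // Exponents >= kExponentBias + 84 shift the mantissa out of int32 range.
  if (exponent >= 1023 + 52 + 32) return 0;  // The CzeroX path.
  // Isolate the 52 mantissa bits and set the implicit leading '1' (Orr).
  uint64_t mantissa =
      (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  // exponent >= 1023 + 63 here, so the shift is always to the left (Lsl).
  uint64_t shifted = mantissa << (exponent - 1023 - 52);
  if (bits >> 63) shifted = -shifted;  // The Tst/Cneg negation, mod 2^64.
  // Only the low 32 bits of the result are meaningful to the caller.
  return static_cast<int64_t>(shifted);
}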
3037 
3038 void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
3039  Register exponent_integer = x12;
3040  Register saved_lr = x19;
3041  VRegister result_double = d0;
3042  VRegister base_double = d0;
3043  VRegister exponent_double = d1;
3044  VRegister base_double_copy = d2;
3045  VRegister scratch1_double = d6;
3046  VRegister scratch0_double = d7;
3047 
3048  // A fast-path for integer exponents.
3049  Label exponent_is_integer;
3050  // Allocate a heap number for the result, and return it.
3051  Label done;
3052 
3053  // Unpack the inputs.
3054 
3055  // Handle double (heap number) exponents.
3056  // Detect integer exponents stored as doubles and handle those in the
3057  // integer fast-path.
3058  __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
3059  scratch0_double, &exponent_is_integer);
3060 
3061  {
3062  AllowExternalCallThatCantCauseGC scope(masm);
3063  __ Mov(saved_lr, lr);
3064  __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
3065  __ Mov(lr, saved_lr);
3066  __ B(&done);
3067  }
3068 
3069  __ Bind(&exponent_is_integer);
3070 
3071  // Find abs(exponent). For negative exponents, we can find the inverse later.
3072  Register exponent_abs = x13;
3073  __ Cmp(exponent_integer, 0);
3074  __ Cneg(exponent_abs, exponent_integer, mi);
3075 
3076  // Repeatedly multiply to calculate the power.
3077  // result = 1.0;
3078  // For each bit n (exponent_integer{n}) {
3079  // if (exponent_integer{n}) {
3080  // result *= base;
3081  // }
3082  // base *= base;
3083  // if (remaining bits in exponent_integer are all zero) {
3084  // break;
3085  // }
3086  // }
3087  Label power_loop, power_loop_entry, power_loop_exit;
3088  __ Fmov(scratch1_double, base_double);
3089  __ Fmov(base_double_copy, base_double);
3090  __ Fmov(result_double, 1.0);
3091  __ B(&power_loop_entry);
3092 
3093  __ Bind(&power_loop);
3094  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
3095  __ Lsr(exponent_abs, exponent_abs, 1);
3096  __ Cbz(exponent_abs, &power_loop_exit);
3097 
3098  __ Bind(&power_loop_entry);
3099  __ Tbz(exponent_abs, 0, &power_loop);
3100  __ Fmul(result_double, result_double, scratch1_double);
3101  __ B(&power_loop);
3102 
3103  __ Bind(&power_loop_exit);
3104 
3105  // If the exponent was positive, result_double holds the result.
3106  __ Tbz(exponent_integer, kXSignBit, &done);
3107 
3108  // The exponent was negative, so find the inverse.
3109  __ Fmov(scratch0_double, 1.0);
3110  __ Fdiv(result_double, scratch0_double, result_double);
3111  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
3112  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
3113  // to calculate the subnormal value 2^-1074. This method of calculating
3114  // negative powers doesn't work because 2^1074 overflows to infinity. To
3115  // catch this corner-case, we bail out if the result was 0. (This can only
3116  // occur if the divisor is infinity or the base is zero.)
3117  __ Fcmp(result_double, 0.0);
3118  __ B(&done, ne);
3119 
3120  AllowExternalCallThatCantCauseGC scope(masm);
3121  __ Mov(saved_lr, lr);
3122  __ Fmov(base_double, base_double_copy);
3123  __ Scvtf(exponent_double, exponent_integer);
3124  __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
3125  __ Mov(lr, saved_lr);
3126  __ Bind(&done);
3127  __ Ret();
3128 }
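// A standalone C++ sketch (illustrative, not part of this builtin) of the
// square-and-multiply loop above; std::pow stands in for the
// power_double_double fallback taken when the inverse of a negative power
// underflows to 0.0.

#include <cmath>
#include <cstdint>

double PowIntegerExponent(double base, int64_t exponent) {
  // abs(exponent), as computed by the Cmp/Cneg pair; negating in the
  // unsigned domain keeps INT64_MIN well-defined.
  uint64_t bits = exponent < 0 ? 0 - static_cast<uint64_t>(exponent)
                               : static_cast<uint64_t>(exponent);
  double result = 1.0;           // result_double.
  double running_square = base;  // scratch1_double.
  while (bits != 0) {
    if (bits & 1) result *= running_square;  // The Tbz-guarded Fmul.
    running_square *= running_square;        // The squaring Fmul.
    bits >>= 1;                              // Lsr.
  }
  if (exponent < 0) {
    result = 1.0 / result;  // The Fdiv inverse.
    // Bail out for the subnormal corner case described above (e.g. the
    // intermediate 2^1074 overflowed to infinity while computing 2^-1074).
    if (result == 0.0) return std::pow(base, static_cast<double>(exponent));
  }
  return result;
}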
3129 
3130 namespace {
3131 
3132 void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
3133  ElementsKind kind) {
3134  Label zero_case, n_case;
3135  Register argc = x0;
3136 
3137  __ Cbz(argc, &zero_case);
3138  __ CompareAndBranch(argc, 1, ne, &n_case);
3139 
3140  // One argument.
3141  if (IsFastPackedElementsKind(kind)) {
3142  Label packed_case;
3143 
3144  // We might need to create a holey array; look at the first argument.
3145  __ Peek(x10, 0);
3146  __ Cbz(x10, &packed_case);
3147 
3148  __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
3149  masm->isolate(), GetHoleyElementsKind(kind))
3150  .code(),
3151  RelocInfo::CODE_TARGET);
3152 
3153  __ Bind(&packed_case);
3154  }
3155 
3156  __ Jump(
3157  CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
3158  .code(),
3159  RelocInfo::CODE_TARGET);
3160 
3161  __ Bind(&zero_case);
3162  // No arguments.
3163  __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
3164  .code(),
3165  RelocInfo::CODE_TARGET);
3166 
3167  __ Bind(&n_case);
3168  // N arguments.
3169  // Load undefined into the allocation site parameter as required by
3170  // ArrayNArgumentsConstructor.
3171  __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
3172  Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
3173  __ Jump(code, RelocInfo::CODE_TARGET);
3174 }
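// For illustration, how argument counts map onto the paths above when the
// requested kind is packed (the factory name is hypothetical):
//   factory()        -> zero_case: InternalArrayNoArgumentConstructor.
//   factory(0)       -> packed_case: the packed single-argument constructor.
//   factory(4)       -> a non-zero length needs holes, so the holey
//                       single-argument constructor is taken instead.
//   factory(1, 2, 3) -> n_case: ArrayNArgumentsConstructor.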
3175 
3176 } // namespace
3177 
3178 void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
3179  // ----------- S t a t e -------------
3180  // -- x0 : argc
3181  // -- x1 : constructor
3182  // -- sp[0] : return address
3183  // -- sp[8] : last argument
3184  // -----------------------------------
3185 
3186  Register constructor = x1;
3187 
3188  if (FLAG_debug_code) {
3189  // The array construct code is only set for the global and natives
3190  // builtin Array functions which always have maps.
3191 
3192  Label unexpected_map, map_ok;
3193  // Initial map for the builtin Array function should be a map.
3194  __ Ldr(x10, FieldMemOperand(constructor,
3195  JSFunction::kPrototypeOrInitialMapOffset));
3196  // The following Smi test catches both a nullptr and a Smi.
3197  __ JumpIfSmi(x10, &unexpected_map);
3198  __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
3199  __ Bind(&unexpected_map);
3200  __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
3201  __ Bind(&map_ok);
3202  }
3203 
3204  Register kind = w3;
3205  // Figure out the right elements kind.
3206  __ Ldr(x10, FieldMemOperand(constructor,
3207  JSFunction::kPrototypeOrInitialMapOffset));
3208 
3209  // Retrieve elements_kind from map.
3210  __ LoadElementsKindFromMap(kind, x10);
3211 
3212  if (FLAG_debug_code) {
3213  Label done;
3214  __ Cmp(x3, PACKED_ELEMENTS);
3215  __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
3216  __ Assert(
3217  eq,
3218  AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
3219  }
3220 
3221  Label fast_elements_case;
3222  __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
3223  GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
3224 
3225  __ Bind(&fast_elements_case);
3226  GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
3227 }
3228 
3229 #undef __
3230 
3231 } // namespace internal
3232 } // namespace v8
3233 
3234 #endif // V8_TARGET_ARCH_ARM64