V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
builtins-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_MIPS
6 
7 #include "src/code-factory.h"
8 #include "src/code-stubs.h"
9 #include "src/counters.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer.h"
12 #include "src/frame-constants.h"
13 #include "src/frames.h"
14 #include "src/mips/constants-mips.h"
15 #include "src/objects-inl.h"
16 #include "src/objects/js-generator.h"
17 #include "src/objects/smi.h"
18 #include "src/register-configuration.h"
19 #include "src/runtime/runtime.h"
20 #include "src/wasm/wasm-objects.h"
21 
22 namespace v8 {
23 namespace internal {
24 
25 #define __ ACCESS_MASM(masm)
26 
27 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
28  ExitFrameType exit_frame_type) {
29  __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
30  if (exit_frame_type == BUILTIN_EXIT) {
31  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
32  RelocInfo::CODE_TARGET);
33  } else {
34  DCHECK(exit_frame_type == EXIT);
35  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
36  RelocInfo::CODE_TARGET);
37  }
38 }
39 
40 void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
41  // ----------- S t a t e -------------
42  // -- a0 : number of arguments
43  // -- ra : return address
44  // -- sp[...]: constructor arguments
45  // -----------------------------------
46  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
47 
48  if (FLAG_debug_code) {
49  // Initial map for the builtin InternalArray functions should be maps.
50  __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
51  __ SmiTst(a2, t0);
52  __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
53  t0, Operand(zero_reg));
54  __ GetObjectType(a2, a3, t0);
55  __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
56  t0, Operand(MAP_TYPE));
57  }
58 
59  // Run the native code for the InternalArray function called as a normal
60  // function.
61  __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
62  RelocInfo::CODE_TARGET);
63 }
64 
65 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
66  Runtime::FunctionId function_id) {
67  // ----------- S t a t e -------------
68  // -- a0 : argument count (preserved for callee)
69  // -- a1 : target function (preserved for callee)
70  // -- a3 : new target (preserved for callee)
71  // -----------------------------------
72  {
73  FrameScope scope(masm, StackFrame::INTERNAL);
74  // Push a copy of the target function and the new target.
75  // Push function as parameter to the runtime call.
76  __ SmiTag(a0);
77  __ Push(a0, a1, a3, a1);
78 
79  __ CallRuntime(function_id, 1);
80 
81  // Restore target function and new target.
82  __ Pop(a0, a1, a3);
83  __ SmiUntag(a0);
84  }
85 
86  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
87  __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag);
88  __ Jump(a2);
89 }
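Note: the tail call above jumps into the Code object returned by the runtime in v0; subtracting the heap-object tag and skipping the Code header yields the address of the first instruction. A minimal standalone sketch of that address arithmetic, with illustrative constants standing in for V8's real tag and header size:

  // Sketch only: how a tagged Code pointer becomes a raw entry address.
  #include <cstdint>

  constexpr uintptr_t kHeapObjectTag = 1;    // illustrative, not V8's value
  constexpr uintptr_t kCodeHeaderSize = 64;  // illustrative, not V8's value

  uintptr_t EntryAddress(uintptr_t tagged_code_pointer) {
    // Mirrors "Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag)" above.
    return tagged_code_pointer + kCodeHeaderSize - kHeapObjectTag;
  }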
90 
91 namespace {
92 
93 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
94  // ----------- S t a t e -------------
95  // -- a0 : number of arguments
96  // -- a1 : constructor function
97  // -- a3 : new target
98  // -- cp : context
99  // -- ra : return address
100  // -- sp[...]: constructor arguments
101  // -----------------------------------
102 
103  // Enter a construct frame.
104  {
105  FrameScope scope(masm, StackFrame::CONSTRUCT);
106 
107  // Preserve the incoming parameters on the stack.
108  __ SmiTag(a0);
109  __ Push(cp, a0);
110  __ SmiUntag(a0);
111 
112  // The receiver for the builtin/api call.
113  __ PushRoot(RootIndex::kTheHoleValue);
114 
115  // Set up pointer to last argument.
116  __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
117 
118  // Copy arguments and receiver to the expression stack.
119  Label loop, entry;
120  __ mov(t3, a0);
121  // ----------- S t a t e -------------
122  // -- a0: number of arguments (untagged)
123  // -- a3: new target
124  // -- t2: pointer to last argument
125  // -- t3: counter
126  // -- sp[0*kPointerSize]: the hole (receiver)
127  // -- sp[1*kPointerSize]: number of arguments (tagged)
128  // -- sp[2*kPointerSize]: context
129  // -----------------------------------
130  __ jmp(&entry);
131  __ bind(&loop);
132  __ Lsa(t0, t2, t3, kPointerSizeLog2);
133  __ lw(t1, MemOperand(t0));
134  __ push(t1);
135  __ bind(&entry);
136  __ Addu(t3, t3, Operand(-1));
137  __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
138 
139  // Call the function.
140  // a0: number of arguments (untagged)
141  // a1: constructor function
142  // a3: new target
143  ParameterCount actual(a0);
144  __ InvokeFunction(a1, a3, actual, CALL_FUNCTION);
145 
146  // Restore context from the frame.
147  __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
148  // Restore smi-tagged arguments count from the frame.
149  __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
150  // Leave construct frame.
151  }
152 
153  // Remove caller arguments from the stack and return.
154  __ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
155  __ Addu(sp, sp, kPointerSize);
156  __ Ret();
157 }
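Note: the final Lsa scales the restored argument count by kPointerSizeLog2 - 1 because the count in a1 is still Smi-tagged (value << 1 on this 32-bit target), so the smaller shift already yields argc * kPointerSize; one extra slot is then dropped for the receiver. A small standalone sketch of that stack adjustment, assuming the 32-bit Smi encoding:

  // Sketch: argument removal at the end of the construct stub (32-bit Smis).
  #include <cstdint>

  constexpr int kPointerSize = 4, kPointerSizeLog2 = 2, kSmiTagSize = 1;

  uintptr_t DropArguments(uintptr_t sp, uint32_t smi_tagged_argc) {
    // (argc << 1) << (kPointerSizeLog2 - kSmiTagSize) == argc * kPointerSize.
    sp += static_cast<uintptr_t>(smi_tagged_argc)
          << (kPointerSizeLog2 - kSmiTagSize);
    return sp + kPointerSize;  // plus one slot for the receiver
  }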
158 
159 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
160  Register scratch1, Register scratch2,
161  Label* stack_overflow) {
162  // Check the stack for overflow. We are not trying to catch
163  // interruptions (e.g. debug break and preemption) here, so the "real stack
164  // limit" is checked.
165  __ LoadRoot(scratch1, RootIndex::kRealStackLimit);
 166  // Make scratch1 the space we have left. The stack might already have overflowed
 167  // here, which will cause scratch1 to become negative.
168  __ subu(scratch1, sp, scratch1);
169  // Check if the arguments will overflow the stack.
170  __ sll(scratch2, num_args, kPointerSizeLog2);
171  // Signed comparison.
172  __ Branch(stack_overflow, le, scratch1, Operand(scratch2));
173 }
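Note: the check above is plain pointer arithmetic; the space remaining above the real stack limit is compared against the space the arguments will need. A hedged standalone sketch of the same test, where sp, real_stack_limit and num_args stand in for the values the assembly reads:

  // Sketch only: mirrors Generate_StackOverflowCheck with plain integers.
  #include <cstdint>

  constexpr int kPointerSize = 4;  // 32-bit MIPS

  bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                          uint32_t num_args) {
    // Space left below sp; may already be negative if the stack overflowed,
    // which is why the comparison below is signed.
    intptr_t remaining = static_cast<intptr_t>(sp - real_stack_limit);
    intptr_t needed = static_cast<intptr_t>(num_args) * kPointerSize;
    return remaining <= needed;  // branch to stack_overflow when true
  }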
174 
175 } // namespace
176 
177 // The construct stub for ES5 constructor functions and ES6 class constructors.
178 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
179  // ----------- S t a t e -------------
180  // -- a0: number of arguments (untagged)
181  // -- a1: constructor function
182  // -- a3: new target
183  // -- cp: context
184  // -- ra: return address
185  // -- sp[...]: constructor arguments
186  // -----------------------------------
187 
188  // Enter a construct frame.
189  {
190  FrameScope scope(masm, StackFrame::CONSTRUCT);
191  Label post_instantiation_deopt_entry, not_create_implicit_receiver;
192 
193  // Preserve the incoming parameters on the stack.
194  __ SmiTag(a0);
195  __ Push(cp, a0, a1);
196  __ PushRoot(RootIndex::kTheHoleValue);
197  __ Push(a3);
198 
199  // ----------- S t a t e -------------
200  // -- sp[0*kPointerSize]: new target
201  // -- sp[1*kPointerSize]: padding
202  // -- a1 and sp[2*kPointerSize]: constructor function
203  // -- sp[3*kPointerSize]: number of arguments (tagged)
204  // -- sp[4*kPointerSize]: context
205  // -----------------------------------
206 
207  __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
208  __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
209  __ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
210  __ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
211 
212  // If not derived class constructor: Allocate the new receiver object.
213  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
214  t2, t3);
215  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
216  RelocInfo::CODE_TARGET);
217  __ Branch(&post_instantiation_deopt_entry);
218 
219  // Else: use TheHoleValue as receiver for constructor call
220  __ bind(&not_create_implicit_receiver);
221  __ LoadRoot(v0, RootIndex::kTheHoleValue);
222 
223  // ----------- S t a t e -------------
224  // -- v0: receiver
225  // -- Slot 4 / sp[0*kPointerSize]: new target
226  // -- Slot 3 / sp[1*kPointerSize]: padding
227  // -- Slot 2 / sp[2*kPointerSize]: constructor function
228  // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
229  // -- Slot 0 / sp[4*kPointerSize]: context
230  // -----------------------------------
231  // Deoptimizer enters here.
232  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
233  masm->pc_offset());
234  __ bind(&post_instantiation_deopt_entry);
235 
236  // Restore new target.
237  __ Pop(a3);
238  // Push the allocated receiver to the stack. We need two copies
239  // because we may have to return the original one and the calling
240  // conventions dictate that the called function pops the receiver.
241  __ Push(v0, v0);
242 
243  // ----------- S t a t e -------------
 244  // -- a3: new target
245  // -- sp[0*kPointerSize]: implicit receiver
246  // -- sp[1*kPointerSize]: implicit receiver
247  // -- sp[2*kPointerSize]: padding
248  // -- sp[3*kPointerSize]: constructor function
249  // -- sp[4*kPointerSize]: number of arguments (tagged)
250  // -- sp[5*kPointerSize]: context
251  // -----------------------------------
252 
253  // Restore constructor function and argument count.
254  __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
255  __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
256  __ SmiUntag(a0);
257 
258  // Set up pointer to last argument.
259  __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
260 
261  Label enough_stack_space, stack_overflow;
262  Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
263  __ Branch(&enough_stack_space);
264 
265  __ bind(&stack_overflow);
266  // Restore the context from the frame.
267  __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
268  __ CallRuntime(Runtime::kThrowStackOverflow);
269  // Unreachable code.
270  __ break_(0xCC);
271 
272  __ bind(&enough_stack_space);
273 
274  // Copy arguments and receiver to the expression stack.
275  Label loop, entry;
276  __ mov(t3, a0);
277  // ----------- S t a t e -------------
278  // -- a0: number of arguments (untagged)
279  // -- a3: new target
280  // -- t2: pointer to last argument
281  // -- t3: counter
282  // -- sp[0*kPointerSize]: implicit receiver
283  // -- sp[1*kPointerSize]: implicit receiver
284  // -- sp[2*kPointerSize]: padding
285  // -- a1 and sp[3*kPointerSize]: constructor function
286  // -- sp[4*kPointerSize]: number of arguments (tagged)
287  // -- sp[5*kPointerSize]: context
288  // -----------------------------------
289  __ jmp(&entry);
290  __ bind(&loop);
291  __ Lsa(t0, t2, t3, kPointerSizeLog2);
292  __ lw(t1, MemOperand(t0));
293  __ push(t1);
294  __ bind(&entry);
295  __ Addu(t3, t3, Operand(-1));
296  __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
297 
298  // Call the function.
299  ParameterCount actual(a0);
300  __ InvokeFunction(a1, a3, actual, CALL_FUNCTION);
301 
302  // ----------- S t a t e -------------
303  // -- v0: constructor result
304  // -- sp[0*kPointerSize]: implicit receiver
305  // -- sp[1*kPointerSize]: padding
306  // -- sp[2*kPointerSize]: constructor function
307  // -- sp[3*kPointerSize]: number of arguments
308  // -- sp[4*kPointerSize]: context
309  // -----------------------------------
310 
311  // Store offset of return address for deoptimizer.
312  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
313  masm->pc_offset());
314 
315  // Restore the context from the frame.
316  __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
317 
318  // If the result is an object (in the ECMA sense), we should get rid
319  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
320  // on page 74.
321  Label use_receiver, do_throw, leave_frame;
322 
323  // If the result is undefined, we jump out to using the implicit receiver.
324  __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver);
325 
326  // Otherwise we do a smi check and fall through to check if the return value
327  // is a valid receiver.
328 
329  // If the result is a smi, it is *not* an object in the ECMA sense.
330  __ JumpIfSmi(v0, &use_receiver);
331 
332  // If the type of the result (stored in its map) is less than
333  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
334  __ GetObjectType(v0, t2, t2);
335  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
336  __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
337  __ Branch(&use_receiver);
338 
339  __ bind(&do_throw);
340  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
341 
342  // Throw away the result of the constructor invocation and use the
343  // on-stack receiver as the result.
344  __ bind(&use_receiver);
345  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
346  __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
347 
348  __ bind(&leave_frame);
349  // Restore smi-tagged arguments count from the frame.
350  __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
351  // Leave construct frame.
352  }
353  // Remove caller arguments from the stack and return.
354  __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
355  __ Addu(sp, sp, kPointerSize);
356  __ Ret();
357 }
358 
359 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
360  Generate_JSBuiltinsConstructStubHelper(masm);
361 }
362 
363 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
364  FrameScope scope(masm, StackFrame::INTERNAL);
365  __ Push(a1);
366  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
367 }
368 
 369 // Clobbers a2 and t3; preserves all other registers.
370 static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
371  // Check the stack for overflow. We are not trying to catch
372  // interruptions (e.g. debug break and preemption) here, so the "real stack
373  // limit" is checked.
374  Label okay;
375  __ LoadRoot(a2, RootIndex::kRealStackLimit);
 376  // Make a2 the space we have left. The stack might already have overflowed
 377  // here, which will cause a2 to become negative.
378  __ Subu(a2, sp, a2);
379  // Check if the arguments will overflow the stack.
380  __ sll(t3, argc, kPointerSizeLog2);
381  // Signed comparison.
382  __ Branch(&okay, gt, a2, Operand(t3));
383 
384  // Out of stack space.
385  __ CallRuntime(Runtime::kThrowStackOverflow);
386 
387  __ bind(&okay);
388 }
389 
390 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
391  bool is_construct) {
392  // ----------- S t a t e -------------
393  // -- a0: new.target
394  // -- a1: function
395  // -- a2: receiver_pointer
396  // -- a3: argc
397  // -- s0: argv
398  // -----------------------------------
399 
400  // Enter an internal frame.
401  {
402  FrameScope scope(masm, StackFrame::INTERNAL);
403 
 404  // Set up the context (we need to use the caller context from the isolate).
405  ExternalReference context_address = ExternalReference::Create(
406  IsolateAddressId::kContextAddress, masm->isolate());
407  __ li(cp, context_address);
408  __ lw(cp, MemOperand(cp));
409 
410  // Push the function and the receiver onto the stack.
411  __ Push(a1, a2);
412 
413  // Check if we have enough stack space to push all arguments.
414  // Clobbers a2.
415  Generate_CheckStackOverflow(masm, a3);
416 
417  // Remember new.target.
418  __ mov(t1, a0);
419 
420  // Copy arguments to the stack in a loop.
421  // a3: argc
422  // s0: argv, i.e. points to first arg
423  Label loop, entry;
424  __ Lsa(t2, s0, a3, kPointerSizeLog2);
425  __ b(&entry);
426  __ nop(); // Branch delay slot nop.
427  // t2 points past last arg.
428  __ bind(&loop);
429  __ lw(t0, MemOperand(s0)); // Read next parameter.
430  __ addiu(s0, s0, kPointerSize);
431  __ lw(t0, MemOperand(t0)); // Dereference handle.
432  __ push(t0); // Push parameter.
433  __ bind(&entry);
434  __ Branch(&loop, ne, s0, Operand(t2));
435 
 436  // Set up new.target and argc.
437  __ mov(a0, a3);
438  __ mov(a3, t1);
439 
440  // Initialize all JavaScript callee-saved registers, since they will be seen
441  // by the garbage collector as part of handlers.
442  __ LoadRoot(t0, RootIndex::kUndefinedValue);
443  __ mov(s1, t0);
444  __ mov(s2, t0);
445  __ mov(s3, t0);
446  __ mov(s4, t0);
447  __ mov(s5, t0);
448  // s6 holds the root address. Do not clobber.
449  // s7 is cp. Do not init.
450 
451  // Invoke the code.
452  Handle<Code> builtin = is_construct
453  ? BUILTIN_CODE(masm->isolate(), Construct)
454  : masm->isolate()->builtins()->Call();
455  __ Call(builtin, RelocInfo::CODE_TARGET);
456 
457  // Leave internal frame.
458  }
459 
460  __ Jump(ra);
461 }
462 
463 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
464  Generate_JSEntryTrampolineHelper(masm, false);
465 }
466 
467 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
468  Generate_JSEntryTrampolineHelper(masm, true);
469 }
470 
471 static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
472  Register sfi_data,
473  Register scratch1) {
474  Label done;
475 
476  __ GetObjectType(sfi_data, scratch1, scratch1);
477  __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
478  __ lw(sfi_data,
479  FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
480 
481  __ bind(&done);
482 }
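Note: this helper normalizes the SharedFunctionInfo's function_data: when the function carries an InterpreterData wrapper (used for per-function profiling trampolines), the wrapped BytecodeArray is loaded; otherwise the value is left untouched. A rough standalone sketch of that unwrapping, using hypothetical C++ types rather than V8's real object model:

  // Sketch: the unwrapping performed by GetSharedFunctionInfoBytecode,
  // written against hypothetical types, not real V8 classes.
  struct BytecodeArray {};
  struct InterpreterData { BytecodeArray* bytecode_array; };

  struct FunctionData {
    bool is_interpreter_data;
    union {
      BytecodeArray* bytecode_array;
      InterpreterData* interpreter_data;
    };
  };

  BytecodeArray* ResolveBytecode(const FunctionData& data) {
    // If the SFI holds an InterpreterData wrapper, load its bytecode array;
    // otherwise the data already is the bytecode array.
    return data.is_interpreter_data ? data.interpreter_data->bytecode_array
                                    : data.bytecode_array;
  }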
483 
484 // static
485 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
486  // ----------- S t a t e -------------
487  // -- v0 : the value to pass to the generator
488  // -- a1 : the JSGeneratorObject to resume
489  // -- ra : return address
490  // -----------------------------------
491 
492  __ AssertGeneratorObject(a1);
493 
494  // Store input value into generator object.
495  __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
496  __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
497  kRAHasNotBeenSaved, kDontSaveFPRegs);
498 
499  // Load suspended function and context.
500  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
501  __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
502 
503  // Flood function if we are stepping.
504  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
505  Label stepping_prepared;
506  ExternalReference debug_hook =
507  ExternalReference::debug_hook_on_function_call_address(masm->isolate());
508  __ li(t1, debug_hook);
509  __ lb(t1, MemOperand(t1));
510  __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg));
511 
512  // Flood function if we need to continue stepping in the suspended generator.
513  ExternalReference debug_suspended_generator =
514  ExternalReference::debug_suspended_generator_address(masm->isolate());
515  __ li(t1, debug_suspended_generator);
516  __ lw(t1, MemOperand(t1));
517  __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
518  __ bind(&stepping_prepared);
519 
520  // Check the stack for overflow. We are not trying to catch interruptions
 521  // (e.g. debug break and preemption) here, so check the "real stack limit".
522  Label stack_overflow;
523  __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
524  __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
525 
526  // Push receiver.
527  __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
528  __ Push(t1);
529 
530  // ----------- S t a t e -------------
531  // -- a1 : the JSGeneratorObject to resume
532  // -- t0 : generator function
533  // -- cp : generator context
534  // -- ra : return address
535  // -- sp[0] : generator receiver
536  // -----------------------------------
537 
538  // Copy the function arguments from the generator object's register file.
539 
540  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
541  __ lhu(a3,
542  FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
543  __ lw(t1,
544  FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
545  {
546  Label done_loop, loop;
547  __ Move(t2, zero_reg);
548  __ bind(&loop);
549  __ Subu(a3, a3, Operand(1));
550  __ Branch(&done_loop, lt, a3, Operand(zero_reg));
551  __ Lsa(kScratchReg, t1, t2, kPointerSizeLog2);
552  __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
553  __ Push(kScratchReg);
554  __ Addu(t2, t2, Operand(1));
555  __ Branch(&loop);
556  __ bind(&done_loop);
557  }
558 
559  // Underlying function needs to have bytecode available.
560  if (FLAG_debug_code) {
561  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
562  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
563  GetSharedFunctionInfoBytecode(masm, a3, a0);
564  __ GetObjectType(a3, a3, a3);
565  __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
566  Operand(BYTECODE_ARRAY_TYPE));
567  }
568 
569  // Resume (Ignition/TurboFan) generator object.
570  {
571  __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
572  __ lhu(a0, FieldMemOperand(
573  a0, SharedFunctionInfo::kFormalParameterCountOffset));
574  // We abuse new.target both to indicate that this is a resume call and to
575  // pass in the generator object. In ordinary calls, new.target is always
576  // undefined because generator functions are non-constructable.
577  __ Move(a3, a1);
578  __ Move(a1, t0);
579  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
580  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
581  __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
582  __ Jump(a2);
583  }
584 
585  __ bind(&prepare_step_in_if_stepping);
586  {
587  FrameScope scope(masm, StackFrame::INTERNAL);
588  __ Push(a1, t0);
589  // Push hole as receiver since we do not use it for stepping.
590  __ PushRoot(RootIndex::kTheHoleValue);
591  __ CallRuntime(Runtime::kDebugOnFunctionCall);
592  __ Pop(a1);
593  }
594  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
595  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
596 
597  __ bind(&prepare_step_in_suspended_generator);
598  {
599  FrameScope scope(masm, StackFrame::INTERNAL);
600  __ Push(a1);
601  __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
602  __ Pop(a1);
603  }
604  __ Branch(USE_DELAY_SLOT, &stepping_prepared);
605  __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
606 
607  __ bind(&stack_overflow);
608  {
609  FrameScope scope(masm, StackFrame::INTERNAL);
610  __ CallRuntime(Runtime::kThrowStackOverflow);
611  __ break_(0xCC); // This should be unreachable.
612  }
613 }
614 
615 static void ReplaceClosureCodeWithOptimizedCode(
616  MacroAssembler* masm, Register optimized_code, Register closure,
617  Register scratch1, Register scratch2, Register scratch3) {
618  // Store code entry in the closure.
619  __ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
620  __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
621  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
622  kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
623  OMIT_SMI_CHECK);
624 }
625 
626 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
627  Register args_count = scratch;
628 
629  // Get the arguments + receiver count.
630  __ lw(args_count,
631  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
632  __ lw(args_count,
633  FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
634 
635  // Leave the frame (also dropping the register file).
636  __ LeaveFrame(StackFrame::INTERPRETED);
637 
638  // Drop receiver + arguments.
639  __ Addu(sp, sp, args_count);
640 }
641 
642 // Tail-call |function_id| if |smi_entry| == |marker|
643 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
644  Register smi_entry,
645  OptimizationMarker marker,
646  Runtime::FunctionId function_id) {
647  Label no_match;
648  __ Branch(&no_match, ne, smi_entry, Operand(Smi::FromEnum(marker)));
649  GenerateTailCallToReturnedCode(masm, function_id);
650  __ bind(&no_match);
651 }
652 
653 static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
654  Register feedback_vector,
655  Register scratch1, Register scratch2,
656  Register scratch3) {
657  // ----------- S t a t e -------------
658  // -- a0 : argument count (preserved for callee if needed, and caller)
659  // -- a3 : new target (preserved for callee if needed, and caller)
660  // -- a1 : target function (preserved for callee if needed, and caller)
661  // -- feedback vector (preserved for caller if needed)
662  // -----------------------------------
663  DCHECK(
664  !AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
665 
666  Label optimized_code_slot_is_weak_ref, fallthrough;
667 
668  Register closure = a1;
669  Register optimized_code_entry = scratch1;
670 
671  __ lw(optimized_code_entry,
672  FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
673 
674  // Check if the code entry is a Smi. If yes, we interpret it as an
 675  // optimization marker. Otherwise, interpret it as a weak reference to a code
676  // object.
677  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
678 
679  {
680  // Optimized code slot is a Smi optimization marker.
681 
682  // Fall through if no optimization trigger.
683  __ Branch(&fallthrough, eq, optimized_code_entry,
684  Operand(Smi::FromEnum(OptimizationMarker::kNone)));
685 
686  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
687  OptimizationMarker::kLogFirstExecution,
688  Runtime::kFunctionFirstExecution);
689  TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
690  OptimizationMarker::kCompileOptimized,
691  Runtime::kCompileOptimized_NotConcurrent);
692  TailCallRuntimeIfMarkerEquals(
693  masm, optimized_code_entry,
694  OptimizationMarker::kCompileOptimizedConcurrent,
695  Runtime::kCompileOptimized_Concurrent);
696 
697  {
698  // Otherwise, the marker is InOptimizationQueue, so fall through hoping
699  // that an interrupt will eventually update the slot with optimized code.
700  if (FLAG_debug_code) {
701  __ Assert(
702  eq, AbortReason::kExpectedOptimizationSentinel,
703  optimized_code_entry,
704  Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
705  }
706  __ jmp(&fallthrough);
707  }
708  }
709 
710  {
711  // Optimized code slot is a weak reference.
712  __ bind(&optimized_code_slot_is_weak_ref);
713 
714  __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
715 
716  // Check if the optimized code is marked for deopt. If it is, call the
717  // runtime to clear it.
718  Label found_deoptimized_code;
719  __ lw(scratch2, FieldMemOperand(optimized_code_entry,
720  Code::kCodeDataContainerOffset));
721  __ lw(scratch2, FieldMemOperand(
722  scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
723  __ And(scratch2, scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
724  __ Branch(&found_deoptimized_code, ne, scratch2, Operand(zero_reg));
725 
726  // Optimized code is good, get it into the closure and link the closure into
727  // the optimized functions list, then tail call the optimized code.
728  // The feedback vector is no longer used, so re-use it as a scratch
729  // register.
730  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
731  scratch2, scratch3, feedback_vector);
732  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
733  __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
734  __ Jump(a2);
735 
736  // Optimized code slot contains deoptimized code, evict it and re-enter the
 737  // closure's code.
738  __ bind(&found_deoptimized_code);
739  GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
740  }
741 
742  // Fall-through if the optimized code cell is clear and there is no
743  // optimization marker.
744  __ bind(&fallthrough);
745 }
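Note: the routine above is a small decision tree over the feedback vector's optimized-code slot: a Smi is treated as an optimization marker, anything else as a weak reference to optimized code that may have been cleared or marked for deoptimization. A hedged standalone sketch of that decision logic, with a hypothetical enum in place of V8's real marker handling:

  // Sketch of the decision tree in MaybeTailCallOptimizedCodeSlot.
  enum class Marker { kNone, kLogFirstExecution, kCompileOptimized,
                      kCompileOptimizedConcurrent, kInOptimizationQueue };

  enum class Action { kRunUnoptimized, kCallRuntime, kTailCallOptimizedCode,
                      kEvictAndReenter };

  Action Decide(bool slot_is_smi, Marker marker, bool code_cleared,
                bool marked_for_deopt) {
    if (slot_is_smi) {  // slot holds an optimization marker
      if (marker == Marker::kNone || marker == Marker::kInOptimizationQueue)
        return Action::kRunUnoptimized;  // fall through to the interpreter
      return Action::kCallRuntime;       // log / compile via the runtime
    }
    if (code_cleared) return Action::kRunUnoptimized;  // weak ref was cleared
    if (marked_for_deopt) return Action::kEvictAndReenter;
    return Action::kTailCallOptimizedCode;
  }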
746 
747 // Advance the current bytecode offset. This simulates what all bytecode
748 // handlers do upon completion of the underlying operation. Will bail out to a
749 // label if the bytecode (without prefix) is a return bytecode.
750 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
751  Register bytecode_array,
752  Register bytecode_offset,
753  Register bytecode, Register scratch1,
754  Register scratch2, Label* if_return) {
755  Register bytecode_size_table = scratch1;
756  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
757  bytecode));
758 
759  __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
760 
761  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
762  Label process_bytecode, extra_wide;
763  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
764  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
765  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
766  STATIC_ASSERT(3 ==
767  static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
768  __ Branch(&process_bytecode, hi, bytecode, Operand(3));
769  __ And(scratch2, bytecode, Operand(1));
770  __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
771 
772  // Load the next bytecode and update table to the wide scaled table.
773  __ Addu(bytecode_offset, bytecode_offset, Operand(1));
774  __ Addu(scratch2, bytecode_array, bytecode_offset);
775  __ lbu(bytecode, MemOperand(scratch2));
776  __ Addu(bytecode_size_table, bytecode_size_table,
777  Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
778  __ jmp(&process_bytecode);
779 
780  __ bind(&extra_wide);
781  // Load the next bytecode and update table to the extra wide scaled table.
782  __ Addu(bytecode_offset, bytecode_offset, Operand(1));
783  __ Addu(scratch2, bytecode_array, bytecode_offset);
784  __ lbu(bytecode, MemOperand(scratch2));
785  __ Addu(bytecode_size_table, bytecode_size_table,
786  Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
787 
788  __ bind(&process_bytecode);
789 
 790 // Bail out to the return label if this is a return bytecode.
791 #define JUMP_IF_EQUAL(NAME) \
792  __ Branch(if_return, eq, bytecode, \
793  Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
794  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
795 #undef JUMP_IF_EQUAL
796 
797  // Otherwise, load the size of the current bytecode and advance the offset.
798  __ Lsa(scratch2, bytecode_size_table, bytecode, 2);
799  __ lw(scratch2, MemOperand(scratch2));
800  __ Addu(bytecode_offset, bytecode_offset, scratch2);
801 }
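Note: in other words, a Wide or ExtraWide prefix (one of the four smallest bytecode values) makes the routine step over the prefix, reload the operand bytecode, and select the wide or extra-wide block of the size table before adding that entry to the offset. A compact standalone sketch, assuming the size table is laid out as three consecutive blocks of kBytecodeCount int entries (normal, wide, extra-wide):

  // Sketch only: offset advance with Wide/ExtraWide prefix handling.
  #include <cstdint>

  int AdvanceOffset(const uint8_t* bytecodes, int offset,
                    const int32_t* size_table, int bytecode_count) {
    int bytecode = bytecodes[offset];
    const int32_t* table = size_table;
    if (bytecode <= 3) {                 // Wide/ExtraWide/DebugBreak(Extra)Wide
      bool extra_wide = (bytecode & 1) != 0;
      ++offset;                          // step over the prefix
      bytecode = bytecodes[offset];      // reload the real bytecode
      table += (extra_wide ? 2 : 1) * bytecode_count;  // pick scaled table
    }
    // A return bytecode would bail out here; otherwise add the bytecode size.
    return offset + table[bytecode];
  }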
802 
803 // Generate code for entering a JS function with the interpreter.
804 // On entry to the function the receiver and arguments have been pushed on the
805 // stack left to right. The actual argument count matches the formal parameter
806 // count expected by the function.
807 //
808 // The live registers are:
809 // o a1: the JS function object being called.
810 // o a3: the incoming new target or generator object
811 // o cp: our context
812 // o fp: the caller's frame pointer
813 // o sp: stack pointer
814 // o ra: return address
815 //
816 // The function builds an interpreter frame. See InterpreterFrameConstants in
817 // frames.h for its layout.
818 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
819  Register closure = a1;
820  Register feedback_vector = a2;
821 
822  // Load the feedback vector from the closure.
823  __ lw(feedback_vector,
824  FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
825  __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
826  // Read off the optimized code slot in the feedback vector, and if there
827  // is optimized code or an optimization marker, call that instead.
828  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
829 
830  // Open a frame scope to indicate that there is a frame on the stack. The
831  // MANUAL indicates that the scope shouldn't actually generate code to set up
832  // the frame (that is done below).
833  FrameScope frame_scope(masm, StackFrame::MANUAL);
834  __ PushStandardFrame(closure);
835 
836  // Get the bytecode array from the function object and load it into
837  // kInterpreterBytecodeArrayRegister.
838  __ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
839  __ lw(kInterpreterBytecodeArrayRegister,
840  FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
841  GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
842 
843  // Increment invocation count for the function.
844  __ lw(t0, FieldMemOperand(feedback_vector,
845  FeedbackVector::kInvocationCountOffset));
846  __ Addu(t0, t0, Operand(1));
847  __ sw(t0, FieldMemOperand(feedback_vector,
848  FeedbackVector::kInvocationCountOffset));
849 
850  // Check function data field is actually a BytecodeArray object.
851  if (FLAG_debug_code) {
852  __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
853  __ Assert(ne,
854  AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
855  t0, Operand(zero_reg));
856  __ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
857  __ Assert(eq,
858  AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
859  t0, Operand(BYTECODE_ARRAY_TYPE));
860  }
861 
862  // Reset code age.
863  DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
864  __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
865  BytecodeArray::kBytecodeAgeOffset));
866 
867  // Load initial bytecode offset.
868  __ li(kInterpreterBytecodeOffsetRegister,
869  Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
870 
871  // Push bytecode array and Smi tagged bytecode array offset.
872  __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
873  __ Push(kInterpreterBytecodeArrayRegister, t0);
874 
875  // Allocate the local and temporary register file on the stack.
876  {
877  // Load frame size from the BytecodeArray object.
878  __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
879  BytecodeArray::kFrameSizeOffset));
880 
881  // Do a stack check to ensure we don't go over the limit.
882  Label ok;
883  __ Subu(t1, sp, Operand(t0));
884  __ LoadRoot(a2, RootIndex::kRealStackLimit);
885  __ Branch(&ok, hs, t1, Operand(a2));
886  __ CallRuntime(Runtime::kThrowStackOverflow);
887  __ bind(&ok);
888 
889  // If ok, push undefined as the initial value for all register file entries.
890  Label loop_header;
891  Label loop_check;
892  __ LoadRoot(t1, RootIndex::kUndefinedValue);
893  __ Branch(&loop_check);
894  __ bind(&loop_header);
895  // TODO(rmcilroy): Consider doing more than one push per loop iteration.
896  __ push(t1);
897  // Continue loop if not done.
898  __ bind(&loop_check);
899  __ Subu(t0, t0, Operand(kPointerSize));
900  __ Branch(&loop_header, ge, t0, Operand(zero_reg));
901  }
902 
903  // If the bytecode array has a valid incoming new target or generator object
 904  // register, initialize it with the incoming value, which was passed in a3.
905  Label no_incoming_new_target_or_generator_register;
906  __ lw(t1, FieldMemOperand(
907  kInterpreterBytecodeArrayRegister,
908  BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
909  __ Branch(&no_incoming_new_target_or_generator_register, eq, t1,
910  Operand(zero_reg));
911  __ Lsa(t1, fp, t1, kPointerSizeLog2);
912  __ sw(a3, MemOperand(t1));
913  __ bind(&no_incoming_new_target_or_generator_register);
914 
915  // Load accumulator with undefined.
916  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
917 
918  // Load the dispatch table into a register and dispatch to the bytecode
919  // handler at the current bytecode offset.
920  Label do_dispatch;
921  __ bind(&do_dispatch);
922  __ li(kInterpreterDispatchTableRegister,
923  ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
924  __ Addu(a0, kInterpreterBytecodeArrayRegister,
925  kInterpreterBytecodeOffsetRegister);
926  __ lbu(t3, MemOperand(a0));
927  __ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
928  __ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
929  __ Call(kJavaScriptCallCodeStartRegister);
930  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
931 
932  // Any returns to the entry trampoline are either due to the return bytecode
933  // or the interpreter tail calling a builtin and then a dispatch.
934 
935  // Get bytecode array and bytecode offset from the stack frame.
936  __ lw(kInterpreterBytecodeArrayRegister,
937  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
938  __ lw(kInterpreterBytecodeOffsetRegister,
939  MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
940  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
941  // Either return, or advance to the next bytecode and dispatch.
942  Label do_return;
943  __ Addu(a1, kInterpreterBytecodeArrayRegister,
944  kInterpreterBytecodeOffsetRegister);
945  __ lbu(a1, MemOperand(a1));
946  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
947  kInterpreterBytecodeOffsetRegister, a1, a2, a3,
948  &do_return);
949  __ jmp(&do_dispatch);
950 
951  __ bind(&do_return);
952  // The return value is in v0.
953  LeaveInterpreterFrame(masm, t0);
954  __ Jump(ra);
955 }
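Note: the dispatch step itself is just an indexed load and an indirect call: the byte at the current offset selects a handler from the dispatch table. A minimal standalone sketch, with a hypothetical handler signature standing in for V8's real bytecode handlers:

  // Sketch: one interpreter dispatch, as performed in the trampoline above.
  #include <cstdint>
  using Handler = void (*)();

  void Dispatch(const uint8_t* bytecode_array, int offset,
                Handler const* dispatch_table) {
    uint8_t bytecode = bytecode_array[offset];   // lbu t3, [array + offset]
    Handler handler = dispatch_table[bytecode];  // Lsa + lw from the table
    handler();                                   // Call(kJavaScriptCallCodeStartRegister)
  }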
956 
957 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
958  Register num_args, Register index,
959  Register scratch, Register scratch2) {
960  // Find the address of the last argument.
961  __ mov(scratch2, num_args);
962  __ sll(scratch2, scratch2, kPointerSizeLog2);
963  __ Subu(scratch2, index, Operand(scratch2));
964 
965  // Push the arguments.
966  Label loop_header, loop_check;
967  __ Branch(&loop_check);
968  __ bind(&loop_header);
969  __ lw(scratch, MemOperand(index));
970  __ Addu(index, index, Operand(-kPointerSize));
971  __ push(scratch);
972  __ bind(&loop_check);
973  __ Branch(&loop_header, gt, index, Operand(scratch2));
974 }
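Note: conceptually, the loop copies num_args values from the interpreter's argument area onto the stack, starting at the address in index and walking toward lower addresses. A small standalone sketch of the same copy under that assumption:

  // Sketch: push num_args values starting at first_arg, walking downward.
  #include <cstdint>
  #include <vector>

  void PushArgs(std::vector<uintptr_t>& js_stack, const uintptr_t* first_arg,
                uint32_t num_args) {
    const uintptr_t* end = first_arg - num_args;   // scratch2 in the assembly
    for (const uintptr_t* p = first_arg; p > end; --p)
      js_stack.push_back(*p);                      // lw + push, index -= 4
  }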
975 
976 // static
977 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
978  MacroAssembler* masm, ConvertReceiverMode receiver_mode,
979  InterpreterPushArgsMode mode) {
980  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
981  // ----------- S t a t e -------------
982  // -- a0 : the number of arguments (not including the receiver)
983  // -- a2 : the address of the first argument to be pushed. Subsequent
984  // arguments should be consecutive above this, in the same order as
985  // they are to be pushed onto the stack.
986  // -- a1 : the target to call (can be any Object).
987  // -----------------------------------
988  Label stack_overflow;
989 
990  __ Addu(t0, a0, Operand(1)); // Add one for receiver.
991 
992  Generate_StackOverflowCheck(masm, t0, t4, t1, &stack_overflow);
993 
994  // Push "undefined" as the receiver arg if we need to.
995  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
996  __ PushRoot(RootIndex::kUndefinedValue);
997  __ mov(t0, a0); // No receiver.
998  }
999 
1000  // This function modifies a2, t4 and t1.
1001  Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
1002 
1003  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1004  __ Pop(a2); // Pass the spread in a register
1005  __ Subu(a0, a0, Operand(1)); // Subtract one for spread
1006  }
1007 
1008  // Call the target.
1009  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1010  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1011  RelocInfo::CODE_TARGET);
1012  } else {
1013  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1014  RelocInfo::CODE_TARGET);
1015  }
1016 
1017  __ bind(&stack_overflow);
1018  {
1019  __ TailCallRuntime(Runtime::kThrowStackOverflow);
1020  // Unreachable code.
1021  __ break_(0xCC);
1022  }
1023 }
1024 
1025 // static
1026 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1027  MacroAssembler* masm, InterpreterPushArgsMode mode) {
1028  // ----------- S t a t e -------------
1029  // -- a0 : argument count (not including receiver)
1030  // -- a3 : new target
1031  // -- a1 : constructor to call
1032  // -- a2 : allocation site feedback if available, undefined otherwise.
1033  // -- t4 : address of the first argument
1034  // -----------------------------------
1035  Label stack_overflow;
1036 
1037  // Push a slot for the receiver.
1038  __ push(zero_reg);
1039 
1040  Generate_StackOverflowCheck(masm, a0, t1, t0, &stack_overflow);
1041 
 1042  // This function modifies t4, t1 and t0.
1043  Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
1044 
1045  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1046  __ Pop(a2); // Pass the spread in a register
1047  __ Subu(a0, a0, Operand(1)); // Subtract one for spread
1048  } else {
1049  __ AssertUndefinedOrAllocationSite(a2, t0);
1050  }
1051 
1052  if (mode == InterpreterPushArgsMode::kArrayFunction) {
1053  __ AssertFunction(a1);
1054 
1055  // Tail call to the array construct stub (still in the caller
1056  // context at this point).
1057  __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1058  RelocInfo::CODE_TARGET);
1059  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1060  // Call the constructor with a0, a1, and a3 unmodified.
1061  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1062  RelocInfo::CODE_TARGET);
1063  } else {
1064  DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1065  // Call the constructor with a0, a1, and a3 unmodified.
1066  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1067  }
1068 
1069  __ bind(&stack_overflow);
1070  {
1071  __ TailCallRuntime(Runtime::kThrowStackOverflow);
1072  // Unreachable code.
1073  __ break_(0xCC);
1074  }
1075 }
1076 
1077 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1078  // Set the return address to the correct point in the interpreter entry
1079  // trampoline.
1080  Label builtin_trampoline, trampoline_loaded;
1081  Smi interpreter_entry_return_pc_offset(
1082  masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1083  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1084 
1085  // If the SFI function_data is an InterpreterData, the function will have a
1086  // custom copy of the interpreter entry trampoline for profiling. If so,
1087  // get the custom trampoline, otherwise grab the entry address of the global
1088  // trampoline.
1089  __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1090  __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1091  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1092  __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1093  kInterpreterDispatchTableRegister);
1094  __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1095  Operand(INTERPRETER_DATA_TYPE));
1096 
1097  __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1098  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1099  __ Branch(&trampoline_loaded);
1100 
1101  __ bind(&builtin_trampoline);
1102  __ li(t0, ExternalReference::
1103  address_of_interpreter_entry_trampoline_instruction_start(
1104  masm->isolate()));
1105  __ lw(t0, MemOperand(t0));
1106 
1107  __ bind(&trampoline_loaded);
1108  __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value()));
1109 
1110  // Initialize the dispatch table register.
1111  __ li(kInterpreterDispatchTableRegister,
1112  ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1113 
1114  // Get the bytecode array pointer from the frame.
1115  __ lw(kInterpreterBytecodeArrayRegister,
1116  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1117 
1118  if (FLAG_debug_code) {
1119  // Check function data field is actually a BytecodeArray object.
1120  __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1121  __ Assert(ne,
1122  AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1123  kScratchReg, Operand(zero_reg));
1124  __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1125  __ Assert(eq,
1126  AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1127  a1, Operand(BYTECODE_ARRAY_TYPE));
1128  }
1129 
1130  // Get the target bytecode offset from the frame.
1131  __ lw(kInterpreterBytecodeOffsetRegister,
1132  MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1133  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1134 
1135  // Dispatch to the target bytecode.
1136  __ Addu(a1, kInterpreterBytecodeArrayRegister,
1137  kInterpreterBytecodeOffsetRegister);
1138  __ lbu(t3, MemOperand(a1));
1139  __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
1140  __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
1141  __ Jump(kJavaScriptCallCodeStartRegister);
1142 }
1143 
1144 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1145  // Advance the current bytecode offset stored within the given interpreter
1146  // stack frame. This simulates what all bytecode handlers do upon completion
1147  // of the underlying operation.
1148  __ lw(kInterpreterBytecodeArrayRegister,
1149  MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1150  __ lw(kInterpreterBytecodeOffsetRegister,
1151  MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1152  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1153 
1154  // Load the current bytecode.
1155  __ Addu(a1, kInterpreterBytecodeArrayRegister,
1156  kInterpreterBytecodeOffsetRegister);
1157  __ lbu(a1, MemOperand(a1));
1158 
1159  // Advance to the next bytecode.
1160  Label if_return;
1161  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1162  kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1163  &if_return);
1164 
1165  // Convert new bytecode offset to a Smi and save in the stackframe.
1166  __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1167  __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1168 
1169  Generate_InterpreterEnterBytecode(masm);
1170 
1171  // We should never take the if_return path.
1172  __ bind(&if_return);
1173  __ Abort(AbortReason::kInvalidBytecodeAdvance);
1174 }
1175 
1176 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1177  Generate_InterpreterEnterBytecode(masm);
1178 }
1179 
1180 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
1181  // ----------- S t a t e -------------
1182  // -- a0 : argument count (preserved for callee)
1183  // -- a1 : new target (preserved for callee)
1184  // -- a3 : target function (preserved for callee)
1185  // -----------------------------------
1186  Label failed;
1187  {
1188  FrameScope scope(masm, StackFrame::INTERNAL);
1189  // Preserve argument count for later compare.
1190  __ Move(t4, a0);
1191  // Push a copy of the target function and the new target.
1192  // Push function as parameter to the runtime call.
1193  __ SmiTag(a0);
1194  __ Push(a0, a1, a3, a1);
1195 
1196  // Copy arguments from caller (stdlib, foreign, heap).
1197  Label args_done;
1198  for (int j = 0; j < 4; ++j) {
1199  Label over;
1200  if (j < 3) {
1201  __ Branch(&over, ne, t4, Operand(j));
1202  }
1203  for (int i = j - 1; i >= 0; --i) {
1204  __ lw(t4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
1205  i * kPointerSize));
1206  __ push(t4);
1207  }
1208  for (int i = 0; i < 3 - j; ++i) {
1209  __ PushRoot(RootIndex::kUndefinedValue);
1210  }
1211  if (j < 3) {
1212  __ jmp(&args_done);
1213  __ bind(&over);
1214  }
1215  }
1216  __ bind(&args_done);
1217 
1218  // Call runtime, on success unwind frame, and parent frame.
1219  __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
1220  // A smi 0 is returned on failure, an object on success.
1221  __ JumpIfSmi(v0, &failed);
1222 
1223  __ Drop(2);
1224  __ pop(t4);
1225  __ SmiUntag(t4);
1226  scope.GenerateLeaveFrame();
1227 
1228  __ Addu(t4, t4, Operand(1));
1229  __ Lsa(sp, sp, t4, kPointerSizeLog2);
1230  __ Ret();
1231 
1232  __ bind(&failed);
1233  // Restore target function and new target.
1234  __ Pop(a0, a1, a3);
1235  __ SmiUntag(a0);
1236  }
 1237  // On failure, tail call back to regular JS by re-calling the function
 1238  // which has been reset to the compile lazy builtin.
1239  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1240  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
1241  __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
1242  __ Jump(a2);
1243 }
1244 
1245 namespace {
1246 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1247  bool java_script_builtin,
1248  bool with_result) {
1249  const RegisterConfiguration* config(RegisterConfiguration::Default());
1250  int allocatable_register_count = config->num_allocatable_general_registers();
1251  if (with_result) {
1252  // Overwrite the hole inserted by the deoptimizer with the return value from
1253  // the LAZY deopt point.
1254  __ sw(v0,
1255  MemOperand(
1256  sp, config->num_allocatable_general_registers() * kPointerSize +
1257  BuiltinContinuationFrameConstants::kFixedFrameSize));
1258  }
1259  for (int i = allocatable_register_count - 1; i >= 0; --i) {
1260  int code = config->GetAllocatableGeneralCode(i);
1261  __ Pop(Register::from_code(code));
1262  if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1263  __ SmiUntag(Register::from_code(code));
1264  }
1265  }
1266  __ lw(fp, MemOperand(
1267  sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1268  __ Pop(t0);
1269  __ Addu(sp, sp,
1270  Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1271  __ Pop(ra);
1272  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1273  __ Jump(t0);
1274 }
1275 } // namespace
1276 
1277 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1278  Generate_ContinueToBuiltinHelper(masm, false, false);
1279 }
1280 
1281 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1282  MacroAssembler* masm) {
1283  Generate_ContinueToBuiltinHelper(masm, false, true);
1284 }
1285 
1286 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1287  Generate_ContinueToBuiltinHelper(masm, true, false);
1288 }
1289 
1290 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1291  MacroAssembler* masm) {
1292  Generate_ContinueToBuiltinHelper(masm, true, true);
1293 }
1294 
1295 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1296  {
1297  FrameScope scope(masm, StackFrame::INTERNAL);
1298  __ CallRuntime(Runtime::kNotifyDeoptimized);
1299  }
1300 
1301  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
1302  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
1303  __ Ret(USE_DELAY_SLOT);
 1304  // Safe to fill the delay slot: Addu will emit one instruction.
1305  __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove accumulator.
1306 }
1307 
1308 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1309  // Lookup the function in the JavaScript frame.
1310  __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1311  __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
1312 
1313  {
1314  FrameScope scope(masm, StackFrame::INTERNAL);
1315  // Pass function as argument.
1316  __ push(a0);
1317  __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1318  }
1319 
1320  // If the code object is null, just return to the caller.
1321  __ Ret(eq, v0, Operand(Smi::zero()));
1322 
 1323  // Drop the handler frame that may be sitting on top of the actual
 1324  // JavaScript frame. This is the case when OSR is triggered from bytecode.
1325  __ LeaveFrame(StackFrame::STUB);
1326 
1327  // Load deoptimization data from the code object.
1328  // <deopt_data> = <code>[#deoptimization_data_offset]
1329  __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
1330 
1331  // Load the OSR entrypoint offset from the deoptimization data.
1332  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1333  __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1334  DeoptimizationData::kOsrPcOffsetIndex) -
1335  kHeapObjectTag));
1336  __ SmiUntag(a1);
1337 
1338  // Compute the target address = code_obj + header_size + osr_offset
1339  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1340  __ Addu(v0, v0, a1);
1341  __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
1342 
1343  // And "return" to the OSR entry point of the function.
1344  __ Ret();
1345 }
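Note: the computed "return" address is simply the optimized code's instruction start plus the OSR entry offset recorded in its deoptimization data. A standalone sketch of that arithmetic, with placeholder constants for the header size and heap-object tag:

  // Sketch only: assembling the OSR entry address jumped to above.
  #include <cstdint>

  constexpr uintptr_t kHeapObjectTag = 1;    // illustrative value
  constexpr uintptr_t kCodeHeaderSize = 64;  // illustrative value

  uintptr_t OsrEntry(uintptr_t tagged_code, uintptr_t osr_pc_offset) {
    // <entry_addr> = <code_obj> + #header_size + <osr_offset>
    return tagged_code - kHeapObjectTag + kCodeHeaderSize + osr_pc_offset;
  }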
1346 
1347 // static
1348 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1349  // ----------- S t a t e -------------
1350  // -- a0 : argc
1351  // -- sp[0] : argArray
1352  // -- sp[4] : thisArg
1353  // -- sp[8] : receiver
1354  // -----------------------------------
1355 
1356  // 1. Load receiver into a1, argArray into a0 (if present), remove all
1357  // arguments from the stack (including the receiver), and push thisArg (if
1358  // present) instead.
1359  {
1360  Label no_arg;
1361  Register scratch = t0;
1362  __ LoadRoot(a2, RootIndex::kUndefinedValue);
1363  __ mov(a3, a2);
 1364  // Lsa() cannot be used here because the scratch value is used later.
1365  __ sll(scratch, a0, kPointerSizeLog2);
1366  __ Addu(a0, sp, Operand(scratch));
1367  __ lw(a1, MemOperand(a0)); // receiver
1368  __ Subu(a0, a0, Operand(kPointerSize));
1369  __ Branch(&no_arg, lt, a0, Operand(sp));
1370  __ lw(a2, MemOperand(a0)); // thisArg
1371  __ Subu(a0, a0, Operand(kPointerSize));
1372  __ Branch(&no_arg, lt, a0, Operand(sp));
1373  __ lw(a3, MemOperand(a0)); // argArray
1374  __ bind(&no_arg);
1375  __ Addu(sp, sp, Operand(scratch));
1376  __ sw(a2, MemOperand(sp));
1377  __ mov(a2, a3);
1378  }
1379 
1380  // ----------- S t a t e -------------
1381  // -- a2 : argArray
1382  // -- a1 : receiver
1383  // -- sp[0] : thisArg
1384  // -----------------------------------
1385 
1386  // 2. We don't need to check explicitly for callable receiver here,
1387  // since that's the first thing the Call/CallWithArrayLike builtins
1388  // will do.
1389 
1390  // 3. Tail call with no arguments if argArray is null or undefined.
1391  Label no_arguments;
1392  __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments);
1393  __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments);
1394 
1395  // 4a. Apply the receiver to the given argArray.
1396  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1397  RelocInfo::CODE_TARGET);
1398 
1399  // 4b. The argArray is either null or undefined, so we tail call without any
1400  // arguments to the receiver.
1401  __ bind(&no_arguments);
1402  {
1403  __ mov(a0, zero_reg);
1404  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1405  }
1406 }
1407 
1408 // static
1409 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1410  // 1. Make sure we have at least one argument.
1411  // a0: actual number of arguments
1412  {
1413  Label done;
1414  __ Branch(&done, ne, a0, Operand(zero_reg));
1415  __ PushRoot(RootIndex::kUndefinedValue);
1416  __ Addu(a0, a0, Operand(1));
1417  __ bind(&done);
1418  }
1419 
1420  // 2. Get the function to call (passed as receiver) from the stack.
1421  // a0: actual number of arguments
1422  __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
1423  __ lw(a1, MemOperand(kScratchReg));
1424 
1425  // 3. Shift arguments and return address one slot down on the stack
1426  // (overwriting the original receiver). Adjust argument count to make
1427  // the original first argument the new receiver.
1428  // a0: actual number of arguments
1429  // a1: function
1430  {
1431  Label loop;
1432  // Calculate the copy start address (destination). Copy end address is sp.
1433  __ Lsa(a2, sp, a0, kPointerSizeLog2);
1434 
1435  __ bind(&loop);
1436  __ lw(kScratchReg, MemOperand(a2, -kPointerSize));
1437  __ sw(kScratchReg, MemOperand(a2));
1438  __ Subu(a2, a2, Operand(kPointerSize));
1439  __ Branch(&loop, ne, a2, Operand(sp));
1440  // Adjust the actual number of arguments and remove the top element
1441  // (which is a copy of the last argument).
1442  __ Subu(a0, a0, Operand(1));
1443  __ Pop();
1444  }
1445 
1446  // 4. Call the callable.
1447  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1448 }
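Note: step 3 above rotates the stack in place: every slot moves one position toward the receiver slot, so the original first argument becomes the new receiver and the duplicated bottom slot is popped. A small standalone sketch of that shift, treating the stack as an array of pointer-sized slots with the callable at sp[argc]:

  // Sketch: the in-place argument shift done by Function.prototype.call.
  #include <cstddef>

  void ShiftArgumentsDown(void** sp, size_t argc) {
    // sp[argc] holds the callable (old receiver); each slot receives the
    // value from the slot below it, mirroring the lw/sw loop above.
    for (size_t i = argc; i > 0; --i) sp[i] = sp[i - 1];
    // The caller then drops the duplicated sp[0] slot and decrements argc.
  }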
1449 
1450 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1451  // ----------- S t a t e -------------
1452  // -- a0 : argc
1453  // -- sp[0] : argumentsList
1454  // -- sp[4] : thisArgument
1455  // -- sp[8] : target
1456  // -- sp[12] : receiver
1457  // -----------------------------------
1458 
1459  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1460  // remove all arguments from the stack (including the receiver), and push
1461  // thisArgument (if present) instead.
1462  {
1463  Label no_arg;
1464  Register scratch = t0;
1465  __ LoadRoot(a1, RootIndex::kUndefinedValue);
1466  __ mov(a2, a1);
1467  __ mov(a3, a1);
1468  __ sll(scratch, a0, kPointerSizeLog2);
1469  __ mov(a0, scratch);
1470  __ Subu(a0, a0, Operand(kPointerSize));
1471  __ Branch(&no_arg, lt, a0, Operand(zero_reg));
1472  __ Addu(a0, sp, Operand(a0));
1473  __ lw(a1, MemOperand(a0)); // target
1474  __ Subu(a0, a0, Operand(kPointerSize));
1475  __ Branch(&no_arg, lt, a0, Operand(sp));
1476  __ lw(a2, MemOperand(a0)); // thisArgument
1477  __ Subu(a0, a0, Operand(kPointerSize));
1478  __ Branch(&no_arg, lt, a0, Operand(sp));
1479  __ lw(a3, MemOperand(a0)); // argumentsList
1480  __ bind(&no_arg);
1481  __ Addu(sp, sp, Operand(scratch));
1482  __ sw(a2, MemOperand(sp));
1483  __ mov(a2, a3);
1484  }
1485 
1486  // ----------- S t a t e -------------
1487  // -- a2 : argumentsList
1488  // -- a1 : target
1489  // -- sp[0] : thisArgument
1490  // -----------------------------------
1491 
1492  // 2. We don't need to check explicitly for callable target here,
1493  // since that's the first thing the Call/CallWithArrayLike builtins
1494  // will do.
1495 
1496  // 3. Apply the target to the given argumentsList.
1497  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1498  RelocInfo::CODE_TARGET);
1499 }
1500 
1501 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1502  // ----------- S t a t e -------------
1503  // -- a0 : argc
1504  // -- sp[0] : new.target (optional)
1505  // -- sp[4] : argumentsList
1506  // -- sp[8] : target
1507  // -- sp[12] : receiver
1508  // -----------------------------------
1509 
1510  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1511  // new.target into a3 (if present, otherwise use target), remove all
1512  // arguments from the stack (including the receiver), and push undefined as
1513  // the receiver on the stack instead.
1514  {
1515  Label no_arg;
1516  Register scratch = t0;
1517  __ LoadRoot(a1, RootIndex::kUndefinedValue);
1518  __ mov(a2, a1);
1519  // Lsa() cannot be used here because the scratch value is used later.
1520  __ sll(scratch, a0, kPointerSizeLog2);
1521  __ Addu(a0, sp, Operand(scratch));
1522  __ sw(a2, MemOperand(a0)); // receiver
1523  __ Subu(a0, a0, Operand(kPointerSize));
1524  __ Branch(&no_arg, lt, a0, Operand(sp));
1525  __ lw(a1, MemOperand(a0)); // target
1526  __ mov(a3, a1); // new.target defaults to target
1527  __ Subu(a0, a0, Operand(kPointerSize));
1528  __ Branch(&no_arg, lt, a0, Operand(sp));
1529  __ lw(a2, MemOperand(a0)); // argumentsList
1530  __ Subu(a0, a0, Operand(kPointerSize));
1531  __ Branch(&no_arg, lt, a0, Operand(sp));
1532  __ lw(a3, MemOperand(a0)); // new.target
1533  __ bind(&no_arg);
1534  __ Addu(sp, sp, Operand(scratch));
1535  }
1536 
1537  // ----------- S t a t e -------------
1538  // -- a2 : argumentsList
1539  // -- a3 : new.target
1540  // -- a1 : target
1541  // -- sp[0] : receiver (undefined)
1542  // -----------------------------------
1543 
1544  // 2. We don't need to check explicitly for constructor target here,
1545  // since that's the first thing the Construct/ConstructWithArrayLike
1546  // builtins will do.
1547 
1548  // 3. We don't need to check explicitly for constructor new.target here,
1549  // since that's the second thing the Construct/ConstructWithArrayLike
1550  // builtins will do.
1551 
1552  // 4. Construct the target with the given new.target and argumentsList.
1553  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1554  RelocInfo::CODE_TARGET);
1555 }
1556 
1557 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
1558  __ sll(a0, a0, kSmiTagSize);
1559  __ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1560  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
1561  __ Push(Smi::zero()); // Padding.
1562  __ Addu(fp, sp,
1563  Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
1564 }
1565 
1566 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1567  // ----------- S t a t e -------------
1568  // -- v0 : result being passed through
1569  // -----------------------------------
1570  // Get the number of arguments passed (as a smi), tear down the frame and
1571  // then tear down the parameters.
1572  __ lw(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1573  __ mov(sp, fp);
1574  __ MultiPop(fp.bit() | ra.bit());
1575  __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
1576  // Adjust for the receiver.
1577  __ Addu(sp, sp, Operand(kPointerSize));
1578 }
1579 
1580 // static
1581 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1582  Handle<Code> code) {
1583  // ----------- S t a t e -------------
1584  // -- a1 : target
1585  // -- a0 : number of parameters on the stack (not including the receiver)
1586  // -- a2 : arguments list (a FixedArray)
1587  // -- t0 : len (number of elements to push from args)
1588  // -- a3 : new.target (for [[Construct]])
1589  // -----------------------------------
1590  if (masm->emit_debug_code()) {
1591  // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
1592  Label ok, fail;
1593  __ AssertNotSmi(a2);
1594  __ GetObjectType(a2, t8, t8);
1595  __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
1596  __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
1597  __ Branch(&ok, eq, t0, Operand(0));
1598  // Fall through.
1599  __ bind(&fail);
1600  __ Abort(AbortReason::kOperandIsNotAFixedArray);
1601 
1602  __ bind(&ok);
1603  }
1604 
1605  // Check for stack overflow.
1606  Label stack_overflow;
1607  Generate_StackOverflowCheck(masm, t0, kScratchReg, t1, &stack_overflow);
1608 
1609  // Push arguments onto the stack (thisArgument is already on the stack).
1610  {
1611  __ mov(t2, zero_reg);
1612  Label done, push, loop;
1613  __ LoadRoot(t1, RootIndex::kTheHoleValue);
1614  __ bind(&loop);
1615  __ Branch(&done, eq, t2, Operand(t0));
1616  __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
1617  __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
1618  __ Branch(&push, ne, t1, Operand(kScratchReg));
1619  __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
1620  __ bind(&push);
1621  __ Push(kScratchReg);
1622  __ Addu(t2, t2, Operand(1));
1623  __ Branch(&loop);
1624  __ bind(&done);
1625  __ Addu(a0, a0, t2);
1626  }
1627 
1628  // Tail-call to the actual Call or Construct builtin.
1629  __ Jump(code, RelocInfo::CODE_TARGET);
1630 
1631  __ bind(&stack_overflow);
1632  __ TailCallRuntime(Runtime::kThrowStackOverflow);
1633 }
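Editorial aside: the push loop above, sketched in C++ under the assumption that a vector models the stack and integer sentinels model the roots (kTheHole and kUndefined are illustrative, not V8 values).

#include <vector>

constexpr int kTheHole = -2;    // illustrative stand-in for RootIndex::kTheHoleValue
constexpr int kUndefined = -1;  // illustrative stand-in for RootIndex::kUndefinedValue

// Push every element of the arguments list, replacing holes with undefined,
// and bump the argument count by the number of pushed elements.
void PushVarargs(std::vector<int>& stack, const std::vector<int>& args, int& argc) {
  for (int value : args) {
    stack.push_back(value == kTheHole ? kUndefined : value);
  }
  argc += static_cast<int>(args.size());
}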
1634 
1635 // static
1636 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
1637  CallOrConstructMode mode,
1638  Handle<Code> code) {
1639  // ----------- S t a t e -------------
1640  // -- a0 : the number of arguments (not including the receiver)
1641  // -- a3 : the new.target (for [[Construct]] calls)
1642  // -- a1 : the target to call (can be any Object)
1643  // -- a2 : start index (to support rest parameters)
1644  // -----------------------------------
1645 
1646  // Check if new.target has a [[Construct]] internal method.
1647  if (mode == CallOrConstructMode::kConstruct) {
1648  Label new_target_constructor, new_target_not_constructor;
1649  __ JumpIfSmi(a3, &new_target_not_constructor);
1650  __ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
1651  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
1652  __ And(t1, t1, Operand(Map::IsConstructorBit::kMask));
1653  __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
1654  __ bind(&new_target_not_constructor);
1655  {
1656  FrameScope scope(masm, StackFrame::MANUAL);
1657  __ EnterFrame(StackFrame::INTERNAL);
1658  __ Push(a3);
1659  __ CallRuntime(Runtime::kThrowNotConstructor);
1660  }
1661  __ bind(&new_target_constructor);
1662  }
1663 
1664  // Check if we have an arguments adaptor frame below the function frame.
1665  Label arguments_adaptor, arguments_done;
1666  __ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1667  __ lw(t2, MemOperand(t3, CommonFrameConstants::kContextOrFrameTypeOffset));
1668  __ Branch(&arguments_adaptor, eq, t2,
1669  Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1670  {
1671  __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
1672  __ lw(t2, FieldMemOperand(t2, JSFunction::kSharedFunctionInfoOffset));
1673  __ lhu(t2, FieldMemOperand(
1674  t2, SharedFunctionInfo::kFormalParameterCountOffset));
1675  __ mov(t3, fp);
1676  }
1677  __ Branch(&arguments_done);
1678  __ bind(&arguments_adaptor);
1679  {
1680  // Just get the length from the ArgumentsAdaptorFrame.
1681  __ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1682  __ SmiUntag(t2);
1683  }
1684  __ bind(&arguments_done);
1685 
1686  Label stack_done, stack_overflow;
1687  __ Subu(t2, t2, a2);
1688  __ Branch(&stack_done, le, t2, Operand(zero_reg));
1689  {
1690  // Check for stack overflow.
1691  Generate_StackOverflowCheck(masm, t2, t0, t1, &stack_overflow);
1692 
1693  // Forward the arguments from the caller frame.
1694  {
1695  Label loop;
1696  __ Addu(a0, a0, t2);
1697  __ bind(&loop);
1698  {
1699  __ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
1700  __ lw(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize));
1701  __ push(kScratchReg);
1702  __ Subu(t2, t2, Operand(1));
1703  __ Branch(&loop, ne, t2, Operand(zero_reg));
1704  }
1705  }
1706  }
1707  __ Branch(&stack_done);
1708  __ bind(&stack_overflow);
1709  __ TailCallRuntime(Runtime::kThrowStackOverflow);
1710  __ bind(&stack_done);
1711 
1712  // Tail-call to the {code} handler.
1713  __ Jump(code, RelocInfo::CODE_TARGET);
1714 }
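Editorial aside: a hedged C++ sketch of the forwarding logic above, assuming callerArgs holds the caller frame's arguments (without the receiver) and startIndex plays the role of a2; the names are illustrative.

#include <algorithm>
#include <vector>

// Forward the caller's arguments from startIndex onward and bump argc; if the
// start index is past the end there is nothing to forward.
void ForwardVarargs(std::vector<int>& stack, const std::vector<int>& callerArgs,
                    int startIndex, int& argc) {
  int len = std::max(0, static_cast<int>(callerArgs.size()) - startIndex);
  for (int i = 0; i < len; ++i) {
    stack.push_back(callerArgs[startIndex + i]);
  }
  argc += len;
}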
1715 
1716 // static
1717 void Builtins::Generate_CallFunction(MacroAssembler* masm,
1718  ConvertReceiverMode mode) {
1719  // ----------- S t a t e -------------
1720  // -- a0 : the number of arguments (not including the receiver)
1721  // -- a1 : the function to call (checked to be a JSFunction)
1722  // -----------------------------------
1723  __ AssertFunction(a1);
1724 
1725  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
1726  // Check that the function is not a "classConstructor".
1727  Label class_constructor;
1728  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1729  __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1730  __ And(kScratchReg, a3,
1731  Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
1732  __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
1733 
1734  // Enter the context of the function; ToObject has to run in the function
1735  // context, and we also need to take the global proxy from the function
1736  // context in case of conversion.
1737  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
1738  // We need to convert the receiver for non-native sloppy mode functions.
1739  Label done_convert;
1740  __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1741  __ And(kScratchReg, a3,
1742  Operand(SharedFunctionInfo::IsNativeBit::kMask |
1743  SharedFunctionInfo::IsStrictBit::kMask));
1744  __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
1745  {
1746  // ----------- S t a t e -------------
1747  // -- a0 : the number of arguments (not including the receiver)
1748  // -- a1 : the function to call (checked to be a JSFunction)
1749  // -- a2 : the shared function info.
1750  // -- cp : the function context.
1751  // -----------------------------------
1752 
1753  if (mode == ConvertReceiverMode::kNullOrUndefined) {
1754  // Patch receiver to global proxy.
1755  __ LoadGlobalProxy(a3);
1756  } else {
1757  Label convert_to_object, convert_receiver;
1758  __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
1759  __ lw(a3, MemOperand(kScratchReg));
1760  __ JumpIfSmi(a3, &convert_to_object);
1761  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
1762  __ GetObjectType(a3, t0, t0);
1763  __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
1764  if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
1765  Label convert_global_proxy;
1766  __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
1767  __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
1768  __ bind(&convert_global_proxy);
1769  {
1770  // Patch receiver to global proxy.
1771  __ LoadGlobalProxy(a3);
1772  }
1773  __ Branch(&convert_receiver);
1774  }
1775  __ bind(&convert_to_object);
1776  {
1777  // Convert receiver using ToObject.
1778  // TODO(bmeurer): Inline the allocation here to avoid building the frame
1779  // in the fast case? (fall back to AllocateInNewSpace?)
1780  FrameScope scope(masm, StackFrame::INTERNAL);
1781  __ sll(a0, a0, kSmiTagSize); // Smi tagged.
1782  __ Push(a0, a1);
1783  __ mov(a0, a3);
1784  __ Push(cp);
1785  __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
1786  RelocInfo::CODE_TARGET);
1787  __ Pop(cp);
1788  __ mov(a3, v0);
1789  __ Pop(a0, a1);
1790  __ sra(a0, a0, kSmiTagSize); // Un-tag.
1791  }
1792  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1793  __ bind(&convert_receiver);
1794  }
1795  __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
1796  __ sw(a3, MemOperand(kScratchReg));
1797  }
1798  __ bind(&done_convert);
1799 
1800  // ----------- S t a t e -------------
1801  // -- a0 : the number of arguments (not including the receiver)
1802  // -- a1 : the function to call (checked to be a JSFunction)
1803  // -- a2 : the shared function info.
1804  // -- cp : the function context.
1805  // -----------------------------------
1806 
1807  __ lhu(a2,
1808  FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
1809  ParameterCount actual(a0);
1810  ParameterCount expected(a2);
1811  __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION);
1812 
1813  // The function is a "classConstructor", need to raise an exception.
1814  __ bind(&class_constructor);
1815  {
1816  FrameScope frame(masm, StackFrame::INTERNAL);
1817  __ Push(a1);
1818  __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
1819  }
1820 }
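Editorial aside: the receiver-conversion policy implemented above for sloppy-mode, non-native functions, as a small C++ sketch. Value, Receiver and ConvertReceiver are illustrative names; kWrapped corresponds to the ToObject call in the generated code.

struct Value {
  bool is_null_or_undefined;
  bool is_js_receiver;  // already a JSReceiver (object), no conversion needed
};

enum class Receiver { kUnchanged, kGlobalProxy, kWrapped };

Receiver ConvertReceiver(const Value& receiver, bool is_strict, bool is_native) {
  if (is_strict || is_native) return Receiver::kUnchanged;  // no conversion
  if (receiver.is_null_or_undefined) return Receiver::kGlobalProxy;
  if (receiver.is_js_receiver) return Receiver::kUnchanged;
  return Receiver::kWrapped;  // primitive receivers are wrapped via ToObject
}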
1821 
1822 // static
1823 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
1824  // ----------- S t a t e -------------
1825  // -- a0 : the number of arguments (not including the receiver)
1826  // -- a1 : the function to call (checked to be a JSBoundFunction)
1827  // -----------------------------------
1828  __ AssertBoundFunction(a1);
1829 
1830  // Patch the receiver to [[BoundThis]].
1831  {
1832  __ lw(kScratchReg, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
1833  __ Lsa(t0, sp, a0, kPointerSizeLog2);
1834  __ sw(kScratchReg, MemOperand(t0));
1835  }
1836 
1837  // Load [[BoundArguments]] into a2 and length of that into t0.
1838  __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
1839  __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
1840  __ SmiUntag(t0);
1841 
1842  // ----------- S t a t e -------------
1843  // -- a0 : the number of arguments (not including the receiver)
1844  // -- a1 : the function to call (checked to be a JSBoundFunction)
1845  // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
1846  // -- t0 : the number of [[BoundArguments]]
1847  // -----------------------------------
1848 
1849  // Reserve stack space for the [[BoundArguments]].
1850  {
1851  Label done;
1852  __ sll(t1, t0, kPointerSizeLog2);
1853  __ Subu(sp, sp, Operand(t1));
1854  // Check the stack for overflow. We are not trying to catch interruptions
1855  // (i.e. debug break and preemption) here, so check the "real stack limit".
1856  __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
1857  __ Branch(&done, hs, sp, Operand(kScratchReg));
1858  // Restore the stack pointer.
1859  __ Addu(sp, sp, Operand(t1));
1860  {
1861  FrameScope scope(masm, StackFrame::MANUAL);
1862  __ EnterFrame(StackFrame::INTERNAL);
1863  __ CallRuntime(Runtime::kThrowStackOverflow);
1864  }
1865  __ bind(&done);
1866  }
1867 
1868  // Relocate arguments down the stack.
1869  {
1870  Label loop, done_loop;
1871  __ mov(t1, zero_reg);
1872  __ bind(&loop);
1873  __ Branch(&done_loop, gt, t1, Operand(a0));
1874  __ Lsa(t2, sp, t0, kPointerSizeLog2);
1875  __ lw(kScratchReg, MemOperand(t2));
1876  __ Lsa(t2, sp, t1, kPointerSizeLog2);
1877  __ sw(kScratchReg, MemOperand(t2));
1878  __ Addu(t0, t0, Operand(1));
1879  __ Addu(t1, t1, Operand(1));
1880  __ Branch(&loop);
1881  __ bind(&done_loop);
1882  }
1883 
1884  // Copy [[BoundArguments]] to the stack (below the arguments).
1885  {
1886  Label loop, done_loop;
1887  __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
1888  __ SmiUntag(t0);
1889  __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1890  __ bind(&loop);
1891  __ Subu(t0, t0, Operand(1));
1892  __ Branch(&done_loop, lt, t0, Operand(zero_reg));
1893  __ Lsa(t1, a2, t0, kPointerSizeLog2);
1894  __ lw(kScratchReg, MemOperand(t1));
1895  __ Lsa(t1, sp, a0, kPointerSizeLog2);
1896  __ sw(kScratchReg, MemOperand(t1));
1897  __ Addu(a0, a0, Operand(1));
1898  __ Branch(&loop);
1899  __ bind(&done_loop);
1900  }
1901 
1902  // Call the [[BoundTargetFunction]] via the Call builtin.
1903  __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
1904  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
1905  RelocInfo::CODE_TARGET);
1906 }
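Editorial aside: the net effect of the relocation and copy loops above, sketched in C++ with vectors. SpliceBoundArguments is an illustrative name; the real code performs the splice in place on the machine stack.

#include <vector>

// The callee sees [[BoundThis]] as the receiver, then [[BoundArguments]],
// then the arguments from the call site.
std::vector<int> SpliceBoundArguments(int boundThis,
                                      const std::vector<int>& boundArgs,
                                      const std::vector<int>& callArgs) {
  std::vector<int> out;
  out.push_back(boundThis);
  out.insert(out.end(), boundArgs.begin(), boundArgs.end());
  out.insert(out.end(), callArgs.begin(), callArgs.end());
  return out;
}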
1907 
1908 // static
1909 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
1910  // ----------- S t a t e -------------
1911  // -- a0 : the number of arguments (not including the receiver)
1912  // -- a1 : the target to call (can be any Object).
1913  // -----------------------------------
1914 
1915  Label non_callable, non_function, non_smi;
1916  __ JumpIfSmi(a1, &non_callable);
1917  __ bind(&non_smi);
1918  __ GetObjectType(a1, t1, t2);
1919  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
1920  RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
1921  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
1922  RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
1923 
1924  // Check if target has a [[Call]] internal method.
1925  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
1926  __ And(t1, t1, Operand(Map::IsCallableBit::kMask));
1927  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
1928 
1929  // Check if the target is a proxy and call the CallProxy builtin.
1930  __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
1931  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
1932 
1933  // 2. Call to something else, which might have a [[Call]] internal method (if
1934  // not we raise an exception).
1935  __ bind(&non_function);
1936  // Overwrite the original receiver with the (original) target.
1937  __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
1938  __ sw(a1, MemOperand(kScratchReg));
1939  // Let the "call_as_function_delegate" take care of the rest.
1940  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
1941  __ Jump(masm->isolate()->builtins()->CallFunction(
1942  ConvertReceiverMode::kNotNullOrUndefined),
1943  RelocInfo::CODE_TARGET);
1944 
1945  // 3. Call to something that is not callable.
1946  __ bind(&non_callable);
1947  {
1948  FrameScope scope(masm, StackFrame::INTERNAL);
1949  __ Push(a1);
1950  __ CallRuntime(Runtime::kThrowCalledNonCallable);
1951  }
1952 }
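Editorial aside: the dispatch order above, summarized as a C++ sketch. The enum values map onto the tail calls in the generated code; Target and ClassifyCallTarget are illustrative names.

enum class CallTarget {
  kJSFunction,       // -> CallFunction builtin
  kJSBoundFunction,  // -> CallBoundFunction builtin
  kJSProxy,          // -> CallProxy builtin
  kOtherCallable,    // -> call_as_function_delegate
  kNonCallable       // -> Runtime::kThrowCalledNonCallable
};

struct Target {
  bool is_smi;
  bool is_js_function;
  bool is_js_bound_function;
  bool is_js_proxy;
  bool is_callable;  // the map's IsCallableBit
};

CallTarget ClassifyCallTarget(const Target& t) {
  if (t.is_smi) return CallTarget::kNonCallable;
  if (t.is_js_function) return CallTarget::kJSFunction;
  if (t.is_js_bound_function) return CallTarget::kJSBoundFunction;
  if (!t.is_callable) return CallTarget::kNonCallable;
  if (t.is_js_proxy) return CallTarget::kJSProxy;
  return CallTarget::kOtherCallable;
}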
1953 
1954 // static
1955 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
1956  // ----------- S t a t e -------------
1957  // -- a0 : the number of arguments (not including the receiver)
1958  // -- a1 : the constructor to call (checked to be a JSFunction)
1959  // -- a3 : the new target (checked to be a constructor)
1960  // -----------------------------------
1961  __ AssertConstructor(a1);
1962  __ AssertFunction(a1);
1963 
1964  // Calling convention for function specific ConstructStubs require
1965  // a2 to contain either an AllocationSite or undefined.
1966  __ LoadRoot(a2, RootIndex::kUndefinedValue);
1967 
1968  Label call_generic_stub;
1969 
1970  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
1971  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1972  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset));
1973  __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
1974  __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg));
1975 
1976  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
1977  RelocInfo::CODE_TARGET);
1978 
1979  __ bind(&call_generic_stub);
1980  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
1981  RelocInfo::CODE_TARGET);
1982 }
1983 
1984 // static
1985 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
1986  // ----------- S t a t e -------------
1987  // -- a0 : the number of arguments (not including the receiver)
1988  // -- a1 : the function to call (checked to be a JSBoundFunction)
1989  // -- a3 : the new target (checked to be a constructor)
1990  // -----------------------------------
1991  __ AssertConstructor(a1);
1992  __ AssertBoundFunction(a1);
1993 
1994  // Load [[BoundArguments]] into a2 and length of that into t0.
1995  __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
1996  __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
1997  __ SmiUntag(t0);
1998 
1999  // ----------- S t a t e -------------
2000  // -- a0 : the number of arguments (not including the receiver)
2001  // -- a1 : the function to call (checked to be a JSBoundFunction)
2002  // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2003  // -- a3 : the new target (checked to be a constructor)
2004  // -- t0 : the number of [[BoundArguments]]
2005  // -----------------------------------
2006 
2007  // Reserve stack space for the [[BoundArguments]].
2008  {
2009  Label done;
2010  __ sll(t1, t0, kPointerSizeLog2);
2011  __ Subu(sp, sp, Operand(t1));
2012  // Check the stack for overflow. We are not trying to catch interruptions
2013  // (i.e. debug break and preemption) here, so check the "real stack limit".
2014  __ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
2015  __ Branch(&done, hs, sp, Operand(kScratchReg));
2016  // Restore the stack pointer.
2017  __ Addu(sp, sp, Operand(t1));
2018  {
2019  FrameScope scope(masm, StackFrame::MANUAL);
2020  __ EnterFrame(StackFrame::INTERNAL);
2021  __ CallRuntime(Runtime::kThrowStackOverflow);
2022  }
2023  __ bind(&done);
2024  }
2025 
2026  // Relocate arguments down the stack.
2027  {
2028  Label loop, done_loop;
2029  __ mov(t1, zero_reg);
2030  __ bind(&loop);
2031  __ Branch(&done_loop, ge, t1, Operand(a0));
2032  __ Lsa(t2, sp, t0, kPointerSizeLog2);
2033  __ lw(kScratchReg, MemOperand(t2));
2034  __ Lsa(t2, sp, t1, kPointerSizeLog2);
2035  __ sw(kScratchReg, MemOperand(t2));
2036  __ Addu(t0, t0, Operand(1));
2037  __ Addu(t1, t1, Operand(1));
2038  __ Branch(&loop);
2039  __ bind(&done_loop);
2040  }
2041 
2042  // Copy [[BoundArguments]] to the stack (below the arguments).
2043  {
2044  Label loop, done_loop;
2045  __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
2046  __ SmiUntag(t0);
2047  __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2048  __ bind(&loop);
2049  __ Subu(t0, t0, Operand(1));
2050  __ Branch(&done_loop, lt, t0, Operand(zero_reg));
2051  __ Lsa(t1, a2, t0, kPointerSizeLog2);
2052  __ lw(kScratchReg, MemOperand(t1));
2053  __ Lsa(t1, sp, a0, kPointerSizeLog2);
2054  __ sw(kScratchReg, MemOperand(t1));
2055  __ Addu(a0, a0, Operand(1));
2056  __ Branch(&loop);
2057  __ bind(&done_loop);
2058  }
2059 
2060  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2061  {
2062  Label skip_load;
2063  __ Branch(&skip_load, ne, a1, Operand(a3));
2064  __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2065  __ bind(&skip_load);
2066  }
2067 
2068  // Construct the [[BoundTargetFunction]] via the Construct builtin.
2069  __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2070  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2071 }
2072 
2073 // static
2074 void Builtins::Generate_Construct(MacroAssembler* masm) {
2075  // ----------- S t a t e -------------
2076  // -- a0 : the number of arguments (not including the receiver)
2077  // -- a1 : the constructor to call (can be any Object)
2078  // -- a3 : the new target (either the same as the constructor or
2079  // the JSFunction on which new was invoked initially)
2080  // -----------------------------------
2081 
2082  // Check if target is a Smi.
2083  Label non_constructor, non_proxy;
2084  __ JumpIfSmi(a1, &non_constructor);
2085 
2086  // Check if target has a [[Construct]] internal method.
2087  __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
2088  __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
2089  __ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
2090  __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
2091 
2092  // Dispatch based on instance type.
2093  __ lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
2094  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2095  RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
2096 
2097  // Only dispatch to bound functions after checking whether they are
2098  // constructors.
2099  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2100  RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2101 
2102  // Only dispatch to proxies after checking whether they are constructors.
2103  __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
2104  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2105  RelocInfo::CODE_TARGET);
2106 
2107  // Called Construct on an exotic Object with a [[Construct]] internal method.
2108  __ bind(&non_proxy);
2109  {
2110  // Overwrite the original receiver with the (original) target.
2111  __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
2112  __ sw(a1, MemOperand(kScratchReg));
2113  // Let the "call_as_constructor_delegate" take care of the rest.
2114  __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
2115  __ Jump(masm->isolate()->builtins()->CallFunction(),
2116  RelocInfo::CODE_TARGET);
2117  }
2118 
2119  // Called Construct on an Object that doesn't have a [[Construct]] internal
2120  // method.
2121  __ bind(&non_constructor);
2122  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2123  RelocInfo::CODE_TARGET);
2124 }
2125 
2126 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2127  // State setup as expected by MacroAssembler::InvokePrologue.
2128  // ----------- S t a t e -------------
2129  // -- a0: actual arguments count
2130  // -- a1: function (passed through to callee)
2131  // -- a2: expected arguments count
2132  // -- a3: new target (passed through to callee)
2133  // -----------------------------------
2134 
2135  Label invoke, dont_adapt_arguments, stack_overflow;
2136 
2137  Label enough, too_few;
2138  __ Branch(&dont_adapt_arguments, eq, a2,
2139  Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
2140  // We use Uless (unsigned less-than) since the number of arguments is never negative.
2141  __ Branch(&too_few, Uless, a0, Operand(a2));
2142 
2143  { // Enough parameters: actual >= expected.
2144  // a0: actual number of arguments as a smi
2145  // a1: function
2146  // a2: expected number of arguments
2147  // a3: new target (passed through to callee)
2148  __ bind(&enough);
2149  EnterArgumentsAdaptorFrame(masm);
2150  Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
2151 
2152  // Calculate copy start address into a0 and copy end address into t1.
2153  __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
2154  // Adjust for return address and receiver.
2155  __ Addu(a0, a0, Operand(2 * kPointerSize));
2156  // Compute copy end address.
2157  __ sll(t1, a2, kPointerSizeLog2);
2158  __ subu(t1, a0, t1);
2159 
2160  // Copy the arguments (including the receiver) to the new stack frame.
2161  // a0: copy start address
2162  // a1: function
2163  // a2: expected number of arguments
2164  // a3: new target (passed through to callee)
2165  // t1: copy end address
2166 
2167  Label copy;
2168  __ bind(&copy);
2169  __ lw(t0, MemOperand(a0));
2170  __ push(t0);
2171  __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t1));
2172  __ addiu(a0, a0, -kPointerSize); // In delay slot.
2173 
2174  __ jmp(&invoke);
2175  }
2176 
2177  { // Too few parameters: Actual < expected.
2178  __ bind(&too_few);
2179  EnterArgumentsAdaptorFrame(masm);
2180  Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
2181 
2182  // Calculate copy start address into a0 and copy end address into t3.
2183  // a0: actual number of arguments as a smi
2184  // a1: function
2185  // a2: expected number of arguments
2186  // a3: new target (passed through to callee)
2187  __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
2188  // Adjust for return address and receiver.
2189  __ Addu(a0, a0, Operand(2 * kPointerSize));
2190  // Compute copy end address. Also adjust for return address.
2191  __ Addu(t3, fp, kPointerSize);
2192 
2193  // Copy the arguments (including the receiver) to the new stack frame.
2194  // a0: copy start address
2195  // a1: function
2196  // a2: expected number of arguments
2197  // a3: new target (passed through to callee)
2198  // t3: copy end address
2199  Label copy;
2200  __ bind(&copy);
2201  __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
2202  __ Subu(sp, sp, kPointerSize);
2203  __ Subu(a0, a0, kPointerSize);
2204  __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
2205  __ sw(t0, MemOperand(sp)); // In the delay slot.
2206 
2207  // Fill the remaining expected arguments with undefined.
2208  // a1: function
2209  // a2: expected number of arguments
2210  // a3: new target (passed through to callee)
2211  __ LoadRoot(t0, RootIndex::kUndefinedValue);
2212  __ sll(t2, a2, kPointerSizeLog2);
2213  __ Subu(t1, fp, Operand(t2));
2214  // Adjust for frame.
2215  __ Subu(t1, t1,
2216  Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
2217  kPointerSize));
2218 
2219  Label fill;
2220  __ bind(&fill);
2221  __ Subu(sp, sp, kPointerSize);
2222  __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(t1));
2223  __ sw(t0, MemOperand(sp));
2224  }
2225 
2226  // Call the entry point.
2227  __ bind(&invoke);
2228  __ mov(a0, a2);
2229  // a0 : expected number of arguments
2230  // a1 : function (passed through to callee)
2231  // a3 : new target (passed through to callee)
2232  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
2233  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
2234  __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
2235  __ Call(a2);
2236 
2237  // Store offset of return address for deoptimizer.
2238  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2239 
2240  // Exit frame and return.
2241  LeaveArgumentsAdaptorFrame(masm);
2242  __ Ret();
2243 
2244  // -------------------------------------------
2245  // Don't adapt arguments.
2246  // -------------------------------------------
2247  __ bind(&dont_adapt_arguments);
2248  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
2249  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
2250  __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
2251  __ Jump(a2);
2252 
2253  __ bind(&stack_overflow);
2254  {
2255  FrameScope frame(masm, StackFrame::MANUAL);
2256  __ CallRuntime(Runtime::kThrowStackOverflow);
2257  __ break_(0xCC);
2258  }
2259 }
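Editorial aside: a C++ sketch of the adaptation the trampoline performs, assuming vectors model the argument slots (kUndefined is an illustrative sentinel). The callee always sees exactly `expected` arguments; extra actual arguments stay reachable through the adaptor frame but are not copied.

#include <cstddef>
#include <vector>

constexpr int kUndefined = -1;  // illustrative stand-in for RootIndex::kUndefinedValue

std::vector<int> AdaptArguments(const std::vector<int>& actual, int expected) {
  std::vector<int> adapted;
  adapted.reserve(static_cast<std::size_t>(expected));
  for (int i = 0; i < expected; ++i) {
    // Copy what the caller passed; pad the remainder with undefined.
    adapted.push_back(i < static_cast<int>(actual.size()) ? actual[i] : kUndefined);
  }
  return adapted;
}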
2260 
2261 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2262  // The function index was put in t0 by the jump table trampoline.
2263  // Convert to Smi for the runtime call.
2264  __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2265  {
2266  HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2267  FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2268 
2269  // Save all parameter registers (see wasm-linkage.cc). They might be
2270  // overwritten in the runtime call below. We don't have any callee-saved
2271  // registers in wasm, so no need to store anything else.
2272  constexpr RegList gp_regs = Register::ListOf<a0, a1, a2, a3>();
2273  constexpr RegList fp_regs =
2274  DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
2275  __ MultiPush(gp_regs);
2276  __ MultiPushFPU(fp_regs);
2277 
2278  // Pass the instance and the function index as explicit arguments to the
2279  // runtime function.
2280  __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2281  // Load the correct CEntry builtin from the instance object.
2282  __ lw(a2, FieldMemOperand(kWasmInstanceRegister,
2283  WasmInstanceObject::kCEntryStubOffset));
2284  // Initialize the JavaScript context with 0. CEntry will use it to
2285  // set the current context on the isolate.
2286  __ Move(kContextRegister, Smi::zero());
2287  __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, a2);
2288 
2289  // Restore registers.
2290  __ MultiPopFPU(fp_regs);
2291  __ MultiPop(gp_regs);
2292  }
2293  // Finally, jump to the entrypoint.
2294  __ Jump(kScratchReg, v0, 0);
2295 }
2296 
2297 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2298  SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2299  bool builtin_exit_frame) {
2300  // Called from JavaScript; parameters are on stack as if calling JS function
2301  // a0: number of arguments including receiver
2302  // a1: pointer to builtin function
2303  // fp: frame pointer (restored after C call)
2304  // sp: stack pointer (restored as callee's sp after C call)
2305  // cp: current context (C callee-saved)
2306  //
2307  // If argv_mode == kArgvInRegister:
2308  // a2: pointer to the first argument
2309 
2310  if (argv_mode == kArgvInRegister) {
2311  // Move argv into the correct register.
2312  __ mov(s1, a2);
2313  } else {
2314  // Compute the argv pointer in a callee-saved register.
2315  __ Lsa(s1, sp, a0, kPointerSizeLog2);
2316  __ Subu(s1, s1, kPointerSize);
2317  }
2318 
2319  // Enter the exit frame that transitions from JavaScript to C++.
2320  FrameScope scope(masm, StackFrame::MANUAL);
2321  __ EnterExitFrame(
2322  save_doubles == kSaveFPRegs, 0,
2323  builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2324 
2325  // s0: number of arguments including receiver (C callee-saved)
2326  // s1: pointer to first argument (C callee-saved)
2327  // s2: pointer to builtin function (C callee-saved)
2328 
2329  // Prepare arguments for C routine.
2330  // a0 = argc
2331  __ mov(s0, a0);
2332  __ mov(s2, a1);
2333 
2334  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
2335  // also need to reserve the 4 argument slots on the stack.
2336 
2337  __ AssertStackIsAligned();
2338 
2339  // a0 = argc, a1 = argv, a2 = isolate
2340  __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2341  __ mov(a1, s1);
2342 
2343  // To let the GC traverse the return address of the exit frames, we need to
2344  // know where the return address is. The CEntry is unmovable, so
2345  // we can store the address on the stack to be able to find it again and
2346  // we never have to restore it, because it will not change.
2347  {
2348  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
2349  int kNumInstructionsToJump = 4;
2350  Label find_ra;
2351  // Adjust the value in ra to point to the correct return location, 2nd
2352  // instruction past the real call into C code (the jalr(t9)), and push it.
2353  // This is the return address of the exit frame.
2354  if (kArchVariant >= kMips32r6) {
2355  __ addiupc(ra, kNumInstructionsToJump + 1);
2356  } else {
2357  // This no-op-and-link sequence saves PC + 8 in the ra register on pre-r6 MIPS.
2358  __ nal(); // nal has branch delay slot.
2359  __ Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
2360  }
2361  __ bind(&find_ra);
2362 
2363  // This spot was reserved in EnterExitFrame.
2364  __ sw(ra, MemOperand(sp));
2365  // Stack space reservation moved to the branch delay slot below.
2366  // Stack is still aligned.
2367 
2368  // Call the C routine.
2369  __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
2370  __ jalr(t9);
2371  // Set up sp in the delay slot.
2372  __ addiu(sp, sp, -kCArgsSlotsSize);
2373  // Make sure the stored 'ra' points to this position.
2374  DCHECK_EQ(kNumInstructionsToJump,
2375  masm->InstructionsGeneratedSince(&find_ra));
2376  }
2377 
2378  // Result returned in v0 or v1:v0 - do not destroy these registers!
2379 
2380  // Check result for exception sentinel.
2381  Label exception_returned;
2382  __ LoadRoot(t0, RootIndex::kException);
2383  __ Branch(&exception_returned, eq, t0, Operand(v0));
2384 
2385  // Check that there is no pending exception, otherwise we
2386  // should have returned the exception sentinel.
2387  if (FLAG_debug_code) {
2388  Label okay;
2389  ExternalReference pending_exception_address = ExternalReference::Create(
2390  IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2391  __ li(a2, pending_exception_address);
2392  __ lw(a2, MemOperand(a2));
2393  __ LoadRoot(t0, RootIndex::kTheHoleValue);
2394  // Cannot use Check() here, as it attempts to generate a call into the runtime.
2395  __ Branch(&okay, eq, t0, Operand(a2));
2396  __ stop("Unexpected pending exception");
2397  __ bind(&okay);
2398  }
2399 
2400  // Exit C frame and return.
2401  // v0:v1: result
2402  // sp: stack pointer
2403  // fp: frame pointer
2404  Register argc = argv_mode == kArgvInRegister
2405  // We don't want to pop arguments so set argc to no_reg.
2406  ? no_reg
2407  // s0: still holds argc (callee-saved).
2408  : s0;
2409  __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
2410 
2411  // Handling of exception.
2412  __ bind(&exception_returned);
2413 
2414  ExternalReference pending_handler_context_address = ExternalReference::Create(
2415  IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2416  ExternalReference pending_handler_entrypoint_address =
2417  ExternalReference::Create(
2418  IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2419  ExternalReference pending_handler_fp_address = ExternalReference::Create(
2420  IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2421  ExternalReference pending_handler_sp_address = ExternalReference::Create(
2422  IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2423 
2424  // Ask the runtime for help to determine the handler. This will set v0 to
2425  // contain the current pending exception, don't clobber it.
2426  ExternalReference find_handler =
2427  ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2428  {
2429  FrameScope scope(masm, StackFrame::MANUAL);
2430  __ PrepareCallCFunction(3, 0, a0);
2431  __ mov(a0, zero_reg);
2432  __ mov(a1, zero_reg);
2433  __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2434  __ CallCFunction(find_handler, 3);
2435  }
2436 
2437  // Retrieve the handler context, SP and FP.
2438  __ li(cp, pending_handler_context_address);
2439  __ lw(cp, MemOperand(cp));
2440  __ li(sp, pending_handler_sp_address);
2441  __ lw(sp, MemOperand(sp));
2442  __ li(fp, pending_handler_fp_address);
2443  __ lw(fp, MemOperand(fp));
2444 
2445  // If the handler is a JS frame, restore the context to the frame. Note that
2446  // the context will be set to (cp == 0) for non-JS frames.
2447  Label zero;
2448  __ Branch(&zero, eq, cp, Operand(zero_reg));
2449  __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2450  __ bind(&zero);
2451 
2452  // Reset the masking register. This is done independent of the underlying
2453  // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
2454  // with both configurations. It is safe to always do this, because the
2455  // underlying register is caller-saved and can be arbitrarily clobbered.
2456  __ ResetSpeculationPoisonRegister();
2457 
2458  // Compute the handler entry address and jump to it.
2459  __ li(t9, pending_handler_entrypoint_address);
2460  __ lw(t9, MemOperand(t9));
2461  __ Jump(t9);
2462 }
2463 
2464 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2465  Label out_of_range, only_low, negate, done;
2466  Register result_reg = t0;
2467 
2468  Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2469  Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2470  Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2471  DoubleRegister double_scratch = kScratchDoubleReg;
2472 
2473  // Account for saved regs.
2474  const int kArgumentOffset = 4 * kPointerSize;
2475 
2476  __ Push(result_reg);
2477  __ Push(scratch, scratch2, scratch3);
2478 
2479  // Load double input.
2480  __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
2481 
2482  // Clear cumulative exception flags and save the FCSR.
2483  __ cfc1(scratch2, FCSR);
2484  __ ctc1(zero_reg, FCSR);
2485 
2486  // Try a conversion to a signed integer.
2487  __ Trunc_w_d(double_scratch, double_scratch);
2488  // Move the converted value into the result register.
2489  __ mfc1(scratch3, double_scratch);
2490 
2491  // Retrieve and restore the FCSR.
2492  __ cfc1(scratch, FCSR);
2493  __ ctc1(scratch2, FCSR);
2494 
2495  // Check for overflow and NaNs.
2496  __ And(
2497  scratch, scratch,
2498  kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2499  // If we had no exceptions then set result_reg and we are done.
2500  Label error;
2501  __ Branch(&error, ne, scratch, Operand(zero_reg));
2502  __ Move(result_reg, scratch3);
2503  __ Branch(&done);
2504  __ bind(&error);
2505 
2506  // Load the double value and perform a manual truncation.
2507  Register input_high = scratch2;
2508  Register input_low = scratch3;
2509 
2510  __ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2511  __ lw(input_high,
2512  MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2513 
2514  Label normal_exponent, restore_sign;
2515  // Extract the biased exponent in result.
2516  __ Ext(result_reg, input_high, HeapNumber::kExponentShift,
2517  HeapNumber::kExponentBits);
2518 
2519  // Check for Infinity and NaNs, which should return 0.
2520  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
2521  __ Movz(result_reg, zero_reg, scratch);
2522  __ Branch(&done, eq, scratch, Operand(zero_reg));
2523 
2524  // Express exponent as delta to (number of mantissa bits + 31).
2525  __ Subu(result_reg, result_reg,
2526  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2527 
2528  // If the delta is strictly positive, all bits would be shifted away,
2529  // which means that we can return 0.
2530  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2531  __ mov(result_reg, zero_reg);
2532  __ Branch(&done);
2533 
2534  __ bind(&normal_exponent);
2535  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2536  // Calculate shift.
2537  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
2538 
2539  // Save the sign.
2540  Register sign = result_reg;
2541  result_reg = no_reg;
2542  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2543 
2544  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
2545  // to check for this specific case.
2546  Label high_shift_needed, high_shift_done;
2547  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
2548  __ mov(input_high, zero_reg);
2549  __ Branch(&high_shift_done);
2550  __ bind(&high_shift_needed);
2551 
2552  // Set the implicit 1 before the mantissa part in input_high.
2553  __ Or(input_high, input_high,
2554  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2555  // Shift the mantissa bits to the correct position.
2556  // We don't need to clear non-mantissa bits as they will be shifted away.
2557  // If they weren't, it would mean that the answer is in the 32bit range.
2558  __ sllv(input_high, input_high, scratch);
2559 
2560  __ bind(&high_shift_done);
2561 
2562  // Replace the shifted bits with bits from the lower mantissa word.
2563  Label pos_shift, shift_done;
2564  __ li(kScratchReg, 32);
2565  __ subu(scratch, kScratchReg, scratch);
2566  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
2567 
2568  // Negate scratch.
2569  __ Subu(scratch, zero_reg, scratch);
2570  __ sllv(input_low, input_low, scratch);
2571  __ Branch(&shift_done);
2572 
2573  __ bind(&pos_shift);
2574  __ srlv(input_low, input_low, scratch);
2575 
2576  __ bind(&shift_done);
2577  __ Or(input_high, input_high, Operand(input_low));
2578  // Restore sign if necessary.
2579  __ mov(scratch, sign);
2580  result_reg = sign;
2581  sign = no_reg;
2582  __ Subu(result_reg, zero_reg, input_high);
2583  __ Movz(result_reg, input_high, scratch);
2584 
2585  __ bind(&done);
2586  __ sw(result_reg, MemOperand(sp, kArgumentOffset));
2587  __ Pop(scratch, scratch2, scratch3);
2588  __ Pop(result_reg);
2589  __ Ret();
2590 }
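Editorial aside: the slow path above reproduces ToInt32-style truncation by hand when the FPU conversion raises an exception. A hedged C++ sketch of those semantics (infinities and NaN become 0, otherwise the value is truncated toward zero and reduced modulo 2^32); the real builtin gets there by manipulating the exponent and mantissa bits rather than calling libm.

#include <cmath>
#include <cstdint>

int32_t DoubleToInt32(double x) {
  if (!std::isfinite(x)) return 0;        // NaN and +/-Infinity map to 0.
  double t = std::trunc(x);               // truncate toward zero
  double m = std::fmod(t, 4294967296.0);  // keep only the low 32 bits (mod 2^32)
  if (m < 0) m += 4294967296.0;
  // Reinterpret the unsigned residue as a signed 32-bit value (two's complement).
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}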
2591 
2592 void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
2593  const Register exponent = a2;
2594  const DoubleRegister double_base = f2;
2595  const DoubleRegister double_exponent = f4;
2596  const DoubleRegister double_result = f0;
2597  const DoubleRegister double_scratch = f6;
2598  const FPURegister single_scratch = f8;
2599  const Register scratch = t5;
2600  const Register scratch2 = t3;
2601 
2602  Label call_runtime, done, int_exponent;
2603 
2604  Label int_exponent_convert;
2605  // Detect integer exponents stored as double.
2606  __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
2607  double_scratch, scratch2, kCheckForInexactConversion);
2608  // scratch2 == 0 means there was no conversion error.
2609  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
2610 
2611  __ push(ra);
2612  {
2613  AllowExternalCallThatCantCauseGC scope(masm);
2614  __ PrepareCallCFunction(0, 2, scratch2);
2615  __ MovToFloatParameters(double_base, double_exponent);
2616  __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
2617  }
2618  __ pop(ra);
2619  __ MovFromFloatResult(double_result);
2620  __ jmp(&done);
2621 
2622  __ bind(&int_exponent_convert);
2623 
2624  // Calculate power with integer exponent.
2625  __ bind(&int_exponent);
2626 
2627  // Get two copies of exponent in the registers scratch and exponent.
2628  // Exponent has previously been stored into scratch as untagged integer.
2629  __ mov(exponent, scratch);
2630 
2631  __ mov_d(double_scratch, double_base); // Back up base.
2632  __ Move(double_result, 1.0);
2633 
2634  // Get absolute value of exponent.
2635  Label positive_exponent, bail_out;
2636  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
2637  __ Subu(scratch, zero_reg, scratch);
2638  // Check whether Subu overflowed and produced a negative result
2639  // (this happens only when the input is MIN_INT).
2640  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
2641  __ bind(&positive_exponent);
2642  __ Assert(ge, AbortReason::kUnexpectedNegativeValue, scratch,
2643  Operand(zero_reg));
2644 
2645  Label while_true, no_carry, loop_end;
2646  __ bind(&while_true);
2647 
2648  __ And(scratch2, scratch, 1);
2649 
2650  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
2651  __ mul_d(double_result, double_result, double_scratch);
2652  __ bind(&no_carry);
2653 
2654  __ sra(scratch, scratch, 1);
2655 
2656  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
2657  __ mul_d(double_scratch, double_scratch, double_scratch);
2658 
2659  __ Branch(&while_true);
2660 
2661  __ bind(&loop_end);
2662 
2663  __ Branch(&done, ge, exponent, Operand(zero_reg));
2664  __ Move(double_scratch, 1.0);
2665  __ div_d(double_result, double_scratch, double_result);
2666  // Test whether result is zero. Bail out to check for subnormal result.
2667  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
2668  __ CompareF64(EQ, double_result, kDoubleRegZero);
2669  __ BranchFalseShortF(&done);
2670 
2671  // double_exponent may not contain the exponent value if the input was a
2672  // smi. Set it to the exponent value before bailing out.
2673  __ bind(&bail_out);
2674  __ mtc1(exponent, single_scratch);
2675  __ cvt_d_w(double_exponent, single_scratch);
2676 
2677  // Returning or bailing out.
2678  __ push(ra);
2679  {
2680  AllowExternalCallThatCantCauseGC scope(masm);
2681  __ PrepareCallCFunction(0, 2, scratch);
2682  __ MovToFloatParameters(double_base, double_exponent);
2683  __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
2684  }
2685  __ pop(ra);
2686  __ MovFromFloatResult(double_result);
2687 
2688  __ bind(&done);
2689  __ Ret();
2690 }
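Editorial aside: the integer-exponent loop above is exponentiation by squaring. A C++ sketch of the same idea; note that the real builtin bails out to power_double_double_function when the exponent negation overflows (MIN_INT) or when the reciprocal underflows to zero, cases this sketch does not handle specially.

#include <cstdint>

double PowIntExponent(double base, int32_t exponent) {
  uint32_t e = exponent < 0
                   ? static_cast<uint32_t>(-static_cast<int64_t>(exponent))
                   : static_cast<uint32_t>(exponent);
  double result = 1.0;
  double b = base;
  while (e != 0) {
    if (e & 1) result *= b;  // fold in the current power of the base
    e >>= 1;
    if (e != 0) b *= b;      // square the base for the next bit
  }
  return exponent < 0 ? 1.0 / result : result;
}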
2691 
2692 namespace {
2693 
2694 void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
2695  ElementsKind kind) {
2696  // Load undefined into the allocation site parameter as required by
2697  // ArrayNArgumentsConstructor.
2698  __ LoadRoot(kJavaScriptCallExtraArg1Register, RootIndex::kUndefinedValue);
2699 
2700  __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
2701  .code(),
2702  RelocInfo::CODE_TARGET, lo, a0, Operand(1));
2703 
2704  __ Jump(BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor),
2705  RelocInfo::CODE_TARGET, hi, a0, Operand(1));
2706 
2707  if (IsFastPackedElementsKind(kind)) {
2708  // We might need to create a holey array; look at the first
2709  // argument to decide.
2710  __ lw(kScratchReg, MemOperand(sp, 0));
2711 
2712  __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
2713  masm->isolate(), GetHoleyElementsKind(kind))
2714  .code(),
2715  RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
2716  }
2717 
2718  __ Jump(
2719  CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
2720  .code(),
2721  RelocInfo::CODE_TARGET);
2722 }
2723 
2724 } // namespace
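Editorial aside: the argc-based dispatch in the helper above, as a C++ sketch. InternalArrayStub and SelectStub are illustrative names; first_arg stands for the single length argument inspected at sp[0].

enum class InternalArrayStub {
  kNoArgument,           // argc == 0
  kSingleArgument,       // argc == 1
  kSingleArgumentHoley,  // argc == 1 with a non-zero length on a packed kind
  kNArguments            // argc > 1
};

InternalArrayStub SelectStub(int argc, bool packed_kind, int first_arg) {
  if (argc < 1) return InternalArrayStub::kNoArgument;
  if (argc > 1) return InternalArrayStub::kNArguments;
  if (packed_kind && first_arg != 0) return InternalArrayStub::kSingleArgumentHoley;
  return InternalArrayStub::kSingleArgument;
}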
2725 
2726 void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
2727  // ----------- S t a t e -------------
2728  // -- a0 : argc
2729  // -- a1 : constructor
2730  // -- sp[0] : return address
2731  // -- sp[4] : last argument
2732  // -----------------------------------
2733 
2734  if (FLAG_debug_code) {
2735  // The array construct code is only set for the global and natives
2736  // builtin Array functions which always have maps.
2737 
2738  // Initial map for the builtin Array function should be a map.
2739  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
2740  // A zero value will indicate both a nullptr and a Smi.
2741  __ SmiTst(a3, kScratchReg);
2742  __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
2743  kScratchReg, Operand(zero_reg));
2744  __ GetObjectType(a3, a3, t0);
2745  __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
2746  Operand(MAP_TYPE));
2747  }
2748 
2749  // Figure out the right elements kind.
2750  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
2751 
2752  // Load the map's "bit field 2" into a3. We only need the first byte,
2753  // but the following bit field extraction takes care of that anyway.
2754  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
2755  // Retrieve elements_kind from bit field 2.
2756  __ DecodeField<Map::ElementsKindBits>(a3);
2757 
2758  if (FLAG_debug_code) {
2759  Label done;
2760  __ Branch(&done, eq, a3, Operand(PACKED_ELEMENTS));
2761  __ Assert(
2762  eq,
2763  AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray,
2764  a3, Operand(HOLEY_ELEMENTS));
2765  __ bind(&done);
2766  }
2767 
2768  Label fast_elements_case;
2769  __ Branch(&fast_elements_case, eq, a3, Operand(PACKED_ELEMENTS));
2770  GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
2771 
2772  __ bind(&fast_elements_case);
2773  GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
2774 }
2775 
2776 #undef __
2777 
2778 } // namespace internal
2779 } // namespace v8
2780 
2781 #endif // V8_TARGET_ARCH_MIPS