V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
interpreter-assembler.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/interpreter/interpreter-assembler.h"
6 
7 #include <limits>
8 #include <ostream>
9 
10 #include "src/code-factory.h"
11 #include "src/frames.h"
12 #include "src/interface-descriptors.h"
13 #include "src/interpreter/bytecodes.h"
14 #include "src/interpreter/interpreter.h"
15 #include "src/machine-type.h"
16 #include "src/macro-assembler.h"
17 #include "src/objects-inl.h"
18 #include "src/zone/zone.h"
19 
20 namespace v8 {
21 namespace internal {
22 namespace interpreter {
23 
24 using compiler::CodeAssemblerState;
25 using compiler::Node;
26 template <class T>
27 using TNode = compiler::TNode<T>;
28 
29 InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
30  Bytecode bytecode,
31  OperandScale operand_scale)
32  : CodeStubAssembler(state),
33  bytecode_(bytecode),
34  operand_scale_(operand_scale),
35  VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
36  MachineType::PointerRepresentation()),
37  VARIABLE_CONSTRUCTOR(
38  bytecode_array_, MachineRepresentation::kTagged,
39  Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
40  VARIABLE_CONSTRUCTOR(
41  bytecode_offset_, MachineType::PointerRepresentation(),
42  Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
43  VARIABLE_CONSTRUCTOR(
44  dispatch_table_, MachineType::PointerRepresentation(),
45  Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
46  VARIABLE_CONSTRUCTOR(
47  accumulator_, MachineRepresentation::kTagged,
48  Parameter(InterpreterDispatchDescriptor::kAccumulator)),
49  accumulator_use_(AccumulatorUse::kNone),
50  made_call_(false),
51  reloaded_frame_ptr_(false),
52  bytecode_array_valid_(true),
53  disable_stack_check_across_call_(false),
54  stack_pointer_before_call_(nullptr) {
55 #ifdef V8_TRACE_IGNITION
56  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
57 #endif
58  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
59  [this] { CallEpilogue(); });
60 
61  // Save the bytecode offset immediately if the bytecode will make a call
62  // along the critical path, or if it is a return bytecode.
63  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
64  Bytecodes::Returns(bytecode)) {
65  SaveBytecodeOffset();
66  }
67 }
68 
69 InterpreterAssembler::~InterpreterAssembler() {
70  // If the following check fails the handler does not use the
71  // accumulator in the way described in the bytecode definitions in
72  // bytecodes.h.
73  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
74  UnregisterCallGenerationCallbacks();
75 }
76 
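// Returns the interpreted frame pointer, binding it lazily on first use and
// re-binding it once after a call has been made along the bytecode's critical
// path.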
77 Node* InterpreterAssembler::GetInterpretedFramePointer() {
78  if (!interpreted_frame_pointer_.IsBound()) {
79  interpreted_frame_pointer_.Bind(LoadParentFramePointer());
80  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
81  !reloaded_frame_ptr_) {
82  interpreted_frame_pointer_.Bind(LoadParentFramePointer());
83  reloaded_frame_ptr_ = true;
84  }
85  return interpreted_frame_pointer_.value();
86 }
87 
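// Returns the current bytecode offset. If a call was made along the critical
// path and the cached offset still holds the original dispatch parameter, it
// is reloaded from the bytecode_offset register first.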
88 Node* InterpreterAssembler::BytecodeOffset() {
89  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
90  (bytecode_offset_.value() ==
91  Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
92  bytecode_offset_.Bind(ReloadBytecodeOffset());
93  }
94  return bytecode_offset_.value();
95 }
96 
97 Node* InterpreterAssembler::ReloadBytecodeOffset() {
98  Node* offset = LoadAndUntagRegister(Register::bytecode_offset());
99  if (operand_scale() != OperandScale::kSingle) {
100  // Add one to the offset such that it points to the actual bytecode rather
101  // than the Wide / ExtraWide prefix bytecode.
102  offset = IntPtrAdd(offset, IntPtrConstant(1));
103  }
104  return offset;
105 }
106 
107 void InterpreterAssembler::SaveBytecodeOffset() {
108  Node* offset = BytecodeOffset();
109  if (operand_scale() != OperandScale::kSingle) {
110  // Subtract one from the offset such that it points to the Wide / ExtraWide
111  // prefix bytecode.
112  offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
113  }
114  StoreAndTagRegister(offset, Register::bytecode_offset());
115 }
116 
117 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
118  // Force a re-load of the bytecode array after every call in case the debugger
119  // has been activated.
120  if (!bytecode_array_valid_) {
121  bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
122  bytecode_array_valid_ = true;
123  }
124  return bytecode_array_.value();
125 }
126 
127 Node* InterpreterAssembler::DispatchTableRawPointer() {
128  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
129  (dispatch_table_.value() ==
130  Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
131  dispatch_table_.Bind(ExternalConstant(
132  ExternalReference::interpreter_dispatch_table_address(isolate())));
133  }
134  return dispatch_table_.value();
135 }
136 
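// Accumulator accessors. GetAccumulator and SetAccumulator also record the
// read/write in accumulator_use_ so the destructor can check it against the
// accumulator use declared for the bytecode in bytecodes.h.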
137 Node* InterpreterAssembler::GetAccumulatorUnchecked() {
138  return accumulator_.value();
139 }
140 
141 Node* InterpreterAssembler::GetAccumulator() {
142  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
143  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
144  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
145 }
146 
147 void InterpreterAssembler::SetAccumulator(Node* value) {
148  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
149  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
150  accumulator_.Bind(value);
151 }
152 
153 Node* InterpreterAssembler::GetContext() {
154  return LoadRegister(Register::current_context());
155 }
156 
157 void InterpreterAssembler::SetContext(Node* value) {
158  StoreRegister(value, Register::current_context());
159 }
160 
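// Walks the context chain |depth| levels up from |context| and returns the
// context found at that depth.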
161 Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
162  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
163  cur_context.Bind(context);
164 
165  Variable cur_depth(this, MachineRepresentation::kWord32);
166  cur_depth.Bind(depth);
167 
168  Label context_found(this);
169 
170  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
171  Label context_search(this, 2, context_search_loop_variables);
172 
173  // Fast path if the depth is 0.
174  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
175 
176  // Loop until the depth is 0.
177  BIND(&context_search);
178  {
179  cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
180  cur_context.Bind(
181  LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
182 
183  Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
184  &context_search);
185  }
186 
187  BIND(&context_found);
188  return cur_context.value();
189 }
190 
191 void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
192  Node* depth,
193  Label* target) {
194  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
195  cur_context.Bind(context);
196 
197  Variable cur_depth(this, MachineRepresentation::kWord32);
198  cur_depth.Bind(depth);
199 
200  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
201  Label context_search(this, 2, context_search_loop_variables);
202 
203  // Loop until the depth is 0.
204  Goto(&context_search);
205  BIND(&context_search);
206  {
207  // TODO(leszeks): We only need to do this check if the context had a sloppy
208  // eval; we could pass in a context chain bitmask to figure out which
209  // contexts actually need to be checked.
210 
211  Node* extension_slot =
212  LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
213 
214  // Jump to the target if the extension slot is not a hole.
215  GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
216 
217  cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
218  cur_context.Bind(
219  LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
220 
221  GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
222  &context_search);
223  }
224 }
225 
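// Register access helpers. Register operands live in the interpreted frame
// and their indexes are negative, so a register's location is the frame
// pointer plus the pointer-scaled (negative) index; the location is poisoned
// on speculation.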
226 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
227  return WordPoisonOnSpeculation(
228  IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
229 }
230 
231 Node* InterpreterAssembler::RegisterLocation(Register reg) {
232  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
233 }
234 
235 Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
236  return TimesPointerSize(index);
237 }
238 
239 Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
240  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
241  RegisterFrameOffset(reg_index), LoadSensitivity::kCritical);
242 }
243 
244 Node* InterpreterAssembler::LoadRegister(Register reg) {
245  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
246  IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
247 }
248 
249 Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
250  return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
251  << kPointerSizeLog2);
252 }
253 
254 Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
255  return LoadRegister(
256  BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
257 }
258 
259 std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
260  int operand_index) {
261  DCHECK_EQ(OperandType::kRegPair,
262  Bytecodes::GetOperandType(bytecode_, operand_index));
263  Node* first_reg_index =
264  BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
265  Node* second_reg_index = NextRegister(first_reg_index);
266  return std::make_pair(LoadRegister(first_reg_index),
267  LoadRegister(second_reg_index));
268 }
269 
270 InterpreterAssembler::RegListNodePair
271 InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
272  DCHECK(Bytecodes::IsRegisterListOperandType(
273  Bytecodes::GetOperandType(bytecode_, operand_index)));
274  DCHECK_EQ(OperandType::kRegCount,
275  Bytecodes::GetOperandType(bytecode_, operand_index + 1));
276  Node* base_reg = RegisterLocation(
277  BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
278  Node* reg_count = BytecodeOperandCount(operand_index + 1);
279  return RegListNodePair(base_reg, reg_count);
280 }
281 
282 Node* InterpreterAssembler::LoadRegisterFromRegisterList(
283  const RegListNodePair& reg_list, int index) {
284  Node* location = RegisterLocationInRegisterList(reg_list, index);
285  // Location is already poisoned on speculation, so no need to poison here.
286  return Load(MachineType::AnyTagged(), location);
287 }
288 
289 Node* InterpreterAssembler::RegisterLocationInRegisterList(
290  const RegListNodePair& reg_list, int index) {
291  CSA_ASSERT(this,
292  Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
293  Node* offset = RegisterFrameOffset(IntPtrConstant(index));
294  // Register indexes are negative, so subtract the index from the base
295  // location to get the register's location.
296  return IntPtrSub(reg_list.base_reg_location(), offset);
297 }
298 
299 void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
300  StoreNoWriteBarrier(
301  MachineRepresentation::kTagged, GetInterpretedFramePointer(),
302  IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
303 }
304 
305 void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
306  StoreNoWriteBarrier(MachineRepresentation::kTagged,
307  GetInterpretedFramePointer(),
308  RegisterFrameOffset(reg_index), value);
309 }
310 
311 void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
312  int offset = reg.ToOperand() << kPointerSizeLog2;
313  StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
314 }
315 
316 void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
317  int operand_index) {
318  StoreRegister(value,
319  BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
320 }
321 
322 void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
323  Node* value2,
324  int operand_index) {
325  DCHECK_EQ(OperandType::kRegOutPair,
326  Bytecodes::GetOperandType(bytecode_, operand_index));
327  Node* first_reg_index =
328  BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
329  StoreRegister(value1, first_reg_index);
330  Node* second_reg_index = NextRegister(first_reg_index);
331  StoreRegister(value2, second_reg_index);
332 }
333 
334 void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
335  Node* value1, Node* value2, Node* value3, int operand_index) {
336  DCHECK_EQ(OperandType::kRegOutTriple,
337  Bytecodes::GetOperandType(bytecode_, operand_index));
338  Node* first_reg_index =
339  BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
340  StoreRegister(value1, first_reg_index);
341  Node* second_reg_index = NextRegister(first_reg_index);
342  StoreRegister(value2, second_reg_index);
343  Node* third_reg_index = NextRegister(second_reg_index);
344  StoreRegister(value3, third_reg_index);
345 }
346 
347 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
348  // Register indexes are negative, so the next index is minus one.
349  return IntPtrAdd(reg_index, IntPtrConstant(-1));
350 }
351 
352 Node* InterpreterAssembler::OperandOffset(int operand_index) {
353  return IntPtrConstant(
354  Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
355 }
356 
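// Raw operand loads. Each helper reads an operand of the declared size at its
// offset within the current bytecode, applying the requested speculation
// poisoning.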
357 Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
358  int operand_index, LoadSensitivity needs_poisoning) {
359  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
360  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
361  bytecode_, operand_index, operand_scale()));
362  Node* operand_offset = OperandOffset(operand_index);
363  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
364  IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
365 }
366 
367 Node* InterpreterAssembler::BytecodeOperandSignedByte(
368  int operand_index, LoadSensitivity needs_poisoning) {
369  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
370  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
371  bytecode_, operand_index, operand_scale()));
372  Node* operand_offset = OperandOffset(operand_index);
373  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
374  IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
375 }
376 
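// Assembles a 16- or 32-bit operand from individual byte loads, for targets
// that do not support unaligned memory access.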
377 Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
378  int relative_offset, MachineType result_type,
379  LoadSensitivity needs_poisoning) {
380  static const int kMaxCount = 4;
381  DCHECK(!TargetSupportsUnalignedAccess());
382 
383  int count;
384  switch (result_type.representation()) {
385  case MachineRepresentation::kWord16:
386  count = 2;
387  break;
388  case MachineRepresentation::kWord32:
389  count = 4;
390  break;
391  default:
392  UNREACHABLE();
393  break;
394  }
395  MachineType msb_type =
396  result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
397 
398 #if V8_TARGET_LITTLE_ENDIAN
399  const int kStep = -1;
400  int msb_offset = count - 1;
401 #elif V8_TARGET_BIG_ENDIAN
402  const int kStep = 1;
403  int msb_offset = 0;
404 #else
405 #error "Unknown Architecture"
406 #endif
407 
408  // Read the most significant byte into bytes[0] and then, in order, down
409  // to the least significant byte in bytes[count - 1].
410  DCHECK_LE(count, kMaxCount);
411  Node* bytes[kMaxCount];
412  for (int i = 0; i < count; i++) {
413  MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
414  Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
415  Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
416  bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
417  needs_poisoning);
418  }
419 
420  // Pack LSB to MSB.
421  Node* result = bytes[--count];
422  for (int i = 1; --count >= 0; i++) {
423  Node* shift = Int32Constant(i * kBitsPerByte);
424  Node* value = Word32Shl(bytes[count], shift);
425  result = Word32Or(value, result);
426  }
427  return result;
428 }
429 
430 Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
431  int operand_index, LoadSensitivity needs_poisoning) {
432  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
433  DCHECK_EQ(
434  OperandSize::kShort,
435  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
436  int operand_offset =
437  Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
438  if (TargetSupportsUnalignedAccess()) {
439  return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
440  IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
441  needs_poisoning);
442  } else {
443  return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
444  needs_poisoning);
445  }
446 }
447 
448 Node* InterpreterAssembler::BytecodeOperandSignedShort(
449  int operand_index, LoadSensitivity needs_poisoning) {
450  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
451  DCHECK_EQ(
452  OperandSize::kShort,
453  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
454  int operand_offset =
455  Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
456  if (TargetSupportsUnalignedAccess()) {
457  return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
458  IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
459  needs_poisoning);
460  } else {
461  return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
462  needs_poisoning);
463  }
464 }
465 
466 Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
467  int operand_index, LoadSensitivity needs_poisoning) {
468  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
469  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
470  bytecode_, operand_index, operand_scale()));
471  int operand_offset =
472  Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
473  if (TargetSupportsUnalignedAccess()) {
474  return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
475  IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
476  needs_poisoning);
477  } else {
478  return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
479  needs_poisoning);
480  }
481 }
482 
483 Node* InterpreterAssembler::BytecodeOperandSignedQuad(
484  int operand_index, LoadSensitivity needs_poisoning) {
485  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
486  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
487  bytecode_, operand_index, operand_scale()));
488  int operand_offset =
489  Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
490  if (TargetSupportsUnalignedAccess()) {
491  return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
492  IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
493  needs_poisoning);
494  } else {
495  return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
496  needs_poisoning);
497  }
498 }
499 
500 Node* InterpreterAssembler::BytecodeSignedOperand(
501  int operand_index, OperandSize operand_size,
502  LoadSensitivity needs_poisoning) {
503  DCHECK(!Bytecodes::IsUnsignedOperandType(
504  Bytecodes::GetOperandType(bytecode_, operand_index)));
505  switch (operand_size) {
506  case OperandSize::kByte:
507  return BytecodeOperandSignedByte(operand_index, needs_poisoning);
508  case OperandSize::kShort:
509  return BytecodeOperandSignedShort(operand_index, needs_poisoning);
510  case OperandSize::kQuad:
511  return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
512  case OperandSize::kNone:
513  UNREACHABLE();
514  }
515  return nullptr;
516 }
517 
518 Node* InterpreterAssembler::BytecodeUnsignedOperand(
519  int operand_index, OperandSize operand_size,
520  LoadSensitivity needs_poisoning) {
521  DCHECK(Bytecodes::IsUnsignedOperandType(
522  Bytecodes::GetOperandType(bytecode_, operand_index)));
523  switch (operand_size) {
524  case OperandSize::kByte:
525  return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
526  case OperandSize::kShort:
527  return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
528  case OperandSize::kQuad:
529  return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
530  case OperandSize::kNone:
531  UNREACHABLE();
532  }
533  return nullptr;
534 }
535 
536 Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
537  DCHECK_EQ(OperandType::kRegCount,
538  Bytecodes::GetOperandType(bytecode_, operand_index));
539  OperandSize operand_size =
540  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
541  return BytecodeUnsignedOperand(operand_index, operand_size);
542 }
543 
544 Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
545  DCHECK_EQ(OperandType::kFlag8,
546  Bytecodes::GetOperandType(bytecode_, operand_index));
547  OperandSize operand_size =
548  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
549  DCHECK_EQ(operand_size, OperandSize::kByte);
550  return BytecodeUnsignedOperand(operand_index, operand_size);
551 }
552 
553 Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
554  DCHECK_EQ(OperandType::kUImm,
555  Bytecodes::GetOperandType(bytecode_, operand_index));
556  OperandSize operand_size =
557  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
558  return BytecodeUnsignedOperand(operand_index, operand_size);
559 }
560 
561 Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
562  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
563 }
564 
565 Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
566  return SmiFromInt32(BytecodeOperandUImm(operand_index));
567 }
568 
569 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
570  DCHECK_EQ(OperandType::kImm,
571  Bytecodes::GetOperandType(bytecode_, operand_index));
572  OperandSize operand_size =
573  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
574  return BytecodeSignedOperand(operand_index, operand_size);
575 }
576 
577 Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
578  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
579 }
580 
581 Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
582  return SmiFromInt32(BytecodeOperandImm(operand_index));
583 }
584 
585 Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
586  DCHECK_EQ(OperandType::kIdx,
587  Bytecodes::GetOperandType(bytecode_, operand_index));
588  OperandSize operand_size =
589  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
590  return BytecodeUnsignedOperand(operand_index, operand_size);
591 }
592 
593 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
594  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
595 }
596 
597 Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
598  return SmiTag(BytecodeOperandIdx(operand_index));
599 }
600 
601 Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
602  int operand_index, LoadSensitivity needs_poisoning) {
603  DCHECK_EQ(OperandType::kIdx,
604  Bytecodes::GetOperandType(bytecode_, operand_index));
605  OperandSize operand_size =
606  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
607  return ChangeUint32ToWord(
608  BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
609 }
610 
611 Node* InterpreterAssembler::BytecodeOperandReg(
612  int operand_index, LoadSensitivity needs_poisoning) {
613  DCHECK(Bytecodes::IsRegisterOperandType(
614  Bytecodes::GetOperandType(bytecode_, operand_index)));
615  OperandSize operand_size =
616  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
617  return ChangeInt32ToIntPtr(
618  BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
619 }
620 
621 Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
622  DCHECK_EQ(OperandType::kRuntimeId,
623  Bytecodes::GetOperandType(bytecode_, operand_index));
624  OperandSize operand_size =
625  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
626  DCHECK_EQ(operand_size, OperandSize::kShort);
627  return BytecodeUnsignedOperand(operand_index, operand_size);
628 }
629 
630 Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
631  int operand_index) {
632  DCHECK_EQ(OperandType::kNativeContextIndex,
633  Bytecodes::GetOperandType(bytecode_, operand_index));
634  OperandSize operand_size =
635  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
636  return ChangeUint32ToWord(
637  BytecodeUnsignedOperand(operand_index, operand_size));
638 }
639 
640 Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
641  DCHECK_EQ(OperandType::kIntrinsicId,
642  Bytecodes::GetOperandType(bytecode_, operand_index));
643  OperandSize operand_size =
644  Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
645  DCHECK_EQ(operand_size, OperandSize::kByte);
646  return BytecodeUnsignedOperand(operand_index, operand_size);
647 }
648 
649 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
650  TNode<FixedArray> constant_pool = CAST(LoadObjectField(
651  BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
652  return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
653  LoadSensitivity::kCritical);
654 }
655 
656 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
657  return SmiUntag(LoadConstantPoolEntry(index));
658 }
659 
660 Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
661  int operand_index) {
662  Node* index =
663  BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
664  return LoadConstantPoolEntry(index);
665 }
666 
667 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
668  int operand_index) {
669  return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
670 }
671 
672 TNode<FeedbackVector> InterpreterAssembler::LoadFeedbackVector() {
673  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
674  return CodeStubAssembler::LoadFeedbackVector(function);
675 }
676 
677 Node* InterpreterAssembler::LoadFeedbackVectorUnchecked() {
678  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
679  return CodeStubAssembler::LoadFeedbackVectorUnchecked(function);
680 }
681 
682 void InterpreterAssembler::CallPrologue() {
683  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
684  // Bytecodes that make a call along the critical path save the bytecode
685  // offset in the bytecode handler's prologue. For other bytecodes, if
686  // there are multiple calls in the bytecode handler, you need to spill
687  // before each of them, unless SaveBytecodeOffset has explicitly been called
688  // in a path that dominates _all_ of those calls (which we don't track).
689  SaveBytecodeOffset();
690  }
691 
692  if (FLAG_debug_code && !disable_stack_check_across_call_) {
693  DCHECK_NULL(stack_pointer_before_call_);
694  stack_pointer_before_call_ = LoadStackPointer();
695  }
696  bytecode_array_valid_ = false;
697  made_call_ = true;
698 }
699 
700 void InterpreterAssembler::CallEpilogue() {
701  if (FLAG_debug_code && !disable_stack_check_across_call_) {
702  Node* stack_pointer_after_call = LoadStackPointer();
703  Node* stack_pointer_before_call = stack_pointer_before_call_;
704  stack_pointer_before_call_ = nullptr;
705  AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
706  AbortReason::kUnexpectedStackPointer);
707  }
708 }
709 
710 void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
711  Node* slot_id) {
712  Comment("increment call count");
713  TNode<Smi> call_count =
714  CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize));
715  // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
716  // count are used as flags. To increment the call count by 1 we hence
717  // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
718  Node* new_count = SmiAdd(
719  call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
720  // Count is Smi, so we don't need a write barrier.
721  StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
722  SKIP_WRITE_BARRIER, kPointerSize);
723 }
724 
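// Updates the call feedback for {target} in the given feedback vector slot,
// transitioning the slot towards monomorphic (a weak reference to {target})
// or megamorphic as appropriate.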
725 void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
726  Node* feedback_vector,
727  Node* slot_id) {
728  Label extra_checks(this, Label::kDeferred), done(this);
729 
730  // Check if we have monomorphic {target} feedback already.
731  TNode<MaybeObject> feedback =
732  LoadFeedbackVectorSlot(feedback_vector, slot_id);
733  Comment("check if monomorphic");
734  TNode<BoolT> is_monomorphic = IsWeakReferenceTo(feedback, CAST(target));
735  GotoIf(is_monomorphic, &done);
736 
737  // Check if it is a megamorphic {target}.
738  Comment("check if megamorphic");
739  Node* is_megamorphic = WordEqual(
740  feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
741  Branch(is_megamorphic, &done, &extra_checks);
742 
743  BIND(&extra_checks);
744  {
745  Label initialize(this), mark_megamorphic(this);
746 
747  Comment("check if weak reference");
748  Node* is_uninitialized = WordEqual(
749  feedback,
750  HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
751  GotoIf(is_uninitialized, &initialize);
752  CSA_ASSERT(this, IsWeakOrCleared(feedback));
753 
754  // If the weak reference is cleared, we have a new chance to become
755  // monomorphic.
756  Comment("check if weak reference is cleared");
757  Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
758 
759  BIND(&initialize);
760  {
761  // Check if {target} is a JSFunction in the current native context.
762  Comment("check if function in same native context");
763  GotoIf(TaggedIsSmi(target), &mark_megamorphic);
764  // Check if the {target} is a JSFunction or JSBoundFunction
765  // in the current native context.
766  VARIABLE(var_current, MachineRepresentation::kTagged, target);
767  Label loop(this, &var_current), done_loop(this);
768  Goto(&loop);
769  BIND(&loop);
770  {
771  Label if_boundfunction(this), if_function(this);
772  Node* current = var_current.value();
773  CSA_ASSERT(this, TaggedIsNotSmi(current));
774  Node* current_instance_type = LoadInstanceType(current);
775  GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
776  &if_boundfunction);
777  Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
778  &if_function, &mark_megamorphic);
779 
780  BIND(&if_function);
781  {
782  // Check that the JSFunction {current} is in the current native
783  // context.
784  Node* current_context =
785  LoadObjectField(current, JSFunction::kContextOffset);
786  Node* current_native_context = LoadNativeContext(current_context);
787  Branch(WordEqual(LoadNativeContext(context), current_native_context),
788  &done_loop, &mark_megamorphic);
789  }
790 
791  BIND(&if_boundfunction);
792  {
793  // Continue with the [[BoundTargetFunction]] of {current}.
794  var_current.Bind(LoadObjectField(
795  current, JSBoundFunction::kBoundTargetFunctionOffset));
796  Goto(&loop);
797  }
798  }
799  BIND(&done_loop);
800  StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
801  CAST(target));
802  ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
803  Goto(&done);
804  }
805 
806  BIND(&mark_megamorphic);
807  {
808  // MegamorphicSentinel is an immortal immovable object so
809  // write-barrier is not needed.
810  Comment("transition to megamorphic");
811  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
812  StoreFeedbackVectorSlot(
813  feedback_vector, slot_id,
814  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
815  SKIP_WRITE_BARRIER);
816  ReportFeedbackUpdate(feedback_vector, slot_id,
817  "Call:TransitionMegamorphic");
818  Goto(&done);
819  }
820  }
821 
822  BIND(&done);
823 }
824 
825 void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
826  Node* maybe_feedback_vector,
827  Node* slot_id) {
828  Label feedback_done(this);
829  // If feedback_vector is not valid, then nothing to do.
830  GotoIf(IsUndefined(maybe_feedback_vector), &feedback_done);
831 
832  CSA_SLOW_ASSERT(this, IsFeedbackVector(maybe_feedback_vector));
833 
834  // Increment the call count.
835  IncrementCallCount(maybe_feedback_vector, slot_id);
836 
837  // Collect the callable {target} feedback.
838  CollectCallableFeedback(target, context, maybe_feedback_vector, slot_id);
839  Goto(&feedback_done);
840 
841  BIND(&feedback_done);
842 }
843 
844 void InterpreterAssembler::CallJSAndDispatch(
845  Node* function, Node* context, const RegListNodePair& args,
846  ConvertReceiverMode receiver_mode) {
847  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
848  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
849  bytecode_ == Bytecode::kInvokeIntrinsic);
850  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
851 
852  Node* args_count;
853  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
854  // The receiver is implied, so it is not in the argument list.
855  args_count = args.reg_count();
856  } else {
857  // Subtract the receiver from the argument count.
858  Node* receiver_count = Int32Constant(1);
859  args_count = Int32Sub(args.reg_count(), receiver_count);
860  }
861 
862  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
863  isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
864  Node* code_target = HeapConstant(callable.code());
865 
866  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
867  args_count, args.base_reg_location(),
868  function);
869  // TailCallStubThenDispatch updates accumulator with result.
870  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
871 }
872 
873 template <class... TArgs>
874 void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
875  Node* arg_count,
876  ConvertReceiverMode receiver_mode,
877  TArgs... args) {
878  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
879  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
880  bytecode_ == Bytecode::kInvokeIntrinsic);
881  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
882  Callable callable = CodeFactory::Call(isolate());
883  Node* code_target = HeapConstant(callable.code());
884 
885  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
886  // The first argument (the receiver) is implied to be undefined.
887  TailCallStubThenBytecodeDispatch(
888  callable.descriptor(), code_target, context, function, arg_count,
889  static_cast<Node*>(UndefinedConstant()), args...);
890  } else {
891  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
892  context, function, arg_count, args...);
893  }
894  // TailCallStubThenDispatch updates accumulator with result.
895  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
896 }
897 
898 // Instantiate CallJSAndDispatch() for argument counts used by interpreter
899 // generator.
900 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
901  Node* function, Node* context, Node* arg_count,
902  ConvertReceiverMode receiver_mode);
903 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
904  Node* function, Node* context, Node* arg_count,
905  ConvertReceiverMode receiver_mode, Node*);
906 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
907  Node* function, Node* context, Node* arg_count,
908  ConvertReceiverMode receiver_mode, Node*, Node*);
909 template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
910  Node* function, Node* context, Node* arg_count,
911  ConvertReceiverMode receiver_mode, Node*, Node*, Node*);
912 
913 void InterpreterAssembler::CallJSWithSpreadAndDispatch(
914  Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
915  Node* maybe_feedback_vector) {
916  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
917  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
918  CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
919  Comment("call using CallWithSpread builtin");
920  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
921  isolate(), ConvertReceiverMode::kAny,
922  InterpreterPushArgsMode::kWithFinalSpread);
923  Node* code_target = HeapConstant(callable.code());
924 
925  Node* receiver_count = Int32Constant(1);
926  Node* args_count = Int32Sub(args.reg_count(), receiver_count);
927  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
928  args_count, args.base_reg_location(),
929  function);
930  // TailCallStubThenDispatch updates accumulator with result.
931  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
932 }
933 
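// Collects constructor feedback for {new_target} in the given feedback vector
// slot (recording an AllocationSite when {target} and {new_target} are the
// Array constructor) and then performs the construct call through the
// appropriate InterpreterPushArgsThenConstruct builtin.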
934 Node* InterpreterAssembler::Construct(Node* target, Node* context,
935  Node* new_target,
936  const RegListNodePair& args,
937  Node* slot_id, Node* feedback_vector) {
938  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
939  VARIABLE(var_result, MachineRepresentation::kTagged);
940  VARIABLE(var_site, MachineRepresentation::kTagged);
941  Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
942  construct(this), construct_array(this, &var_site);
943 
944  // Increment the call count.
945  IncrementCallCount(feedback_vector, slot_id);
946 
947  // Check if we have monomorphic {new_target} feedback already.
948  TNode<MaybeObject> feedback =
949  LoadFeedbackVectorSlot(feedback_vector, slot_id);
950  Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
951  &extra_checks);
952 
953  BIND(&extra_checks);
954  {
955  Label check_allocation_site(this), check_initialized(this),
956  initialize(this), mark_megamorphic(this);
957 
958  // Check if it is a megamorphic {new_target}.
959  Comment("check if megamorphic");
960  Node* is_megamorphic = WordEqual(
961  feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
962  GotoIf(is_megamorphic, &construct);
963 
964  Comment("check if weak reference");
965  GotoIfNot(IsWeakOrCleared(feedback), &check_allocation_site);
966 
967  // If the weak reference is cleared, we have a new chance to become
968  // monomorphic.
969  Comment("check if weak reference is cleared");
970  Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
971 
972  BIND(&check_allocation_site);
973  {
974  // Check if it is an AllocationSite.
975  Comment("check if allocation site");
976  TNode<HeapObject> strong_feedback = CAST(feedback);
977  GotoIfNot(IsAllocationSite(strong_feedback), &check_initialized);
978 
979  // Make sure that {target} and {new_target} are the Array constructor.
980  Node* array_function = LoadContextElement(LoadNativeContext(context),
981  Context::ARRAY_FUNCTION_INDEX);
982  GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
983  GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
984  var_site.Bind(strong_feedback);
985  Goto(&construct_array);
986  }
987 
988  BIND(&check_initialized);
989  {
990  // Check if it is uninitialized.
991  Comment("check if uninitialized");
992  Node* is_uninitialized =
993  WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
994  Branch(is_uninitialized, &initialize, &mark_megamorphic);
995  }
996 
997  BIND(&initialize);
998  {
999  Comment("check if function in same native context");
1000  GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
1001  // Check if the {new_target} is a JSFunction or JSBoundFunction
1002  // in the current native context.
1003  VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
1004  Label loop(this, &var_current), done_loop(this);
1005  Goto(&loop);
1006  BIND(&loop);
1007  {
1008  Label if_boundfunction(this), if_function(this);
1009  Node* current = var_current.value();
1010  CSA_ASSERT(this, TaggedIsNotSmi(current));
1011  Node* current_instance_type = LoadInstanceType(current);
1012  GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
1013  &if_boundfunction);
1014  Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
1015  &if_function, &mark_megamorphic);
1016 
1017  BIND(&if_function);
1018  {
1019  // Check that the JSFunction {current} is in the current native
1020  // context.
1021  Node* current_context =
1022  LoadObjectField(current, JSFunction::kContextOffset);
1023  Node* current_native_context = LoadNativeContext(current_context);
1024  Branch(WordEqual(LoadNativeContext(context), current_native_context),
1025  &done_loop, &mark_megamorphic);
1026  }
1027 
1028  BIND(&if_boundfunction);
1029  {
1030  // Continue with the [[BoundTargetFunction]] of {current}.
1031  var_current.Bind(LoadObjectField(
1032  current, JSBoundFunction::kBoundTargetFunctionOffset));
1033  Goto(&loop);
1034  }
1035  }
1036  BIND(&done_loop);
1037 
1038  // Create an AllocationSite if {target} and {new_target} refer
1039  // to the current native context's Array constructor.
1040  Label create_allocation_site(this), store_weak_reference(this);
1041  GotoIfNot(WordEqual(target, new_target), &store_weak_reference);
1042  Node* array_function = LoadContextElement(LoadNativeContext(context),
1043  Context::ARRAY_FUNCTION_INDEX);
1044  Branch(WordEqual(target, array_function), &create_allocation_site,
1045  &store_weak_reference);
1046 
1047  BIND(&create_allocation_site);
1048  {
1049  var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
1050  SmiTag(slot_id)));
1051  ReportFeedbackUpdate(feedback_vector, slot_id,
1052  "Construct:CreateAllocationSite");
1053  Goto(&construct_array);
1054  }
1055 
1056  BIND(&store_weak_reference);
1057  {
1058  StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
1059  CAST(new_target));
1060  ReportFeedbackUpdate(feedback_vector, slot_id,
1061  "Construct:StoreWeakReference");
1062  Goto(&construct);
1063  }
1064  }
1065 
1066  BIND(&mark_megamorphic);
1067  {
1068  // MegamorphicSentinel is an immortal immovable object so
1069  // write-barrier is not needed.
1070  Comment("transition to megamorphic");
1071  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
1072  StoreFeedbackVectorSlot(
1073  feedback_vector, slot_id,
1074  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
1075  SKIP_WRITE_BARRIER);
1076  ReportFeedbackUpdate(feedback_vector, slot_id,
1077  "Construct:TransitionMegamorphic");
1078  Goto(&construct);
1079  }
1080  }
1081 
1082  BIND(&construct_array);
1083  {
1084  // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
1085  // constructor feedback collection inside of Ignition.
1086  Comment("call using ConstructArray builtin");
1087  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1088  isolate(), InterpreterPushArgsMode::kArrayFunction);
1089  Node* code_target = HeapConstant(callable.code());
1090  var_result.Bind(CallStub(callable.descriptor(), code_target, context,
1091  args.reg_count(), args.base_reg_location(), target,
1092  new_target, var_site.value()));
1093  Goto(&return_result);
1094  }
1095 
1096  BIND(&construct);
1097  {
1098  // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
1099  Comment("call using Construct builtin");
1100  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1101  isolate(), InterpreterPushArgsMode::kOther);
1102  Node* code_target = HeapConstant(callable.code());
1103  var_result.Bind(CallStub(callable.descriptor(), code_target, context,
1104  args.reg_count(), args.base_reg_location(), target,
1105  new_target, UndefinedConstant()));
1106  Goto(&return_result);
1107  }
1108 
1109  BIND(&return_result);
1110  return var_result.value();
1111 }
1112 
1113 Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
1114  Node* new_target,
1115  const RegListNodePair& args,
1116  Node* slot_id,
1117  Node* feedback_vector) {
1118  // TODO(bmeurer): Unify this with the Construct bytecode feedback
1119  // above once we have a way to pass the AllocationSite to the Array
1120  // constructor _and_ spread the last argument at the same time.
1121  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1122  Label extra_checks(this, Label::kDeferred), construct(this);
1123 
1124  // Increment the call count.
1125  IncrementCallCount(feedback_vector, slot_id);
1126 
1127  // Check if we have monomorphic {new_target} feedback already.
1128  TNode<MaybeObject> feedback =
1129  LoadFeedbackVectorSlot(feedback_vector, slot_id);
1130  Branch(IsWeakReferenceTo(feedback, CAST(new_target)), &construct,
1131  &extra_checks);
1132 
1133  BIND(&extra_checks);
1134  {
1135  Label check_initialized(this), initialize(this), mark_megamorphic(this);
1136 
1137  // Check if it is a megamorphic {new_target}.
1138  Comment("check if megamorphic");
1139  Node* is_megamorphic = WordEqual(
1140  feedback, HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
1141  GotoIf(is_megamorphic, &construct);
1142 
1143  Comment("check if weak reference");
1144  GotoIfNot(IsWeakOrCleared(feedback), &check_initialized);
1145 
1146  // If the weak reference is cleared, we have a new chance to become
1147  // monomorphic.
1148  Comment("check if weak reference is cleared");
1149  Branch(IsCleared(feedback), &initialize, &mark_megamorphic);
1150 
1151  BIND(&check_initialized);
1152  {
1153  // Check if it is uninitialized.
1154  Comment("check if uninitialized");
1155  Node* is_uninitialized =
1156  WordEqual(feedback, LoadRoot(RootIndex::kuninitialized_symbol));
1157  Branch(is_uninitialized, &initialize, &mark_megamorphic);
1158  }
1159 
1160  BIND(&initialize);
1161  {
1162  Comment("check if function in same native context");
1163  GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
1164  // Check if the {new_target} is a JSFunction or JSBoundFunction
1165  // in the current native context.
1166  VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
1167  Label loop(this, &var_current), done_loop(this);
1168  Goto(&loop);
1169  BIND(&loop);
1170  {
1171  Label if_boundfunction(this), if_function(this);
1172  Node* current = var_current.value();
1173  CSA_ASSERT(this, TaggedIsNotSmi(current));
1174  Node* current_instance_type = LoadInstanceType(current);
1175  GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
1176  &if_boundfunction);
1177  Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
1178  &if_function, &mark_megamorphic);
1179 
1180  BIND(&if_function);
1181  {
1182  // Check that the JSFunction {current} is in the current native
1183  // context.
1184  Node* current_context =
1185  LoadObjectField(current, JSFunction::kContextOffset);
1186  Node* current_native_context = LoadNativeContext(current_context);
1187  Branch(WordEqual(LoadNativeContext(context), current_native_context),
1188  &done_loop, &mark_megamorphic);
1189  }
1190 
1191  BIND(&if_boundfunction);
1192  {
1193  // Continue with the [[BoundTargetFunction]] of {current}.
1194  var_current.Bind(LoadObjectField(
1195  current, JSBoundFunction::kBoundTargetFunctionOffset));
1196  Goto(&loop);
1197  }
1198  }
1199  BIND(&done_loop);
1200  StoreWeakReferenceInFeedbackVector(feedback_vector, slot_id,
1201  CAST(new_target));
1202  ReportFeedbackUpdate(feedback_vector, slot_id,
1203  "ConstructWithSpread:Initialize");
1204  Goto(&construct);
1205  }
1206 
1207  BIND(&mark_megamorphic);
1208  {
1209  // MegamorphicSentinel is an immortal immovable object so
1210  // write-barrier is not needed.
1211  Comment("transition to megamorphic");
1212  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kmegamorphic_symbol));
1213  StoreFeedbackVectorSlot(
1214  feedback_vector, slot_id,
1215  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
1216  SKIP_WRITE_BARRIER);
1217  ReportFeedbackUpdate(feedback_vector, slot_id,
1218  "ConstructWithSpread:TransitionMegamorphic");
1219  Goto(&construct);
1220  }
1221  }
1222 
1223  BIND(&construct);
1224  Comment("call using ConstructWithSpread builtin");
1225  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
1226  isolate(), InterpreterPushArgsMode::kWithFinalSpread);
1227  Node* code_target = HeapConstant(callable.code());
1228  return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
1229  args.base_reg_location(), target, new_target,
1230  UndefinedConstant());
1231 }
1232 
1233 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
1234  const RegListNodePair& args,
1235  int result_size) {
1236  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1237  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
1238  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
1239  Node* code_target = HeapConstant(callable.code());
1240 
1241  // Get the function entry from the function id.
1242  Node* function_table = ExternalConstant(
1243  ExternalReference::runtime_function_table_address(isolate()));
1244  Node* function_offset =
1245  Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
1246  Node* function =
1247  IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
1248  Node* function_entry =
1249  Load(MachineType::Pointer(), function,
1250  IntPtrConstant(offsetof(Runtime::Function, entry)));
1251 
1252  return CallStubR(callable.descriptor(), result_size, code_target, context,
1253  args.reg_count(), args.base_reg_location(), function_entry);
1254 }
1255 
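// Adjusts the interrupt budget stored in the BytecodeArray by |weight|, after
// accounting for the current bytecode's size. Backward updates subtract the
// weight and, if the budget would go negative, call Runtime::kInterrupt and
// reset it; forward updates only add to it.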
1256 void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
1257  Comment("[ UpdateInterruptBudget");
1258 
1259  Node* budget_offset =
1260  IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
1261 
1262  // Assert that the weight is positive (negative weights should be implemented
1263  // as backward updates).
1264  CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));
1265 
1266  // Update budget by |weight| and check if it reaches zero.
1267  Variable new_budget(this, MachineRepresentation::kWord32);
1268  Node* old_budget =
1269  Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
1270  // Make sure we include the current bytecode in the budget calculation.
1271  Node* budget_after_bytecode =
1272  Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));
1273 
1274  if (backward) {
1275  new_budget.Bind(Int32Sub(budget_after_bytecode, weight));
1276 
1277  Node* condition =
1278  Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
1279  Label ok(this), interrupt_check(this, Label::kDeferred);
1280  Branch(condition, &ok, &interrupt_check);
1281 
1282  // Perform interrupt and reset budget.
1283  BIND(&interrupt_check);
1284  {
1285  CallRuntime(Runtime::kInterrupt, GetContext());
1286  new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
1287  Goto(&ok);
1288  }
1289 
1290  BIND(&ok);
1291  } else {
1292  // For a forward jump, we know we only increase the interrupt budget, so
1293  // no need to check if it's below zero.
1294  new_budget.Bind(Int32Add(budget_after_bytecode, weight));
1295  }
1296 
1297  // Update budget.
1298  StoreNoWriteBarrier(MachineRepresentation::kWord32,
1299  BytecodeArrayTaggedPointer(), budget_offset,
1300  new_budget.value());
1301  Comment("] UpdateInterruptBudget");
1302 }
1303 
1304 Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }
1305 
1306 Node* InterpreterAssembler::Advance(int delta) {
1307  return Advance(IntPtrConstant(delta));
1308 }
1309 
1310 Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
1311 #ifdef V8_TRACE_IGNITION
1312  TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
1313 #endif
1314  Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
1315  : IntPtrAdd(BytecodeOffset(), delta);
1316  bytecode_offset_.Bind(next_offset);
1317  return next_offset;
1318 }
1319 
1320 Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
1321  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
1322 
1323  UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
1324  Node* new_bytecode_offset = Advance(delta, backward);
1325  Node* target_bytecode = LoadBytecode(new_bytecode_offset);
1326  return DispatchToBytecode(target_bytecode, new_bytecode_offset);
1327 }
1328 
1329 Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
1330 
1331 Node* InterpreterAssembler::JumpBackward(Node* delta) {
1332  return Jump(delta, true);
1333 }
1334 
1335 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
1336  Label match(this), no_match(this);
1337 
1338  Branch(condition, &match, &no_match);
1339  BIND(&match);
1340  Jump(delta);
1341  BIND(&no_match);
1342  Dispatch();
1343 }
1344 
1345 void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
1346  JumpConditional(WordEqual(lhs, rhs), delta);
1347 }
1348 
1349 void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
1350  Node* delta) {
1351  JumpConditional(WordNotEqual(lhs, rhs), delta);
1352 }
1353 
1354 Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
1355  Node* bytecode =
1356  Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
1357  return ChangeUint32ToWord(bytecode);
1358 }
1359 
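// If |target_bytecode| is Star, executes it inline and returns the bytecode
// that follows it; otherwise returns |target_bytecode| unchanged.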
1360 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
1361  Label do_inline_star(this), done(this);
1362 
1363  Variable var_bytecode(this, MachineType::PointerRepresentation());
1364  var_bytecode.Bind(target_bytecode);
1365 
1366  Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
1367  Node* is_star = WordEqual(target_bytecode, star_bytecode);
1368  Branch(is_star, &do_inline_star, &done);
1369 
1370  BIND(&do_inline_star);
1371  {
1372  InlineStar();
1373  var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
1374  Goto(&done);
1375  }
1376  BIND(&done);
1377  return var_bytecode.value();
1378 }
1379 
1380 void InterpreterAssembler::InlineStar() {
1381  Bytecode previous_bytecode = bytecode_;
1382  AccumulatorUse previous_acc_use = accumulator_use_;
1383 
1384  bytecode_ = Bytecode::kStar;
1385  accumulator_use_ = AccumulatorUse::kNone;
1386 
1387 #ifdef V8_TRACE_IGNITION
1388  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
1389 #endif
1390  StoreRegister(GetAccumulator(),
1391  BytecodeOperandReg(0, LoadSensitivity::kSafe));
1392 
1393  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
1394 
1395  Advance();
1396  bytecode_ = previous_bytecode;
1397  accumulator_use_ = previous_acc_use;
1398 }
1399 
1400 Node* InterpreterAssembler::Dispatch() {
1401  Comment("========= Dispatch");
1402  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1403  Node* target_offset = Advance();
1404  Node* target_bytecode = LoadBytecode(target_offset);
1405 
1406  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
1407  target_bytecode = StarDispatchLookahead(target_bytecode);
1408  }
1409  return DispatchToBytecode(target_bytecode, BytecodeOffset());
1410 }
1411 
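// Loads the handler entry for |target_bytecode| from the dispatch table and
// tail-calls it at |new_bytecode_offset|.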
1412 Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
1413  Node* new_bytecode_offset) {
1414  if (FLAG_trace_ignition_dispatches) {
1415  TraceBytecodeDispatch(target_bytecode);
1416  }
1417 
1418  Node* target_code_entry =
1419  Load(MachineType::Pointer(), DispatchTableRawPointer(),
1420  TimesPointerSize(target_bytecode));
1421 
1422  return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
1423  target_bytecode);
1424 }
1425 
1426 Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
1427  Node* bytecode_offset,
1428  Node* target_bytecode) {
1429  // TODO(ishell): Add CSA::CodeEntryPoint(code).
1430  Node* handler_entry =
1431  IntPtrAdd(BitcastTaggedToWord(handler),
1432  IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
1433  return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
1434  target_bytecode);
1435 }
1436 
1437 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
1438  Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
1439  // Propagate speculation poisoning.
1440  Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
1441  return TailCallBytecodeDispatch(
1442  InterpreterDispatchDescriptor{}, poisoned_handler_entry,
1443  GetAccumulatorUnchecked(), bytecode_offset, BytecodeArrayTaggedPointer(),
1444  DispatchTableRawPointer());
1445 }
1446 
1447 void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
1448  // Dispatching a wide bytecode requires treating the prefix
1449  // bytecode as a base pointer into the dispatch table and dispatching
1450  // the bytecode that follows relative to this base.
1451  //
1452  // Indices 0-255 correspond to bytecodes with operand_scale == 0
1453  // Indices 256-511 correspond to bytecodes with operand_scale == 1
1454  // Indices 512-767 correspond to bytecodes with operand_scale == 2
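  //
  // For example, after a Wide prefix (OperandScale::kDouble) the handler for
  // the bytecode that follows is loaded from the dispatch table at index
  // (1 << kBitsPerByte) + next_bytecode, i.e. 256 + next_bytecode.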
1455  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
1456  Node* next_bytecode_offset = Advance(1);
1457  Node* next_bytecode = LoadBytecode(next_bytecode_offset);
1458 
1459  if (FLAG_trace_ignition_dispatches) {
1460  TraceBytecodeDispatch(next_bytecode);
1461  }
1462 
1463  Node* base_index;
1464  switch (operand_scale) {
1465  case OperandScale::kDouble:
1466  base_index = IntPtrConstant(1 << kBitsPerByte);
1467  break;
1468  case OperandScale::kQuadruple:
1469  base_index = IntPtrConstant(2 << kBitsPerByte);
1470  break;
1471  default:
1472  UNREACHABLE();
1473  }
1474  Node* target_index = IntPtrAdd(base_index, next_bytecode);
1475  Node* target_code_entry =
1476  Load(MachineType::Pointer(), DispatchTableRawPointer(),
1477  TimesPointerSize(target_index));
1478 
1479  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
1480  next_bytecode);
1481 }
1482 
1483 void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
1484  // TODO(rmcilroy): Investigate whether it is worth supporting self
1485  // optimization of primitive functions like FullCodegen.
1486 
1487  // Update the profiling count by the number of bytes between the end of
1488  // the current bytecode and the start of the first one, to simulate a
1489  // backedge to the start of the function.
1490  //
1491  // With headers and current offset, the bytecode array layout looks like:
1492  //
1493  //           <---------- simulated backedge ----------
1494  // | header | first bytecode | .... | return bytecode |
1495  //  |<------ current offset ------->
1496  //  ^ tagged bytecode array pointer
1497  //
1498  // UpdateInterruptBudget already handles adding the bytecode size to the
1499  // length of the back-edge, so we just have to correct for the non-zero offset
1500  // of the first bytecode.
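  //
  // The weight passed below is therefore the current bytecode offset minus
  // the offset of the first bytecode within the bytecode array object, i.e.
  // the backedge length in bytes up to the start of the return bytecode.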
1501 
1502  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
1503  Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
1504  Int32Constant(kFirstBytecodeOffset));
1505  UpdateInterruptBudget(profiling_weight, true);
1506 }
1507 
1508 Node* InterpreterAssembler::LoadOSRNestingLevel() {
1509  return LoadObjectField(BytecodeArrayTaggedPointer(),
1510  BytecodeArray::kOSRNestingLevelOffset,
1511  MachineType::Int8());
1512 }
1513 
1514 void InterpreterAssembler::Abort(AbortReason abort_reason) {
1515  disable_stack_check_across_call_ = true;
1516  Node* abort_id = SmiConstant(abort_reason);
1517  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
1518  disable_stack_check_across_call_ = false;
1519 }
1520 
1521 void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
1522  AbortReason abort_reason) {
1523  Label ok(this), abort(this, Label::kDeferred);
1524  Branch(WordEqual(lhs, rhs), &ok, &abort);
1525 
1526  BIND(&abort);
1527  Abort(abort_reason);
1528  Goto(&ok);
1529 
1530  BIND(&ok);
1531 }
1532 
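// Checks whether the debugger has requested that the current frame be
// restarted (debug_restart_fp is non-null) and, if so, calls the frame
// dropper trampoline, which is not expected to return.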
1533 void InterpreterAssembler::MaybeDropFrames(Node* context) {
1534  Node* restart_fp_address =
1535  ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
1536 
1537  Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
1538  Node* null = IntPtrConstant(0);
1539 
1540  Label ok(this), drop_frames(this);
1541  Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
1542 
1543  BIND(&drop_frames);
1544  // We don't expect this call to return since the frame dropper tears down
1545  // the stack and jumps into the function on the target frame to restart it.
1546  CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
1547  Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
1548  Goto(&ok);
1549 
1550  BIND(&ok);
1551 }
1552 
1553 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
1554  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
1555  SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
1556 }
1557 
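// Bumps the (source bytecode, target bytecode) dispatch counter. The counters
// table is a flat (kLast + 1) x (kLast + 1) matrix of pointer-sized counters,
// so the entry for this pair lives at index
//   source_bytecode * (kLast + 1) + target_bytecode.
// Counters saturate at the maximum value instead of wrapping around.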
1558 void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
1559  Node* counters_table = ExternalConstant(
1560  ExternalReference::interpreter_dispatch_counters(isolate()));
1561  Node* source_bytecode_table_index = IntPtrConstant(
1562  static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
1563 
1564  Node* counter_offset =
1565  TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
1566  Node* old_counter =
1567  Load(MachineType::IntPtr(), counters_table, counter_offset);
1568 
1569  Label counter_ok(this), counter_saturated(this, Label::kDeferred);
1570 
1571  Node* counter_reached_max = WordEqual(
1572  old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
1573  Branch(counter_reached_max, &counter_saturated, &counter_ok);
1574 
1575  BIND(&counter_ok);
1576  {
1577  Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
1578  StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
1579  counter_offset, new_counter);
1580  Goto(&counter_saturated);
1581  }
1582 
1583  BIND(&counter_saturated);
1584 }
1585 
1586 // static
1587 bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
1588 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
1589  return false;
1590 #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
1591  V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
1592  return true;
1593 #else
1594 #error "Unknown Architecture"
1595 #endif
1596 }
1597 
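// Aborts unless the generator's parameters-and-registers array is large
// enough to hold formal_parameter_count + register_count entries.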
1598 void InterpreterAssembler::AbortIfRegisterCountInvalid(
1599  Node* parameters_and_registers, Node* formal_parameter_count,
1600  Node* register_count) {
1601  Node* array_size = LoadAndUntagFixedArrayBaseLength(parameters_and_registers);
1602 
1603  Label ok(this), abort(this, Label::kDeferred);
1604  Branch(UintPtrLessThanOrEqual(
1605  IntPtrAdd(formal_parameter_count, register_count), array_size),
1606  &ok, &abort);
1607 
1608  BIND(&abort);
1609  Abort(AbortReason::kInvalidParametersAndRegistersInGenerator);
1610  Goto(&ok);
1611 
1612  BIND(&ok);
1613 }
1614 
1615 Node* InterpreterAssembler::ExportParametersAndRegisterFile(
1616  TNode<FixedArray> array, const RegListNodePair& registers,
1617  TNode<Int32T> formal_parameter_count) {
1618  // Store the formal parameters (without receiver) followed by the
1619  // registers into the generator's internal parameters_and_registers field.
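  //
  // The resulting array layout is:
  //   [0 .. formal_parameter_count)                 -> parameters
  //   [formal_parameter_count .. + register_count)  -> registers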
1620  TNode<IntPtrT> formal_parameter_count_intptr =
1621  ChangeInt32ToIntPtr(formal_parameter_count);
1622  Node* register_count = ChangeUint32ToWord(registers.reg_count());
1623  if (FLAG_debug_code) {
1624  CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
1625  RegisterLocation(Register(0))));
1626  AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
1627  register_count);
1628  }
1629 
1630  {
1631  Variable var_index(this, MachineType::PointerRepresentation());
1632  var_index.Bind(IntPtrConstant(0));
1633 
1634  // Iterate over parameters and write them into the array.
1635  Label loop(this, &var_index), done_loop(this);
1636 
1637  Node* reg_base = IntPtrAdd(
1638  IntPtrConstant(Register::FromParameterIndex(0, 1).ToOperand() - 1),
1639  formal_parameter_count_intptr);
1640 
1641  Goto(&loop);
1642  BIND(&loop);
1643  {
1644  Node* index = var_index.value();
1645  GotoIfNot(UintPtrLessThan(index, formal_parameter_count_intptr),
1646  &done_loop);
1647 
1648  Node* reg_index = IntPtrSub(reg_base, index);
1649  Node* value = LoadRegister(reg_index);
1650 
1651  StoreFixedArrayElement(array, index, value);
1652 
1653  var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1654  Goto(&loop);
1655  }
1656  BIND(&done_loop);
1657  }
1658 
1659  {
1660  // Iterate over register file and write values into array.
1661  // The mapping of register to array index must match that used in
1662  // BytecodeGraphBuilder::VisitResumeGenerator.
1663  Variable var_index(this, MachineType::PointerRepresentation());
1664  var_index.Bind(IntPtrConstant(0));
1665 
1666  Label loop(this, &var_index), done_loop(this);
1667  Goto(&loop);
1668  BIND(&loop);
1669  {
1670  Node* index = var_index.value();
1671  GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1672 
1673  Node* reg_index =
1674  IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1675  Node* value = LoadRegister(reg_index);
1676 
1677  Node* array_index = IntPtrAdd(formal_parameter_count_intptr, index);
1678  StoreFixedArrayElement(array, array_index, value);
1679 
1680  var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
1681  Goto(&loop);
1682  }
1683  BIND(&done_loop);
1684  }
1685 
1686  return array;
1687 }
1688 
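// Inverse of the register-file export above: copies the register portion of
// the generator's parameters_and_registers array (starting at index
// formal_parameter_count) back into the interpreter register file and
// replaces each consumed array slot with the StaleRegister sentinel.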
1689 Node* InterpreterAssembler::ImportRegisterFile(
1690  TNode<FixedArray> array, const RegListNodePair& registers,
1691  TNode<Int32T> formal_parameter_count) {
1692  TNode<IntPtrT> formal_parameter_count_intptr =
1693  ChangeInt32ToIntPtr(formal_parameter_count);
1694  TNode<UintPtrT> register_count = ChangeUint32ToWord(registers.reg_count());
1695  if (FLAG_debug_code) {
1696  CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
1697  RegisterLocation(Register(0))));
1698  AbortIfRegisterCountInvalid(array, formal_parameter_count_intptr,
1699  register_count);
1700  }
1701 
1702  TVARIABLE(IntPtrT, var_index, IntPtrConstant(0));
1703 
1704  // Iterate over the array and write its values into the register file.
1705  // Also erase the array contents so they are not kept alive artificially.
1706  Label loop(this, &var_index), done_loop(this);
1707  Goto(&loop);
1708  BIND(&loop);
1709  {
1710  TNode<IntPtrT> index = var_index.value();
1711  GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
1712 
1713  TNode<IntPtrT> array_index =
1714  IntPtrAdd(formal_parameter_count_intptr, index);
1715  TNode<Object> value = LoadFixedArrayElement(array, array_index);
1716 
1717  TNode<IntPtrT> reg_index =
1718  IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
1719  StoreRegister(value, reg_index);
1720 
1721  StoreFixedArrayElement(array, array_index,
1722  LoadRoot(RootIndex::kStaleRegister));
1723 
1724  var_index = IntPtrAdd(index, IntPtrConstant(1));
1725  Goto(&loop);
1726  }
1727  BIND(&done_loop);
1728 
1729  return array;
1730 }
1731 
1732 int InterpreterAssembler::CurrentBytecodeSize() const {
1733  return Bytecodes::Size(bytecode_, operand_scale_);
1734 }
1735 
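// Converts the accumulator to a Number (or, for kToNumeric, a Numeric, which
// additionally admits BigInt) while collecting type feedback: Smi and
// HeapNumber inputs (and, in kToNumeric mode, BigInts) are passed through
// unchanged with kSignedSmall, kNumber or kBigInt feedback; everything else
// is converted by the NonNumberToNumber/NonNumberToNumeric builtin and
// recorded as kAny.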
1736 void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
1737  Node* object = GetAccumulator();
1738  Node* context = GetContext();
1739 
1740  Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
1741  Variable var_result(this, MachineRepresentation::kTagged);
1742  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
1743  if_objectisother(this, Label::kDeferred);
1744 
1745  GotoIf(TaggedIsSmi(object), &if_objectissmi);
1746  Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);
1747 
1748  BIND(&if_objectissmi);
1749  {
1750  var_result.Bind(object);
1751  var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
1752  Goto(&if_done);
1753  }
1754 
1755  BIND(&if_objectisheapnumber);
1756  {
1757  var_result.Bind(object);
1758  var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
1759  Goto(&if_done);
1760  }
1761 
1762  BIND(&if_objectisother);
1763  {
1764  auto builtin = Builtins::kNonNumberToNumber;
1765  if (mode == Object::Conversion::kToNumeric) {
1766  builtin = Builtins::kNonNumberToNumeric;
1767  // Special case for collecting BigInt feedback.
1768  Label not_bigint(this);
1769  GotoIfNot(IsBigInt(object), &not_bigint);
1770  {
1771  var_result.Bind(object);
1772  var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
1773  Goto(&if_done);
1774  }
1775  BIND(&not_bigint);
1776  }
1777 
1778  // Convert {object} by calling out to the appropriate builtin.
1779  var_result.Bind(CallBuiltin(builtin, context, object));
1780  var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
1781  Goto(&if_done);
1782  }
1783 
1784  BIND(&if_done);
1785 
1786  // Record the type feedback collected for {object}.
1787  Node* slot_index = BytecodeOperandIdx(0);
1788  Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
1789 
1790  UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_index);
1791 
1792  SetAccumulator(var_result.value());
1793  Dispatch();
1794 }
1795 
1796 } // namespace interpreter
1797 } // namespace internal
1798 } // namespace v8