V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
interpreter-assembler.h
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_

#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {
namespace interpreter {

class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
 public:
  InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
                       OperandScale operand_scale);
  ~InterpreterAssembler();

  // Returns the 32-bit unsigned count immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandCount(int operand_index);
  // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandFlag(int operand_index);
  // Returns the 32-bit zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdxInt32(int operand_index);
  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIdx(int operand_index);
  // Returns the smi index immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandIdxSmi(int operand_index);
  // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandUImm(int operand_index);
  // Returns the word-size unsigned immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandUImmWord(int operand_index);
  // Returns the unsigned smi immediate for bytecode operand |operand_index| in
  // the current bytecode.
  compiler::Node* BytecodeOperandUImmSmi(int operand_index);
  // Returns the 32-bit signed immediate for bytecode operand |operand_index|
  // in the current bytecode.
  compiler::Node* BytecodeOperandImm(int operand_index);
  // Returns the word-size signed immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
  // Returns the smi immediate for bytecode operand |operand_index| in the
  // current bytecode.
  compiler::Node* BytecodeOperandImmSmi(int operand_index);
  // Returns the 32-bit unsigned runtime id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandRuntimeId(int operand_index);
  // Returns the 32-bit unsigned native context index immediate for bytecode
  // operand |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandNativeContextIndex(int operand_index);
  // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
  // |operand_index| in the current bytecode.
  compiler::Node* BytecodeOperandIntrinsicId(int operand_index);

  // Accumulator.
  compiler::Node* GetAccumulator();
  void SetAccumulator(compiler::Node* value);

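  // Usage sketch (illustrative): a minimal bytecode handler in the style of
  // interpreter-generator.cc, assuming its IGNITION_HANDLER macro. It reads
  // the Smi immediate operand, places it in the accumulator, and dispatches
  // to the next bytecode:
  //
  //   IGNITION_HANDLER(LdaSmi, InterpreterAssembler) {
  //     Node* smi_int = BytecodeOperandImmSmi(0);
  //     SetAccumulator(smi_int);
  //     Dispatch();
  //   }
  //
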
  // Context.
  compiler::Node* GetContext();
  void SetContext(compiler::Node* value);

  // Context at |depth| in the context chain starting at |context|.
  compiler::Node* GetContextAtDepth(compiler::Node* context,
                                    compiler::Node* depth);

  // Goto the given |target| if the context chain starting at |context| has any
  // extensions up to the given |depth|.
  void GotoIfHasContextExtensionUpToDepth(compiler::Node* context,
                                          compiler::Node* depth, Label* target);

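  // Usage sketch (illustrative, after the fast path of lookup-slot handlers
  // such as LdaLookupContextSlot; the slow path and its BIND are elided,
  // and LoadContextElement comes from CodeStubAssembler):
  //
  //   Node* context = GetContext();
  //   Node* slot_index = BytecodeOperandIdx(1);
  //   Node* depth = BytecodeOperandUImm(2);
  //   Label slowpath(this, Label::kDeferred);
  //   // Any context extension in the chain forces the dynamic lookup path.
  //   GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
  //   Node* slot_context = GetContextAtDepth(context, depth);
  //   SetAccumulator(LoadContextElement(slot_context, slot_index));
  //   Dispatch();
  //
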
  // A RegListNodePair provides an abstraction over lists of registers.
  class RegListNodePair {
   public:
    RegListNodePair(Node* base_reg_location, Node* reg_count)
        : base_reg_location_(base_reg_location), reg_count_(reg_count) {}

    compiler::Node* reg_count() const { return reg_count_; }
    compiler::Node* base_reg_location() const { return base_reg_location_; }

   private:
    compiler::Node* base_reg_location_;
    compiler::Node* reg_count_;
  };

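  // Usage sketch (illustrative): handlers normally obtain a RegListNodePair
  // from a register-list operand rather than constructing one directly:
  //
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   Node* arg_count = args.reg_count();
  //
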
  // Backup/restore register file to/from a fixed array of the correct length.
  // There is an asymmetry between suspend/export and resume/import.
  // - Suspend copies arguments and registers to the generator.
  // - Resume copies only the registers from the generator; the arguments
  //   are copied by the ResumeGenerator trampoline.
  compiler::Node* ExportParametersAndRegisterFile(
      TNode<FixedArray> array, const RegListNodePair& registers,
      TNode<Int32T> formal_parameter_count);
  compiler::Node* ImportRegisterFile(TNode<FixedArray> array,
                                     const RegListNodePair& registers,
                                     TNode<Int32T> formal_parameter_count);

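  // Usage sketch (illustrative; the generator-object field loads that
  // produce |array|, |registers|, and |formal_parameter_count| are elided):
  //
  //   // In a SuspendGenerator-style handler: spill parameters and the
  //   // register file into the generator's FixedArray.
  //   ExportParametersAndRegisterFile(array, registers,
  //                                   formal_parameter_count);
  //   // In a ResumeGenerator-style handler: restore only the registers;
  //   // arguments are copied by the ResumeGenerator trampoline.
  //   ImportRegisterFile(array, registers, formal_parameter_count);
  //
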
  // Loads from and stores to the interpreter register file.
  compiler::Node* LoadRegister(Register reg);
  compiler::Node* LoadAndUntagRegister(Register reg);
  compiler::Node* LoadRegisterAtOperandIndex(int operand_index);
  std::pair<compiler::Node*, compiler::Node*> LoadRegisterPairAtOperandIndex(
      int operand_index);
  void StoreRegister(compiler::Node* value, Register reg);
  void StoreAndTagRegister(compiler::Node* value, Register reg);
  void StoreRegisterAtOperandIndex(compiler::Node* value, int operand_index);
  void StoreRegisterPairAtOperandIndex(compiler::Node* value1,
                                       compiler::Node* value2,
                                       int operand_index);
  void StoreRegisterTripleAtOperandIndex(compiler::Node* value1,
                                         compiler::Node* value2,
                                         compiler::Node* value3,
                                         int operand_index);

  RegListNodePair GetRegisterListAtOperandIndex(int operand_index);
  Node* LoadRegisterFromRegisterList(const RegListNodePair& reg_list,
                                     int index);
  Node* RegisterLocationInRegisterList(const RegListNodePair& reg_list,
                                       int index);

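  // Usage sketch (illustrative): the Ldar and Star handlers in
  // interpreter-generator.cc move values between a register operand and the
  // accumulator:
  //
  //   IGNITION_HANDLER(Ldar, InterpreterAssembler) {
  //     Node* value = LoadRegisterAtOperandIndex(0);
  //     SetAccumulator(value);
  //     Dispatch();
  //   }
  //
  //   IGNITION_HANDLER(Star, InterpreterAssembler) {
  //     Node* accumulator = GetAccumulator();
  //     StoreRegisterAtOperandIndex(accumulator, 0);
  //     Dispatch();
  //   }
  //
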
  // Load constant at the index specified in operand |operand_index| from the
  // constant pool.
  compiler::Node* LoadConstantPoolEntryAtOperandIndex(int operand_index);
  // Load and untag constant at the index specified in operand |operand_index|
  // from the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntryAtOperandIndex(
      int operand_index);
  // Load constant at |index| in the constant pool.
  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
  // Load and untag constant at |index| in the constant pool.
  compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);

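  // Usage sketch (illustrative): the LdaConstant handler loads the constant
  // pool entry selected by its index operand into the accumulator:
  //
  //   IGNITION_HANDLER(LdaConstant, InterpreterAssembler) {
  //     Node* constant = LoadConstantPoolEntryAtOperandIndex(0);
  //     SetAccumulator(constant);
  //     Dispatch();
  //   }
  //
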
  // Load the FeedbackVector for the current function.
  compiler::TNode<FeedbackVector> LoadFeedbackVector();

  // Load the FeedbackVector for the current function. The returned node
  // could be undefined.
  compiler::Node* LoadFeedbackVectorUnchecked();

  // Increment the call count for a CALL_IC or construct call.
  // The call count is located at feedback_vector[slot_id + 1].
  void IncrementCallCount(compiler::Node* feedback_vector,
                          compiler::Node* slot_id);

  // Collect the callable |target| feedback for either a CALL_IC or
  // an INSTANCEOF_IC in the |feedback_vector| at |slot_id|.
  void CollectCallableFeedback(compiler::Node* target, compiler::Node* context,
                               compiler::Node* feedback_vector,
                               compiler::Node* slot_id);

  // Collect CALL_IC feedback for |target| function in the
  // |feedback_vector| at |slot_id|, and the call counts in
  // the |feedback_vector| at |slot_id+1|.
  void CollectCallFeedback(compiler::Node* target, compiler::Node* context,
                           compiler::Node* maybe_feedback_vector,
                           compiler::Node* slot_id);

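  // Usage sketch (illustrative): call-like handlers collect feedback before
  // performing the call, tolerating a missing (undefined) vector:
  //
  //   Node* function = LoadRegisterAtOperandIndex(0);  // the callee
  //   Node* slot_id = BytecodeOperandIdx(3);
  //   Node* maybe_feedback_vector = LoadFeedbackVectorUnchecked();
  //   CollectCallFeedback(function, GetContext(), maybe_feedback_vector,
  //                       slot_id);
  //
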
  // Call JSFunction or Callable |function| with |args| arguments, possibly
  // including the receiver depending on |receiver_mode|. After the call
  // returns, directly dispatches to the next bytecode.
  void CallJSAndDispatch(compiler::Node* function, compiler::Node* context,
                         const RegListNodePair& args,
                         ConvertReceiverMode receiver_mode);

  // Call JSFunction or Callable |function| with |arg_count| arguments (not
  // including receiver) passed as |args|, possibly including the receiver
  // depending on |receiver_mode|. After the call returns, directly dispatches
  // to the next bytecode.
  template <class... TArgs>
  void CallJSAndDispatch(Node* function, Node* context, Node* arg_count,
                         ConvertReceiverMode receiver_mode, TArgs... args);

  // Call JSFunction or Callable |function| with |args|
  // arguments (not including receiver), and the final argument being spread.
  // After the call returns, directly dispatches to the next bytecode.
  void CallJSWithSpreadAndDispatch(compiler::Node* function,
                                   compiler::Node* context,
                                   const RegListNodePair& args,
                                   compiler::Node* slot_id,
                                   compiler::Node* feedback_vector);

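  // Usage sketch (illustrative, condensed from the JSCall helper in
  // interpreter-generator.cc; |receiver_mode| is supplied by the concrete
  // Call bytecode, and feedback collection as sketched above precedes the
  // call):
  //
  //   Node* function = LoadRegisterAtOperandIndex(0);
  //   RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //   Node* context = GetContext();
  //   // CallJSAndDispatch dispatches to the next bytecode itself, so the
  //   // handler ends here.
  //   CallJSAndDispatch(function, context, args, receiver_mode);
  //
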
  // Call constructor |target| with |args| arguments (not including receiver).
  // The |new_target| is the same as the |target| for the new keyword, but
  // differs for the super keyword.
  compiler::Node* Construct(compiler::Node* target, compiler::Node* context,
                            compiler::Node* new_target,
                            const RegListNodePair& args,
                            compiler::Node* slot_id,
                            compiler::Node* feedback_vector);

  // Call constructor |target| with |args| arguments (not including
  // receiver). The last argument is always a spread. The |new_target| is the
  // same as the |target| for the new keyword, but differs for the super
  // keyword.
  compiler::Node* ConstructWithSpread(compiler::Node* target,
                                      compiler::Node* context,
                                      compiler::Node* new_target,
                                      const RegListNodePair& args,
                                      compiler::Node* slot_id,
                                      compiler::Node* feedback_vector);

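  // Usage sketch (illustrative, after the Construct handler in
  // interpreter-generator.cc; the new.target value arrives in the
  // accumulator):
  //
  //   IGNITION_HANDLER(Construct, InterpreterAssembler) {
  //     Node* new_target = GetAccumulator();
  //     Node* constructor = LoadRegisterAtOperandIndex(0);
  //     RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //     Node* slot_id = BytecodeOperandIdx(3);
  //     Node* feedback_vector = LoadFeedbackVector();
  //     Node* context = GetContext();
  //     Node* result = Construct(constructor, context, new_target, args,
  //                              slot_id, feedback_vector);
  //     SetAccumulator(result);
  //     Dispatch();
  //   }
  //
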
  // Call runtime function with |args| arguments which will return
  // |return_size| number of values.
  compiler::Node* CallRuntimeN(compiler::Node* function_id,
                               compiler::Node* context,
                               const RegListNodePair& args,
                               int return_size = 1);

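  // Usage sketch (illustrative): the CallRuntime handler resolves the
  // runtime function id from its operand and leaves the single result in
  // the accumulator:
  //
  //   IGNITION_HANDLER(CallRuntime, InterpreterAssembler) {
  //     Node* function_id = BytecodeOperandRuntimeId(0);
  //     RegListNodePair args = GetRegisterListAtOperandIndex(1);
  //     Node* context = GetContext();
  //     Node* result = CallRuntimeN(function_id, context, args);
  //     SetAccumulator(result);
  //     Dispatch();
  //   }
  //
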
  // Jump forward relative to the current bytecode by the |jump_offset|.
  compiler::Node* Jump(compiler::Node* jump_offset);

  // Jump backward relative to the current bytecode by the |jump_offset|.
  compiler::Node* JumpBackward(compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are equal.
  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
                       compiler::Node* jump_offset);

  // Jump forward relative to the current bytecode by |jump_offset| if the
  // word values |lhs| and |rhs| are not equal.
  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                          compiler::Node* jump_offset);

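  // Usage sketch (illustrative): a JumpIfTrue-style handler compares the
  // accumulator against the true value and takes the jump when they match;
  // TrueConstant() comes from CodeStubAssembler:
  //
  //   Node* accumulator = GetAccumulator();
  //   Node* relative_jump = BytecodeOperandUImmWord(0);
  //   JumpIfWordEqual(accumulator, TrueConstant(), relative_jump);
  //
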
  // Updates the profiler interrupt budget for a return.
  void UpdateInterruptBudgetOnReturn();

  // Returns the OSR nesting level from the bytecode header.
  compiler::Node* LoadOSRNestingLevel();

  // Dispatch to the bytecode.
  compiler::Node* Dispatch();

  // Dispatch bytecode as wide operand variant.
  void DispatchWide(OperandScale operand_scale);

  // Dispatch to |target_bytecode| at |new_bytecode_offset|.
  // |target_bytecode| should be equivalent to loading from the offset.
  compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
                                     compiler::Node* new_bytecode_offset);

  // Abort with the given abort reason.
  void Abort(AbortReason abort_reason);
  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           AbortReason abort_reason);
  // Abort if |register_count| is invalid for the given register file array.
  void AbortIfRegisterCountInvalid(compiler::Node* parameters_and_registers,
                                   compiler::Node* formal_parameter_count,
                                   compiler::Node* register_count);

  // Dispatch to frame dropper trampoline if necessary.
  void MaybeDropFrames(compiler::Node* context);

  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
  compiler::Node* BytecodeOffset();

 protected:
  Bytecode bytecode() const { return bytecode_; }
  static bool TargetSupportsUnalignedAccess();

  void ToNumberOrNumeric(Object::Conversion mode);

 private:
  // Returns a tagged pointer to the current function's BytecodeArray object.
  compiler::Node* BytecodeArrayTaggedPointer();

  // Returns a raw pointer to first entry in the interpreter dispatch table.
  compiler::Node* DispatchTableRawPointer();

  // Returns the accumulator value without checking whether bytecode
  // uses it. This is intended to be used only in dispatch and in
  // tracing as these need to bypass accumulator use validity checks.
  compiler::Node* GetAccumulatorUnchecked();

  // Returns the frame pointer for the interpreted frame of the function being
  // interpreted.
  compiler::Node* GetInterpretedFramePointer();

  // Operations on registers.
  compiler::Node* RegisterLocation(Register reg);
  compiler::Node* RegisterLocation(compiler::Node* reg_index);
  compiler::Node* NextRegister(compiler::Node* reg_index);
  compiler::Node* LoadRegister(Node* reg_index);
  void StoreRegister(compiler::Node* value, compiler::Node* reg_index);

  // Saves and restores interpreter bytecode offset to the interpreter stack
  // frame when performing a call.
  void CallPrologue();
  void CallEpilogue();

  // Increment the dispatch counter for the (current, next) bytecode pair.
  void TraceBytecodeDispatch(compiler::Node* target_index);

  // Traces the current bytecode by calling |function_id|.
  void TraceBytecode(Runtime::FunctionId function_id);

  // Updates the bytecode array's interrupt budget by a 32-bit unsigned
  // |weight| and calls Runtime::kInterrupt if counter reaches zero. If
  // |backward|, then the interrupt budget is decremented, otherwise it is
  // incremented.
  void UpdateInterruptBudget(compiler::Node* weight, bool backward);

  // Returns the offset of register |index| relative to RegisterFilePointer().
  compiler::Node* RegisterFrameOffset(compiler::Node* index);

  // Returns the offset of an operand relative to the current bytecode offset.
  compiler::Node* OperandOffset(int operand_index);

  // Returns a value built from a sequence of bytes in the bytecode
  // array starting at |relative_offset| from the current bytecode.
  // The |result_type| determines the size and signedness of the
  // value read. This method should only be used on architectures that
  // do not support unaligned memory accesses.
  compiler::Node* BytecodeOperandReadUnaligned(
      int relative_offset, MachineType result_type,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns zero- or sign-extended to word32 value of the operand.
  compiler::Node* BytecodeOperandUnsignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedByte(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedShort(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandUnsignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeOperandSignedQuad(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns zero- or sign-extended to word32 value of the operand of
  // given size.
  compiler::Node* BytecodeSignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
  compiler::Node* BytecodeUnsignedOperand(
      int operand_index, OperandSize operand_size,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word-size sign-extended register index for bytecode operand
  // |operand_index| in the current bytecode. Value is not poisoned on
  // speculation since the value loaded from the register is poisoned instead.
  compiler::Node* BytecodeOperandReg(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

  // Returns the word zero-extended index immediate for bytecode operand
  // |operand_index| in the current bytecode for use when loading a constant
  // pool entry.
  compiler::Node* BytecodeOperandConstantPoolIdx(
      int operand_index,
      LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);

372 
373  // Jump relative to the current bytecode by the |jump_offset|. If |backward|,
374  // then jump backward (subtract the offset), otherwise jump forward (add the
375  // offset). Helper function for Jump and JumpBackward.
376  compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
377 
378  // Jump forward relative to the current bytecode by |jump_offset| if the
379  // |condition| is true. Helper function for JumpIfWordEqual and
380  // JumpIfWordNotEqual.
381  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
382 
383  // Save the bytecode offset to the interpreter frame.
384  void SaveBytecodeOffset();
385  // Reload the bytecode offset from the interpreter frame.
386  Node* ReloadBytecodeOffset();
387 
388  // Updates and returns BytecodeOffset() advanced by the current bytecode's
389  // size. Traces the exit of the current bytecode.
390  compiler::Node* Advance();
391 
392  // Updates and returns BytecodeOffset() advanced by delta bytecodes.
393  // Traces the exit of the current bytecode.
394  compiler::Node* Advance(int delta);
395  compiler::Node* Advance(compiler::Node* delta, bool backward = false);
396 
397  // Load the bytecode at |bytecode_offset|.
398  compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
399 
400  // Look ahead for Star and inline it in a branch. Returns a new target
401  // bytecode node for dispatch.
402  compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);
403 
404  // Build code for Star at the current BytecodeOffset() and Advance() to the
405  // next dispatch offset.
406  void InlineStar();
407 
408  // Dispatch to the bytecode handler with code offset |handler|.
409  compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
410  compiler::Node* bytecode_offset,
411  compiler::Node* target_bytecode);
412 
413  // Dispatch to the bytecode handler with code entry point |handler_entry|.
414  compiler::Node* DispatchToBytecodeHandlerEntry(
415  compiler::Node* handler_entry, compiler::Node* bytecode_offset,
416  compiler::Node* target_bytecode);
417 
418  int CurrentBytecodeSize() const;
419 
420  OperandScale operand_scale() const { return operand_scale_; }
421 
422  Bytecode bytecode_;
423  OperandScale operand_scale_;
424  CodeStubAssembler::Variable interpreted_frame_pointer_;
425  CodeStubAssembler::Variable bytecode_array_;
426  CodeStubAssembler::Variable bytecode_offset_;
427  CodeStubAssembler::Variable dispatch_table_;
428  CodeStubAssembler::Variable accumulator_;
429  AccumulatorUse accumulator_use_;
430  bool made_call_;
431  bool reloaded_frame_ptr_;
432  bool bytecode_array_valid_;
433  bool disable_stack_check_across_call_;
434  compiler::Node* stack_pointer_before_call_;
435 
436  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
437 };
438 
439 } // namespace interpreter
440 } // namespace internal
441 } // namespace v8
442 
443 #endif // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_