V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
liftoff-assembler.h
1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
6 #define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
7 
8 #include <iosfwd>
9 #include <memory>
10 
11 #include "src/base/bits.h"
12 #include "src/frames.h"
13 #include "src/macro-assembler.h"
14 #include "src/wasm/baseline/liftoff-assembler-defs.h"
15 #include "src/wasm/baseline/liftoff-register.h"
16 #include "src/wasm/function-body-decoder.h"
17 #include "src/wasm/wasm-code-manager.h"
18 #include "src/wasm/wasm-module.h"
19 #include "src/wasm/wasm-opcodes.h"
20 #include "src/wasm/wasm-value.h"
21 
22 namespace v8 {
23 namespace internal {
24 
25 // Forward declarations.
26 namespace compiler {
27 class CallDescriptor;
28 }
29 
30 namespace wasm {
31 
32 class LiftoffAssembler : public TurboAssembler {
33  public:
34  // Each slot in our stack frame currently has exactly 8 bytes.
35  static constexpr uint32_t kStackSlotSize = 8;
36 
37  static constexpr ValueType kWasmIntPtr =
38  kPointerSize == 8 ? kWasmI64 : kWasmI32;
39 
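  // A {VarState} describes where one value of the wasm value stack currently
  // lives: spilled to its stack slot, cached in a register, or known to be a
  // 32-bit integer constant. Minimal illustrative sketch (not part of the
  // header; {r} stands for some general-purpose cache Register):
  //   VarState spilled(kWasmF64);                     // lives in its stack slot
  //   VarState cached(kWasmI32, LiftoffRegister(r));  // cached in register {r}
  //   VarState constant(kWasmI32, 42);                // the i32 constant 42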
40  class VarState {
41  public:
42  enum Location : uint8_t { kStack, kRegister, KIntConst };
43 
44  explicit VarState(ValueType type) : loc_(kStack), type_(type) {}
45  explicit VarState(ValueType type, LiftoffRegister r)
46  : loc_(kRegister), type_(type), reg_(r) {
47  DCHECK_EQ(r.reg_class(), reg_class_for(type));
48  }
49  explicit VarState(ValueType type, int32_t i32_const)
50  : loc_(KIntConst), type_(type), i32_const_(i32_const) {
51  DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
52  }
53 
54  bool operator==(const VarState& other) const {
55  if (loc_ != other.loc_) return false;
56  if (type_ != other.type_) return false;
57  switch (loc_) {
58  case kStack:
59  return true;
60  case kRegister:
61  return reg_ == other.reg_;
62  case KIntConst:
63  return i32_const_ == other.i32_const_;
64  }
65  UNREACHABLE();
66  }
67 
68  bool is_stack() const { return loc_ == kStack; }
69  bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
70  bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
71  bool is_reg() const { return loc_ == kRegister; }
72  bool is_const() const { return loc_ == KIntConst; }
73 
74  ValueType type() const { return type_; }
75 
76  Location loc() const { return loc_; }
77 
78  int32_t i32_const() const {
79  DCHECK_EQ(loc_, KIntConst);
80  return i32_const_;
81  }
82  WasmValue constant() const {
83  DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
84  DCHECK_EQ(loc_, KIntConst);
85  return type_ == kWasmI32 ? WasmValue(i32_const_)
86  : WasmValue(int64_t{i32_const_});
87  }
88 
89  Register gp_reg() const { return reg().gp(); }
90  DoubleRegister fp_reg() const { return reg().fp(); }
91  LiftoffRegister reg() const {
92  DCHECK_EQ(loc_, kRegister);
93  return reg_;
94  }
95  RegClass reg_class() const { return reg().reg_class(); }
96 
97  void MakeStack() { loc_ = kStack; }
98 
99  private:
100  Location loc_;
101  // TODO(wasm): This is redundant, the decoder already knows the type of each
102  // stack value. Try to collapse.
103  ValueType type_;
104 
105  union {
106  LiftoffRegister reg_; // used if loc_ == kRegister
107  int32_t i32_const_; // used if loc_ == KIntConst
108  };
109  };
110 
111  ASSERT_TRIVIALLY_COPYABLE(VarState);
112 
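  // {CacheState} is the register-allocation state at one point in the
  // function: {stack_state} mirrors the wasm value stack, {used_registers}
  // and {register_use_count} track which cache registers currently hold
  // values (and how many stack slots refer to them), and {last_spilled_regs}
  // remembers recent spill victims so that spilling rotates through registers.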
113  struct CacheState {
114  // Allow default construction, move construction, and move assignment.
115  CacheState() = default;
116  CacheState(CacheState&&) = default;
117  CacheState& operator=(CacheState&&) = default;
118 
119  // TODO(clemensh): Improve memory management here; avoid std::vector.
120  std::vector<VarState> stack_state;
121  LiftoffRegList used_registers;
122  uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
123  LiftoffRegList last_spilled_regs;
124  // TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
125  uint32_t stack_base = 0;
126 
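  // Note: on 32-bit targets ({kNeedI64RegPair}) an i64 value occupies a pair
  // of GP registers, so allocating a {kGpRegPair} requires two free GP
  // registers (see {LiftoffRegister::ForPair} below).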
127  bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
128  if (kNeedI64RegPair && rc == kGpRegPair) {
129  LiftoffRegList available_regs =
130  kGpCacheRegList.MaskOut(used_registers).MaskOut(pinned);
131  return available_regs.GetNumRegsSet() >= 2;
132  }
133  DCHECK(rc == kGpReg || rc == kFpReg);
134  LiftoffRegList candidates = GetCacheRegList(rc);
135  return has_unused_register(candidates, pinned);
136  }
137 
138  bool has_unused_register(LiftoffRegList candidates,
139  LiftoffRegList pinned = {}) const {
140  LiftoffRegList available_regs =
141  candidates.MaskOut(used_registers).MaskOut(pinned);
142  return !available_regs.is_empty();
143  }
144 
145  LiftoffRegister unused_register(RegClass rc,
146  LiftoffRegList pinned = {}) const {
147  if (kNeedI64RegPair && rc == kGpRegPair) {
148  Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
149  Register high = unused_register(kGpReg, pinned).gp();
150  return LiftoffRegister::ForPair(low, high);
151  }
152  DCHECK(rc == kGpReg || rc == kFpReg);
153  LiftoffRegList candidates = GetCacheRegList(rc);
154  return unused_register(candidates, pinned);
155  }
156 
157  LiftoffRegister unused_register(LiftoffRegList candidates,
158  LiftoffRegList pinned = {}) const {
159  LiftoffRegList available_regs =
160  candidates.MaskOut(used_registers).MaskOut(pinned);
161  return available_regs.GetFirstRegSet();
162  }
163 
164  void inc_used(LiftoffRegister reg) {
165  if (reg.is_pair()) {
166  inc_used(reg.low());
167  inc_used(reg.high());
168  return;
169  }
170  used_registers.set(reg);
171  DCHECK_GT(kMaxInt, register_use_count[reg.liftoff_code()]);
172  ++register_use_count[reg.liftoff_code()];
173  }
174 
175  // Decrement the use count of {reg}; clears it from {used_registers} on the last use.
176  void dec_used(LiftoffRegister reg) {
177  DCHECK(is_used(reg));
178  if (reg.is_pair()) {
179  dec_used(reg.low());
180  dec_used(reg.high());
181  return;
182  }
183  int code = reg.liftoff_code();
184  DCHECK_LT(0, register_use_count[code]);
185  if (--register_use_count[code] == 0) used_registers.clear(reg);
186  }
187 
188  bool is_used(LiftoffRegister reg) const {
189  if (reg.is_pair()) return is_used(reg.low()) || is_used(reg.high());
190  bool used = used_registers.has(reg);
191  DCHECK_EQ(used, register_use_count[reg.liftoff_code()] != 0);
192  return used;
193  }
194 
195  uint32_t get_use_count(LiftoffRegister reg) const {
196  if (reg.is_pair()) {
197  DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
198  register_use_count[reg.high().liftoff_code()]);
199  reg = reg.low();
200  }
201  DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
202  return register_use_count[reg.liftoff_code()];
203  }
204 
205  void clear_used(LiftoffRegister reg) {
206  register_use_count[reg.liftoff_code()] = 0;
207  used_registers.clear(reg);
208  }
209 
210  bool is_free(LiftoffRegister reg) const { return !is_used(reg); }
211 
212  void reset_used_registers() {
213  used_registers = {};
214  memset(register_use_count, 0, sizeof(register_use_count));
215  }
216 
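  // Choose the register to spill next from {candidates} minus {pinned},
  // assuming none of them is free; {last_spilled_regs} makes successive
  // spills cycle through the candidates instead of always evicting the same
  // register.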
217  LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
218  LiftoffRegList pinned = {}) {
219  LiftoffRegList unpinned = candidates.MaskOut(pinned);
220  DCHECK(!unpinned.is_empty());
221  // This method should only be called if none of the candidates is free.
222  DCHECK(unpinned.MaskOut(used_registers).is_empty());
223  LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
224  if (unspilled.is_empty()) {
225  unspilled = unpinned;
226  last_spilled_regs = {};
227  }
228  LiftoffRegister reg = unspilled.GetFirstRegSet();
229  last_spilled_regs.set(reg);
230  return reg;
231  }
232 
233  // TODO(clemensh): Don't copy the full parent state (this makes us N^2).
234  void InitMerge(const CacheState& source, uint32_t num_locals,
235  uint32_t arity);
236 
237  void Steal(CacheState& source);
238 
239  void Split(const CacheState& source);
240 
241  uint32_t stack_height() const {
242  return static_cast<uint32_t>(stack_state.size());
243  }
244 
245  private:
246  // Make the copy assignment operator private (to be used from {Split()}).
247  CacheState& operator=(const CacheState&) = default;
248  // Disallow copy construction.
249  CacheState(const CacheState&) = delete;
250  };
251 
253  ~LiftoffAssembler() override;
254 
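  // Pop the top value off the wasm value stack and make sure it is held in a
  // register outside of {pinned}.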
255  LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
256 
257  void PushRegister(ValueType type, LiftoffRegister reg) {
258  DCHECK_EQ(reg_class_for(type), reg.reg_class());
259  cache_state_.inc_used(reg);
260  cache_state_.stack_state.emplace_back(type, reg);
261  }
262 
263  void SpillRegister(LiftoffRegister);
264 
265  uint32_t GetNumUses(LiftoffRegister reg) {
266  return cache_state_.get_use_count(reg);
267  }
268 
269  // Get an unused register for class {rc}, reusing one of {try_first} if
270  // possible.
271  LiftoffRegister GetUnusedRegister(
272  RegClass rc, std::initializer_list<LiftoffRegister> try_first,
273  LiftoffRegList pinned = {}) {
274  for (LiftoffRegister reg : try_first) {
275  DCHECK_EQ(reg.reg_class(), rc);
276  if (cache_state_.is_free(reg)) return reg;
277  }
278  return GetUnusedRegister(rc, pinned);
279  }
280 
281  // Get an unused register for class {rc}, potentially spilling to free one.
282  LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
283  if (kNeedI64RegPair && rc == kGpRegPair) {
284  LiftoffRegList candidates = kGpCacheRegList;
285  Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
286  Register high = GetUnusedRegister(candidates, pinned).gp();
287  return LiftoffRegister::ForPair(low, high);
288  }
289  DCHECK(rc == kGpReg || rc == kFpReg);
290  LiftoffRegList candidates = GetCacheRegList(rc);
291  return GetUnusedRegister(candidates, pinned);
292  }
293 
294  // Get an unused register of {candidates}, potentially spilling to free one.
295  LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
296  LiftoffRegList pinned = {}) {
297  if (cache_state_.has_unused_register(candidates, pinned)) {
298  return cache_state_.unused_register(candidates, pinned);
299  }
300  return SpillOneRegister(candidates, pinned);
301  }
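  // Typical allocation pattern (illustrative sketch, mirroring the 32-bit
  // helper near the end of this header): pin the inputs, then request a
  // scratch register of the required class:
  //   LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs, rhs);
  //   Register tmp = GetUnusedRegister(kGpReg, pinned).gp();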
302 
303  void MergeFullStackWith(CacheState&);
304  void MergeStackWith(CacheState&, uint32_t arity);
305 
306  void Spill(uint32_t index);
307  void SpillLocals();
308  void SpillAllRegisters();
309 
310  // Call this method whenever spilling something, so that the number of used
311  // spill slots can be tracked and the stack frame can be allocated large enough.
312  void RecordUsedSpillSlot(uint32_t index) {
313  if (index >= num_used_spill_slots_) num_used_spill_slots_ = index + 1;
314  }
315 
316  // Load parameters into the right registers / stack slots for the call.
317  // Move {*target} into another register if needed and update {*target} to that
318  // register, or {no_reg} if target was spilled to the stack.
319  void PrepareCall(FunctionSig*, compiler::CallDescriptor*,
320  Register* target = nullptr,
321  Register* target_instance = nullptr);
322  // Process return values of the call.
323  void FinishCall(FunctionSig*, compiler::CallDescriptor*);
324 
325  // Move {src} into {dst}. {src} and {dst} must be different.
326  void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
327 
328  // Parallel register move: For a list of tuples <dst, src, type>, move the
329  // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
330  // that tuple.
331  struct ParallelRegisterMoveTuple {
332  LiftoffRegister dst;
333  LiftoffRegister src;
334  ValueType type;
335  template <typename Dst, typename Src>
336  ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
337  : dst(dst), src(src), type(type) {}
338  };
339  void ParallelRegisterMove(Vector<ParallelRegisterMoveTuple>);
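  // Illustrative sketch only (assumes the generic {ArrayVector} helper and two
  // hypothetical tuples of LiftoffRegisters):
  //   ParallelRegisterMoveTuple moves[]{{dst0, src0, kWasmI64},
  //                                     {dst1, src1, kWasmF32}};
  //   ParallelRegisterMove(ArrayVector(moves));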
340 
341 #ifdef ENABLE_SLOW_DCHECKS
342  // Validate that the register use counts reflect the state of the cache.
343  bool ValidateCacheState() const;
344 #endif
345 
346  ////////////////////////////////////
347  // Platform-specific part.        //
348  ////////////////////////////////////
349 
350  // This function emits machine code to prepare the stack frame, before the
351  // size of the stack frame is known. It returns an offset in the machine code
352  // which can later be patched (via {PatchPrepareStackFrame}) when the size of
353  // the frame is known.
354  inline int PrepareStackFrame();
355  inline void PatchPrepareStackFrame(int offset, uint32_t stack_slots);
356  inline void FinishCode();
357  inline void AbortCompilation();
358 
359  inline void LoadConstant(LiftoffRegister, WasmValue,
360  RelocInfo::Mode rmode = RelocInfo::NONE);
361  inline void LoadFromInstance(Register dst, uint32_t offset, int size);
362  inline void SpillInstance(Register instance);
363  inline void FillInstanceInto(Register dst);
364  inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
365  uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
366  uint32_t* protected_load_pc = nullptr,
367  bool is_load_mem = false);
368  inline void Store(Register dst_addr, Register offset_reg, uint32_t offset_imm,
369  LiftoffRegister src, StoreType type, LiftoffRegList pinned,
370  uint32_t* protected_store_pc = nullptr,
371  bool is_store_mem = false);
372  inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
373  ValueType);
374  inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
375 
376  inline void Move(Register dst, Register src, ValueType);
377  inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
378 
379  inline void Spill(uint32_t index, LiftoffRegister, ValueType);
380  inline void Spill(uint32_t index, WasmValue);
381  inline void Fill(LiftoffRegister, uint32_t index, ValueType);
382  // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
383  // 4 bytes on the stack holding half of a 64-bit value. The two half_indexes
384  // corresponding to slot {index} are {2*index} and {2*index-1}.
385  inline void FillI64Half(Register, uint32_t half_index);
386 
387  // i32 binops.
388  inline void emit_i32_add(Register dst, Register lhs, Register rhs);
389  inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
390  inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
391  inline void emit_i32_divs(Register dst, Register lhs, Register rhs,
392  Label* trap_div_by_zero,
393  Label* trap_div_unrepresentable);
394  inline void emit_i32_divu(Register dst, Register lhs, Register rhs,
395  Label* trap_div_by_zero);
396  inline void emit_i32_rems(Register dst, Register lhs, Register rhs,
397  Label* trap_rem_by_zero);
398  inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
399  Label* trap_rem_by_zero);
400  inline void emit_i32_and(Register dst, Register lhs, Register rhs);
401  inline void emit_i32_or(Register dst, Register lhs, Register rhs);
402  inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
403  inline void emit_i32_shl(Register dst, Register src, Register amount,
404  LiftoffRegList pinned = {});
405  inline void emit_i32_sar(Register dst, Register src, Register amount,
406  LiftoffRegList pinned = {});
407  inline void emit_i32_shr(Register dst, Register src, Register amount,
408  LiftoffRegList pinned = {});
409  inline void emit_i32_shr(Register dst, Register src, int amount);
410 
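  // Note: emitters below that return {bool} report whether the operation
  // could be emitted on this target; when they return false, the caller is
  // expected to emit a fallback (typically a C call via {CallC}).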
411  // i32 unops.
412  inline bool emit_i32_clz(Register dst, Register src);
413  inline bool emit_i32_ctz(Register dst, Register src);
414  inline bool emit_i32_popcnt(Register dst, Register src);
415 
416  // i64 binops.
417  inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
418  LiftoffRegister rhs);
419  inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
420  LiftoffRegister rhs);
421  inline void emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
422  LiftoffRegister rhs);
423  inline bool emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
424  LiftoffRegister rhs, Label* trap_div_by_zero,
425  Label* trap_div_unrepresentable);
426  inline bool emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
427  LiftoffRegister rhs, Label* trap_div_by_zero);
428  inline bool emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
429  LiftoffRegister rhs, Label* trap_rem_by_zero);
430  inline bool emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
431  LiftoffRegister rhs, Label* trap_rem_by_zero);
432  inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
433  LiftoffRegister rhs);
434  inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
435  LiftoffRegister rhs);
436  inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
437  LiftoffRegister rhs);
438  inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
439  Register amount, LiftoffRegList pinned = {});
440  inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
441  Register amount, LiftoffRegList pinned = {});
442  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
443  Register amount, LiftoffRegList pinned = {});
444  inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
445  int amount);
446 
447  inline void emit_i32_to_intptr(Register dst, Register src);
448 
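  // Pointer-sized helpers: forward to the i64 or i32 emitters depending on
  // whether the target uses 64-bit or 32-bit pointers.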
449  inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
450  if (kPointerSize == 8) {
451  emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
452  LiftoffRegister(rhs));
453  } else {
454  emit_i32_add(dst, lhs, rhs);
455  }
456  }
457  inline void emit_ptrsize_sub(Register dst, Register lhs, Register rhs) {
458  if (kPointerSize == 8) {
459  emit_i64_sub(LiftoffRegister(dst), LiftoffRegister(lhs),
460  LiftoffRegister(rhs));
461  } else {
462  emit_i32_sub(dst, lhs, rhs);
463  }
464  }
465  inline void emit_ptrsize_and(Register dst, Register lhs, Register rhs) {
466  if (kPointerSize == 8) {
467  emit_i64_and(LiftoffRegister(dst), LiftoffRegister(lhs),
468  LiftoffRegister(rhs));
469  } else {
470  emit_i32_and(dst, lhs, rhs);
471  }
472  }
473  inline void emit_ptrsize_shr(Register dst, Register src, int amount) {
474  if (kPointerSize == 8) {
475  emit_i64_shr(LiftoffRegister(dst), LiftoffRegister(src), amount);
476  } else {
477  emit_i32_shr(dst, src, amount);
478  }
479  }
480 
481  // f32 binops.
482  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
483  DoubleRegister rhs);
484  inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
485  DoubleRegister rhs);
486  inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
487  DoubleRegister rhs);
488  inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
489  DoubleRegister rhs);
490  inline void emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
491  DoubleRegister rhs);
492  inline void emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
493  DoubleRegister rhs);
494  inline void emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
495  DoubleRegister rhs);
496 
497  // f32 unops.
498  inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
499  inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
500  inline bool emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
501  inline bool emit_f32_floor(DoubleRegister dst, DoubleRegister src);
502  inline bool emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
503  inline bool emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
504  inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);
505 
506  // f64 binops.
507  inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
508  DoubleRegister rhs);
509  inline void emit_f64_sub(DoubleRegister dst, DoubleRegister lhs,
510  DoubleRegister rhs);
511  inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
512  DoubleRegister rhs);
513  inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
514  DoubleRegister rhs);
515  inline void emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
516  DoubleRegister rhs);
517  inline void emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
518  DoubleRegister rhs);
519  inline void emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
520  DoubleRegister rhs);
521 
522  // f64 unops.
523  inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
524  inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
525  inline bool emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
526  inline bool emit_f64_floor(DoubleRegister dst, DoubleRegister src);
527  inline bool emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
528  inline bool emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
529  inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);
530 
531  inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
532  LiftoffRegister src, Label* trap = nullptr);
533 
534  inline void emit_i32_signextend_i8(Register dst, Register src);
535  inline void emit_i32_signextend_i16(Register dst, Register src);
536  inline void emit_i64_signextend_i8(LiftoffRegister dst, LiftoffRegister src);
537  inline void emit_i64_signextend_i16(LiftoffRegister dst, LiftoffRegister src);
538  inline void emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src);
539 
540  inline void emit_jump(Label*);
541  inline void emit_jump(Register);
542 
543  inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
544  Register rhs = no_reg);
545  // Set {dst} to 1 if condition holds, 0 otherwise.
546  inline void emit_i32_eqz(Register dst, Register src);
547  inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
548  Register rhs);
549  inline void emit_i64_eqz(Register dst, LiftoffRegister src);
550  inline void emit_i64_set_cond(Condition condition, Register dst,
551  LiftoffRegister lhs, LiftoffRegister rhs);
552  inline void emit_f32_set_cond(Condition condition, Register dst,
553  DoubleRegister lhs, DoubleRegister rhs);
554  inline void emit_f64_set_cond(Condition condition, Register dst,
555  DoubleRegister lhs, DoubleRegister rhs);
556 
557  inline void StackCheck(Label* ool_code, Register limit_address);
558 
559  inline void CallTrapCallbackForTesting();
560 
561  inline void AssertUnreachable(AbortReason reason);
562 
563  inline void PushRegisters(LiftoffRegList);
564  inline void PopRegisters(LiftoffRegList);
565 
566  inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
567 
568  // Execute a C call. Arguments are pushed to the stack and a pointer to this
569  // region is passed to the C function. If {out_argument_type != kWasmStmt},
570  // this is the return value of the C function, stored in {rets[0]}. Further
571  // outputs (specified in {sig->returns()}) are read from the buffer and stored
572  // in the remaining {rets} registers.
573  inline void CallC(FunctionSig* sig, const LiftoffRegister* args,
574  const LiftoffRegister* rets, ValueType out_argument_type,
575  int stack_bytes, ExternalReference ext_ref);
576 
577  inline void CallNativeWasmCode(Address addr);
578  // Indirect call: If {target == no_reg}, then pop the target from the stack.
579  inline void CallIndirect(FunctionSig* sig,
580  compiler::CallDescriptor* call_descriptor,
581  Register target);
582  inline void CallRuntimeStub(WasmCode::RuntimeStubId sid);
583 
584  // Reserve {size} bytes in the current frame; store the address of that space in {addr}.
585  inline void AllocateStackSlot(Register addr, uint32_t size);
586  inline void DeallocateStackSlot(uint32_t size);
587 
588  ////////////////////////////////////
589  // End of platform-specific part. //
590  ////////////////////////////////////
591 
592  uint32_t num_locals() const { return num_locals_; }
593  void set_num_locals(uint32_t num_locals);
594 
595  uint32_t GetTotalFrameSlotCount() const {
596  return num_locals_ + num_used_spill_slots_;
597  }
598 
599  ValueType local_type(uint32_t index) {
600  DCHECK_GT(num_locals_, index);
601  ValueType* locals =
602  num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
603  return locals[index];
604  }
605 
606  void set_local_type(uint32_t index, ValueType type) {
607  ValueType* locals =
608  num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
609  locals[index] = type;
610  }
611 
612  CacheState* cache_state() { return &cache_state_; }
613  const CacheState* cache_state() const { return &cache_state_; }
614 
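  // Bailout support: platform code calls {bailout()} with a reason when it
  // cannot handle a construct; compilation is then aborted (and, in practice,
  // the function is compiled by the optimizing pipeline instead).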
615  bool did_bailout() { return bailout_reason_ != nullptr; }
616  const char* bailout_reason() const { return bailout_reason_; }
617 
618  void bailout(const char* reason) {
619  if (bailout_reason_ != nullptr) return;
620  AbortCompilation();
621  bailout_reason_ = reason;
622  }
623 
624  private:
625  uint32_t num_locals_ = 0;
626  static constexpr uint32_t kInlineLocalTypes = 8;
627  union {
628  ValueType local_types_[kInlineLocalTypes];
629  ValueType* more_local_types_;
630  };
631  static_assert(sizeof(ValueType) == 1,
632  "Reconsider this inlining if ValueType gets bigger");
633  CacheState cache_state_;
634  uint32_t num_used_spill_slots_ = 0;
635  const char* bailout_reason_ = nullptr;
636 
637  LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
638  LiftoffRegList pinned);
639 };
640 
641 std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
642 
643 // =======================================================================
644 // Partially platform-independent implementations of the platform-dependent
645 // part.
646 
647 #ifdef V8_TARGET_ARCH_32_BIT
648 
649 namespace liftoff {
650 template <void (LiftoffAssembler::*op)(Register, Register, Register)>
651 void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
652  LiftoffRegister dst, LiftoffRegister lhs,
653  LiftoffRegister rhs) {
654  // If {dst.low_gp()} does not overlap with {lhs.high_gp()} or {rhs.high_gp()},
655  // just first compute the lower half, then the upper half.
656  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
657  (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
658  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
659  return;
660  }
661  // If {dst.high_gp()} does not overlap with {lhs.low_gp()} or {rhs.low_gp()},
662  // we can compute this the other way around.
663  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
664  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
665  (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
666  return;
667  }
668  // Otherwise, we need a temporary register.
669  Register tmp =
670  assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
671  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
672  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
673  assm->Move(dst.low_gp(), tmp, kWasmI32);
674 }
675 } // namespace liftoff
676 
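// On 32-bit targets the bitwise i64 operations act on the two 32-bit halves
// independently, so they can be composed from the corresponding i32 emitters
// via the helper above: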
677 void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
678  LiftoffRegister rhs) {
679  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
680  this, dst, lhs, rhs);
681 }
682 
683 void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
684  LiftoffRegister rhs) {
685  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
686  this, dst, lhs, rhs);
687 }
688 
689 void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
690  LiftoffRegister rhs) {
691  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
692  this, dst, lhs, rhs);
693 }
694 
695 #endif // V8_TARGET_ARCH_32_BIT
696 
697 // End of the partially platform-independent implementations of the
698 // platform-dependent part.
699 // =======================================================================
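// {LiftoffStackSlots} collects the values that have to be placed in stack
// slots for an upcoming call (on 32-bit targets a 64-bit value is added as
// two halves, see {RegPairHalf}) and emits them together in {Construct()}.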
700 
701 class LiftoffStackSlots {
702  public:
703  explicit LiftoffStackSlots(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
704 
705  void Add(const LiftoffAssembler::VarState& src, uint32_t src_index,
706  RegPairHalf half) {
707  slots_.emplace_back(src, src_index, half);
708  }
709  void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
710 
711  inline void Construct();
712 
713  private:
714  struct Slot {
715  // Allow move construction.
716  Slot(Slot&&) = default;
717  Slot(const LiftoffAssembler::VarState& src, uint32_t src_index,
718  RegPairHalf half)
719  : src_(src), src_index_(src_index), half_(half) {}
720  explicit Slot(const LiftoffAssembler::VarState& src)
721  : src_(src), half_(kLowWord) {}
722 
723  const LiftoffAssembler::VarState src_;
724  uint32_t src_index_ = 0;
725  RegPairHalf half_;
726  };
727 
728  std::vector<Slot> slots_;
729  LiftoffAssembler* const asm_;
730 };
731 
732 } // namespace wasm
733 } // namespace internal
734 } // namespace v8
735 
736 // Include platform specific implementation.
737 #if V8_TARGET_ARCH_IA32
738 #include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
739 #elif V8_TARGET_ARCH_X64
740 #include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
741 #elif V8_TARGET_ARCH_ARM64
742 #include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
743 #elif V8_TARGET_ARCH_ARM
744 #include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
745 #elif V8_TARGET_ARCH_PPC
746 #include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
747 #elif V8_TARGET_ARCH_MIPS
748 #include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
749 #elif V8_TARGET_ARCH_MIPS64
750 #include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
751 #elif V8_TARGET_ARCH_S390
752 #include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
753 #else
754 #error Unsupported architecture.
755 #endif
756 
757 #endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_