V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
liftoff-assembler.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/baseline/liftoff-assembler.h"

#include <sstream>

#include "src/assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-opcodes.h"

namespace v8 {
namespace internal {
namespace wasm {

using VarState = LiftoffAssembler::VarState;

namespace {

#define __ asm_->

#define TRACE(...)                                            \
  do {                                                        \
    if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
  } while (false)

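// Helper that records the register moves, constant loads, and stack-slot
// fills needed to bring the current cache state into a target shape, and
// executes them (resolving overlapping and cyclic register moves) when
// {Execute} is called, at the latest when the recipe goes out of scope.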
class StackTransferRecipe {
  struct RegisterMove {
    LiftoffRegister dst;
    LiftoffRegister src;
    ValueType type;
    constexpr RegisterMove(LiftoffRegister dst, LiftoffRegister src,
                           ValueType type)
        : dst(dst), src(src), type(type) {}
  };
  struct RegisterLoad {
    enum LoadKind : uint8_t {
      kConstant,  // load a constant value into a register.
      kStack,     // fill a register from a stack slot.
      kHalfStack  // fill one half of a register pair from half a stack slot.
    };

    LiftoffRegister dst;
    LoadKind kind;
    ValueType type;
    int32_t value;  // i32 constant value or stack index, depending on kind.

    // Named constructors.
    static RegisterLoad Const(LiftoffRegister dst, WasmValue constant) {
      if (constant.type() == kWasmI32) {
        return {dst, kConstant, kWasmI32, constant.to_i32()};
      }
      DCHECK_EQ(kWasmI64, constant.type());
      DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
      return {dst, kConstant, kWasmI64, constant.to_i32_unchecked()};
    }
    static RegisterLoad Stack(LiftoffRegister dst, int32_t stack_index,
                              ValueType type) {
      return {dst, kStack, type, stack_index};
    }
    static RegisterLoad HalfStack(LiftoffRegister dst,
                                  int32_t half_stack_index) {
      return {dst, kHalfStack, kWasmI32, half_stack_index};
    }

   private:
    RegisterLoad(LiftoffRegister dst, LoadKind kind, ValueType type,
                 int32_t value)
        : dst(dst), kind(kind), type(type), value(value) {}
  };

 public:
  explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
  ~StackTransferRecipe() { Execute(); }

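  // Execute all recorded register moves (handling overlapping source and
  // destination registers and breaking cycles by spilling), then perform the
  // deferred constant and stack-slot loads.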
  void Execute() {
    // First, execute register moves. Then load constants and stack values into
    // registers.

    if ((move_dst_regs_ & move_src_regs_).is_empty()) {
      // No overlap in src and dst registers. Just execute the moves in any
      // order.
      for (RegisterMove& rm : register_moves_) {
        asm_->Move(rm.dst, rm.src, rm.type);
      }
      register_moves_.clear();
    } else {
      // Keep use counters of src registers.
      uint32_t src_reg_use_count[kAfterMaxLiftoffRegCode] = {0};
      for (RegisterMove& rm : register_moves_) {
        ++src_reg_use_count[rm.src.liftoff_code()];
      }
      // Now repeatedly iterate the list of register moves, and execute those
      // whose dst register does not appear as src any more. The remaining
      // moves are compacted during this iteration.
      // If no more moves can be executed (because of a cycle), spill one
      // register to the stack, add a RegisterLoad to reload it later, and
      // continue.
      uint32_t next_spill_slot = asm_->cache_state()->stack_height();
      while (!register_moves_.empty()) {
        int executed_moves = 0;
        for (auto& rm : register_moves_) {
          if (src_reg_use_count[rm.dst.liftoff_code()] == 0) {
            asm_->Move(rm.dst, rm.src, rm.type);
            ++executed_moves;
            DCHECK_LT(0, src_reg_use_count[rm.src.liftoff_code()]);
            --src_reg_use_count[rm.src.liftoff_code()];
          } else if (executed_moves) {
            // Compaction: Move not-executed moves to the beginning of the
            // list.
            (&rm)[-executed_moves] = rm;
          }
        }
        if (executed_moves == 0) {
          // There is a cycle. Spill one register, then continue.
          // TODO(clemensh): Use an unused register if available.
          RegisterMove& rm = register_moves_.back();
          LiftoffRegister spill_reg = rm.src;
          asm_->Spill(next_spill_slot, spill_reg, rm.type);
          // Remember to reload into the destination register later.
          LoadStackSlot(register_moves_.back().dst, next_spill_slot, rm.type);
          DCHECK_EQ(1, src_reg_use_count[spill_reg.liftoff_code()]);
          src_reg_use_count[spill_reg.liftoff_code()] = 0;
          ++next_spill_slot;
          executed_moves = 1;
        }
        register_moves_.erase(register_moves_.end() - executed_moves,
                              register_moves_.end());
      }
    }

    for (RegisterLoad& rl : register_loads_) {
      switch (rl.kind) {
        case RegisterLoad::kConstant:
          asm_->LoadConstant(rl.dst, rl.type == kWasmI64
                                         ? WasmValue(int64_t{rl.value})
                                         : WasmValue(int32_t{rl.value}));
          break;
        case RegisterLoad::kStack:
          asm_->Fill(rl.dst, rl.value, rl.type);
          break;
        case RegisterLoad::kHalfStack:
          // As half of a register pair, {rl.dst} must be a gp register.
          asm_->FillI64Half(rl.dst.gp(), rl.value);
          break;
      }
    }
    register_loads_.clear();
  }

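  // Emit the operations needed so that the value at {src_index} of the
  // current cache state ends up in the location which {dst_state} expects at
  // {dst_index} (a stack slot, a register, or a matching constant).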
  void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
                         uint32_t dst_index, uint32_t src_index) {
    const VarState& dst = dst_state.stack_state[dst_index];
    const VarState& src = __ cache_state()->stack_state[src_index];
    DCHECK_EQ(dst.type(), src.type());
    switch (dst.loc()) {
      case VarState::kStack:
        switch (src.loc()) {
          case VarState::kStack:
            if (src_index == dst_index) break;
            asm_->MoveStackValue(dst_index, src_index, src.type());
            break;
          case VarState::kRegister:
            asm_->Spill(dst_index, src.reg(), src.type());
            break;
          case VarState::KIntConst:
            asm_->Spill(dst_index, src.constant());
            break;
        }
        break;
      case VarState::kRegister:
        LoadIntoRegister(dst.reg(), src, src_index);
        break;
      case VarState::KIntConst:
        DCHECK_EQ(dst, src);
        break;
    }
  }

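  // Record the operations needed to materialize the value described by {src}
  // (stack slot {src_index}, another register, or a constant) in {dst}.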
  void LoadIntoRegister(LiftoffRegister dst,
                        const LiftoffAssembler::VarState& src,
                        uint32_t src_index) {
    switch (src.loc()) {
      case VarState::kStack:
        LoadStackSlot(dst, src_index, src.type());
        break;
      case VarState::kRegister:
        DCHECK_EQ(dst.reg_class(), src.reg_class());
        if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
        break;
      case VarState::KIntConst:
        LoadConstant(dst, src.constant());
        break;
    }
  }

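  // Record the operations needed to load one half of the i64 value {src} into
  // {dst}. Only used on targets where an i64 occupies a pair of 32-bit
  // registers ({kNeedI64RegPair}).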
  void LoadI64HalfIntoRegister(LiftoffRegister dst,
                               const LiftoffAssembler::VarState& src,
                               uint32_t index, RegPairHalf half) {
    // Use CHECK such that the remaining code is statically dead if
    // {kNeedI64RegPair} is false.
    CHECK(kNeedI64RegPair);
    DCHECK_EQ(kWasmI64, src.type());
    switch (src.loc()) {
      case VarState::kStack:
        LoadI64HalfStackSlot(dst, 2 * index - (half == kLowWord ? 0 : 1));
        break;
      case VarState::kRegister: {
        LiftoffRegister src_half =
            half == kLowWord ? src.reg().low() : src.reg().high();
        if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
        break;
      }
      case VarState::KIntConst:
        int32_t value = src.i32_const();
        // The high word is the sign extension of the low word.
        if (half == kHighWord) value = value >> 31;
        LoadConstant(dst, WasmValue(value));
        break;
    }
  }

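  // Record a register-to-register move; register pairs are split into two
  // 32-bit moves of their halves.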
  void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
    DCHECK_NE(dst, src);
    DCHECK_EQ(dst.reg_class(), src.reg_class());
    DCHECK_EQ(reg_class_for(type), src.reg_class());
    if (src.is_pair()) {
      DCHECK_EQ(kWasmI64, type);
      if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
      if (dst.high() != src.high())
        MoveRegister(dst.high(), src.high(), kWasmI32);
      return;
    }
    DCHECK(!move_dst_regs_.has(dst));
    move_dst_regs_.set(dst);
    move_src_regs_.set(src);
    register_moves_.emplace_back(dst, src, type);
  }

  void LoadConstant(LiftoffRegister dst, WasmValue value) {
    register_loads_.push_back(RegisterLoad::Const(dst, value));
  }

  void LoadStackSlot(LiftoffRegister dst, uint32_t stack_index,
                     ValueType type) {
    register_loads_.push_back(RegisterLoad::Stack(dst, stack_index, type));
  }

  void LoadI64HalfStackSlot(LiftoffRegister dst, uint32_t half_stack_index) {
    register_loads_.push_back(RegisterLoad::HalfStack(dst, half_stack_index));
  }

 private:
  // TODO(clemensh): Avoid unconditionally allocating on the heap.
  std::vector<RegisterMove> register_moves_;
  std::vector<RegisterLoad> register_loads_;
  LiftoffRegList move_dst_regs_;
  LiftoffRegList move_src_regs_;
  LiftoffAssembler* const asm_;
};

}  // namespace

// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
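// Initialize this state for a merge point: locals and the {arity} merge
// values keep the registers used in {source} where possible (falling back to
// unused registers or stack slots), while the region in between keeps
// constants and registers not already claimed above, and turns everything
// else into stack slots.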
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
                                             uint32_t num_locals,
                                             uint32_t arity) {
  DCHECK(stack_state.empty());
  DCHECK_GE(source.stack_height(), stack_base);
  stack_state.resize(stack_base + arity, VarState(kWasmStmt));

  // |------locals------|--(in between)--|--(discarded)--|----merge----|
  //  <-- num_locals -->                 ^stack_base      <-- arity -->

  // First, initialize merge slots and locals. Keep them in the registers which
  // are being used in {source}, but avoid using a register multiple times. Use
  // unused registers where necessary and possible.
  for (int range = 0; range < 2; ++range) {
    auto src_idx = range ? 0 : source.stack_state.size() - arity;
    auto src_end = range ? num_locals : source.stack_state.size();
    auto dst_idx = range ? 0 : stack_state.size() - arity;
    for (; src_idx < src_end; ++src_idx, ++dst_idx) {
      auto& dst = stack_state[dst_idx];
      auto& src = source.stack_state[src_idx];
      // Just initialize to any register; will be overwritten before use.
      LiftoffRegister reg = kGpCacheRegList.GetFirstRegSet();
      RegClass rc = src.is_reg() ? src.reg_class() : reg_class_for(src.type());
      if (src.is_reg() && is_free(src.reg())) {
        reg = src.reg();
      } else if (has_unused_register(rc)) {
        reg = unused_register(rc);
      } else {
        // Make this a stack slot.
        dst = VarState(src.type());
        continue;
      }
      dst = VarState(src.type(), reg);
      inc_used(reg);
    }
  }
  // Last, initialize the section in between. Here, constants are allowed, but
  // registers which are already used for the merge region or locals must be
  // spilled.
  for (uint32_t i = num_locals; i < stack_base; ++i) {
    auto& dst = stack_state[i];
    auto& src = source.stack_state[i];
    if (src.is_reg()) {
      if (is_used(src.reg())) {
        // Make this a stack slot.
        dst = VarState(src.type());
      } else {
        dst = VarState(src.type(), src.reg());
        inc_used(src.reg());
      }
    } else if (src.is_const()) {
      dst = src;
    } else {
      DCHECK(src.is_stack());
      // Make this a stack slot.
      dst = VarState(src.type());
    }
  }
  last_spilled_regs = source.last_spilled_regs;
}

void LiftoffAssembler::CacheState::Steal(CacheState& source) {
  // Just use the move assignment operator.
  *this = std::move(source);
}

void LiftoffAssembler::CacheState::Split(const CacheState& source) {
  // Call the private copy assignment operator.
  *this = source;
}

namespace {

constexpr AssemblerOptions DefaultLiftoffOptions() {
  return AssemblerOptions{};
}

}  // namespace

// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
LiftoffAssembler::LiftoffAssembler()
    : TurboAssembler(nullptr, DefaultLiftoffOptions(), nullptr, 0,
                     CodeObjectRequired::kNo) {
  set_abort_hard(true);  // Avoid calls to Abort.
}

LiftoffAssembler::~LiftoffAssembler() {
  if (num_locals_ > kInlineLocalTypes) {
    free(more_local_types_);
  }
}

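// Pop the topmost value off the wasm value stack and return it in a register,
// filling it from its stack slot or materializing its constant if necessary;
// registers in {pinned} are not newly allocated for this.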
LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
  DCHECK(!cache_state_.stack_state.empty());
  VarState slot = cache_state_.stack_state.back();
  cache_state_.stack_state.pop_back();
  switch (slot.loc()) {
    case VarState::kStack: {
      LiftoffRegister reg =
          GetUnusedRegister(reg_class_for(slot.type()), pinned);
      Fill(reg, cache_state_.stack_height(), slot.type());
      return reg;
    }
    case VarState::kRegister:
      cache_state_.dec_used(slot.reg());
      return slot.reg();
    case VarState::KIntConst: {
      RegClass rc =
          kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
      LiftoffRegister reg = GetUnusedRegister(rc, pinned);
      LoadConstant(reg, slot.constant());
      return reg;
    }
  }
  UNREACHABLE();
}

void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
  DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
  // TODO(clemensh): Reuse the same StackTransferRecipe object to save some
  // allocations.
  StackTransferRecipe transfers(this);
  for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
    transfers.TransferStackSlot(target, i, i);
  }
}

void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
  // Before: ----------------|------ pop_count -----|--- arity ---|
  //                         ^target_stack_height   ^stack_base   ^stack_height
  // After:  ----|-- arity --|
  //             ^           ^target_stack_height
  //             ^target_stack_base
  uint32_t stack_height = cache_state_.stack_height();
  uint32_t target_stack_height = target.stack_height();
  DCHECK_LE(target_stack_height, stack_height);
  DCHECK_LE(arity, target_stack_height);
  uint32_t stack_base = stack_height - arity;
  uint32_t target_stack_base = target_stack_height - arity;
  StackTransferRecipe transfers(this);
  for (uint32_t i = 0; i < target_stack_base; ++i) {
    transfers.TransferStackSlot(target, i, i);
  }
  for (uint32_t i = 0; i < arity; ++i) {
    transfers.TransferStackSlot(target, target_stack_base + i, stack_base + i);
  }
}

void LiftoffAssembler::Spill(uint32_t index) {
  auto& slot = cache_state_.stack_state[index];
  switch (slot.loc()) {
    case VarState::kStack:
      return;
    case VarState::kRegister:
      Spill(index, slot.reg(), slot.type());
      cache_state_.dec_used(slot.reg());
      break;
    case VarState::KIntConst:
      Spill(index, slot.constant());
      break;
  }
  slot.MakeStack();
}

void LiftoffAssembler::SpillLocals() {
  for (uint32_t i = 0; i < num_locals_; ++i) {
    Spill(i);
  }
}

void LiftoffAssembler::SpillAllRegisters() {
  for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
    auto& slot = cache_state_.stack_state[i];
    if (!slot.is_reg()) continue;
    Spill(i, slot.reg(), slot.type());
    slot.MakeStack();
  }
  cache_state_.reset_used_registers();
}

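// Prepare the value stack and registers for a call: spill all cached values
// that are not parameters, move the parameters into the registers or caller
// frame slots demanded by {call_descriptor}, move the target instance into
// the instance register and the call target out of the parameter registers,
// and finally pop the parameters off the value stack.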
void LiftoffAssembler::PrepareCall(FunctionSig* sig,
                                   compiler::CallDescriptor* call_descriptor,
                                   Register* target,
                                   Register* target_instance) {
  uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
  // Input 0 is the call target.
  constexpr size_t kInputShift = 1;

  // Spill all cache slots which are not being used as parameters.
  // Don't update any register use counters, they will be reset later anyway.
  for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
       idx < end; ++idx) {
    VarState& slot = cache_state_.stack_state[idx];
    if (!slot.is_reg()) continue;
    Spill(idx, slot.reg(), slot.type());
    slot.MakeStack();
  }

  LiftoffStackSlots stack_slots(this);
  StackTransferRecipe stack_transfers(this);
  LiftoffRegList param_regs;

  // Move the target instance (if supplied) into the correct instance register.
  compiler::LinkageLocation instance_loc =
      call_descriptor->GetInputLocation(kInputShift);
  DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
  Register instance_reg = Register::from_code(instance_loc.AsRegister());
  param_regs.set(instance_reg);
  if (target_instance && *target_instance != instance_reg) {
    stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
                                 LiftoffRegister(*target_instance),
                                 kWasmIntPtr);
  }

  // Now move all parameter values into the right slot for the call.
  // Don't pop values yet, such that the stack height is still correct when
  // executing the {stack_transfers}.
  // Process parameters backwards, such that pushes of caller frame slots are
  // in the correct order.
  uint32_t param_base = cache_state_.stack_height() - num_params;
  uint32_t call_desc_input_idx =
      static_cast<uint32_t>(call_descriptor->InputCount());
  for (uint32_t i = num_params; i > 0; --i) {
    const uint32_t param = i - 1;
    ValueType type = sig->GetParam(param);
    const bool is_pair = kNeedI64RegPair && type == kWasmI64;
    const int num_lowered_params = is_pair ? 2 : 1;
    const uint32_t stack_idx = param_base + param;
    const VarState& slot = cache_state_.stack_state[stack_idx];
    // Process both halves of a register pair separately, because they are
    // passed as separate parameters. One or both of them could end up on the
    // stack.
    for (int lowered_idx = 0; lowered_idx < num_lowered_params; ++lowered_idx) {
      const RegPairHalf half =
          is_pair && lowered_idx == 0 ? kHighWord : kLowWord;
      --call_desc_input_idx;
      compiler::LinkageLocation loc =
          call_descriptor->GetInputLocation(call_desc_input_idx);
      if (loc.IsRegister()) {
        DCHECK(!loc.IsAnyRegister());
        RegClass rc = is_pair ? kGpReg : reg_class_for(type);
        int reg_code = loc.AsRegister();
#if V8_TARGET_ARCH_ARM
        // Liftoff assumes a one-to-one mapping between float registers and
        // double registers, and so does not distinguish between f32 and f64
        // registers. The f32 register code must therefore be halved in order
        // to pass the f64 code to Liftoff.
        DCHECK_IMPLIES(type == kWasmF32, (reg_code % 2) == 0);
        LiftoffRegister reg = LiftoffRegister::from_code(
            rc, (type == kWasmF32) ? (reg_code / 2) : reg_code);
#else
        LiftoffRegister reg = LiftoffRegister::from_code(rc, reg_code);
#endif
        param_regs.set(reg);
        if (is_pair) {
          stack_transfers.LoadI64HalfIntoRegister(reg, slot, stack_idx, half);
        } else {
          stack_transfers.LoadIntoRegister(reg, slot, stack_idx);
        }
      } else {
        DCHECK(loc.IsCallerFrameSlot());
        stack_slots.Add(slot, stack_idx, half);
      }
    }
  }
  // {call_desc_input_idx} should point after the instance parameter now.
  DCHECK_EQ(call_desc_input_idx, kInputShift + 1);

  // If the target register overlaps with a parameter register, then move the
  // target to another free register, or spill to the stack.
  if (target && param_regs.has(LiftoffRegister(*target))) {
    // Try to find another free register.
    LiftoffRegList free_regs = kGpCacheRegList.MaskOut(param_regs);
    if (!free_regs.is_empty()) {
      LiftoffRegister new_target = free_regs.GetFirstRegSet();
      stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
                                   kWasmIntPtr);
      *target = new_target.gp();
    } else {
      stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
                                                 LiftoffRegister(*target)));
      *target = no_reg;
    }
  }

  // Create all the slots.
  stack_slots.Construct();
  // Execute the stack transfers before filling the instance register.
  stack_transfers.Execute();

  // Pop parameters from the value stack.
  auto stack_end = cache_state_.stack_state.end();
  cache_state_.stack_state.erase(stack_end - num_params, stack_end);

  // Reset register use counters.
  cache_state_.reset_used_registers();

  // Reload the instance from the stack.
  if (!target_instance) {
    FillInstanceInto(instance_reg);
  }
}

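// After a call, push the single return value (if any) onto the value stack,
// taking it from the return register(s) given by {call_descriptor}.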
void LiftoffAssembler::FinishCall(FunctionSig* sig,
                                  compiler::CallDescriptor* call_descriptor) {
  const size_t return_count = sig->return_count();
  if (return_count != 0) {
    DCHECK_EQ(1, return_count);
    ValueType return_type = sig->GetReturn(0);
    const bool need_pair = kNeedI64RegPair && return_type == kWasmI64;
    DCHECK_EQ(need_pair ? 2 : 1, call_descriptor->ReturnCount());
    RegClass rc = need_pair ? kGpReg : reg_class_for(return_type);
#if V8_TARGET_ARCH_ARM
    // If the return register was not d0 for f32, the code value would have to
    // be halved as is done for the parameter registers.
    DCHECK_EQ(call_descriptor->GetReturnLocation(0).AsRegister(), 0);
#endif
    LiftoffRegister return_reg = LiftoffRegister::from_code(
        rc, call_descriptor->GetReturnLocation(0).AsRegister());
    DCHECK(GetCacheRegList(rc).has(return_reg));
    if (need_pair) {
      LiftoffRegister high_reg = LiftoffRegister::from_code(
          rc, call_descriptor->GetReturnLocation(1).AsRegister());
      DCHECK(GetCacheRegList(rc).has(high_reg));
      return_reg = LiftoffRegister::ForPair(return_reg.gp(), high_reg.gp());
    }
    DCHECK(!cache_state_.is_used(return_reg));
    PushRegister(return_type, return_reg);
  }
}

void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
                            ValueType type) {
  DCHECK_EQ(dst.reg_class(), src.reg_class());
  DCHECK_NE(dst, src);
  if (kNeedI64RegPair && dst.is_pair()) {
    // Use the {StackTransferRecipe} to move pairs, as the registers in the
    // pairs might overlap.
    StackTransferRecipe(this).MoveRegister(dst, src, type);
  } else if (dst.is_gp()) {
    Move(dst.gp(), src.gp(), type);
  } else {
    Move(dst.fp(), src.fp(), type);
  }
}

void LiftoffAssembler::ParallelRegisterMove(
    Vector<ParallelRegisterMoveTuple> tuples) {
  StackTransferRecipe stack_transfers(this);
  for (auto tuple : tuples) {
    if (tuple.dst == tuple.src) continue;
    stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type);
  }
}

#ifdef ENABLE_SLOW_DCHECKS
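// Recompute the register use counts and the used-register set from the stack
// state, compare them against the cached values, and fail with a diagnostic
// message on mismatch.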
bool LiftoffAssembler::ValidateCacheState() const {
  uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
  LiftoffRegList used_regs;
  for (const VarState& var : cache_state_.stack_state) {
    if (!var.is_reg()) continue;
    LiftoffRegister reg = var.reg();
    if (kNeedI64RegPair && reg.is_pair()) {
      ++register_use_count[reg.low().liftoff_code()];
      ++register_use_count[reg.high().liftoff_code()];
    } else {
      ++register_use_count[reg.liftoff_code()];
    }
    used_regs.set(reg);
  }
  bool valid = memcmp(register_use_count, cache_state_.register_use_count,
                      sizeof(register_use_count)) == 0 &&
               used_regs == cache_state_.used_registers;
  if (valid) return true;
  std::ostringstream os;
  os << "Error in LiftoffAssembler::ValidateCacheState().\n";
  os << "expected: used_regs " << used_regs << ", counts "
     << PrintCollection(register_use_count) << "\n";
  os << "found: used_regs " << cache_state_.used_registers << ", counts "
     << PrintCollection(cache_state_.register_use_count) << "\n";
  os << "Use --trace-liftoff to debug.";
  FATAL("%s", os.str().c_str());
}
#endif

LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
                                                   LiftoffRegList pinned) {
  // Spill one cached value to free a register.
  LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
  SpillRegister(spill_reg);
  return spill_reg;
}

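// Spill every stack slot whose value is currently held (fully or partially)
// in {reg}, then clear the register's use counters.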
void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
  int remaining_uses = cache_state_.get_use_count(reg);
  DCHECK_LT(0, remaining_uses);
  for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
    DCHECK_GT(cache_state_.stack_height(), idx);
    auto* slot = &cache_state_.stack_state[idx];
    if (!slot->is_reg() || !slot->reg().overlaps(reg)) continue;
    if (slot->reg().is_pair()) {
      // Make sure to decrement *both* registers in a pair, because the
      // {clear_used} call below only clears one of them.
      cache_state_.dec_used(slot->reg().low());
      cache_state_.dec_used(slot->reg().high());
    }
    Spill(idx, slot->reg(), slot->type());
    slot->MakeStack();
    if (--remaining_uses == 0) break;
  }
  cache_state_.clear_used(reg);
}

void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
  DCHECK_EQ(0, num_locals_);  // only call this once.
  num_locals_ = num_locals;
  if (num_locals > kInlineLocalTypes) {
    more_local_types_ =
        reinterpret_cast<ValueType*>(malloc(num_locals * sizeof(ValueType)));
    DCHECK_NOT_NULL(more_local_types_);
  }
}

std::ostream& operator<<(std::ostream& os, VarState slot) {
  os << ValueTypes::TypeName(slot.type()) << ":";
  switch (slot.loc()) {
    case VarState::kStack:
      return os << "s";
    case VarState::kRegister:
      return os << slot.reg();
    case VarState::KIntConst:
      return os << "c" << slot.i32_const();
  }
  UNREACHABLE();
}

#undef __
#undef TRACE

}  // namespace wasm
}  // namespace internal
}  // namespace v8