V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
gap-resolver.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/backend/gap-resolver.h"

#include <algorithm>
#include <set>

#include "src/register-configuration.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

// Splits a FP move between two location operands into the equivalent series of
// moves between smaller sub-operands, e.g. a double move to two single moves.
// This helps reduce the number of cycles that would normally occur under FP
// aliasing, and makes swaps much easier to implement.
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
                    ParallelMove* moves) {
  DCHECK(!kSimpleFPAliasing);
  // Splitting is only possible when the slot size is the same as float size.
  DCHECK_EQ(kPointerSize, kFloatSize);
  const LocationOperand& src_loc = LocationOperand::cast(move->source());
  const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
  MachineRepresentation dst_rep = dst_loc.representation();
  DCHECK_NE(smaller_rep, dst_rep);
  auto src_kind = src_loc.location_kind();
  auto dst_kind = dst_loc.location_kind();

  int aliases =
      1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
  int base = -1;
  USE(base);
  DCHECK_EQ(aliases, RegisterConfiguration::Default()->GetAliases(
                         dst_rep, 0, smaller_rep, &base));

  int src_index = -1;
  int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kPointerSize;
  int src_step = 1;
  if (src_kind == LocationOperand::REGISTER) {
    src_index = src_loc.register_code() * aliases;
  } else {
    src_index = src_loc.index();
    // For operands that occupy multiple slots, the index refers to the last
    // slot. On little-endian architectures, we start at the high slot and use
    // a negative step so that register-to-slot moves are in the correct order.
    src_step = -slot_size;
  }
  int dst_index = -1;
  int dst_step = 1;
  if (dst_kind == LocationOperand::REGISTER) {
    dst_index = dst_loc.register_code() * aliases;
  } else {
    dst_index = dst_loc.index();
    dst_step = -slot_size;
  }

  // Reuse 'move' for the first fragment. It is not pending.
  move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
  move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
  // Add the remaining fragment moves.
  for (int i = 1; i < aliases; ++i) {
    src_index += src_step;
    dst_index += dst_step;
    moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
                   AllocatedOperand(dst_kind, smaller_rep, dst_index));
  }
  // Return the first fragment.
  return move;
}

}  // namespace
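
To make the fragment arithmetic above concrete, here is a minimal standalone model of how Split would break a kFloat64 register-to-stack-slot move into two kFloat32 fragments. It uses plain ints rather than V8's operand types; the register code (1, i.e. d1) and slot index (7) are made-up values, and the constants mirror ElementSizeLog2Of for kFloat32/kFloat64 under the DCHECKed assumption that kPointerSize == kFloatSize (as on 32-bit ARM).

// Standalone sketch, not part of gap-resolver.cc.
#include <cstdio>

int main() {
  // ElementSizeLog2Of(kFloat32) == 2, ElementSizeLog2Of(kFloat64) == 3.
  const int kFloat32SizeLog2 = 2;
  const int kFloat64SizeLog2 = 3;
  const int kPointerSize = 4;  // assumption: slot size == float size

  int aliases = 1 << (kFloat64SizeLog2 - kFloat32SizeLog2);  // 2 fragments
  int slot_size = (1 << kFloat32SizeLog2) / kPointerSize;    // 1 slot each

  // Source: FP register with code 1 (d1) -> float sub-registers 2 and 3
  // (s2, s3), i.e. register_code * aliases, stepping upwards.
  int src_index = 1 * aliases;
  int src_step = 1;
  // Destination: double stack slot 7. The index names the last slot the
  // operand occupies, so fragments step downwards through slots 7 and 6.
  int dst_index = 7;
  int dst_step = -slot_size;

  for (int i = 0; i < aliases; ++i) {
    std::printf("fragment %d: float register s%d -> float slot %d\n",
                i, src_index, dst_index);
    src_index += src_step;
    dst_index += dst_step;
  }
  return 0;
}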

void GapResolver::Resolve(ParallelMove* moves) {
  // Clear redundant moves, and collect FP move representations if aliasing
  // is non-simple.
  int reps = 0;
  for (size_t i = 0; i < moves->size();) {
    MoveOperands* move = (*moves)[i];
    if (move->IsRedundant()) {
      (*moves)[i] = moves->back();
      moves->pop_back();
      continue;
    }
    i++;
    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
      reps |= RepresentationBit(
          LocationOperand::cast(move->destination()).representation());
    }
  }

  if (!kSimpleFPAliasing) {
    if (reps && !base::bits::IsPowerOfTwo(reps)) {
      // Start with the smallest FP moves, so we never encounter smaller moves
      // in the middle of a cycle of larger moves.
      if ((reps & RepresentationBit(MachineRepresentation::kFloat32)) != 0) {
        split_rep_ = MachineRepresentation::kFloat32;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsFloatRegister())
            PerformMove(moves, move);
        }
      }
      if ((reps & RepresentationBit(MachineRepresentation::kFloat64)) != 0) {
        split_rep_ = MachineRepresentation::kFloat64;
        for (size_t i = 0; i < moves->size(); ++i) {
          auto move = (*moves)[i];
          if (!move->IsEliminated() && move->destination().IsDoubleRegister())
            PerformMove(moves, move);
        }
      }
    }
    split_rep_ = MachineRepresentation::kSimd128;
  }

  for (size_t i = 0; i < moves->size(); ++i) {
    auto move = (*moves)[i];
    if (!move->IsEliminated()) PerformMove(moves, move);
  }
}
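
The test `reps && !base::bits::IsPowerOfTwo(reps)` above simply asks whether more than one FP representation appears among the destinations; only then are the size-ordered splitting passes needed. A minimal standalone sketch of that bit trick (the bit assignments below are illustrative, not V8's RepresentationBit values):

// Standalone sketch, not part of gap-resolver.cc.
#include <cstdio>

int main() {
  // One hypothetical bit per FP representation seen as a move destination.
  const int kFloat32Bit = 1 << 0;
  const int kFloat64Bit = 1 << 1;

  auto is_power_of_two = [](int x) { return x != 0 && (x & (x - 1)) == 0; };

  int reps = kFloat32Bit | kFloat64Bit;  // e.g. both float and double moves seen
  // Exactly one bit set -> a single FP size, no splitting passes needed.
  // Two or more bits set -> mixed sizes, resolve smaller representations first.
  bool mixed_sizes = reps && !is_power_of_two(reps);
  std::printf("mixed FP sizes: %s\n", mixed_sizes ? "yes" : "no");
  return 0;
}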

void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark a
  // move as "pending" on entry to PerformMove in order to detect cycles in the
  // move graph. We use operand swaps to resolve cycles, which means that a
  // call to PerformMove could change any source operand in the move graph.
  DCHECK(!move->IsPending());
  DCHECK(!move->IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved on the side.
  InstructionOperand source = move->source();
  DCHECK(!source.IsInvalid());  // Or else it will look eliminated.
  InstructionOperand destination = move->destination();
  move->SetPending();

  // We may need to split moves between FP locations differently.
  const bool is_fp_loc_move =
      !kSimpleFPAliasing && destination.IsFPLocationOperand();

  // Perform a depth-first traversal of the move graph to resolve dependencies.
  // Any unperformed, unpending move with a source the same as this one's
  // destination blocks this one so recursively perform all such moves.
  for (size_t i = 0; i < moves->size(); ++i) {
    auto other = (*moves)[i];
    if (other->IsEliminated()) continue;
    if (other->IsPending()) continue;
    if (other->source().InterferesWith(destination)) {
      if (is_fp_loc_move &&
          LocationOperand::cast(other->source()).representation() >
              split_rep_) {
        // 'other' must also be an FP location move. Break it into fragments
        // of the same size as 'move'. 'other' is set to one of the fragments,
        // and the rest are appended to 'moves'.
        other = Split(other, split_rep_, moves);
        // 'other' may not block destination now.
        if (!other->source().InterferesWith(destination)) continue;
      }
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does not
      // miss any). Assume there is a non-blocking move with source A and this
      // move is blocked on source B and there is a swap of A and B. Then A and
      // B must be involved in the same cycle (or they would not be swapped).
      // Since this move's destination is B and there is only a single incoming
      // edge to an operand, this move must also be involved in the same cycle.
      // In that case, the blocking move will be created but will be "pending"
      // when we return from PerformMove.
      PerformMove(moves, other);
    }
  }

  // This move's source may have changed due to swaps to resolve cycles and so
  // it may now be the last move in the cycle. If so remove it.
  source = move->source();
  if (source.EqualsCanonicalized(destination)) {
    move->Eliminate();
    return;
  }

  // We are about to resolve this move and don't need it marked as pending, so
  // restore its destination.
  move->set_destination(destination);

  // The move may be blocked on a (at most one) pending move, in which case we
  // have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  auto blocker =
      std::find_if(moves->begin(), moves->end(), [&](MoveOperands* move) {
        return !move->IsEliminated() &&
               move->source().InterferesWith(destination);
      });
  if (blocker == moves->end()) {
    // The easy case: This move is not blocked.
    assembler_->AssembleMove(&source, &destination);
    move->Eliminate();
    return;
  }

  // Ensure source is a register or both are stack slots, to limit swap cases.
  if (source.IsStackSlot() || source.IsFPStackSlot()) {
    std::swap(source, destination);
  }
  assembler_->AssembleSwap(&source, &destination);
  move->Eliminate();

  // Update outstanding moves whose source may now have been moved.
  if (is_fp_loc_move) {
    // We may have to split larger moves.
    for (size_t i = 0; i < moves->size(); ++i) {
      auto other = (*moves)[i];
      if (other->IsEliminated()) continue;
      if (source.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!source.InterferesWith(other->source())) continue;
        }
        other->set_source(destination);
      } else if (destination.InterferesWith(other->source())) {
        if (LocationOperand::cast(other->source()).representation() >
            split_rep_) {
          other = Split(other, split_rep_, moves);
          if (!destination.InterferesWith(other->source())) continue;
        }
        other->set_source(source);
      }
    }
  } else {
    for (auto other : *moves) {
      if (other->IsEliminated()) continue;
      if (source.EqualsCanonicalized(other->source())) {
        other->set_source(destination);
      } else if (destination.EqualsCanonicalized(other->source())) {
        other->set_source(source);
      }
    }
  }
}
}  // namespace compiler
}  // namespace internal
}  // namespace v8
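
The cycle-breaking strategy in PerformMove can be seen end to end in a minimal standalone model: moves over plain integer location ids, resolved with the same depth-first traversal, pending marker, and swap-and-redirect steps, while ignoring FP splitting and the register/stack-slot distinction. This is not V8 API, just a sketch of the algorithm on made-up data.

// Standalone sketch, not part of gap-resolver.cc.
#include <cstdio>
#include <vector>

// One entry per parallel move: copy the value currently in location 'src' to
// location 'dst'. 'pending' marks a move whose dependencies are being resolved
// (the cycle detector); 'done' marks a move that has been emitted or dropped.
struct Move {
  int src;
  int dst;
  bool pending = false;
  bool done = false;
};

void Perform(std::vector<Move>& moves, Move& move) {
  const int dst = move.dst;
  move.pending = true;

  // Depth-first traversal: first perform every other move that still reads
  // from our destination, so we do not clobber its source.
  for (Move& other : moves) {
    if (other.done || other.pending) continue;
    if (other.src == dst) Perform(moves, other);
  }
  move.pending = false;

  // Swaps performed deeper in the recursion may have rewritten our source;
  // the move may now be a no-op that closes its cycle.
  if (move.src == dst) {
    move.done = true;
    return;
  }

  // If some live move still reads from our destination, it is a pending move
  // on the current recursion path, i.e. we are in a cycle: break it by
  // swapping the two locations and redirecting later readers.
  for (Move& other : moves) {
    if (!other.done && &other != &move && other.src == dst) {
      std::printf("swap  L%d <-> L%d\n", move.src, dst);
      move.done = true;
      for (Move& m : moves) {
        if (m.done) continue;
        if (m.src == move.src) {
          m.src = dst;
        } else if (m.src == dst) {
          m.src = move.src;
        }
      }
      return;
    }
  }

  // Not blocked: emit a plain move.
  std::printf("move  L%d -> L%d\n", move.src, dst);
  move.done = true;
}

int main() {
  // A three-element cycle L1 -> L2 -> L3 -> L1 plus an independent move.
  std::vector<Move> moves = {{1, 2}, {2, 3}, {3, 1}, {4, 5}};
  for (Move& m : moves) {
    if (!m.done) Perform(moves, m);
  }
  return 0;
}

Running the sketch on the example data emits two swaps followed by one plain move, mirroring how the real resolver turns a cycle into AssembleSwap calls and eliminates the final, now-redundant move.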