// V8 API Reference, 7.2.502.16 (for Deno 0.2.4) — src/constant-pool.cc
1 // Copyright 2018 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/constant-pool.h"
6 #include "src/assembler-inl.h"
7 
8 namespace v8 {
9 namespace internal {
10 
11 #if defined(V8_TARGET_ARCH_PPC)
12 
13 ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
14  int double_reach_bits) {
15  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
16  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
17  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
18 }
19 
20 ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
21  ConstantPoolEntry::Type type) const {
22  const PerTypeEntryInfo& info = info_[type];
23 
24  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
25 
26  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
27  int dbl_offset = dbl_count * kDoubleSize;
28  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
29  int ptr_offset = ptr_count * kPointerSize + dbl_offset;
30 
31  if (type == ConstantPoolEntry::DOUBLE) {
32  // Double overflow detection must take into account the reach for both types
33  int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
34  if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
35  (ptr_count > 0 &&
36  !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
37  return ConstantPoolEntry::OVERFLOWED;
38  }
39  } else {
40  DCHECK(type == ConstantPoolEntry::INTPTR);
41  if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
42  return ConstantPoolEntry::OVERFLOWED;
43  }
44  }
45 
46  return ConstantPoolEntry::REGULAR;
47 }
48 
49 ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
50  ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
51  DCHECK(!emitted_label_.is_bound());
52  PerTypeEntryInfo& info = info_[type];
53  const int entry_size = ConstantPoolEntry::size(type);
54  bool merged = false;
55 
56  if (entry.sharing_ok()) {
57  // Try to merge entries
58  std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
59  int end = static_cast<int>(info.shared_entries.size());
60  for (int i = 0; i < end; i++, it++) {
61  if ((entry_size == kPointerSize) ? entry.value() == it->value()
62  : entry.value64() == it->value64()) {
63  // Merge with found entry.
64  entry.set_merged_index(i);
65  merged = true;
66  break;
67  }
68  }
69  }
70 
71  // By definition, merged entries have regular access.
72  DCHECK(!merged || entry.merged_index() < info.regular_count);
73  ConstantPoolEntry::Access access =
74  (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
75 
76  // Enforce an upper bound on search time by limiting the search to
77  // unique sharable entries which fit in the regular section.
78  if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
79  info.shared_entries.push_back(entry);
80  } else {
81  info.entries.push_back(entry);
82  }
83 
84  // We're done if we found a match or have already triggered the
85  // overflow state.
86  if (merged || info.overflow()) return access;
87 
88  if (access == ConstantPoolEntry::REGULAR) {
89  info.regular_count++;
90  } else {
91  info.overflow_start = static_cast<int>(info.entries.size()) - 1;
92  }
93 
94  return access;
95 }
96 
97 void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
98  ConstantPoolEntry::Type type) {
99  PerTypeEntryInfo& info = info_[type];
100  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
101  const int entry_size = ConstantPoolEntry::size(type);
102  int base = emitted_label_.pos();
103  DCHECK_GT(base, 0);
104  int shared_end = static_cast<int>(shared_entries.size());
105  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
106  for (int i = 0; i < shared_end; i++, shared_it++) {
107  int offset = assm->pc_offset() - base;
108  shared_it->set_offset(offset); // Save offset for merged entries.
109  if (entry_size == kPointerSize) {
110  assm->dp(shared_it->value());
111  } else {
112  assm->dq(shared_it->value64());
113  }
114  DCHECK(is_uintn(offset, info.regular_reach_bits));
115 
116  // Patch load sequence with correct offset.
117  assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
118  ConstantPoolEntry::REGULAR, type);
119  }
120 }
121 
122 void ConstantPoolBuilder::EmitGroup(Assembler* assm,
123  ConstantPoolEntry::Access access,
124  ConstantPoolEntry::Type type) {
125  PerTypeEntryInfo& info = info_[type];
126  const bool overflow = info.overflow();
127  std::vector<ConstantPoolEntry>& entries = info.entries;
128  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
129  const int entry_size = ConstantPoolEntry::size(type);
130  int base = emitted_label_.pos();
131  DCHECK_GT(base, 0);
132  int begin;
133  int end;
134 
135  if (access == ConstantPoolEntry::REGULAR) {
136  // Emit any shared entries first
137  EmitSharedEntries(assm, type);
138  }
139 
140  if (access == ConstantPoolEntry::REGULAR) {
141  begin = 0;
142  end = overflow ? info.overflow_start : static_cast<int>(entries.size());
143  } else {
144  DCHECK(access == ConstantPoolEntry::OVERFLOWED);
145  if (!overflow) return;
146  begin = info.overflow_start;
147  end = static_cast<int>(entries.size());
148  }
149 
150  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
151  if (begin > 0) std::advance(it, begin);
152  for (int i = begin; i < end; i++, it++) {
153  // Update constant pool if necessary and get the entry's offset.
154  int offset;
155  ConstantPoolEntry::Access entry_access;
156  if (!it->is_merged()) {
157  // Emit new entry
158  offset = assm->pc_offset() - base;
159  entry_access = access;
160  if (entry_size == kPointerSize) {
161  assm->dp(it->value());
162  } else {
163  assm->dq(it->value64());
164  }
165  } else {
166  // Retrieve offset from shared entry.
167  offset = shared_entries[it->merged_index()].offset();
168  entry_access = ConstantPoolEntry::REGULAR;
169  }
170 
171  DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
172  is_uintn(offset, info.regular_reach_bits));
173 
174  // Patch load sequence with correct offset.
175  assm->PatchConstantPoolAccessInstruction(it->position(), offset,
176  entry_access, type);
177  }
178 }
179 
180 // Emit and return position of pool. Zero implies no constant pool.
181 int ConstantPoolBuilder::Emit(Assembler* assm) {
182  bool emitted = emitted_label_.is_bound();
183  bool empty = IsEmpty();
184 
185  if (!emitted) {
186  // Mark start of constant pool. Align if necessary.
187  if (!empty) assm->DataAlign(kDoubleSize);
188  assm->bind(&emitted_label_);
189  if (!empty) {
190  // Emit in groups based on access and type.
191  // Emit doubles first for alignment purposes.
192  EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
193  EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
194  if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
195  assm->DataAlign(kDoubleSize);
196  EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
197  ConstantPoolEntry::DOUBLE);
198  }
199  if (info_[ConstantPoolEntry::INTPTR].overflow()) {
200  EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
201  ConstantPoolEntry::INTPTR);
202  }
203  }
204  }
205 
206  return !empty ? emitted_label_.pos() : 0;
207 }
208 
209 #endif // defined(V8_TARGET_ARCH_PPC)
210 
211 } // namespace internal
212 } // namespace v8
// NOTE(review): the documentation scrape appended "Definition: libplatform.h:13"
// here; that cross-reference does not belong to this file (src/constant-pool.cc).