V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
slot-set.h
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SLOT_SET_H_
6 #define V8_HEAP_SLOT_SET_H_
7 
8 #include <map>
9 #include <stack>
10 
11 #include "src/allocation.h"
12 #include "src/base/atomic-utils.h"
13 #include "src/base/bits.h"
14 #include "src/utils.h"
15 
16 namespace v8 {
17 namespace internal {
18 
19 enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
20 
21 // Data structure for maintaining a set of slots in a standard (non-large)
22 // page. The base address of the page must be set with SetPageStart before any
23 // operation.
24 // The data structure assumes that the slots are pointer size aligned and
25 // splits the valid slot offset range into kBuckets buckets.
26 // Each bucket is a bitmap with a bit corresponding to a single slot offset.
27 class SlotSet : public Malloced {
28  public:
29  enum EmptyBucketMode {
30  FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
31  PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
32  // set, but deallocated on demand by a sweeper
33  // thread.
34  KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
35  };
36 
37  SlotSet() {
38  for (int i = 0; i < kBuckets; i++) {
39  StoreBucket(&buckets_[i], nullptr);
40  }
41  }
42 
43  ~SlotSet() {
44  for (int i = 0; i < kBuckets; i++) {
45  ReleaseBucket(i);
46  }
47  FreeToBeFreedBuckets();
48  }
49 
50  void SetPageStart(Address page_start) { page_start_ = page_start; }
51 
52  // The slot offset specifies a slot at address page_start_ + slot_offset.
53  // This method should only be called on the main thread because concurrent
54  // allocation of the bucket is not thread-safe.
55  //
56  // AccessMode defines whether there can be concurrent access on the buckets
57  // or not.
58  template <AccessMode access_mode = AccessMode::ATOMIC>
59  void Insert(int slot_offset) {
60  int bucket_index, cell_index, bit_index;
61  SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
62  Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
63  if (bucket == nullptr) {
64  bucket = AllocateBucket();
65  if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
66  DeleteArray<uint32_t>(bucket);
67  bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
68  }
69  }
70  // Check that monotonicity is preserved, i.e., once a bucket is set we do
71  // not free it concurrently.
72  DCHECK_NOT_NULL(bucket);
73  DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
74  uint32_t mask = 1u << bit_index;
75  if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
76  SetCellBits<access_mode>(&bucket[cell_index], mask);
77  }
78  }
79 
80  // The slot offset specifies a slot at address page_start_ + slot_offset.
81  // Returns true if the set contains the slot.
82  bool Contains(int slot_offset) {
83  int bucket_index, cell_index, bit_index;
84  SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
85  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
86  if (bucket == nullptr) return false;
87  return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
88  }
89 
90  // The slot offset specifies a slot at address page_start_ + slot_offset.
91  void Remove(int slot_offset) {
92  int bucket_index, cell_index, bit_index;
93  SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
94  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
95  if (bucket != nullptr) {
96  uint32_t cell = LoadCell(&bucket[cell_index]);
97  uint32_t bit_mask = 1u << bit_index;
98  if (cell & bit_mask) {
99  ClearCellBits(&bucket[cell_index], bit_mask);
100  }
101  }
102  }
103 
104  // The slot offsets specify a range of slots at addresses:
105  // [page_start_ + start_offset ... page_start_ + end_offset).
106  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
107  CHECK_LE(end_offset, 1 << kPageSizeBits);
108  DCHECK_LE(start_offset, end_offset);
109  int start_bucket, start_cell, start_bit;
110  SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
111  int end_bucket, end_cell, end_bit;
112  SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
113  uint32_t start_mask = (1u << start_bit) - 1;
114  uint32_t end_mask = ~((1u << end_bit) - 1);
115  Bucket bucket;
116  if (start_bucket == end_bucket && start_cell == end_cell) {
117  bucket = LoadBucket(&buckets_[start_bucket]);
118  if (bucket != nullptr) {
119  ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
120  }
121  return;
122  }
123  int current_bucket = start_bucket;
124  int current_cell = start_cell;
125  bucket = LoadBucket(&buckets_[current_bucket]);
126  if (bucket != nullptr) {
127  ClearCellBits(&bucket[current_cell], ~start_mask);
128  }
129  current_cell++;
130  if (current_bucket < end_bucket) {
131  if (bucket != nullptr) {
132  ClearBucket(bucket, current_cell, kCellsPerBucket);
133  }
134  // The rest of the current bucket is cleared.
135  // Move on to the next bucket.
136  current_bucket++;
137  current_cell = 0;
138  }
139  DCHECK(current_bucket == end_bucket ||
140  (current_bucket < end_bucket && current_cell == 0));
141  while (current_bucket < end_bucket) {
142  if (mode == PREFREE_EMPTY_BUCKETS) {
143  PreFreeEmptyBucket(current_bucket);
144  } else if (mode == FREE_EMPTY_BUCKETS) {
145  ReleaseBucket(current_bucket);
146  } else {
147  DCHECK(mode == KEEP_EMPTY_BUCKETS);
148  bucket = LoadBucket(&buckets_[current_bucket]);
149  if (bucket != nullptr) {
150  ClearBucket(bucket, 0, kCellsPerBucket);
151  }
152  }
153  current_bucket++;
154  }
155  // All buckets between start_bucket and end_bucket are cleared.
156  bucket = LoadBucket(&buckets_[current_bucket]);
157  DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
158  if (current_bucket == kBuckets || bucket == nullptr) {
159  return;
160  }
161  while (current_cell < end_cell) {
162  StoreCell(&bucket[current_cell], 0);
163  current_cell++;
164  }
165  // All cells between start_cell and end_cell are cleared.
166  DCHECK(current_bucket == end_bucket && current_cell == end_cell);
167  ClearCellBits(&bucket[end_cell], ~end_mask);
168  }
169 
170  // The slot offset specifies a slot at address page_start_ + slot_offset.
171  bool Lookup(int slot_offset) {
172  int bucket_index, cell_index, bit_index;
173  SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
174  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
175  if (bucket == nullptr) return false;
176  return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
177  }
178 
179  // Iterate over all slots in the set and for each slot invoke the callback.
180  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
181  // Returns the new number of slots.
182  // This method should only be called on the main thread.
183  //
184  // Sample usage:
185  // Iterate([](MaybeObjectSlot slot) {
186  // if (good(slot)) return KEEP_SLOT;
187  // else return REMOVE_SLOT;
188  // });
189  template <typename Callback>
190  int Iterate(Callback callback, EmptyBucketMode mode) {
191  int new_count = 0;
192  for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
193  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
194  if (bucket != nullptr) {
195  int in_bucket_count = 0;
196  int cell_offset = bucket_index * kBitsPerBucket;
197  for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
198  uint32_t cell = LoadCell(&bucket[i]);
199  if (cell) {
200  uint32_t old_cell = cell;
201  uint32_t mask = 0;
202  while (cell) {
203  int bit_offset = base::bits::CountTrailingZeros(cell);
204  uint32_t bit_mask = 1u << bit_offset;
205  uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
206  if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
207  ++in_bucket_count;
208  } else {
209  mask |= bit_mask;
210  }
211  cell ^= bit_mask;
212  }
213  uint32_t new_cell = old_cell & ~mask;
214  if (old_cell != new_cell) {
215  ClearCellBits(&bucket[i], mask);
216  }
217  }
218  }
219  if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
220  PreFreeEmptyBucket(bucket_index);
221  }
222  new_count += in_bucket_count;
223  }
224  }
225  return new_count;
226  }
227 
228  int NumberOfPreFreedEmptyBuckets() {
229  base::MutexGuard guard(&to_be_freed_buckets_mutex_);
230  return static_cast<int>(to_be_freed_buckets_.size());
231  }
232 
233  void PreFreeEmptyBuckets() {
234  for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
235  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
236  if (bucket != nullptr) {
237  if (IsEmptyBucket(bucket)) {
238  PreFreeEmptyBucket(bucket_index);
239  }
240  }
241  }
242  }
243 
244  void FreeEmptyBuckets() {
245  for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
246  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
247  if (bucket != nullptr) {
248  if (IsEmptyBucket(bucket)) {
249  ReleaseBucket(bucket_index);
250  }
251  }
252  }
253  }
254 
255  void FreeToBeFreedBuckets() {
256  base::MutexGuard guard(&to_be_freed_buckets_mutex_);
257  while (!to_be_freed_buckets_.empty()) {
258  Bucket top = to_be_freed_buckets_.top();
259  to_be_freed_buckets_.pop();
260  DeleteArray<uint32_t>(top);
261  }
262  DCHECK_EQ(0u, to_be_freed_buckets_.size());
263  }
264 
265  private:
266  typedef uint32_t* Bucket;
267  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
268  static const int kCellsPerBucket = 32;
269  static const int kCellsPerBucketLog2 = 5;
270  static const int kBitsPerCell = 32;
271  static const int kBitsPerCellLog2 = 5;
272  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
273  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
274  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
275 
276  Bucket AllocateBucket() {
277  Bucket result = NewArray<uint32_t>(kCellsPerBucket);
278  for (int i = 0; i < kCellsPerBucket; i++) {
279  result[i] = 0;
280  }
281  return result;
282  }
283 
284  void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
285  DCHECK_GE(start_cell, 0);
286  DCHECK_LE(end_cell, kCellsPerBucket);
287  int current_cell = start_cell;
288  while (current_cell < kCellsPerBucket) {
289  StoreCell(&bucket[current_cell], 0);
290  current_cell++;
291  }
292  }
293 
294  void PreFreeEmptyBucket(int bucket_index) {
295  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
296  if (bucket != nullptr) {
297  base::MutexGuard guard(&to_be_freed_buckets_mutex_);
298  to_be_freed_buckets_.push(bucket);
299  StoreBucket(&buckets_[bucket_index], nullptr);
300  }
301  }
302 
303  void ReleaseBucket(int bucket_index) {
304  Bucket bucket = LoadBucket(&buckets_[bucket_index]);
305  StoreBucket(&buckets_[bucket_index], nullptr);
306  DeleteArray<uint32_t>(bucket);
307  }
308 
309  template <AccessMode access_mode = AccessMode::ATOMIC>
310  Bucket LoadBucket(Bucket* bucket) {
311  if (access_mode == AccessMode::ATOMIC)
312  return base::AsAtomicPointer::Acquire_Load(bucket);
313  return *bucket;
314  }
315 
316  template <AccessMode access_mode = AccessMode::ATOMIC>
317  void StoreBucket(Bucket* bucket, Bucket value) {
318  if (access_mode == AccessMode::ATOMIC) {
319  base::AsAtomicPointer::Release_Store(bucket, value);
320  } else {
321  *bucket = value;
322  }
323  }
324 
325  bool IsEmptyBucket(Bucket bucket) {
326  for (int i = 0; i < kCellsPerBucket; i++) {
327  if (LoadCell(&bucket[i])) {
328  return false;
329  }
330  }
331  return true;
332  }
333 
334  template <AccessMode access_mode = AccessMode::ATOMIC>
335  bool SwapInNewBucket(Bucket* bucket, Bucket value) {
336  if (access_mode == AccessMode::ATOMIC) {
337  return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
338  value) == nullptr;
339  } else {
340  DCHECK_NULL(*bucket);
341  *bucket = value;
342  return true;
343  }
344  }
345 
346  template <AccessMode access_mode = AccessMode::ATOMIC>
347  uint32_t LoadCell(uint32_t* cell) {
348  if (access_mode == AccessMode::ATOMIC)
349  return base::AsAtomic32::Acquire_Load(cell);
350  return *cell;
351  }
352 
353  void StoreCell(uint32_t* cell, uint32_t value) {
354  base::AsAtomic32::Release_Store(cell, value);
355  }
356 
357  void ClearCellBits(uint32_t* cell, uint32_t mask) {
358  base::AsAtomic32::SetBits(cell, 0u, mask);
359  }
360 
361  template <AccessMode access_mode = AccessMode::ATOMIC>
362  void SetCellBits(uint32_t* cell, uint32_t mask) {
363  if (access_mode == AccessMode::ATOMIC) {
364  base::AsAtomic32::SetBits(cell, mask, mask);
365  } else {
366  *cell = (*cell & ~mask) | mask;
367  }
368  }
369 
370  // Converts the slot offset into bucket/cell/bit index.
371  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
372  int* bit_index) {
373  DCHECK_EQ(slot_offset % kPointerSize, 0);
374  int slot = slot_offset >> kPointerSizeLog2;
375  DCHECK(slot >= 0 && slot <= kMaxSlots);
376  *bucket_index = slot >> kBitsPerBucketLog2;
377  *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
378  *bit_index = slot & (kBitsPerCell - 1);
379  }
380 
381  Bucket buckets_[kBuckets];
382  Address page_start_;
383  base::Mutex to_be_freed_buckets_mutex_;
384  std::stack<uint32_t*> to_be_freed_buckets_;
385 };
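A minimal usage sketch (editorial, not part of the header above). It assumes compilation inside namespace v8::internal, a 64-bit build where kPointerSize is 8, and a valid pointer-aligned page base address; the offsets and the trivial callback are purely illustrative.

void SlotSetUsageSketch(Address page_start) {
  SlotSet* slots = new SlotSet();
  slots->SetPageStart(page_start);

  // Record slots by their byte offset from page_start; offsets must be
  // pointer-size aligned. Offset 5 * kPointerSize (40 on a 64-bit build)
  // maps to slot index 5, i.e. bucket 0, cell 0, bit 5 (see SlotToIndices).
  slots->Insert(0 * kPointerSize);
  slots->Insert(5 * kPointerSize);
  DCHECK(slots->Contains(5 * kPointerSize));

  // Remove a single slot, then a half-open range of offsets, keeping any
  // buckets that become empty.
  slots->Remove(0 * kPointerSize);
  slots->RemoveRange(0, 4 * kPointerSize, SlotSet::KEEP_EMPTY_BUCKETS);

  // Visit the remaining slots; returning REMOVE_SLOT would drop a slot from
  // the set. Iterate returns the number of slots kept.
  int remaining = slots->Iterate(
      [](MaybeObjectSlot slot) { return KEEP_SLOT; },
      SlotSet::KEEP_EMPTY_BUCKETS);
  USE(remaining);

  delete slots;
}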
386 
387 enum SlotType {
388  EMBEDDED_OBJECT_SLOT,
389  OBJECT_SLOT,
390  CODE_TARGET_SLOT,
391  CODE_ENTRY_SLOT,
392  CLEARED_SLOT
393 };
394 
395 // Data structure for maintaining a list of typed slots in a page.
396 // Typed slots can only appear in Code and JSFunction objects, so
397 // the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
398 // The implementation is a chain of chunks, where each chunk is an array of
399 // encoded (slot type, slot offset) pairs.
400 // There is no duplicate detection and we do not expect many duplicates because
401 // typed slots contain V8 internal pointers that are not directly exposed to JS.
402 class TypedSlots {
403  public:
404  static const int kMaxOffset = 1 << 29;
405  TypedSlots() = default;
406  virtual ~TypedSlots();
407  V8_EXPORT_PRIVATE void Insert(SlotType type, uint32_t host_offset,
408  uint32_t offset);
409  V8_EXPORT_PRIVATE void Merge(TypedSlots* other);
410 
411  protected:
412  class OffsetField : public BitField<int, 0, 29> {};
413  class TypeField : public BitField<SlotType, 29, 3> {};
414  struct TypedSlot {
415  uint32_t type_and_offset;
416  uint32_t host_offset;
417  };
418  struct Chunk {
419  Chunk* next;
420  TypedSlot* buffer;
421  int32_t capacity;
422  int32_t count;
423  };
424  static const int kInitialBufferSize = 100;
425  static const int kMaxBufferSize = 16 * KB;
426  static int NextCapacity(int capacity) {
427  return Min(kMaxBufferSize, capacity * 2);
428  }
429  Chunk* EnsureChunk();
430  Chunk* NewChunk(Chunk* next, int capacity);
431  Chunk* head_ = nullptr;
432  Chunk* tail_ = nullptr;
433 };
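An illustrative sketch of recording typed slots (editorial, not from the V8 sources). Insert packs the offset into the lower 29 bits of type_and_offset via OffsetField and the type into the upper 3 bits via TypeField; the offsets used here are made up, and the note on Merge reflects its implementation in slot-set.cc.

void TypedSlotsUsageSketch() {
  // A worker-local buffer of typed slots.
  TypedSlots local;
  // Record a code-target slot: the host object begins at byte offset 64 on
  // the page and the slot itself lives at byte offset 128.
  local.Insert(CODE_TARGET_SLOT, 64, 128);
  local.Insert(EMBEDDED_OBJECT_SLOT, 64, 136);

  // Append the worker-local chunks to a shared list; |local| gives up its
  // chunks and is left empty.
  TypedSlots shared;
  shared.Merge(&local);
}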
434 
435 // A multiset of per-page typed slots that allows concurrent iteration
436 // and clearing of invalid slots.
437 class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
438  public:
439 // The PREFREE_EMPTY_CHUNKS mode indicates that chunks detected as empty
440  // during the iteration are queued in to_be_freed_chunks_, which are
441  // then freed in FreeToBeFreedChunks.
442  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
443 
444  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
445 
446  ~TypedSlotSet() override;
447 
448  // Iterate over all slots in the set and for each slot invoke the callback.
449  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
450  // Returns the new number of slots.
451  //
452  // Sample usage:
453 // Iterate([](SlotType slot_type, Address host_addr, Address slot_address) {
454  // if (good(slot_type, slot_address)) return KEEP_SLOT;
455  // else return REMOVE_SLOT;
456  // });
457  // This can run concurrently to ClearInvalidSlots().
458  template <typename Callback>
459  int Iterate(Callback callback, IterationMode mode) {
460  STATIC_ASSERT(CLEARED_SLOT < 8);
461  Chunk* chunk = head_;
462  Chunk* previous = nullptr;
463  int new_count = 0;
464  while (chunk != nullptr) {
465  TypedSlot* buffer = chunk->buffer;
466  int count = chunk->count;
467  bool empty = true;
468  for (int i = 0; i < count; i++) {
469  TypedSlot slot = LoadTypedSlot(buffer + i);
470  SlotType type = TypeField::decode(slot.type_and_offset);
471  if (type != CLEARED_SLOT) {
472  uint32_t offset = OffsetField::decode(slot.type_and_offset);
473  Address addr = page_start_ + offset;
474  Address host_addr = page_start_ + slot.host_offset;
475  if (callback(type, host_addr, addr) == KEEP_SLOT) {
476  new_count++;
477  empty = false;
478  } else {
479  ClearTypedSlot(buffer + i);
480  }
481  }
482  }
483  Chunk* next = chunk->next;
484  if (mode == PREFREE_EMPTY_CHUNKS && empty) {
485 // We remove the chunk from the list but let it still point to its next
486  // chunk to allow concurrent iteration.
487  if (previous) {
488  StoreNext(previous, next);
489  } else {
490  StoreHead(next);
491  }
492  base::MutexGuard guard(&to_be_freed_chunks_mutex_);
493  to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
494  } else {
495  previous = chunk;
496  }
497  chunk = next;
498  }
499  return new_count;
500  }
501 
502  // Clears all slots that have the offset in the specified ranges.
503  // This can run concurrently to Iterate().
504  void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);
505 
506  // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
507  void FreeToBeFreedChunks();
508 
509  private:
510 // Atomic operations used by Iterate and ClearInvalidSlots.
511  Chunk* LoadNext(Chunk* chunk) {
512  return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
513  }
514  void StoreNext(Chunk* chunk, Chunk* next) {
515  return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
516  }
517  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
518  void StoreHead(Chunk* chunk) {
519  base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
520  }
521  TypedSlot LoadTypedSlot(TypedSlot* slot) {
522  // Order is important here and should match that of ClearTypedSlot. The
523  // order guarantees that type != CLEARED_SLOT implies valid host_offset.
524  TypedSlot result;
525  result.host_offset = base::AsAtomic32::Acquire_Load(&slot->host_offset);
526  result.type_and_offset =
527  base::AsAtomic32::Relaxed_Load(&slot->type_and_offset);
528  return result;
529  }
530  void ClearTypedSlot(TypedSlot* slot) {
531  // Order is important here and should match that of LoadTypedSlot.
532  base::AsAtomic32::Relaxed_Store(
533  &slot->type_and_offset,
534  TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
535  base::AsAtomic32::Release_Store(&slot->host_offset, 0);
536  }
537 
538  Address page_start_;
539  base::Mutex to_be_freed_chunks_mutex_;
540  std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
541 };
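An illustrative sweep over a TypedSlotSet (editorial, not from the V8 sources). The callback receives the slot type, the address of the host object and the address of the slot itself; IsStillValid is a hypothetical predicate standing in for whatever validity check the caller applies. With PREFREE_EMPTY_CHUNKS, chunks that become empty during iteration are only queued, so FreeToBeFreedChunks must be called afterwards to actually release them.

// Hypothetical predicate, not part of V8.
bool IsStillValid(SlotType type, Address host_addr, Address slot_addr);

int SweepTypedSlotsSketch(TypedSlotSet* typed_slots) {
  int remaining = typed_slots->Iterate(
      [](SlotType type, Address host_addr, Address slot_addr) {
        // Returning REMOVE_SLOT clears the entry in place (CLEARED_SLOT).
        return IsStillValid(type, host_addr, slot_addr) ? KEEP_SLOT
                                                        : REMOVE_SLOT;
      },
      TypedSlotSet::PREFREE_EMPTY_CHUNKS);
  // Release the chunks that Iterate queued in to_be_freed_chunks_.
  typed_slots->FreeToBeFreedChunks();
  return remaining;
}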
542 
543 } // namespace internal
544 } // namespace v8
545 
546 #endif // V8_HEAP_SLOT_SET_H_