#ifndef V8_HEAP_SLOT_SET_H_
#define V8_HEAP_SLOT_SET_H_

#include <map>
#include <memory>
#include <stack>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation. Slots are assumed to be pointer-size aligned; the valid offset
// range is split into kBuckets buckets, each of which is a bitmap with one bit
// per slot offset.
class SlotSet : public Malloced {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
                            // set, but deallocated only on demand by a sweeper
                            // thread.
    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
  };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      StoreBucket(&buckets_[i], nullptr);
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
    FreeToBeFreedBuckets();
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }

  // Inserts the slot at address page_start_ + slot_offset. AccessMode defines
  // whether the bucket may be accessed concurrently.
  template <AccessMode access_mode = AccessMode::ATOMIC>
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
    if (bucket == nullptr) {
      bucket = AllocateBucket();
      if (!SwapInNewBucket<access_mode>(&buckets_[bucket_index], bucket)) {
        // Another thread installed a bucket first; discard ours and reload.
        DeleteArray<uint32_t>(bucket);
        bucket = LoadBucket<access_mode>(&buckets_[bucket_index]);
      }
    }
    // Check that monotonicity is preserved, i.e., once a bucket is set it is
    // not freed concurrently.
    DCHECK_NOT_NULL(bucket);
    DCHECK_EQ(bucket, LoadBucket<access_mode>(&buckets_[bucket_index]));
    uint32_t mask = 1u << bit_index;
    if ((LoadCell<access_mode>(&bucket[cell_index]) & mask) == 0) {
      SetCellBits<access_mode>(&bucket[cell_index], mask);
    }
  }

  // Returns true if the slot at address page_start_ + slot_offset is present.
  bool Contains(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket == nullptr) return false;
    return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
  }

  // Removes the slot at address page_start_ + slot_offset if it is present.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket != nullptr) {
      uint32_t cell = LoadCell(&bucket[cell_index]);
      uint32_t bit_mask = 1u << bit_index;
      if (cell & bit_mask) {
        ClearCellBits(&bucket[cell_index], bit_mask);
      }
    }
  }

  // Removes all slots in the range
  // [page_start_ + start_offset, page_start_ + end_offset).
  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
    CHECK_LE(end_offset, 1 << kPageSizeBits);
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    Bucket bucket;
    if (start_bucket == end_bucket && start_cell == end_cell) {
      bucket = LoadBucket(&buckets_[start_bucket]);
      if (bucket != nullptr) {
        ClearCellBits(&bucket[start_cell], ~(start_mask | end_mask));
      }
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    bucket = LoadBucket(&buckets_[current_bucket]);
    if (bucket != nullptr) {
      ClearCellBits(&bucket[current_cell], ~start_mask);
    }
    current_cell++;
    if (current_bucket < end_bucket) {
      if (bucket != nullptr) {
        ClearBucket(bucket, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared; move on to the next one.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == PREFREE_EMPTY_BUCKETS) {
        PreFreeEmptyBucket(current_bucket);
      } else if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket = LoadBucket(&buckets_[current_bucket]);
        if (bucket != nullptr) {
          ClearBucket(bucket, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are now cleared.
    bucket = LoadBucket(&buckets_[current_bucket]);
    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
    if (current_bucket == kBuckets || bucket == nullptr) {
      return;
    }
    while (current_cell < end_cell) {
      StoreCell(&bucket[current_cell], 0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are now cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    ClearCellBits(&bucket[end_cell], ~end_mask);
  }

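  // Worked example for the masks above (assuming 8-byte pointers): removing
  // [0x10, 0x28) yields start_bit == 2 and end_bit == 5, so start_mask == 0b11
  // (slots below the range survive) and end_mask == ~0b11111 (slots at or
  // above the range survive). The single-cell case then clears
  // ~(start_mask | end_mask) == 0b11100, i.e. exactly slots 2, 3 and 4.
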
  // Returns true if the slot at address page_start_ + slot_offset is present.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket == nullptr) return false;
    return (LoadCell(&bucket[cell_index]) & (1u << bit_index)) != 0;
  }

  // Iterates over all slots in the set and invokes the callback for each one.
  // If the callback returns REMOVE_SLOT, the slot is removed from the set.
  // Returns the new number of slots.
  template <typename Callback>
  int Iterate(Callback callback, EmptyBucketMode mode) {
    int new_count = 0;
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        int in_bucket_count = 0;
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          uint32_t cell = LoadCell(&bucket[i]);
          if (cell) {
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              ClearCellBits(&bucket[i], mask);
            }
          }
        }
        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
          PreFreeEmptyBucket(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

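  // A minimal usage sketch (not part of the original header). IsSlotStillValid
  // stands in for whatever predicate the caller applies to each slot address:
  //
  //   slot_set->Iterate(
  //       [](Address slot_address) {
  //         return IsSlotStillValid(slot_address) ? KEEP_SLOT : REMOVE_SLOT;
  //       },
  //       SlotSet::KEEP_EMPTY_BUCKETS);
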
  int NumberOfPreFreedEmptyBuckets() {
    base::MutexGuard guard(&to_be_freed_buckets_mutex_);
    return static_cast<int>(to_be_freed_buckets_.size());
  }

  // Unlinks all empty buckets and queues them for deferred deallocation.
  void PreFreeEmptyBuckets() {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        if (IsEmptyBucket(bucket)) {
          PreFreeEmptyBucket(bucket_index);
        }
      }
    }
  }

  // Deallocates all empty buckets immediately.
  void FreeEmptyBuckets() {
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      Bucket bucket = LoadBucket(&buckets_[bucket_index]);
      if (bucket != nullptr) {
        if (IsEmptyBucket(bucket)) {
          ReleaseBucket(bucket_index);
        }
      }
    }
  }

  // Deallocates buckets that were previously queued by PreFreeEmptyBucket.
  void FreeToBeFreedBuckets() {
    base::MutexGuard guard(&to_be_freed_buckets_mutex_);
    while (!to_be_freed_buckets_.empty()) {
      Bucket top = to_be_freed_buckets_.top();
      to_be_freed_buckets_.pop();
      DeleteArray<uint32_t>(top);
    }
    DCHECK_EQ(0u, to_be_freed_buckets_.size());
  }

 private:
  typedef uint32_t* Bucket;
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
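  // As a concrete sizing example (the actual values depend on kPageSizeBits
  // and kPointerSize): with 512 KB pages and 8-byte pointers, kMaxSlots is
  // 65536 and kBuckets is 64, i.e. 64 bitmaps of 32 * 32 bits (128 bytes).
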
  Bucket AllocateBucket() {
    Bucket result = NewArray<uint32_t>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i] = 0;
    }
    return result;
  }

  void ClearBucket(Bucket bucket, int start_cell, int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < kCellsPerBucket) {
      StoreCell(&bucket[current_cell], 0);
      current_cell++;
    }
  }

  void PreFreeEmptyBucket(int bucket_index) {
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    if (bucket != nullptr) {
      base::MutexGuard guard(&to_be_freed_buckets_mutex_);
      to_be_freed_buckets_.push(bucket);
      StoreBucket(&buckets_[bucket_index], nullptr);
    }
  }

  void ReleaseBucket(int bucket_index) {
    Bucket bucket = LoadBucket(&buckets_[bucket_index]);
    StoreBucket(&buckets_[bucket_index], nullptr);
    DeleteArray<uint32_t>(bucket);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  Bucket LoadBucket(Bucket* bucket) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(bucket);
    return *bucket;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void StoreBucket(Bucket* bucket, Bucket value) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomicPointer::Release_Store(bucket, value);
    } else {
      *bucket = value;
    }
  }

  bool IsEmptyBucket(Bucket bucket) {
    for (int i = 0; i < kCellsPerBucket; i++) {
      if (LoadCell(&bucket[i])) {
        return false;
      }
    }
    return true;
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  bool SwapInNewBucket(Bucket* bucket, Bucket value) {
    if (access_mode == AccessMode::ATOMIC) {
      return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
                                                           value) == nullptr;
    } else {
      DCHECK_NULL(*bucket);
      *bucket = value;
      return true;
    }
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  uint32_t LoadCell(uint32_t* cell) {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomic32::Acquire_Load(cell);
    return *cell;
  }

  void StoreCell(uint32_t* cell, uint32_t value) {
    base::AsAtomic32::Release_Store(cell, value);
  }

  void ClearCellBits(uint32_t* cell, uint32_t mask) {
    base::AsAtomic32::SetBits(cell, 0u, mask);
  }

  template <AccessMode access_mode = AccessMode::ATOMIC>
  void SetCellBits(uint32_t* cell, uint32_t mask) {
    if (access_mode == AccessMode::ATOMIC) {
      base::AsAtomic32::SetBits(cell, mask, mask);
    } else {
      *cell = (*cell & ~mask) | mask;
    }
  }

  // Converts a slot offset into bucket, cell and bit indices.
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK_EQ(slot_offset % kPointerSize, 0);
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }
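  // Worked example of the mapping above (assuming 8-byte pointers, i.e.
  // kPointerSizeLog2 == 3): slot_offset 0x2040 gives slot 0x408, which maps to
  // bucket_index 1 (slot >> 10), cell_index 0 ((slot >> 5) & 31) and
  // bit_index 8 (slot & 31).
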
  Bucket buckets_[kBuckets];
  Address page_start_;
  base::Mutex to_be_freed_buckets_mutex_;
  std::stack<uint32_t*> to_be_freed_buckets_;
};

enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CODE_TARGET_SLOT,
  CLEARED_SLOT
};

// A list of typed slots in a page, stored as a chain of chunks. Each chunk
// holds an array of (slot type, host offset, offset) entries encoded into two
// 32-bit words.
class TypedSlots {
 public:
  static const int kMaxOffset = 1 << 29;
  TypedSlots() = default;
  virtual ~TypedSlots();
  V8_EXPORT_PRIVATE void Insert(SlotType type, uint32_t host_offset,
                                uint32_t offset);
  V8_EXPORT_PRIVATE void Merge(TypedSlots* other);

 protected:
  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};
  struct TypedSlot {
    uint32_t type_and_offset;
    uint32_t host_offset;
  };
  struct Chunk {
    Chunk* next;
    TypedSlot* buffer;
    int32_t capacity;
    int32_t count;
  };
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;
  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }
  Chunk* EnsureChunk();
  Chunk* NewChunk(Chunk* next, int capacity);
  Chunk* head_ = nullptr;
  Chunk* tail_ = nullptr;
};

// A multiset of per-page typed slots that allows concurrent iteration and
// clearing of invalid slots.
class TypedSlotSet : public TypedSlots {
 public:
  // With PREFREE_EMPTY_CHUNKS, chunks that become empty during iteration are
  // unlinked from the list and queued for deferred deallocation in
  // FreeToBeFreedChunks.
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}

  // Iterates over all slots in the set and invokes the callback for each one.
  // If the callback returns REMOVE_SLOT, the slot is removed from the set.
  // Returns the new number of slots.
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = head_;
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer;
      int count = chunk->count;
      bool empty = true;
      for (int i = 0; i < count; i++) {
        TypedSlot slot = LoadTypedSlot(buffer + i);
        SlotType type = TypeField::decode(slot.type_and_offset);
        if (type != CLEARED_SLOT) {
          uint32_t offset = OffsetField::decode(slot.type_and_offset);
          Address addr = page_start_ + offset;
          Address host_addr = page_start_ + slot.host_offset;
          if (callback(type, host_addr, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            ClearTypedSlot(buffer + i);
          }
        }
      }
      Chunk* next = chunk->next;
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // Unlink the chunk from the list, but keep its next pointer intact so
        // that concurrent iteration can still proceed past it.
        if (previous) {
          StoreNext(previous, next);
        } else {
          StoreHead(next);
        }
        base::MutexGuard guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

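  // A minimal usage sketch (not part of the original header).
  // IsTypedSlotStillValid stands in for whatever predicate the caller applies;
  // a PREFREE_EMPTY_CHUNKS pass is typically followed by FreeToBeFreedChunks():
  //
  //   typed_slot_set->Iterate(
  //       [](SlotType type, Address host_addr, Address slot_addr) {
  //         return IsTypedSlotStillValid(type, host_addr, slot_addr)
  //                    ? KEEP_SLOT
  //                    : REMOVE_SLOT;
  //       },
  //       TypedSlotSet::PREFREE_EMPTY_CHUNKS);
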
  // Clears all slots whose offsets fall into one of the specified ranges.
  // Can run concurrently to Iterate().
  void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);

  // Frees chunks that were unlinked by PREFREE_EMPTY_CHUNKS iterations.
  void FreeToBeFreedChunks();

 private:
  // Atomic accessors used by Iterate and ClearInvalidSlots.
  Chunk* LoadNext(Chunk* chunk) {
    return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
  }
  void StoreNext(Chunk* chunk, Chunk* next) {
    return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
  }
  Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
  void StoreHead(Chunk* chunk) {
    base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
  }

  TypedSlot LoadTypedSlot(TypedSlot* slot) {
    // The load order matches the store order in ClearTypedSlot so that a slot
    // observed with a non-CLEARED type is read with a consistent host_offset.
    TypedSlot result;
    result.host_offset = base::AsAtomic32::Acquire_Load(&slot->host_offset);
    result.type_and_offset =
        base::AsAtomic32::Relaxed_Load(&slot->type_and_offset);
    return result;
  }

  void ClearTypedSlot(TypedSlot* slot) {
    // The store order matches the load order in LoadTypedSlot.
    base::AsAtomic32::Relaxed_Store(
        &slot->type_and_offset,
        TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
    base::AsAtomic32::Release_Store(&slot->host_offset, 0);
  }

  Address page_start_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};

#endif  // V8_HEAP_SLOT_SET_H_