5 #include "src/heap/store-buffer.h" 9 #include "src/base/bits.h" 10 #include "src/base/macros.h" 11 #include "src/base/template-utils.h" 12 #include "src/counters.h" 13 #include "src/heap/incremental-marking.h" 14 #include "src/heap/store-buffer-inl.h" 15 #include "src/isolate.h" 16 #include "src/objects-inl.h" 22 StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
  for (int i = 0; i < kStoreBuffers; i++) {
    lazy_top_[i] = nullptr;
  }
  task_running_ = false;
  insertion_callback = &InsertDuringRuntime;
  deletion_callback = &DeleteDuringRuntime;
}

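// The store buffer records old-to-new slot writes performed by the mutator.
// It is double buffered: entries accumulate in the half selected by current_
// until it overflows, at which point the buffers are flipped and the full
// half is drained into the OLD_TO_NEW remembered set (see FlipStoreBuffers()
// and MoveEntriesToRememberedSet() below).
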
void StoreBuffer::SetUp() {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  // Round the requested size up to the allocation granularity so the
  // reservation below satisfies VirtualMemory's size requirements.
  const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
                                        page_allocator->AllocatePageSize());
  // Align the buffer to at least kStoreBufferSize so the bit operations in
  // the write barrier work.
  STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
  const size_t alignment =
      std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
  VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
  if (!reservation.IsReserved()) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  Address start = reservation.address();
  const size_t allocated_size = reservation.size();

  start_[0] = reinterpret_cast<Address*>(start);
  limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
  start_[1] = limit_[0];
  limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);

  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
  USE(vm_limit);
  for (int i = 0; i < kStoreBuffers; i++) {
    DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
    DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
    DCHECK(start_[i] <= vm_limit);
    DCHECK(limit_[i] <= vm_limit);
    DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
  }

  // Set read-write permissions only on the pages that are actually used.
  const size_t used_size = RoundUp(requested_size, CommitPageSize());
  if (!reservation.SetPermissions(start, used_size,
                                  PageAllocator::kReadWrite)) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  top_ = start_[current_];
  virtual_memory_.TakeControl(&reservation);
}

void StoreBuffer::TearDown() {
  if (virtual_memory_.IsReserved()) virtual_memory_.Free();
  for (int i = 0; i < kStoreBuffers; i++) {
    lazy_top_[i] = nullptr;
  }
}

void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
                                      Address end) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertDeletionIntoStoreBuffer(start, end);
}

void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertIntoStoreBuffer(slot);
}

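// InsertIntoStoreBuffer() and InsertDeletionIntoStoreBuffer() are defined in
// store-buffer-inl.h (not shown here); presumably they append entries through
// top_ and trigger StoreBufferOverflow() once limit_ is reached. Deletions are
// recorded as a marked start address followed by the end address, which is
// how MoveEntriesToRememberedSet() decodes them below.
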
void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address start, Address end) {
  // During GC the store buffer has to stay empty at all times.
  DCHECK(store_buffer->Empty());
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  Page* page = Page::FromAddress(start);
  if (end) {
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
  } else {
    RememberedSet<OLD_TO_NEW>::Remove(page, start);
  }
}

void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address slot) {
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
}

void StoreBuffer::SetMode(StoreBufferMode mode) {
  mode_ = mode;
  if (mode == NOT_IN_GC) {
    insertion_callback = &InsertDuringRuntime;
    deletion_callback = &DeleteDuringRuntime;
  } else {
    insertion_callback = &InsertDuringGarbageCollection;
    deletion_callback = &DeleteDuringGarbageCollection;
  }
}

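// The two function pointers let the write barrier switch behaviour without
// branching on mode_: outside of GC, slots are buffered and drained lazily;
// during GC the buffer must stay empty (see the DCHECKs above), so slots go
// straight into the OLD_TO_NEW remembered set.
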
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->FlipStoreBuffers();
  isolate->counters()->store_buffer_overflows()->Increment();
  return 0;
}

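// On overflow the buffers are flipped: the full half is parked behind
// lazy_top_ and, if FLAG_concurrent_store_buffer is enabled, a background
// task drains it into the remembered set while the mutator keeps writing
// into the other half.
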
void StoreBuffer::FlipStoreBuffers() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  current_ = other;
  top_ = start_[current_];

  if (!task_running_ && FLAG_concurrent_store_buffer) {
    task_running_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<Task>(heap_->isolate(), this));
  }
}

void StoreBuffer::MoveEntriesToRememberedSet(int index) {
  if (!lazy_top_[index]) return;
  DCHECK_LT(index, kStoreBuffers);
  Address last_inserted_addr = kNullAddress;

  // The page lookup below (FromAnyPointerAddress) may have to consult the
  // large-object space chunk map, so take its mutex.
  base::MutexGuard guard(heap_->lo_space()->chunk_map_mutex());
  for (Address* current = start_[index]; current < lazy_top_[index];
       current++) {
    Address addr = *current;
    MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    if (IsDeletionAddress(addr)) {
      last_inserted_addr = kNullAddress;
      // A deletion entry occupies two slots: the marked start address
      // followed by the end address.
      current++;
      Address end = *current;
      DCHECK(!IsDeletionAddress(end));
      addr = UnmarkDeletionAddress(addr);
      if (end) {
        RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
                                               SlotSet::PREFREE_EMPTY_BUCKETS);
      } else {
        RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
      }
    } else {
      DCHECK(!IsDeletionAddress(addr));
      if (addr != last_inserted_addr) {
        RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
        last_inserted_addr = addr;
      }
    }
  }
  lazy_top_[index] = nullptr;
}

void StoreBuffer::MoveAllEntriesToRememberedSet() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  MoveEntriesToRememberedSet(current_);
  top_ = start_[current_];
}

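// MoveAllEntriesToRememberedSet() is the synchronous variant: it drains both
// halves under the mutex, presumably so a caller (e.g. the GC) can rely on
// the complete OLD_TO_NEW set being up to date before iterating it.
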
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  task_running_ = false;
}

}  // namespace internal
}  // namespace v8