V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
store-buffer.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include <algorithm>

#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap), top_(nullptr), current_(0), mode_(NOT_IN_GC) {
  for (int i = 0; i < kStoreBuffers; i++) {
    start_[i] = nullptr;
    limit_[i] = nullptr;
    lazy_top_[i] = nullptr;
  }
  task_running_ = false;
  insertion_callback = &InsertDuringRuntime;
  deletion_callback = &DeleteDuringRuntime;
}

void StoreBuffer::SetUp() {
  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
  // Round up the requested size in order to fulfill the VirtualMemory's
  // requirements on the requested size alignment. This may cause a bit of
  // memory wastage if the actual CommitPageSize() is bigger than the
  // kMinExpectedOSPageSize value, but this is a trade-off for keeping the
  // store buffer overflow check in write barriers cheap.
  const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
                                        page_allocator->CommitPageSize());
  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
  // use a bit test to detect the ends of the buffers.
  STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
  const size_t alignment =
      std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
  VirtualMemory reservation(page_allocator, requested_size, hint, alignment);
  if (!reservation.IsReserved()) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  Address start = reservation.address();
  const size_t allocated_size = reservation.size();

  start_[0] = reinterpret_cast<Address*>(start);
  limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
  start_[1] = limit_[0];
  limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);

  // Sanity check the buffers.
  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
  USE(vm_limit);
  for (int i = 0; i < kStoreBuffers; i++) {
    DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
    DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
    DCHECK(start_[i] <= vm_limit);
    DCHECK(limit_[i] <= vm_limit);
    DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
  }

  // Set RW permissions only on the pages we use.
  const size_t used_size = RoundUp(requested_size, CommitPageSize());
  if (!reservation.SetPermissions(start, used_size,
                                  PageAllocator::kReadWrite)) {
    heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }
  current_ = 0;
  top_ = start_[current_];
  virtual_memory_.TakeControl(&reservation);
}
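
The alignment and the kStoreBufferMask DCHECK above are what keep the overflow check cheap: because every buffer limit is a multiple of kStoreBufferSize (a power of two), the write barrier can detect that top_ has reached the end of its buffer with a single mask test instead of loading and comparing a separate limit pointer. The following is a minimal standalone sketch of that idea; the constants, PushSlot, and the overflow callback are illustrative stand-ins, not the V8 write-barrier code itself.

#include <cstdint>

namespace sketch {

// Illustrative constants: a 64 KB buffer whose start is aligned to its size.
constexpr uintptr_t kBufSize = uintptr_t{1} << 16;
constexpr uintptr_t kBufMask = kBufSize - 1;

// Push a slot address and detect "top hit a buffer end" with one AND:
// because the buffer start is kBufSize-aligned, top lands on a kBufSize
// boundary exactly when it steps past the buffer's last entry.
inline void PushSlot(uintptr_t** top, uintptr_t slot, void (*overflow)()) {
  **top = slot;
  *top += 1;
  if ((reinterpret_cast<uintptr_t>(*top) & kBufMask) == 0) {
    overflow();
  }
}

}  // namespace sketch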

void StoreBuffer::TearDown() {
  if (virtual_memory_.IsReserved()) virtual_memory_.Free();
  top_ = nullptr;
  for (int i = 0; i < kStoreBuffers; i++) {
    start_[i] = nullptr;
    limit_[i] = nullptr;
    lazy_top_[i] = nullptr;
  }
}

void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
                                      Address end) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertDeletionIntoStoreBuffer(start, end);
}

void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
  store_buffer->InsertIntoStoreBuffer(slot);
}

void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address start, Address end) {
  // During GC the store buffer has to be empty at all times.
  DCHECK(store_buffer->Empty());
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  Page* page = Page::FromAddress(start);
  if (end) {
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
  } else {
    RememberedSet<OLD_TO_NEW>::Remove(page, start);
  }
}

void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                                Address slot) {
  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
  RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
}

void StoreBuffer::SetMode(StoreBufferMode mode) {
  mode_ = mode;
  if (mode == NOT_IN_GC) {
    insertion_callback = &InsertDuringRuntime;
    deletion_callback = &DeleteDuringRuntime;
  } else {
    insertion_callback = &InsertDuringGarbageCollection;
    deletion_callback = &DeleteDuringGarbageCollection;
  }
}

int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->FlipStoreBuffers();
  isolate->counters()->store_buffer_overflows()->Increment();
  // Called by RecordWriteCodeStubAssembler, which doesn't accept a void
  // return type.
  return 0;
}

void StoreBuffer::FlipStoreBuffers() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  current_ = other;
  top_ = start_[current_];

  if (!task_running_ && FLAG_concurrent_store_buffer) {
    task_running_ = true;
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<Task>(heap_->isolate(), this));
  }
}
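
FlipStoreBuffers is a double-buffering hand-off: the buffer that just filled up is retired (its fill level recorded in lazy_top_), mutators immediately continue writing into the other buffer, and a single background task drains the retired buffer under the same mutex. Below is a minimal standalone sketch of that hand-off pattern, using std::thread in place of V8's platform worker; the class and all names in it are illustrative, not V8 API.

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

class DoubleBufferSketch {
 public:
  // Called when the current buffer overflows: drain the other buffer if
  // needed, snapshot how full this one is, and switch writers over.
  void Flip() {
    std::lock_guard<std::mutex> guard(mutex_);
    int other = (current_ + 1) % 2;
    Drain(other);                // ensure the target buffer is empty
    lazy_top_[current_] = top_;  // remember the retired buffer's fill level
    current_ = other;
    top_ = 0;
    if (!task_running_) {
      task_running_ = true;
      std::thread([this] {
        std::lock_guard<std::mutex> guard(mutex_);
        Drain((current_ + 1) % 2);  // drain the buffer we just retired
        task_running_ = false;
      }).detach();
    }
  }

 private:
  void Drain(int index) {
    // Move entries [0, lazy_top_[index]) into their final data structure,
    // then mark the buffer as empty.
    lazy_top_[index] = 0;
  }

  std::mutex mutex_;
  std::vector<uintptr_t> buffers_[2]{std::vector<uintptr_t>(1024),
                                     std::vector<uintptr_t>(1024)};
  size_t top_ = 0;  // next free slot in buffers_[current_]
  size_t lazy_top_[2] = {0, 0};
  int current_ = 0;
  bool task_running_ = false;
};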

void StoreBuffer::MoveEntriesToRememberedSet(int index) {
  if (!lazy_top_[index]) return;
  DCHECK_GE(index, 0);
  DCHECK_LT(index, kStoreBuffers);
  Address last_inserted_addr = kNullAddress;

  // We are taking the chunk map mutex here because the page lookup of addr
  // below may require us to check if addr is part of a large page.
  base::MutexGuard guard(heap_->lo_space()->chunk_map_mutex());
  for (Address* current = start_[index]; current < lazy_top_[index];
       current++) {
    Address addr = *current;
    MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    if (IsDeletionAddress(addr)) {
      // Deletion entries occupy two consecutive slots: the tagged start
      // address followed by the end address (kNullAddress for a single slot).
      last_inserted_addr = kNullAddress;
      current++;
      Address end = *current;
      DCHECK(!IsDeletionAddress(end));
      addr = UnmarkDeletionAddress(addr);
      if (end) {
        RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
                                               SlotSet::PREFREE_EMPTY_BUCKETS);
      } else {
        RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
      }
    } else {
      DCHECK(!IsDeletionAddress(addr));
      if (addr != last_inserted_addr) {
        RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
        last_inserted_addr = addr;
      }
    }
  }
  lazy_top_[index] = nullptr;
}
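
MoveEntriesToRememberedSet distinguishes two kinds of entries: plain insertion slots, and deletions that span two consecutive slots (a tagged start address followed by an end address, where kNullAddress means a single-slot deletion). The tagging helpers IsDeletionAddress and UnmarkDeletionAddress are declared in store-buffer.h / store-buffer-inl.h and are not shown on this page; the sketch below assumes a low-bit tagging scheme, which is possible because slot addresses are pointer-aligned and so always have a zero low bit. The tag value here is an assumption for illustration, not a quote of the V8 header.

#include <cstdint>

namespace sketch {

using Address = uintptr_t;

// Assumed tag value, for illustration only; the real constant is defined
// alongside the StoreBuffer class, not in this file.
constexpr Address kDeletionTag = 1;

// Mark, unmark, and test a deletion entry by reusing the low address bit.
inline Address MarkDeletionAddress(Address addr) {
  return addr | kDeletionTag;
}
inline Address UnmarkDeletionAddress(Address addr) {
  return addr & ~kDeletionTag;
}
inline bool IsDeletionAddress(Address addr) {
  return (addr & kDeletionTag) != 0;
}

}  // namespace sketch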

void StoreBuffer::MoveAllEntriesToRememberedSet() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  lazy_top_[current_] = top_;
  MoveEntriesToRememberedSet(current_);
  top_ = start_[current_];
}

void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
  base::MutexGuard guard(&mutex_);
  int other = (current_ + 1) % kStoreBuffers;
  MoveEntriesToRememberedSet(other);
  task_running_ = false;
}

}  // namespace internal
}  // namespace v8