V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
spaces.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/spaces.h"
6 
7 #include <utility>
8 
9 #include "src/base/bits.h"
10 #include "src/base/macros.h"
11 #include "src/base/platform/semaphore.h"
12 #include "src/base/template-utils.h"
13 #include "src/counters.h"
14 #include "src/heap/array-buffer-tracker.h"
15 #include "src/heap/concurrent-marking.h"
16 #include "src/heap/gc-tracer.h"
17 #include "src/heap/heap-controller.h"
18 #include "src/heap/incremental-marking-inl.h"
19 #include "src/heap/mark-compact.h"
20 #include "src/heap/remembered-set.h"
21 #include "src/heap/slot-set.h"
22 #include "src/heap/sweeper.h"
23 #include "src/msan.h"
24 #include "src/objects-inl.h"
25 #include "src/objects/js-array-buffer-inl.h"
26 #include "src/objects/js-array-inl.h"
27 #include "src/snapshot/snapshot.h"
28 #include "src/v8.h"
29 #include "src/vm-state-inl.h"
30 
31 namespace v8 {
32 namespace internal {
33 
34 // These checks ensure that the lower 32 bits of any real heap object cannot
35 // overlap with the lower 32 bits of the cleared weak reference value, so it
36 // is enough to compare only the lower 32 bits of a MaybeObject in order to
37 // figure out whether it is a cleared weak reference.
38 STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
39 STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
40 STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
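// Why comparing the lower 32 bits suffices: pages are power-of-two aligned,
// so the lower 32 bits of an object address determine its offset within the
// page, and real objects always start at or after the page header. Since the
// cleared weak reference value lies strictly below Page::kHeaderSize, its
// lower 32 bits can never coincide with those of a real heap object.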
41 
42 // ----------------------------------------------------------------------------
43 // HeapObjectIterator
44 
45 HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
46  : cur_addr_(kNullAddress),
47  cur_end_(kNullAddress),
48  space_(space),
49  page_range_(space->first_page(), nullptr),
50  current_page_(page_range_.begin()) {}
51 
52 HeapObjectIterator::HeapObjectIterator(Page* page)
53  : cur_addr_(kNullAddress),
54  cur_end_(kNullAddress),
55  space_(reinterpret_cast<PagedSpace*>(page->owner())),
56  page_range_(page),
57  current_page_(page_range_.begin()) {
58 #ifdef DEBUG
59  Space* owner = page->owner();
60  DCHECK(owner == page->heap()->old_space() ||
61  owner == page->heap()->map_space() ||
62  owner == page->heap()->code_space() ||
63  owner == page->heap()->read_only_space());
64 #endif // DEBUG
65 }
66 
67 // We have hit the end of the current page and should advance to the next
68 // page of objects.
69 bool HeapObjectIterator::AdvanceToNextPage() {
70  DCHECK_EQ(cur_addr_, cur_end_);
71  if (current_page_ == page_range_.end()) return false;
72  Page* cur_page = *(current_page_++);
73  Heap* heap = space_->heap();
74 
75  heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
76 #ifdef ENABLE_MINOR_MC
77  if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
78  heap->minor_mark_compact_collector()->MakeIterable(
79  cur_page, MarkingTreatmentMode::CLEAR,
80  FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
81 #else
82  DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
83 #endif // ENABLE_MINOR_MC
84  cur_addr_ = cur_page->area_start();
85  cur_end_ = cur_page->area_end();
86  DCHECK(cur_page->SweepingDone());
87  return true;
88 }
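// Typical usage of HeapObjectIterator (illustrative sketch, not part of this
// file; it relies on the Next() accessor declared in spaces.h):
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // Inspect obj; AdvanceToNextPage() above is invoked transparently
//     // whenever the current page is exhausted.
//   }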
89 
90 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
91  : heap_(heap) {
92  DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
93 
94  for (SpaceIterator it(heap_); it.has_next();) {
95  it.next()->PauseAllocationObservers();
96  }
97 }
98 
99 PauseAllocationObserversScope::~PauseAllocationObserversScope() {
100  for (SpaceIterator it(heap_); it.has_next();) {
101  it.next()->ResumeAllocationObservers();
102  }
103 }
104 
105 static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
106  LAZY_INSTANCE_INITIALIZER;
107 
108 Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
109  base::MutexGuard guard(&mutex_);
110  auto it = recently_freed_.find(code_range_size);
111  if (it == recently_freed_.end() || it->second.empty()) {
112  return reinterpret_cast<Address>(GetRandomMmapAddr());
113  }
114  Address result = it->second.back();
115  it->second.pop_back();
116  return result;
117 }
118 
119 void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
120  size_t code_range_size) {
121  base::MutexGuard guard(&mutex_);
122  recently_freed_[code_range_size].push_back(code_range_start);
123 }
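// The two methods above form a simple per-size cache of code range start
// addresses (illustrative round trip, not part of this file):
//
//   Address hint = code_range_address_hint.Pointer()->GetAddressHint(size);
//   // ... reserve a code range at or near |hint|, use it, tear it down ...
//   code_range_address_hint.Pointer()->NotifyFreedCodeRange(start, size);
//
// A later isolate requesting the same size is then likely to be placed at the
// just-freed start address instead of a fresh random mmap address, which
// helps keep code ranges clustered in the address space.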
124 
125 // -----------------------------------------------------------------------------
126 // MemoryAllocator
127 //
128 
129 MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
130  size_t code_range_size)
131  : isolate_(isolate),
132  data_page_allocator_(isolate->page_allocator()),
133  code_page_allocator_(nullptr),
134  capacity_(RoundUp(capacity, Page::kPageSize)),
135  size_(0),
136  size_executable_(0),
137  lowest_ever_allocated_(static_cast<Address>(-1ll)),
138  highest_ever_allocated_(kNullAddress),
139  unmapper_(isolate->heap(), this) {
140  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
141 }
142 
143 void MemoryAllocator::InitializeCodePageAllocator(
144  v8::PageAllocator* page_allocator, size_t requested) {
145  DCHECK_NULL(code_page_allocator_instance_.get());
146 
147  code_page_allocator_ = page_allocator;
148 
149  if (requested == 0) {
150  if (!kRequiresCodeRange) return;
151  // When a target requires the code range feature, we put all code objects
152  // in a kMaximalCodeRangeSize range of virtual address space, so that
153  // they can call each other with near calls.
154  requested = kMaximalCodeRangeSize;
155  } else if (requested <= kMinimumCodeRangeSize) {
156  requested = kMinimumCodeRangeSize;
157  }
158 
159  const size_t reserved_area =
160  kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
161  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
162  requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
163  // Fulfilling both the reserved pages requirement and huge code area
164  // alignment is not supported (it would require re-implementation).
165  DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
166  }
167  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
168 
169  Address hint =
170  RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
171  page_allocator->AllocatePageSize());
172  VirtualMemory reservation(
173  page_allocator, requested, reinterpret_cast<void*>(hint),
174  Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
175  if (!reservation.IsReserved()) {
176  V8::FatalProcessOutOfMemory(isolate_,
177  "CodeRange setup: allocate virtual memory");
178  }
179  code_range_ = reservation.region();
180 
181  // We are sure that we have reserved a block of the requested size.
182  DCHECK_GE(reservation.size(), requested);
183  Address base = reservation.address();
184 
185  // On some platforms, specifically Win64, we need to reserve some pages at
186  // the beginning of an executable space. See
187  // https://cs.chromium.org/chromium/src/components/crash/content/
188  // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
189  // for details.
190  if (reserved_area > 0) {
191  if (!reservation.SetPermissions(base, reserved_area,
192  PageAllocator::kReadWrite))
193  V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
194 
195  base += reserved_area;
196  }
197  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
198  size_t size =
199  RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
200  MemoryChunk::kPageSize);
201  DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
202 
203  LOG(isolate_,
204  NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
205  requested));
206 
207  heap_reservation_.TakeControl(&reservation);
208  code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
209  page_allocator, aligned_base, size,
210  static_cast<size_t>(MemoryChunk::kAlignment));
211  code_page_allocator_ = code_page_allocator_instance_.get();
212 }
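// After this setup, code_page_allocator_ points at a BoundedPageAllocator
// that hands out pages only from the reserved code range, while data pages
// keep using the isolate's regular page allocator. The reserved_area pages at
// the start of the range remain read-write and are excluded from the bounded
// allocator's region.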
213 
214 void MemoryAllocator::TearDown() {
215  unmapper()->TearDown();
216 
217  // Check that spaces were torn down before MemoryAllocator.
218  DCHECK_EQ(size_, 0u);
219  // TODO(gc) this will be true again when we fix FreeMemory.
220  // DCHECK_EQ(0, size_executable_);
221  capacity_ = 0;
222 
223  if (last_chunk_.IsReserved()) {
224  last_chunk_.Free();
225  }
226 
227  if (code_page_allocator_instance_.get()) {
228  DCHECK(!code_range_.is_empty());
229  code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
230  code_range_.size());
231  code_range_ = base::AddressRegion();
232  code_page_allocator_instance_.reset();
233  }
234  code_page_allocator_ = nullptr;
235  data_page_allocator_ = nullptr;
236 }
237 
238 class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
239  public:
240  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
241  : CancelableTask(isolate),
242  unmapper_(unmapper),
243  tracer_(isolate->heap()->tracer()) {}
244 
245  private:
246  void RunInternal() override {
247  TRACE_BACKGROUND_GC(tracer_,
248  GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
249  unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
250  unmapper_->active_unmapping_tasks_--;
251  unmapper_->pending_unmapping_tasks_semaphore_.Signal();
252  if (FLAG_trace_unmapper) {
253  PrintIsolate(unmapper_->heap_->isolate(),
254  "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
255  }
256  }
257 
258  Unmapper* const unmapper_;
259  GCTracer* const tracer_;
260  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
261 };
262 
263 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
264  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
265  if (!MakeRoomForNewTasks()) {
266  // kMaxUnmapperTasks are already running. Avoid creating any more.
267  if (FLAG_trace_unmapper) {
268  PrintIsolate(heap_->isolate(),
269  "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
270  kMaxUnmapperTasks);
271  }
272  return;
273  }
274  auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
275  if (FLAG_trace_unmapper) {
276  PrintIsolate(heap_->isolate(),
277  "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
278  task->id());
279  }
280  DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
281  DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
282  DCHECK_GE(active_unmapping_tasks_, 0);
283  active_unmapping_tasks_++;
284  task_ids_[pending_unmapping_tasks_++] = task->id();
285  V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
286  } else {
287  PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
288  }
289 }
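// Bookkeeping used above: pending_unmapping_tasks_ counts tasks scheduled
// since the last CancelAndWaitForPendingTasks(), active_unmapping_tasks_
// counts tasks that have not yet finished RunInternal(), and each finished
// task signals pending_unmapping_tasks_semaphore_ so that a task which could
// not be aborted can be waited for.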
290 
291 void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
292  for (int i = 0; i < pending_unmapping_tasks_; i++) {
293  if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
294  TryAbortResult::kTaskAborted) {
295  pending_unmapping_tasks_semaphore_.Wait();
296  }
297  }
298  pending_unmapping_tasks_ = 0;
299  active_unmapping_tasks_ = 0;
300 
301  if (FLAG_trace_unmapper) {
302  PrintIsolate(
303  heap_->isolate(),
304  "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
305  }
306 }
307 
308 void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
309  CancelAndWaitForPendingTasks();
310  // Free non-regular chunks because they cannot be re-used.
311  PerformFreeMemoryOnQueuedNonRegularChunks();
312 }
313 
314 void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
315  CancelAndWaitForPendingTasks();
316  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
317 }
318 
319 bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
320  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
321 
322  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
323  // All previous unmapping tasks have been run to completion.
324  // Finalize those tasks to make room for new ones.
325  CancelAndWaitForPendingTasks();
326  }
327  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
328 }
329 
330 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
331  MemoryChunk* chunk = nullptr;
332  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
333  allocator_->PerformFreeMemory(chunk);
334  }
335 }
336 
337 template <MemoryAllocator::Unmapper::FreeMode mode>
338 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
339  MemoryChunk* chunk = nullptr;
340  if (FLAG_trace_unmapper) {
341  PrintIsolate(
342  heap_->isolate(),
343  "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
344  NumberOfChunks());
345  }
346  // Regular chunks.
347  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
348  bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
349  allocator_->PerformFreeMemory(chunk);
350  if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
351  }
352  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
353  // The previous loop uncommitted any pages marked as pooled and added them
354  // to the pooled list. In the kReleasePooled case, however, we also need to
355  // free them.
356  while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
357  allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
358  }
359  }
360  PerformFreeMemoryOnQueuedNonRegularChunks();
361 }
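// FreeMode semantics for the template above: kUncommitPooled uncommits the
// memory of pooled chunks but keeps them queued for reuse, whereas
// kReleasePooled additionally frees the pooled chunks themselves via
// MemoryAllocator::kAlreadyPooled (used during tear down and when unmapping
// must be fully completed).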
362 
363 void MemoryAllocator::Unmapper::TearDown() {
364  CHECK_EQ(0, pending_unmapping_tasks_);
365  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
366  for (int i = 0; i < kNumberOfChunkQueues; i++) {
367  DCHECK(chunks_[i].empty());
368  }
369 }
370 
371 size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
372  base::MutexGuard guard(&mutex_);
373  return chunks_[kRegular].size() + chunks_[kNonRegular].size();
374 }
375 
376 int MemoryAllocator::Unmapper::NumberOfChunks() {
377  base::MutexGuard guard(&mutex_);
378  size_t result = 0;
379  for (int i = 0; i < kNumberOfChunkQueues; i++) {
380  result += chunks_[i].size();
381  }
382  return static_cast<int>(result);
383 }
384 
385 size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
386  base::MutexGuard guard(&mutex_);
387 
388  size_t sum = 0;
389  // kPooled chunks are already uncommitted. We only have to account for
390  // kRegular and kNonRegular chunks.
391  for (auto& chunk : chunks_[kRegular]) {
392  sum += chunk->size();
393  }
394  for (auto& chunk : chunks_[kNonRegular]) {
395  sum += chunk->size();
396  }
397  return sum;
398 }
399 
400 bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
401  Address base = reservation->address();
402  size_t size = reservation->size();
403  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
404  return false;
405  }
406  UpdateAllocatedSpaceLimits(base, base + size);
407  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
408  return true;
409 }
410 
411 bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
412  size_t size = reservation->size();
413  if (!reservation->SetPermissions(reservation->address(), size,
414  PageAllocator::kNoAccess)) {
415  return false;
416  }
417  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
418  return true;
419 }
420 
421 void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
422  Address base, size_t size) {
423  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
424 }
425 
426 Address MemoryAllocator::AllocateAlignedMemory(
427  size_t reserve_size, size_t commit_size, size_t alignment,
428  Executability executable, void* hint, VirtualMemory* controller) {
429  v8::PageAllocator* page_allocator = this->page_allocator(executable);
430  DCHECK(commit_size <= reserve_size);
431  VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
432  if (!reservation.IsReserved()) return kNullAddress;
433  Address base = reservation.address();
434  size_ += reservation.size();
435 
436  if (executable == EXECUTABLE) {
437  if (!CommitExecutableMemory(&reservation, base, commit_size,
438  reserve_size)) {
439  base = kNullAddress;
440  }
441  } else {
442  if (reservation.SetPermissions(base, commit_size,
443  PageAllocator::kReadWrite)) {
444  UpdateAllocatedSpaceLimits(base, base + commit_size);
445  } else {
446  base = kNullAddress;
447  }
448  }
449 
450  if (base == kNullAddress) {
451  // Failed to commit the body. Free the mapping and any partially committed
452  // regions inside it.
453  reservation.Free();
454  size_ -= reserve_size;
455  return kNullAddress;
456  }
457 
458  controller->TakeControl(&reservation);
459  return base;
460 }
461 
462 void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
463  base::AddressRegion memory_area =
464  MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
465  if (memory_area.size() != 0) {
466  MemoryAllocator* memory_allocator = heap_->memory_allocator();
467  v8::PageAllocator* page_allocator =
468  memory_allocator->page_allocator(executable());
469  CHECK(page_allocator->DiscardSystemPages(
470  reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
471  }
472 }
473 
474 size_t MemoryChunkLayout::CodePageGuardStartOffset() {
475  // We are guarding code pages: the first OS page after the header
476  // will be protected as non-writable.
477  return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
478 }
479 
480 size_t MemoryChunkLayout::CodePageGuardSize() {
481  return MemoryAllocator::GetCommitPageSize();
482 }
483 
484 intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
485  // We are guarding code pages: the first OS page after the header
486  // will be protected as non-writable.
487  return CodePageGuardStartOffset() + CodePageGuardSize();
488 }
489 
490 intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
491  // We are guarding code pages: the last OS page will be protected as
492  // non-writable.
493  return Page::kPageSize -
494  static_cast<int>(MemoryAllocator::GetCommitPageSize());
495 }
496 
497 size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
498  size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
499  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
500  return memory;
501 }
502 
503 intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
504  return MemoryChunk::kHeaderSize +
505  (kPointerSize - MemoryChunk::kHeaderSize % kPointerSize);
506 }
507 
508 size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
509  AllocationSpace space) {
510  if (space == CODE_SPACE) {
511  return ObjectStartOffsetInCodePage();
512  }
513  return ObjectStartOffsetInDataPage();
514 }
515 
516 size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
517  size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
518  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
519  return memory;
520 }
521 
522 size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
523  AllocationSpace space) {
524  if (space == CODE_SPACE) {
525  return AllocatableMemoryInCodePage();
526  }
527  return AllocatableMemoryInDataPage();
528 }
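// Summary of the layout helpers above: on code pages, objects start after the
// header plus a guard page and end before the trailing guard page; on data
// pages, objects start right after the header, padded to a pointer-aligned
// offset, and run to the end of the page. The AllocatableMemoryIn*() helpers
// return the size of the resulting object area.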
529 
530 Heap* MemoryChunk::synchronized_heap() {
531  return reinterpret_cast<Heap*>(
532  base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
533 }
534 
535 void MemoryChunk::InitializationMemoryFence() {
536  base::SeqCst_MemoryFence();
537 #ifdef THREAD_SANITIZER
538  // Since TSAN does not process memory fences, we use the following annotation
539  // to tell TSAN that there is no data race when emitting an
540  // InitializationMemoryFence. Note that the other thread still needs to
541  // perform MemoryChunk::synchronized_heap().
542  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
543  reinterpret_cast<base::AtomicWord>(heap_));
544 #endif
545 }
546 
547 void MemoryChunk::SetReadAndExecutable() {
548  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
549  DCHECK(owner()->identity() == CODE_SPACE ||
550  owner()->identity() == CODE_LO_SPACE);
551  // Decrementing the write_unprotect_counter_ and changing the page
552  // protection mode has to be atomic.
553  base::MutexGuard guard(page_protection_change_mutex_);
554  if (write_unprotect_counter_ == 0) {
555  // This is a corner case that may happen when we have a
556  // CodeSpaceMemoryModificationScope open and this page was newly
557  // added.
558  return;
559  }
560  write_unprotect_counter_--;
561  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
562  if (write_unprotect_counter_ == 0) {
563  Address protect_start =
564  address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
565  size_t page_size = MemoryAllocator::GetCommitPageSize();
566  DCHECK(IsAligned(protect_start, page_size));
567  size_t protect_size = RoundUp(area_size(), page_size);
568  CHECK(reservation_.SetPermissions(protect_start, protect_size,
569  PageAllocator::kReadExecute));
570  }
571 }
572 
573 void MemoryChunk::SetReadAndWritable() {
574  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
575  DCHECK(owner()->identity() == CODE_SPACE ||
576  owner()->identity() == CODE_LO_SPACE);
577  // Incrementing the write_unprotect_counter_ and changing the page
578  // protection mode has to be atomic.
579  base::MutexGuard guard(page_protection_change_mutex_);
580  write_unprotect_counter_++;
581  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
582  if (write_unprotect_counter_ == 1) {
583  Address unprotect_start =
584  address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
585  size_t page_size = MemoryAllocator::GetCommitPageSize();
586  DCHECK(IsAligned(unprotect_start, page_size));
587  size_t unprotect_size = RoundUp(area_size(), page_size);
588  CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
589  PageAllocator::kReadWrite));
590  }
591 }
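// The counter above makes code-page protection changes nestable (illustrative
// sequence, not part of this file):
//
//   chunk->SetReadAndWritable();   // 0 -> 1: page flipped to kReadWrite
//   chunk->SetReadAndWritable();   // 1 -> 2: no permission change
//   chunk->SetReadAndExecutable(); // 2 -> 1: still writable
//   chunk->SetReadAndExecutable(); // 1 -> 0: page flipped back to kReadExecute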
592 
593 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
594  Address area_start, Address area_end,
595  Executability executable, Space* owner,
596  VirtualMemory reservation) {
597  MemoryChunk* chunk = FromAddress(base);
598 
599  DCHECK(base == chunk->address());
600 
601  chunk->heap_ = heap;
602  chunk->size_ = size;
603  chunk->area_start_ = area_start;
604  chunk->area_end_ = area_end;
605  chunk->flags_ = Flags(NO_FLAGS);
606  chunk->set_owner(owner);
607  chunk->InitializeReservedMemory();
608  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
609  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
610  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
611  nullptr);
612  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
613  nullptr);
614  chunk->invalidated_slots_ = nullptr;
615  chunk->skip_list_ = nullptr;
616  chunk->progress_bar_ = 0;
617  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
618  chunk->set_concurrent_sweeping_state(kSweepingDone);
619  chunk->page_protection_change_mutex_ = new base::Mutex();
620  chunk->write_unprotect_counter_ = 0;
621  chunk->mutex_ = new base::Mutex();
622  chunk->allocated_bytes_ = chunk->area_size();
623  chunk->wasted_memory_ = 0;
624  chunk->young_generation_bitmap_ = nullptr;
625  chunk->marking_bitmap_ = nullptr;
626  chunk->local_tracker_ = nullptr;
627 
628  chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
629  0;
630  chunk->external_backing_store_bytes_
631  [ExternalBackingStoreType::kExternalString] = 0;
632 
633  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
634  chunk->categories_[i] = nullptr;
635  }
636 
637  chunk->AllocateMarkingBitmap();
638  if (owner->identity() == RO_SPACE) {
639  heap->incremental_marking()
640  ->non_atomic_marking_state()
641  ->bitmap(chunk)
642  ->MarkAllBits();
643  } else {
644  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
645  0);
646  }
647 
648  DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
649 
650  if (executable == EXECUTABLE) {
651  chunk->SetFlag(IS_EXECUTABLE);
652  if (heap->write_protect_code_memory()) {
653  chunk->write_unprotect_counter_ =
654  heap->code_space_memory_modification_scope_depth();
655  } else {
656  size_t page_size = MemoryAllocator::GetCommitPageSize();
657  DCHECK(IsAligned(area_start, page_size));
658  size_t area_size = RoundUp(area_end - area_start, page_size);
659  CHECK(reservation.SetPermissions(area_start, area_size,
660  PageAllocator::kReadWriteExecute));
661  }
662  }
663 
664  chunk->reservation_ = std::move(reservation);
665 
666  return chunk;
667 }
668 
669 Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
670  Page* page = static_cast<Page*>(chunk);
671  DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
672  page->owner()->identity()),
673  page->area_size());
674  // Make sure that categories are initialized before freeing the area.
675  page->ResetAllocatedBytes();
676  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
677  page->AllocateFreeListCategories();
678  page->InitializeFreeListCategories();
679  page->list_node().Initialize();
680  page->InitializationMemoryFence();
681  return page;
682 }
683 
684 Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
685  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
686  bool in_to_space = (id() != kFromSpace);
687  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
688  : MemoryChunk::IN_FROM_SPACE);
689  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
690  : MemoryChunk::IN_TO_SPACE));
691  Page* page = static_cast<Page*>(chunk);
692  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
693  page->AllocateLocalTracker();
694  page->list_node().Initialize();
695 #ifdef ENABLE_MINOR_MC
696  if (FLAG_minor_mc) {
697  page->AllocateYoungGenerationBitmap();
698  heap()
699  ->minor_mark_compact_collector()
700  ->non_atomic_marking_state()
701  ->ClearLiveness(page);
702  }
703 #endif // ENABLE_MINOR_MC
704  page->InitializationMemoryFence();
705  return page;
706 }
707 
708 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
709  Executability executable) {
710  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
711  STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
712  FATAL("Code page is too large.");
713  }
714 
715  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
716 
717  LargePage* page = static_cast<LargePage*>(chunk);
718  page->list_node().Initialize();
719  return page;
720 }
721 
722 void Page::AllocateFreeListCategories() {
723  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
724  categories_[i] = new FreeListCategory(
725  reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
726  }
727 }
728 
729 void Page::InitializeFreeListCategories() {
730  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
731  categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
732  }
733 }
734 
735 void Page::ReleaseFreeListCategories() {
736  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
737  if (categories_[i] != nullptr) {
738  delete categories_[i];
739  categories_[i] = nullptr;
740  }
741  }
742 }
743 
744 Page* Page::ConvertNewToOld(Page* old_page) {
745  DCHECK(old_page);
746  DCHECK(old_page->InNewSpace());
747  OldSpace* old_space = old_page->heap()->old_space();
748  old_page->set_owner(old_space);
749  old_page->SetFlags(0, static_cast<uintptr_t>(~0));
750  Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
751  old_space->AddPage(new_page);
752  return new_page;
753 }
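// ConvertNewToOld promotes an entire new-space page: all flags are cleared,
// ownership moves to the old space, and InitializePage() re-establishes the
// old-generation flags and free-list categories before the page is added to
// the old space's page list.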
754 
755 size_t MemoryChunk::CommittedPhysicalMemory() {
756  if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
757  return size();
758  return high_water_mark_;
759 }
760 
761 bool MemoryChunk::IsPagedSpace() const {
762  return owner()->identity() != LO_SPACE;
763 }
764 
765 bool MemoryChunk::InOldSpace() const {
766  return owner()->identity() == OLD_SPACE;
767 }
768 
769 bool MemoryChunk::InLargeObjectSpace() const {
770  return owner()->identity() == LO_SPACE;
771 }
772 
773 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
774  size_t commit_area_size,
775  Executability executable,
776  Space* owner) {
777  DCHECK_LE(commit_area_size, reserve_area_size);
778 
779  size_t chunk_size;
780  Heap* heap = isolate_->heap();
781  Address base = kNullAddress;
782  VirtualMemory reservation;
783  Address area_start = kNullAddress;
784  Address area_end = kNullAddress;
785  void* address_hint =
786  AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
787 
788  //
789  // MemoryChunk layout:
790  //
791  //             Executable
792  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
793  // |           Header           |
794  // +----------------------------+<- base + CodePageGuardStartOffset
795  // |           Guard            |
796  // +----------------------------+<- area_start_
797  // |            Area            |
798  // +----------------------------+<- area_end_ (area_start + commit_area_size)
799  // |  Committed but not used    |
800  // +----------------------------+<- aligned at OS page boundary
801  // | Reserved but not committed |
802  // +----------------------------+<- aligned at OS page boundary
803  // |           Guard            |
804  // +----------------------------+<- base + chunk_size
805  //
806  //           Non-executable
807  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
808  // |           Header           |
809  // +----------------------------+<- area_start_ (base + ObjectStartOffsetInDataPage)
810  // |            Area            |
811  // +----------------------------+<- area_end_ (area_start + commit_area_size)
812  // |  Committed but not used    |
813  // +----------------------------+<- aligned at OS page boundary
814  // | Reserved but not committed |
815  // +----------------------------+<- base + chunk_size
816  //
817 
818  if (executable == EXECUTABLE) {
819  chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
820  reserve_area_size +
821  MemoryChunkLayout::CodePageGuardSize(),
822  GetCommitPageSize());
823 
824  // Size of header (not executable) plus area (executable).
825  size_t commit_size = ::RoundUp(
826  MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
827  GetCommitPageSize());
828  base =
829  AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
830  executable, address_hint, &reservation);
831  if (base == kNullAddress) return nullptr;
832  // Update executable memory size.
833  size_executable_ += reservation.size();
834 
835  if (Heap::ShouldZapGarbage()) {
836  ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
837  ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
838  commit_area_size, kZapValue);
839  }
840 
841  area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
842  area_end = area_start + commit_area_size;
843  } else {
844  chunk_size = ::RoundUp(
845  MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
846  GetCommitPageSize());
847  size_t commit_size = ::RoundUp(
848  MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
849  GetCommitPageSize());
850  base =
851  AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
852  executable, address_hint, &reservation);
853 
854  if (base == kNullAddress) return nullptr;
855 
856  if (Heap::ShouldZapGarbage()) {
857  ZapBlock(
858  base,
859  MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
860  kZapValue);
861  }
862 
863  area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
864  area_end = area_start + commit_area_size;
865  }
866 
867  // Use chunk_size for statistics and callbacks because we assume that they
868  // treat reserved but not-yet committed memory regions of chunks as allocated.
869  isolate_->counters()->memory_allocated()->Increment(
870  static_cast<int>(chunk_size));
871 
872  LOG(isolate_,
873  NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
874 
875  // We cannot use the last chunk in the address space because we would
876  // overflow when comparing top and limit if this chunk is used for a
877  // linear allocation area.
878  if ((base + chunk_size) == 0u) {
879  CHECK(!last_chunk_.IsReserved());
880  last_chunk_.TakeControl(&reservation);
881  UncommitMemory(&last_chunk_);
882  size_ -= chunk_size;
883  if (executable == EXECUTABLE) {
884  size_executable_ -= chunk_size;
885  }
886  CHECK(last_chunk_.IsReserved());
887  return AllocateChunk(reserve_area_size, commit_area_size, executable,
888  owner);
889  }
890 
891  MemoryChunk* chunk =
892  MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
893  executable, owner, std::move(reservation));
894 
895  if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
896  return chunk;
897 }
898 
899 void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
900  if (is_marking) {
901  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
902  SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
903  SetFlag(MemoryChunk::INCREMENTAL_MARKING);
904  } else {
905  ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
906  SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
907  ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
908  }
909 }
910 
911 void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
912  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
913  if (is_marking) {
914  SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
915  SetFlag(MemoryChunk::INCREMENTAL_MARKING);
916  } else {
917  ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
918  ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
919  }
920 }
921 
922 void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
923 
924 void Page::AllocateLocalTracker() {
925  DCHECK_NULL(local_tracker_);
926  local_tracker_ = new LocalArrayBufferTracker(this);
927 }
928 
929 bool Page::contains_array_buffers() {
930  return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
931 }
932 
933 void Page::ResetFreeListStatistics() {
934  wasted_memory_ = 0;
935 }
936 
937 size_t Page::AvailableInFreeList() {
938  size_t sum = 0;
939  ForAllFreeListCategories([&sum](FreeListCategory* category) {
940  sum += category->available();
941  });
942  return sum;
943 }
944 
945 #ifdef DEBUG
946 namespace {
947 // Skips filler starting from the given filler until the end address.
948 // Returns the first address after the skipped fillers.
949 Address SkipFillers(HeapObject* filler, Address end) {
950  Address addr = filler->address();
951  while (addr < end) {
952  filler = HeapObject::FromAddress(addr);
953  CHECK(filler->IsFiller());
954  addr = filler->address() + filler->Size();
955  }
956  return addr;
957 }
958 } // anonymous namespace
959 #endif // DEBUG
960 
961 size_t Page::ShrinkToHighWaterMark() {
962  // Shrinking only makes sense outside of the CodeRange, where we don't care
963  // about address space fragmentation.
964  VirtualMemory* reservation = reserved_memory();
965  if (!reservation->IsReserved()) return 0;
966 
967  // Shrink pages to high water mark. The water mark points either to a filler
968  // or the area_end.
969  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
970  if (filler->address() == area_end()) return 0;
971  CHECK(filler->IsFiller());
972  // Ensure that no objects were allocated in [filler, area_end) region.
973  DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
974  // Ensure that no objects will be allocated on this page.
975  DCHECK_EQ(0u, AvailableInFreeList());
976 
977  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
978  MemoryAllocator::GetCommitPageSize());
979  if (unused > 0) {
980  DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
981  if (FLAG_trace_gc_verbose) {
982  PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
983  reinterpret_cast<void*>(this),
984  reinterpret_cast<void*>(area_end()),
985  reinterpret_cast<void*>(area_end() - unused));
986  }
987  heap()->CreateFillerObjectAt(
988  filler->address(),
989  static_cast<int>(area_end() - filler->address() - unused),
990  ClearRecordedSlots::kNo);
991  heap()->memory_allocator()->PartialFreeMemory(
992  this, address() + size() - unused, unused, area_end() - unused);
993  if (filler->address() != area_end()) {
994  CHECK(filler->IsFiller());
995  CHECK_EQ(filler->address() + filler->Size(), area_end());
996  }
997  }
998  return unused;
999 }
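// The net effect of ShrinkToHighWaterMark(): the filler tail above the high
// water mark is trimmed down to a commit-page boundary, a smaller filler is
// re-created over the part that must stay committed, and PartialFreeMemory()
// returns the unused tail of the reservation to the OS.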
1000 
1001 void Page::CreateBlackArea(Address start, Address end) {
1002  DCHECK(heap()->incremental_marking()->black_allocation());
1003  DCHECK_EQ(Page::FromAddress(start), this);
1004  DCHECK_NE(start, end);
1005  DCHECK_EQ(Page::FromAddress(end - 1), this);
1006  IncrementalMarking::MarkingState* marking_state =
1007  heap()->incremental_marking()->marking_state();
1008  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
1009  AddressToMarkbitIndex(end));
1010  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
1011 }
1012 
1013 void Page::DestroyBlackArea(Address start, Address end) {
1014  DCHECK(heap()->incremental_marking()->black_allocation());
1015  DCHECK_EQ(Page::FromAddress(start), this);
1016  DCHECK_NE(start, end);
1017  DCHECK_EQ(Page::FromAddress(end - 1), this);
1018  IncrementalMarking::MarkingState* marking_state =
1019  heap()->incremental_marking()->marking_state();
1020  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
1021  AddressToMarkbitIndex(end));
1022  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
1023 }
1024 
1025 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1026  size_t bytes_to_free,
1027  Address new_area_end) {
1028  VirtualMemory* reservation = chunk->reserved_memory();
1029  DCHECK(reservation->IsReserved());
1030  chunk->size_ -= bytes_to_free;
1031  chunk->area_end_ = new_area_end;
1032  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
1033  // Add guard page at the end.
1034  size_t page_size = GetCommitPageSize();
1035  DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
1036  DCHECK_EQ(chunk->address() + chunk->size(),
1037  chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
1038  reservation->SetPermissions(chunk->area_end_, page_size,
1039  PageAllocator::kNoAccess);
1040  }
1041  // On e.g. Windows, a reservation may be larger than a page and releasing
1042  // partially starting at |start_free| will also release the potentially
1043  // unused part behind the current page.
1044  const size_t released_bytes = reservation->Release(start_free);
1045  DCHECK_GE(size_, released_bytes);
1046  size_ -= released_bytes;
1047  isolate_->counters()->memory_allocated()->Decrement(
1048  static_cast<int>(released_bytes));
1049 }
1050 
1051 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
1052  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
1053  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
1054 
1055  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
1056  chunk->IsEvacuationCandidate());
1057 
1058  VirtualMemory* reservation = chunk->reserved_memory();
1059  const size_t size =
1060  reservation->IsReserved() ? reservation->size() : chunk->size();
1061  DCHECK_GE(size_, static_cast<size_t>(size));
1062  size_ -= size;
1063  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
1064  if (chunk->executable() == EXECUTABLE) {
1065  DCHECK_GE(size_executable_, size);
1066  size_executable_ -= size;
1067  }
1068 
1069  chunk->SetFlag(MemoryChunk::PRE_FREED);
1070 
1071  if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
1072 }
1073 
1074 
1075 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
1076  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
1077  chunk->ReleaseAllocatedMemory();
1078 
1079  VirtualMemory* reservation = chunk->reserved_memory();
1080  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
1081  UncommitMemory(reservation);
1082  } else {
1083  if (reservation->IsReserved()) {
1084  reservation->Free();
1085  } else {
1086  // Only read-only pages can have a non-initialized reservation object.
1087  DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
1088  FreeMemory(page_allocator(chunk->executable()), chunk->address(),
1089  chunk->size());
1090  }
1091  }
1092 }
1093 
1094 template <MemoryAllocator::FreeMode mode>
1095 void MemoryAllocator::Free(MemoryChunk* chunk) {
1096  switch (mode) {
1097  case kFull:
1098  PreFreeMemory(chunk);
1099  PerformFreeMemory(chunk);
1100  break;
1101  case kAlreadyPooled:
1102  // Pooled pages cannot be touched anymore as their memory is uncommitted.
1103  // Pooled pages are never executable.
1104  FreeMemory(data_page_allocator(), chunk->address(),
1105  static_cast<size_t>(MemoryChunk::kPageSize));
1106  break;
1107  case kPooledAndQueue:
1108  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
1109  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
1110  chunk->SetFlag(MemoryChunk::POOLED);
1111  V8_FALLTHROUGH;
1112  case kPreFreeAndQueue:
1113  PreFreeMemory(chunk);
1114  // The chunks added to this queue will be freed by a concurrent thread.
1115  unmapper()->AddMemoryChunkSafe(chunk);
1116  break;
1117  }
1118 }
1119 
1120 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
1121  MemoryAllocator::kFull>(MemoryChunk* chunk);
1122 
1123 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
1124  MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
1125 
1126 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
1127  MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
1128 
1129 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
1130  MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
1131 
1132 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
1133 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
1134  Executability executable) {
1135  MemoryChunk* chunk = nullptr;
1136  if (alloc_mode == kPooled) {
1137  DCHECK_EQ(size, static_cast<size_t>(
1138  MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
1139  owner->identity())));
1140  DCHECK_EQ(executable, NOT_EXECUTABLE);
1141  chunk = AllocatePagePooled(owner);
1142  }
1143  if (chunk == nullptr) {
1144  chunk = AllocateChunk(size, size, executable, owner);
1145  }
1146  if (chunk == nullptr) return nullptr;
1147  return owner->InitializePage(chunk, executable);
1148 }
1149 
1150 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
1151  Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1152  size_t size, PagedSpace* owner, Executability executable);
1153 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
1154  Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1155  size_t size, SemiSpace* owner, Executability executable);
1156 template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
1157  Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1158  size_t size, SemiSpace* owner, Executability executable);
1159 
1160 LargePage* MemoryAllocator::AllocateLargePage(size_t size,
1161  LargeObjectSpace* owner,
1162  Executability executable) {
1163  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
1164  if (chunk == nullptr) return nullptr;
1165  return LargePage::Initialize(isolate_->heap(), chunk, executable);
1166 }
1167 
1168 template <typename SpaceType>
1169 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
1170  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
1171  if (chunk == nullptr) return nullptr;
1172  const int size = MemoryChunk::kPageSize;
1173  const Address start = reinterpret_cast<Address>(chunk);
1174  const Address area_start =
1175  start +
1176  MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
1177  const Address area_end = start + size;
1178  // Pooled pages are always regular data pages.
1179  DCHECK_NE(CODE_SPACE, owner->identity());
1180  VirtualMemory reservation(data_page_allocator(), start, size);
1181  if (!CommitMemory(&reservation)) return nullptr;
1182  if (Heap::ShouldZapGarbage()) {
1183  ZapBlock(start, size, kZapValue);
1184  }
1185  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
1186  NOT_EXECUTABLE, owner, std::move(reservation));
1187  size_ += size;
1188  return chunk;
1189 }
1190 
1191 void MemoryAllocator::ZapBlock(Address start, size_t size,
1192  uintptr_t zap_value) {
1193  DCHECK_EQ(start % kPointerSize, 0);
1194  DCHECK_EQ(size % kPointerSize, 0);
1195  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
1196  Memory<Address>(start + s) = static_cast<Address>(zap_value);
1197  }
1198 }
1199 
1200 intptr_t MemoryAllocator::GetCommitPageSize() {
1201  if (FLAG_v8_os_page_size != 0) {
1202  DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
1203  return FLAG_v8_os_page_size * KB;
1204  } else {
1205  return CommitPageSize();
1206  }
1207 }
1208 
1209 base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
1210  size_t size) {
1211  size_t page_size = MemoryAllocator::GetCommitPageSize();
1212  if (size < page_size + FreeSpace::kSize) {
1213  return base::AddressRegion(0, 0);
1214  }
1215  Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
1216  Address discardable_end = RoundDown(addr + size, page_size);
1217  if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
1218  return base::AddressRegion(discardable_start,
1219  discardable_end - discardable_start);
1220 }
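// Worked example for ComputeDiscardMemoryArea (illustrative; assumes a 4 KiB
// commit page size and a small FreeSpace::kSize of a couple of words):
//
//   addr = 0x10100, size = 0x3000
//   discardable_start = RoundUp(0x10100 + FreeSpace::kSize, 0x1000) = 0x11000
//   discardable_end   = RoundDown(0x10100 + 0x3000, 0x1000)         = 0x13000
//
// The pages in [0x11000, 0x13000) can be handed to DiscardSystemPages() while
// the FreeSpace header at |addr| stays intact.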
1221 
1222 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
1223  size_t commit_size,
1224  size_t reserved_size) {
1225  const size_t page_size = GetCommitPageSize();
1226  // All addresses and sizes must be aligned to the commit page size.
1227  DCHECK(IsAligned(start, page_size));
1228  DCHECK_EQ(0, commit_size % page_size);
1229  DCHECK_EQ(0, reserved_size % page_size);
1230  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
1231  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
1232  const size_t code_area_offset =
1233  MemoryChunkLayout::ObjectStartOffsetInCodePage();
1234  // reserved_size includes two guard regions, commit_size does not.
1235  DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
1236  const Address pre_guard_page = start + pre_guard_offset;
1237  const Address code_area = start + code_area_offset;
1238  const Address post_guard_page = start + reserved_size - guard_size;
1239  // Commit the non-executable header, from start to pre-code guard page.
1240  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
1241  // Create the pre-code guard page, following the header.
1242  if (vm->SetPermissions(pre_guard_page, page_size,
1243  PageAllocator::kNoAccess)) {
1244  // Commit the executable code body.
1245  if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
1246  PageAllocator::kReadWrite)) {
1247  // Create the post-code guard page.
1248  if (vm->SetPermissions(post_guard_page, page_size,
1249  PageAllocator::kNoAccess)) {
1250  UpdateAllocatedSpaceLimits(start, code_area + commit_size);
1251  return true;
1252  }
1253  vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
1254  }
1255  }
1256  vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
1257  }
1258  return false;
1259 }
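// Resulting page permissions for an executable chunk after a successful call:
//
//   [start, pre_guard_page)                            header      kReadWrite
//   [pre_guard_page, code_area)                        pre-guard   kNoAccess
//   [code_area, code_area + commit_size - pre_guard_offset)  body  kReadWrite
//   [post_guard_page, post_guard_page + page_size)     post-guard  kNoAccess
//
// The body is later flipped between kReadWrite and kReadExecute by
// MemoryChunk::SetReadAndWritable()/SetReadAndExecutable().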
1260 
1261 
1262 // -----------------------------------------------------------------------------
1263 // MemoryChunk implementation
1264 
1265 void MemoryChunk::ReleaseAllocatedMemory() {
1266  if (skip_list_ != nullptr) {
1267  delete skip_list_;
1268  skip_list_ = nullptr;
1269  }
1270  if (mutex_ != nullptr) {
1271  delete mutex_;
1272  mutex_ = nullptr;
1273  }
1274  if (page_protection_change_mutex_ != nullptr) {
1275  delete page_protection_change_mutex_;
1276  page_protection_change_mutex_ = nullptr;
1277  }
1278  ReleaseSlotSet<OLD_TO_NEW>();
1279  ReleaseSlotSet<OLD_TO_OLD>();
1280  ReleaseTypedSlotSet<OLD_TO_NEW>();
1281  ReleaseTypedSlotSet<OLD_TO_OLD>();
1282  ReleaseInvalidatedSlots();
1283  if (local_tracker_ != nullptr) ReleaseLocalTracker();
1284  if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
1285  if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
1286 
1287  if (IsPagedSpace()) {
1288  Page* page = static_cast<Page*>(this);
1289  page->ReleaseFreeListCategories();
1290  }
1291 }
1292 
1293 static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
1294  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
1295  DCHECK_LT(0, pages);
1296  SlotSet* slot_set = new SlotSet[pages];
1297  for (size_t i = 0; i < pages; i++) {
1298  slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1299  }
1300  return slot_set;
1301 }
1302 
1303 template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
1304 template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
1305 
1306 template <RememberedSetType type>
1307 SlotSet* MemoryChunk::AllocateSlotSet() {
1308  SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
1309  SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
1310  &slot_set_[type], nullptr, slot_set);
1311  if (old_slot_set != nullptr) {
1312  delete[] slot_set;
1313  slot_set = old_slot_set;
1314  }
1315  DCHECK(slot_set);
1316  return slot_set;
1317 }
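// AllocateSlotSet() may race with other threads: each contender allocates a
// candidate slot set, a single Release_CompareAndSwap picks the winner, and
// losers delete their copy and adopt the winning pointer.
// AllocateTypedSlotSet() below follows the same pattern.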
1318 
1319 template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
1320 template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
1321 
1322 template <RememberedSetType type>
1323 void MemoryChunk::ReleaseSlotSet() {
1324  SlotSet* slot_set = slot_set_[type];
1325  if (slot_set) {
1326  slot_set_[type] = nullptr;
1327  delete[] slot_set;
1328  }
1329 }
1330 
1331 template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
1332 template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
1333 
1334 template <RememberedSetType type>
1335 TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
1336  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
1337  TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
1338  &typed_slot_set_[type], nullptr, typed_slot_set);
1339  if (old_value != nullptr) {
1340  delete typed_slot_set;
1341  typed_slot_set = old_value;
1342  }
1343  DCHECK(typed_slot_set);
1344  return typed_slot_set;
1345 }
1346 
1347 template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
1348 template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
1349 
1350 template <RememberedSetType type>
1351 void MemoryChunk::ReleaseTypedSlotSet() {
1352  TypedSlotSet* typed_slot_set = typed_slot_set_[type];
1353  if (typed_slot_set) {
1354  typed_slot_set_[type] = nullptr;
1355  delete typed_slot_set;
1356  }
1357 }
1358 
1359 InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
1360  DCHECK_NULL(invalidated_slots_);
1361  invalidated_slots_ = new InvalidatedSlots();
1362  return invalidated_slots_;
1363 }
1364 
1365 void MemoryChunk::ReleaseInvalidatedSlots() {
1366  if (invalidated_slots_) {
1367  delete invalidated_slots_;
1368  invalidated_slots_ = nullptr;
1369  }
1370 }
1371 
1372 void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
1373  int size) {
1374  if (!ShouldSkipEvacuationSlotRecording()) {
1375  if (invalidated_slots() == nullptr) {
1376  AllocateInvalidatedSlots();
1377  }
1378  int old_size = (*invalidated_slots())[object];
1379  (*invalidated_slots())[object] = std::max(old_size, size);
1380  }
1381 }
1382 
1383 bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject* object) {
1384  if (ShouldSkipEvacuationSlotRecording()) {
1385  // Invalidated slots do not matter if we are not recording slots.
1386  return true;
1387  }
1388  if (invalidated_slots() == nullptr) {
1389  return false;
1390  }
1391  return invalidated_slots()->find(object) != invalidated_slots()->end();
1392 }
1393 
1394 void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
1395  HeapObject* new_start) {
1396  DCHECK_LT(old_start, new_start);
1397  DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
1398  MemoryChunk::FromHeapObject(new_start));
1399  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
1400  auto it = invalidated_slots()->find(old_start);
1401  if (it != invalidated_slots()->end()) {
1402  int old_size = it->second;
1403  int delta = static_cast<int>(new_start->address() - old_start->address());
1404  invalidated_slots()->erase(it);
1405  (*invalidated_slots())[new_start] = old_size - delta;
1406  }
1407  }
1408 }
1409 
1410 void MemoryChunk::ReleaseLocalTracker() {
1411  DCHECK_NOT_NULL(local_tracker_);
1412  delete local_tracker_;
1413  local_tracker_ = nullptr;
1414 }
1415 
1416 void MemoryChunk::AllocateYoungGenerationBitmap() {
1417  DCHECK_NULL(young_generation_bitmap_);
1418  young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
1419 }
1420 
1421 void MemoryChunk::ReleaseYoungGenerationBitmap() {
1422  DCHECK_NOT_NULL(young_generation_bitmap_);
1423  free(young_generation_bitmap_);
1424  young_generation_bitmap_ = nullptr;
1425 }
1426 
1427 void MemoryChunk::AllocateMarkingBitmap() {
1428  DCHECK_NULL(marking_bitmap_);
1429  marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
1430 }
1431 
1432 void MemoryChunk::ReleaseMarkingBitmap() {
1433  DCHECK_NOT_NULL(marking_bitmap_);
1434  free(marking_bitmap_);
1435  marking_bitmap_ = nullptr;
1436 }
1437 
1438 // -----------------------------------------------------------------------------
1439 // PagedSpace implementation
1440 
1441 void Space::AddAllocationObserver(AllocationObserver* observer) {
1442  allocation_observers_.push_back(observer);
1443  StartNextInlineAllocationStep();
1444 }
1445 
1446 void Space::RemoveAllocationObserver(AllocationObserver* observer) {
1447  auto it = std::find(allocation_observers_.begin(),
1448  allocation_observers_.end(), observer);
1449  DCHECK(allocation_observers_.end() != it);
1450  allocation_observers_.erase(it);
1451  StartNextInlineAllocationStep();
1452 }
1453 
1454 void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }
1455 
1456 void Space::ResumeAllocationObservers() {
1457  allocation_observers_paused_ = false;
1458 }
1459 
1460 void Space::AllocationStep(int bytes_since_last, Address soon_object,
1461  int size) {
1462  if (!AllocationObserversActive()) {
1463  return;
1464  }
1465 
1466  DCHECK(!heap()->allocation_step_in_progress());
1467  heap()->set_allocation_step_in_progress(true);
1468  heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
1469  for (AllocationObserver* observer : allocation_observers_) {
1470  observer->AllocationStep(bytes_since_last, soon_object, size);
1471  }
1472  heap()->set_allocation_step_in_progress(false);
1473 }
1474 
1475 intptr_t Space::GetNextInlineAllocationStepSize() {
1476  intptr_t next_step = 0;
1477  for (AllocationObserver* observer : allocation_observers_) {
1478  next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
1479  : observer->bytes_to_next_step();
1480  }
1481  DCHECK(allocation_observers_.size() == 0 || next_step > 0);
1482  return next_step;
1483 }
1484 
1485 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
1486  Executability executable)
1487  : SpaceWithLinearArea(heap, space), executable_(executable) {
1488  area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
1489  accounting_stats_.Clear();
1490 }
1491 
1492 void PagedSpace::TearDown() {
1493  while (!memory_chunk_list_.Empty()) {
1494  MemoryChunk* chunk = memory_chunk_list_.front();
1495  memory_chunk_list_.Remove(chunk);
1496  heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
1497  }
1498  accounting_stats_.Clear();
1499 }
1500 
1501 void PagedSpace::RefillFreeList() {
1502  // Any PagedSpace might invoke RefillFreeList. We filter out all spaces
1503  // except the old generation spaces.
1504  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
1505  identity() != MAP_SPACE && identity() != RO_SPACE) {
1506  return;
1507  }
1508  MarkCompactCollector* collector = heap()->mark_compact_collector();
1509  size_t added = 0;
1510  {
1511  Page* p = nullptr;
1512  while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
1513  // Pages can actually change ownership only during compaction. This is
1514  // safe because no other action competes for the page links during
1515  // compaction.
1516  if (is_local()) {
1517  DCHECK_NE(this, p->owner());
1518  PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
1519  base::MutexGuard guard(owner->mutex());
1520  owner->RefineAllocatedBytesAfterSweeping(p);
1521  owner->RemovePage(p);
1522  added += AddPage(p);
1523  } else {
1524  base::MutexGuard guard(mutex());
1525  DCHECK_EQ(this, p->owner());
1526  RefineAllocatedBytesAfterSweeping(p);
1527  added += RelinkFreeListCategories(p);
1528  }
1529  added += p->wasted_memory();
1530  if (is_local() && (added > kCompactionMemoryWanted)) break;
1531  }
1532  }
1533 }
1534 
1535 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1536  base::MutexGuard guard(mutex());
1537 
1538  DCHECK(identity() == other->identity());
1539  // Unmerged fields:
1540  // area_size_
1541  other->FreeLinearAllocationArea();
1542 
1543  // The linear allocation area of {other} should be destroyed now.
1544  DCHECK_EQ(kNullAddress, other->top());
1545  DCHECK_EQ(kNullAddress, other->limit());
1546 
1547  // Move over pages.
1548  for (auto it = other->begin(); it != other->end();) {
1549  Page* p = *(it++);
1550  // Relinking requires the category to be unlinked.
1551  other->RemovePage(p);
1552  AddPage(p);
1553  DCHECK_EQ(p->AvailableInFreeList(),
1554  p->AvailableInFreeListFromAllocatedBytes());
1555  }
1556  DCHECK_EQ(0u, other->Size());
1557  DCHECK_EQ(0u, other->Capacity());
1558 }
1559 
1560 
1561 size_t PagedSpace::CommittedPhysicalMemory() {
1562  if (!base::OS::HasLazyCommits()) return CommittedMemory();
1563  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1564  size_t size = 0;
1565  for (Page* page : *this) {
1566  size += page->CommittedPhysicalMemory();
1567  }
1568  return size;
1569 }
1570 
1571 bool PagedSpace::ContainsSlow(Address addr) {
1572  Page* p = Page::FromAddress(addr);
1573  for (Page* page : *this) {
1574  if (page == p) return true;
1575  }
1576  return false;
1577 }
1578 
1579 void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
1580  CHECK(page->SweepingDone());
1581  auto marking_state =
1582  heap()->incremental_marking()->non_atomic_marking_state();
1583  // The live bytes on the page were accounted for in the space's
1584  // allocated-bytes counter. After sweeping, allocated_bytes() contains
1585  // the accurate live byte count for the page.
1586  size_t old_counter = marking_state->live_bytes(page);
1587  size_t new_counter = page->allocated_bytes();
1588  DCHECK_GE(old_counter, new_counter);
1589  if (old_counter > new_counter) {
1590  DecreaseAllocatedBytes(old_counter - new_counter, page);
1591  // Give the heap a chance to adjust counters in response to the
1592  // more precise and smaller old generation size.
1593  heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
1594  }
1595  marking_state->SetLiveBytes(page, 0);
1596 }
1597 
1598 Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
1599  base::MutexGuard guard(mutex());
1600  // Check for pages that still contain free list entries, starting with the
1601  // largest category. Bail out for categories smaller than the minimum needed.
1602  const int minimum_category =
1603  static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
1604  Page* page = free_list()->GetPageForCategoryType(kHuge);
1605  if (!page && static_cast<int>(kLarge) >= minimum_category)
1606  page = free_list()->GetPageForCategoryType(kLarge);
1607  if (!page && static_cast<int>(kMedium) >= minimum_category)
1608  page = free_list()->GetPageForCategoryType(kMedium);
1609  if (!page && static_cast<int>(kSmall) >= minimum_category)
1610  page = free_list()->GetPageForCategoryType(kSmall);
1611  if (!page && static_cast<int>(kTiny) >= minimum_category)
1612  page = free_list()->GetPageForCategoryType(kTiny);
1613  if (!page && static_cast<int>(kTiniest) >= minimum_category)
1614  page = free_list()->GetPageForCategoryType(kTiniest);
1615  if (!page) return nullptr;
1616  RemovePage(page);
1617  return page;
1618 }
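
// Illustrative usage sketch (hypothetical caller and variable names, not taken
// from this file): a compaction space that wants to reuse an already-swept
// page for roughly `size_in_bytes` of live data could try
//
//   Page* page = main_space->RemovePageSafe(size_in_bytes);
//   if (page != nullptr) compaction_space->AddPage(page);
//
// and fall back to allocating a fresh page via Expand() when no category at or
// above the required size holds a page.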
1619 
1620 size_t PagedSpace::AddPage(Page* page) {
1621  CHECK(page->SweepingDone());
1622  page->set_owner(this);
1623  memory_chunk_list_.PushBack(page);
1624  AccountCommitted(page->size());
1625  IncreaseCapacity(page->area_size());
1626  IncreaseAllocatedBytes(page->allocated_bytes(), page);
1627  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
1628  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
1629  IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
1630  }
1631  return RelinkFreeListCategories(page);
1632 }
1633 
1634 void PagedSpace::RemovePage(Page* page) {
1635  CHECK(page->SweepingDone());
1636  memory_chunk_list_.Remove(page);
1637  UnlinkFreeListCategories(page);
1638  DecreaseAllocatedBytes(page->allocated_bytes(), page);
1639  DecreaseCapacity(page->area_size());
1640  AccountUncommitted(page->size());
1641  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
1642  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
1643  DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
1644  }
1645 }
1646 
1647 size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
1648  size_t unused = page->ShrinkToHighWaterMark();
1649  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1650  AccountUncommitted(unused);
1651  return unused;
1652 }
1653 
1654 void PagedSpace::ResetFreeList() {
1655  for (Page* page : *this) {
1656  free_list_.EvictFreeListItems(page);
1657  }
1658  DCHECK(free_list_.IsEmpty());
1659 }
1660 
1661 void PagedSpace::ShrinkImmortalImmovablePages() {
1662  DCHECK(!heap()->deserialization_complete());
1663  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1664  FreeLinearAllocationArea();
1665  ResetFreeList();
1666  for (Page* page : *this) {
1667  DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1668  ShrinkPageToHighWaterMark(page);
1669  }
1670 }
1671 
1672 bool PagedSpace::Expand() {
1673  // Always lock against the main space as we can only adjust capacity and
1674  // pages concurrently for the main paged space.
1675  base::MutexGuard guard(heap()->paged_space(identity())->mutex());
1676 
1677  const int size = AreaSize();
1678 
1679  if (!heap()->CanExpandOldGeneration(size)) return false;
1680 
1681  Page* page =
1682  heap()->memory_allocator()->AllocatePage(size, this, executable());
1683  if (page == nullptr) return false;
1684  // Pages created during bootstrapping may contain immortal immovable objects.
1685  if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
1686  AddPage(page);
1687  Free(page->area_start(), page->area_size(),
1688  SpaceAccountingMode::kSpaceAccounted);
1689  return true;
1690 }
1691 
1692 
1693 int PagedSpace::CountTotalPages() {
1694  int count = 0;
1695  for (Page* page : *this) {
1696  count++;
1697  USE(page);
1698  }
1699  return count;
1700 }
1701 
1702 
1703 void PagedSpace::ResetFreeListStatistics() {
1704  for (Page* page : *this) {
1705  page->ResetFreeListStatistics();
1706  }
1707 }
1708 
1709 void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
1710  SetTopAndLimit(top, limit);
1711  if (top != kNullAddress && top != limit &&
1712  heap()->incremental_marking()->black_allocation()) {
1713  Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
1714  }
1715 }
1716 
1717 void PagedSpace::DecreaseLimit(Address new_limit) {
1718  Address old_limit = limit();
1719  DCHECK_LE(top(), new_limit);
1720  DCHECK_GE(old_limit, new_limit);
1721  if (new_limit != old_limit) {
1722  SetTopAndLimit(top(), new_limit);
1723  Free(new_limit, old_limit - new_limit,
1724  SpaceAccountingMode::kSpaceAccounted);
1725  if (heap()->incremental_marking()->black_allocation()) {
1726  Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
1727  old_limit);
1728  }
1729  }
1730 }
1731 
1732 Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
1733  size_t min_size) {
1734  DCHECK_GE(end - start, min_size);
1735 
1736  if (heap()->inline_allocation_disabled()) {
1737  // Fit the requested area exactly.
1738  return start + min_size;
1739  } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
1740  // Generated code may allocate inline from the linear allocation area.
1741  // To make sure we can observe these allocations, we use a lower limit.
1742  size_t step = GetNextInlineAllocationStepSize();
1743 
1744  // TODO(ofrobots): there is a subtle difference between old space and new
1745  // space here. Any way to avoid it? `step - 1` makes more sense as we would
1746  // like to sample the object that straddles the `start + step` boundary.
1747  // Rounding down further would introduce a small statistical error in
1748  // sampling. However, presently PagedSpace requires limit to be aligned.
1749  size_t rounded_step;
1750  if (identity() == NEW_SPACE) {
1751  DCHECK_GE(step, 1);
1752  rounded_step = step - 1;
1753  } else {
1754  rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
1755  }
1756  return Min(static_cast<Address>(start + min_size + rounded_step), end);
1757  } else {
1758  // The entire node can be used as the linear allocation area.
1759  return end;
1760  }
1761 }
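
// Worked example for the observer branch above (numbers are illustrative
// only): with start = 0x10000, end = 0x14000, min_size = 0x40 and a next
// observer step of 0x800, an old-space caller gets
//
//   limit = Min(start + min_size + RoundSizeDownToObjectAlignment(0x800), end)
//         = Min(0x10840, 0x14000) = 0x10840
//
// assuming the step is already object-aligned, so the linear area is capped
// well before `end` and the observers get a chance to run at that point.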
1762 
1763 void PagedSpace::MarkLinearAllocationAreaBlack() {
1764  DCHECK(heap()->incremental_marking()->black_allocation());
1765  Address current_top = top();
1766  Address current_limit = limit();
1767  if (current_top != kNullAddress && current_top != current_limit) {
1768  Page::FromAllocationAreaAddress(current_top)
1769  ->CreateBlackArea(current_top, current_limit);
1770  }
1771 }
1772 
1773 void PagedSpace::UnmarkLinearAllocationArea() {
1774  Address current_top = top();
1775  Address current_limit = limit();
1776  if (current_top != kNullAddress && current_top != current_limit) {
1777  Page::FromAllocationAreaAddress(current_top)
1778  ->DestroyBlackArea(current_top, current_limit);
1779  }
1780 }
1781 
1782 void PagedSpace::FreeLinearAllocationArea() {
1783  // Mark the old linear allocation area with a free space map so it can be
1784  // skipped when scanning the heap.
1785  Address current_top = top();
1786  Address current_limit = limit();
1787  if (current_top == kNullAddress) {
1788  DCHECK_EQ(kNullAddress, current_limit);
1789  return;
1790  }
1791 
1792  if (heap()->incremental_marking()->black_allocation()) {
1793  Page* page = Page::FromAllocationAreaAddress(current_top);
1794 
1795  // Clear the bits in the unused black area.
1796  if (current_top != current_limit) {
1797  IncrementalMarking::MarkingState* marking_state =
1798  heap()->incremental_marking()->marking_state();
1799  marking_state->bitmap(page)->ClearRange(
1800  page->AddressToMarkbitIndex(current_top),
1801  page->AddressToMarkbitIndex(current_limit));
1802  marking_state->IncrementLiveBytes(
1803  page, -static_cast<int>(current_limit - current_top));
1804  }
1805  }
1806 
1807  InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
1808  SetTopAndLimit(kNullAddress, kNullAddress);
1809  DCHECK_GE(current_limit, current_top);
1810 
1811  // The code page of the linear allocation area needs to be unprotected
1812  // because we are going to write a filler into that memory area below.
1813  if (identity() == CODE_SPACE) {
1814  heap()->UnprotectAndRegisterMemoryChunk(
1815  MemoryChunk::FromAddress(current_top));
1816  }
1817  Free(current_top, current_limit - current_top,
1818  SpaceAccountingMode::kSpaceAccounted);
1819 }
1820 
1821 void PagedSpace::ReleasePage(Page* page) {
1822  DCHECK_EQ(
1823  0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
1824  page));
1825  DCHECK_EQ(page->owner(), this);
1826 
1827  free_list_.EvictFreeListItems(page);
1828  DCHECK(!free_list_.ContainsPageFreeListItems(page));
1829 
1830  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1831  DCHECK(!top_on_previous_step_);
1832  allocation_info_.Reset(kNullAddress, kNullAddress);
1833  }
1834 
1835  AccountUncommitted(page->size());
1836  accounting_stats_.DecreaseCapacity(page->area_size());
1837  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1838 }
1839 
1840 void PagedSpace::SetReadAndExecutable() {
1841  DCHECK(identity() == CODE_SPACE);
1842  for (Page* page : *this) {
1843  CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
1844  page->SetReadAndExecutable();
1845  }
1846 }
1847 
1848 void PagedSpace::SetReadAndWritable() {
1849  DCHECK(identity() == CODE_SPACE);
1850  for (Page* page : *this) {
1851  CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
1852  page->SetReadAndWritable();
1853  }
1854 }
1855 
1856 std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
1857  return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
1858 }
1859 
1860 bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
1861  DCHECK(IsAligned(size_in_bytes, kPointerSize));
1862  DCHECK_LE(top(), limit());
1863 #ifdef DEBUG
1864  if (top() != limit()) {
1865  DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
1866  }
1867 #endif
1868  // Don't allocate from the free list if there is linear space available.
1869  DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
1870 
1871  // Mark the old linear allocation area with a free space map so it can be
1872  // skipped when scanning the heap. This also puts it back in the free list
1873  // if it is big enough.
1874  FreeLinearAllocationArea();
1875 
1876  if (!is_local()) {
1877  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
1878  heap()->GCFlagsForIncrementalMarking(),
1879  kGCCallbackScheduleIdleGarbageCollection);
1880  }
1881 
1882  size_t new_node_size = 0;
1883  FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
1884  if (new_node == nullptr) return false;
1885 
1886  DCHECK_GE(new_node_size, size_in_bytes);
1887 
1888  // The old-space-step might have finished sweeping and restarted marking.
1889  // Verify that it did not turn the page of the new node into an evacuation
1890  // candidate.
1891  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1892 
1893  // Memory in the linear allocation area is counted as allocated. We may free
1894  // a little of this again immediately - see below.
1895  Page* page = Page::FromAddress(new_node->address());
1896  IncreaseAllocatedBytes(new_node_size, page);
1897 
1898  Address start = new_node->address();
1899  Address end = new_node->address() + new_node_size;
1900  Address limit = ComputeLimit(start, end, size_in_bytes);
1901  DCHECK_LE(limit, end);
1902  DCHECK_LE(size_in_bytes, limit - start);
1903  if (limit != end) {
1904  if (identity() == CODE_SPACE) {
1905  heap()->UnprotectAndRegisterMemoryChunk(page);
1906  }
1907  Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
1908  }
1909  SetLinearAllocationArea(start, limit);
1910 
1911  return true;
1912 }
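
// Worked example (illustrative numbers): a request of size_in_bytes = 0x60
// that is served by a 0x1000-byte free-list node on `page` proceeds roughly as
//
//   IncreaseAllocatedBytes(0x1000, page);      // whole node counted first
//   limit = ComputeLimit(start, start + 0x1000, 0x60);
//   Free(limit, start + 0x1000 - limit, ...);  // unused tail returned
//   SetLinearAllocationArea(start, limit);
//
// so only the part up to `limit` stays reserved for linear allocation.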
1913 
1914 #ifdef DEBUG
1915 void PagedSpace::Print() {}
1916 #endif
1917 
1918 #ifdef VERIFY_HEAP
1919 void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
1920  bool allocation_pointer_found_in_space =
1921  (allocation_info_.top() == allocation_info_.limit());
1922  size_t external_space_bytes[kNumTypes];
1923  size_t external_page_bytes[kNumTypes];
1924 
1925  for (int i = 0; i < kNumTypes; i++) {
1926  external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
1927  }
1928 
1929  for (Page* page : *this) {
1930  CHECK(page->owner() == this);
1931 
1932  for (int i = 0; i < kNumTypes; i++) {
1933  external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
1934  }
1935 
1936  if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
1937  allocation_pointer_found_in_space = true;
1938  }
1939  CHECK(page->SweepingDone());
1940  HeapObjectIterator it(page);
1941  Address end_of_previous_object = page->area_start();
1942  Address top = page->area_end();
1943 
1944  for (HeapObject* object = it.Next(); object != nullptr;
1945  object = it.Next()) {
1946  CHECK(end_of_previous_object <= object->address());
1947 
1948  // The first word should be a map, and we expect all map pointers to
1949  // be in map space.
1950  Map map = object->map();
1951  CHECK(map->IsMap());
1952  CHECK(heap()->map_space()->Contains(map) ||
1953  heap()->read_only_space()->Contains(map));
1954 
1955  // Perform space-specific object verification.
1956  VerifyObject(object);
1957 
1958  // The object itself should look OK.
1959  object->ObjectVerify(isolate);
1960 
1961  if (!FLAG_verify_heap_skip_remembered_set) {
1962  heap()->VerifyRememberedSetFor(object);
1963  }
1964 
1965  // All the interior pointers should be contained in the heap.
1966  int size = object->Size();
1967  object->IterateBody(map, size, visitor);
1968  CHECK(object->address() + size <= top);
1969  end_of_previous_object = object->address() + size;
1970 
1971  if (object->IsExternalString()) {
1972  ExternalString external_string = ExternalString::cast(object);
1973  size_t size = external_string->ExternalPayloadSize();
1974  external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
1975  } else if (object->IsJSArrayBuffer()) {
1976  JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
1977  if (ArrayBufferTracker::IsTracked(array_buffer)) {
1978  size_t size = array_buffer->byte_length();
1979  external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
1980  }
1981  }
1982  }
1983  for (int i = 0; i < kNumTypes; i++) {
1984  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
1985  CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
1986  external_space_bytes[t] += external_page_bytes[t];
1987  }
1988  }
1989  for (int i = 0; i < kNumTypes; i++) {
1990  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
1991  CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
1992  }
1993  CHECK(allocation_pointer_found_in_space);
1994 #ifdef DEBUG
1995  VerifyCountersAfterSweeping();
1996 #endif
1997 }
1998 
1999 void PagedSpace::VerifyLiveBytes() {
2000  IncrementalMarking::MarkingState* marking_state =
2001  heap()->incremental_marking()->marking_state();
2002  for (Page* page : *this) {
2003  CHECK(page->SweepingDone());
2004  HeapObjectIterator it(page);
2005  int black_size = 0;
2006  for (HeapObject* object = it.Next(); object != nullptr;
2007  object = it.Next()) {
2008  // All the interior pointers should be contained in the heap.
2009  if (marking_state->IsBlack(object)) {
2010  black_size += object->Size();
2011  }
2012  }
2013  CHECK_LE(black_size, marking_state->live_bytes(page));
2014  }
2015 }
2016 #endif // VERIFY_HEAP
2017 
2018 #ifdef DEBUG
2019 void PagedSpace::VerifyCountersAfterSweeping() {
2020  size_t total_capacity = 0;
2021  size_t total_allocated = 0;
2022  for (Page* page : *this) {
2023  DCHECK(page->SweepingDone());
2024  total_capacity += page->area_size();
2025  HeapObjectIterator it(page);
2026  size_t real_allocated = 0;
2027  for (HeapObject* object = it.Next(); object != nullptr;
2028  object = it.Next()) {
2029  if (!object->IsFiller()) {
2030  real_allocated += object->Size();
2031  }
2032  }
2033  total_allocated += page->allocated_bytes();
2034  // The real size can be smaller than the accounted size if array trimming
2035  // or object slack tracking happened after sweeping.
2036  DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
2037  DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
2038  }
2039  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
2040  DCHECK_EQ(total_allocated, accounting_stats_.Size());
2041 }
2042 
2043 void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
2044  // We need to refine the counters on pages that are already swept and have
2045  // not been moved over to the actual space. Otherwise, the AccountingStats
2046  // are just an overapproximation.
2047  RefillFreeList();
2048 
2049  size_t total_capacity = 0;
2050  size_t total_allocated = 0;
2051  auto marking_state =
2052  heap()->incremental_marking()->non_atomic_marking_state();
2053  for (Page* page : *this) {
2054  size_t page_allocated =
2055  page->SweepingDone()
2056  ? page->allocated_bytes()
2057  : static_cast<size_t>(marking_state->live_bytes(page));
2058  total_capacity += page->area_size();
2059  total_allocated += page_allocated;
2060  DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
2061  }
2062  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
2063  DCHECK_EQ(total_allocated, accounting_stats_.Size());
2064 }
2065 #endif
2066 
2067 // -----------------------------------------------------------------------------
2068 // NewSpace implementation
2069 
2070 NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
2071  size_t initial_semispace_capacity,
2072  size_t max_semispace_capacity)
2073  : SpaceWithLinearArea(heap, NEW_SPACE),
2074  to_space_(heap, kToSpace),
2075  from_space_(heap, kFromSpace) {
2076  DCHECK(initial_semispace_capacity <= max_semispace_capacity);
2077  DCHECK(
2078  base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));
2079 
2080  to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
2081  from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
2082  if (!to_space_.Commit()) {
2083  V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
2084  }
2085  DCHECK(!from_space_.is_committed()); // No need to use memory yet.
2086  ResetLinearAllocationArea();
2087 }
2088 
2089 void NewSpace::TearDown() {
2090  allocation_info_.Reset(kNullAddress, kNullAddress);
2091 
2092  to_space_.TearDown();
2093  from_space_.TearDown();
2094 }
2095 
2096 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
2097 
2098 
2099 void NewSpace::Grow() {
2100  // Double the semispace size but only up to maximum capacity.
2101  DCHECK(TotalCapacity() < MaximumCapacity());
2102  size_t new_capacity =
2103  Min(MaximumCapacity(),
2104  static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
2105  if (to_space_.GrowTo(new_capacity)) {
2106  // Only grow from space if we managed to grow to-space.
2107  if (!from_space_.GrowTo(new_capacity)) {
2108  // If we managed to grow to-space but couldn't grow from-space,
2109  // attempt to shrink to-space.
2110  if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
2111  // We are in an inconsistent state because we could not
2112  // commit/uncommit memory from new space.
2113  FATAL("inconsistent state");
2114  }
2115  }
2116  }
2117  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2118 }
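
// Worked example (illustrative numbers): with TotalCapacity() == 2 MB,
// MaximumCapacity() == 8 MB and FLAG_semi_space_growth_factor == 2, Grow()
// requests new_capacity = Min(8 MB, 2 * 2 MB) = 4 MB for both semispaces;
// from-space is grown only after to-space has grown successfully.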
2119 
2120 
2121 void NewSpace::Shrink() {
2122  size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
2123  size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
2124  if (rounded_new_capacity < TotalCapacity() &&
2125  to_space_.ShrinkTo(rounded_new_capacity)) {
2126  // Only shrink from-space if we managed to shrink to-space.
2127  from_space_.Reset();
2128  if (!from_space_.ShrinkTo(rounded_new_capacity)) {
2129  // If we managed to shrink to-space but couldn't shrink from
2130  // space, attempt to grow to-space again.
2131  if (!to_space_.GrowTo(from_space_.current_capacity())) {
2132  // We are in an inconsistent state because we could not
2133  // commit/uncommit memory from new space.
2134  FATAL("inconsistent state");
2135  }
2136  }
2137  }
2138  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2139 }
2140 
2141 bool NewSpace::Rebalance() {
2142  // Order here is important to make use of the page pool.
2143  return to_space_.EnsureCurrentCapacity() &&
2144  from_space_.EnsureCurrentCapacity();
2145 }
2146 
2147 bool SemiSpace::EnsureCurrentCapacity() {
2148  if (is_committed()) {
2149  const int expected_pages =
2150  static_cast<int>(current_capacity_ / Page::kPageSize);
2151  MemoryChunk* current_page = first_page();
2152  int actual_pages = 0;
2153 
2154  // First iterate through the pages list until we have seen the expected
2155  // number of pages, if that many exist.
2156  while (current_page != nullptr && actual_pages < expected_pages) {
2157  actual_pages++;
2158  current_page = current_page->list_node().next();
2159  }
2160 
2161  // Free all overallocated pages which are behind current_page.
2162  while (current_page) {
2163  MemoryChunk* next_current = current_page->list_node().next();
2164  memory_chunk_list_.Remove(current_page);
2165  // Clear new space flags to avoid this page being treated as a new
2166  // space page that is potentially being swept.
2167  current_page->SetFlags(0, Page::kIsInNewSpaceMask);
2168  heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
2169  current_page);
2170  current_page = next_current;
2171  }
2172 
2173  // Add more pages if we have fewer than expected_pages.
2174  IncrementalMarking::NonAtomicMarkingState* marking_state =
2175  heap()->incremental_marking()->non_atomic_marking_state();
2176  while (actual_pages < expected_pages) {
2177  actual_pages++;
2178  current_page =
2179  heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2180  MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
2181  NOT_EXECUTABLE);
2182  if (current_page == nullptr) return false;
2183  DCHECK_NOT_NULL(current_page);
2184  memory_chunk_list_.PushBack(current_page);
2185  marking_state->ClearLiveness(current_page);
2186  current_page->SetFlags(first_page()->GetFlags(),
2187  static_cast<uintptr_t>(Page::kCopyAllFlags));
2188  heap()->CreateFillerObjectAt(current_page->area_start(),
2189  static_cast<int>(current_page->area_size()),
2190  ClearRecordedSlots::kNo);
2191  }
2192  }
2193  return true;
2194 }
2195 
2196 LinearAllocationArea LocalAllocationBuffer::Close() {
2197  if (IsValid()) {
2198  heap_->CreateFillerObjectAt(
2199  allocation_info_.top(),
2200  static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
2201  ClearRecordedSlots::kNo);
2202  const LinearAllocationArea old_info = allocation_info_;
2203  allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
2204  return old_info;
2205  }
2206  return LinearAllocationArea(kNullAddress, kNullAddress);
2207 }
2208 
2209 LocalAllocationBuffer::LocalAllocationBuffer(
2210  Heap* heap, LinearAllocationArea allocation_info)
2211  : heap_(heap), allocation_info_(allocation_info) {
2212  if (IsValid()) {
2213  heap_->CreateFillerObjectAt(
2214  allocation_info_.top(),
2215  static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
2216  ClearRecordedSlots::kNo);
2217  }
2218 }
2219 
2220 
2221 LocalAllocationBuffer::LocalAllocationBuffer(
2222  const LocalAllocationBuffer& other) {
2223  *this = other;
2224 }
2225 
2226 
2227 LocalAllocationBuffer& LocalAllocationBuffer::operator=(
2228  const LocalAllocationBuffer& other) {
2229  Close();
2230  heap_ = other.heap_;
2231  allocation_info_ = other.allocation_info_;
2232 
2233  // This is needed since we (a) cannot yet use move semantics, (b) want to
2234  // make the class easy to use by value, and (c) implicitly call {Close}
2235  // upon copy.
2236  const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
2237  kNullAddress, kNullAddress);
2238  return *this;
2239 }
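
// Illustrative lifecycle sketch (hypothetical, not from this file): a
// LocalAllocationBuffer wraps a LinearAllocationArea, bump-allocates from it,
// and on Close() (or when copied over) the unused tail is turned into a
// filler object so the heap stays iterable:
//
//   LocalAllocationBuffer lab = ...;          // wraps [top, limit)
//   ... allocate objects by bumping top ...
//   LinearAllocationArea rest = lab.Close();  // filler written over the rest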
2240 
2241 void NewSpace::UpdateLinearAllocationArea() {
2242  // Make sure there are no unaccounted allocations.
2243  DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
2244 
2245  Address new_top = to_space_.page_low();
2246  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2247  allocation_info_.Reset(new_top, to_space_.page_high());
2248  // The order of the following two stores is important.
2249  // See the corresponding loads in ConcurrentMarking::Run.
2250  original_limit_.store(limit(), std::memory_order_relaxed);
2251  original_top_.store(top(), std::memory_order_release);
2252  StartNextInlineAllocationStep();
2253  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2254 }
2255 
2256 void NewSpace::ResetLinearAllocationArea() {
2257  // Do a step to account for memory allocated so far before resetting.
2258  InlineAllocationStep(top(), top(), kNullAddress, 0);
2259  to_space_.Reset();
2260  UpdateLinearAllocationArea();
2261  // Clear all mark-bits in the to-space.
2262  IncrementalMarking::NonAtomicMarkingState* marking_state =
2263  heap()->incremental_marking()->non_atomic_marking_state();
2264  for (Page* p : to_space_) {
2265  marking_state->ClearLiveness(p);
2266  // Concurrent marking may have local live bytes for this page.
2267  heap()->concurrent_marking()->ClearLiveness(p);
2268  }
2269 }
2270 
2271 void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
2272  Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
2273  allocation_info_.set_limit(new_limit);
2274  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2275 }
2276 
2277 void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
2278  Address new_limit = ComputeLimit(top(), limit(), min_size);
2279  DCHECK_LE(new_limit, limit());
2280  DecreaseLimit(new_limit);
2281 }
2282 
2283 bool NewSpace::AddFreshPage() {
2284  Address top = allocation_info_.top();
2285  DCHECK(!OldSpace::IsAtPageStart(top));
2286 
2287  // Do a step to account for memory allocated on previous page.
2288  InlineAllocationStep(top, top, kNullAddress, 0);
2289 
2290  if (!to_space_.AdvancePage()) {
2291  // No more pages left to advance.
2292  return false;
2293  }
2294 
2295  // Clear remainder of current page.
2296  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
2297  int remaining_in_page = static_cast<int>(limit - top);
2298  heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
2299  UpdateLinearAllocationArea();
2300 
2301  return true;
2302 }
2303 
2304 
2305 bool NewSpace::AddFreshPageSynchronized() {
2306  base::MutexGuard guard(&mutex_);
2307  return AddFreshPage();
2308 }
2309 
2310 
2311 bool NewSpace::EnsureAllocation(int size_in_bytes,
2312  AllocationAlignment alignment) {
2313  Address old_top = allocation_info_.top();
2314  Address high = to_space_.page_high();
2315  int filler_size = Heap::GetFillToAlign(old_top, alignment);
2316  int aligned_size_in_bytes = size_in_bytes + filler_size;
2317 
2318  if (old_top + aligned_size_in_bytes > high) {
2319  // Not enough room in the page, try to allocate a new one.
2320  if (!AddFreshPage()) {
2321  return false;
2322  }
2323 
2324  old_top = allocation_info_.top();
2325  high = to_space_.page_high();
2326  filler_size = Heap::GetFillToAlign(old_top, alignment);
2327  }
2328 
2329  DCHECK(old_top + aligned_size_in_bytes <= high);
2330 
2331  if (allocation_info_.limit() < high) {
2332  // Either the limit has been lowered because linear allocation was disabled
2333  // or because incremental marking wants to get a chance to do a step,
2334  // or because the idle scavenge job wants to get a chance to post a task.
2335  // Set the new limit accordingly.
2336  Address new_top = old_top + aligned_size_in_bytes;
2337  Address soon_object = old_top + filler_size;
2338  InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
2339  UpdateInlineAllocationLimit(aligned_size_in_bytes);
2340  }
2341  return true;
2342 }
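
// Worked example of the alignment handling above (illustrative numbers,
// assuming an alignment that requires 8-byte boundaries): if old_top ends in
// ...0x4, Heap::GetFillToAlign returns filler_size = 4, so a 24-byte request
// needs aligned_size_in_bytes = 28 contiguous bytes, and the object itself
// would start at soon_object = old_top + 4.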
2343 
2344 size_t LargeObjectSpace::Available() {
2345  // We return zero here since we cannot take advantage of already allocated
2346  // large object memory.
2347  return 0;
2348 }
2349 
2350 void SpaceWithLinearArea::StartNextInlineAllocationStep() {
2351  if (heap()->allocation_step_in_progress()) {
2352  // If we are mid-way through an existing step, don't start a new one.
2353  return;
2354  }
2355 
2356  if (AllocationObserversActive()) {
2357  top_on_previous_step_ = top();
2358  UpdateInlineAllocationLimit(0);
2359  } else {
2360  DCHECK_EQ(kNullAddress, top_on_previous_step_);
2361  }
2362 }
2363 
2364 void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
2365  InlineAllocationStep(top(), top(), kNullAddress, 0);
2366  Space::AddAllocationObserver(observer);
2367  DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
2368 }
2369 
2370 void SpaceWithLinearArea::RemoveAllocationObserver(
2371  AllocationObserver* observer) {
2372  Address top_for_next_step =
2373  allocation_observers_.size() == 1 ? kNullAddress : top();
2374  InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
2375  Space::RemoveAllocationObserver(observer);
2376  DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
2377 }
2378 
2379 void SpaceWithLinearArea::PauseAllocationObservers() {
2380  // Do a step to account for memory allocated so far.
2381  InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
2382  Space::PauseAllocationObservers();
2383  DCHECK_EQ(kNullAddress, top_on_previous_step_);
2384  UpdateInlineAllocationLimit(0);
2385 }
2386 
2387 void SpaceWithLinearArea::ResumeAllocationObservers() {
2388  DCHECK_EQ(kNullAddress, top_on_previous_step_);
2389  Space::ResumeAllocationObservers();
2390  StartNextInlineAllocationStep();
2391 }
2392 
2393 void SpaceWithLinearArea::InlineAllocationStep(Address top,
2394  Address top_for_next_step,
2395  Address soon_object,
2396  size_t size) {
2397  if (heap()->allocation_step_in_progress()) {
2398  // Avoid starting a new step if we are mid-way through an existing one.
2399  return;
2400  }
2401 
2402  if (top_on_previous_step_) {
2403  if (top < top_on_previous_step_) {
2404  // Generated code decreased the top pointer to do folded allocations.
2405  DCHECK_NE(top, kNullAddress);
2406  DCHECK_EQ(Page::FromAllocationAreaAddress(top),
2407  Page::FromAllocationAreaAddress(top_on_previous_step_));
2408  top_on_previous_step_ = top;
2409  }
2410  int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
2411  AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
2412  top_on_previous_step_ = top_for_next_step;
2413  }
2414 }
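
// Worked example (illustrative numbers): if top_on_previous_step_ == 0x1000
// and allocation has advanced top to 0x1400, the observers are notified with
// bytes_allocated = 0x400; afterwards top_on_previous_step_ is moved to
// top_for_next_step so the next step starts counting from there.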
2415 
2416 std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
2417  return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
2418 }
2419 
2420 #ifdef VERIFY_HEAP
2421 // We do not use the SemiSpaceIterator because verification doesn't assume
2422 // that it works (it depends on the invariants we are checking).
2423 void NewSpace::Verify(Isolate* isolate) {
2424  // The allocation pointer should be in the space or at the very end.
2425  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
2426 
2427  // There should be objects packed in from the low address up to the
2428  // allocation pointer.
2429  Address current = to_space_.first_page()->area_start();
2430  CHECK_EQ(current, to_space_.space_start());
2431 
2432  size_t external_space_bytes[kNumTypes];
2433  for (int i = 0; i < kNumTypes; i++) {
2434  external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
2435  }
2436 
2437  while (current != top()) {
2438  if (!Page::IsAlignedToPageSize(current)) {
2439  // The allocation pointer should not be in the middle of an object.
2440  CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
2441  current < top());
2442 
2443  HeapObject* object = HeapObject::FromAddress(current);
2444 
2445  // The first word should be a map, and we expect all map pointers to
2446  // be in map space or read-only space.
2447  Map map = object->map();
2448  CHECK(map->IsMap());
2449  CHECK(heap()->map_space()->Contains(map) ||
2450  heap()->read_only_space()->Contains(map));
2451 
2452  // The object should not be code or a map.
2453  CHECK(!object->IsMap());
2454  CHECK(!object->IsAbstractCode());
2455 
2456  // The object itself should look OK.
2457  object->ObjectVerify(isolate);
2458 
2459  // All the interior pointers should be contained in the heap.
2460  VerifyPointersVisitor visitor(heap());
2461  int size = object->Size();
2462  object->IterateBody(map, size, &visitor);
2463 
2464  if (object->IsExternalString()) {
2465  ExternalString external_string = ExternalString::cast(object);
2466  size_t size = external_string->ExternalPayloadSize();
2467  external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
2468  } else if (object->IsJSArrayBuffer()) {
2469  JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
2470  if (ArrayBufferTracker::IsTracked(array_buffer)) {
2471  size_t size = array_buffer->byte_length();
2472  external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
2473  }
2474  }
2475 
2476  current += size;
2477  } else {
2478  // At end of page, switch to next page.
2479  Page* page = Page::FromAllocationAreaAddress(current)->next_page();
2480  current = page->area_start();
2481  }
2482  }
2483 
2484  for (int i = 0; i < kNumTypes; i++) {
2485  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2486  CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
2487  }
2488 
2489  // Check semi-spaces.
2490  CHECK_EQ(from_space_.id(), kFromSpace);
2491  CHECK_EQ(to_space_.id(), kToSpace);
2492  from_space_.Verify();
2493  to_space_.Verify();
2494 }
2495 #endif
2496 
2497 // -----------------------------------------------------------------------------
2498 // SemiSpace implementation
2499 
2500 void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
2501  DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
2502  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
2503  current_capacity_ = minimum_capacity_;
2504  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
2505  committed_ = false;
2506 }
2507 
2508 
2509 void SemiSpace::TearDown() {
2510  // Properly uncommit memory to keep the allocator counters in sync.
2511  if (is_committed()) {
2512  Uncommit();
2513  }
2514  current_capacity_ = maximum_capacity_ = 0;
2515 }
2516 
2517 
2518 bool SemiSpace::Commit() {
2519  DCHECK(!is_committed());
2520  const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
2521  for (int pages_added = 0; pages_added < num_pages; pages_added++) {
2522  Page* new_page =
2523  heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2524  MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
2525  NOT_EXECUTABLE);
2526  if (new_page == nullptr) {
2527  if (pages_added) RewindPages(pages_added);
2528  return false;
2529  }
2530  memory_chunk_list_.PushBack(new_page);
2531  }
2532  Reset();
2533  AccountCommitted(current_capacity_);
2534  if (age_mark_ == kNullAddress) {
2535  age_mark_ = first_page()->area_start();
2536  }
2537  committed_ = true;
2538  return true;
2539 }
2540 
2541 
2542 bool SemiSpace::Uncommit() {
2543  DCHECK(is_committed());
2544  while (!memory_chunk_list_.Empty()) {
2545  MemoryChunk* chunk = memory_chunk_list_.front();
2546  memory_chunk_list_.Remove(chunk);
2547  heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
2548  }
2549  current_page_ = nullptr;
2550  AccountUncommitted(current_capacity_);
2551  committed_ = false;
2552  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2553  return true;
2554 }
2555 
2556 
2557 size_t SemiSpace::CommittedPhysicalMemory() {
2558  if (!is_committed()) return 0;
2559  size_t size = 0;
2560  for (Page* p : *this) {
2561  size += p->CommittedPhysicalMemory();
2562  }
2563  return size;
2564 }
2565 
2566 bool SemiSpace::GrowTo(size_t new_capacity) {
2567  if (!is_committed()) {
2568  if (!Commit()) return false;
2569  }
2570  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
2571  DCHECK_LE(new_capacity, maximum_capacity_);
2572  DCHECK_GT(new_capacity, current_capacity_);
2573  const size_t delta = new_capacity - current_capacity_;
2574  DCHECK(IsAligned(delta, AllocatePageSize()));
2575  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
2576  DCHECK(last_page());
2577  IncrementalMarking::NonAtomicMarkingState* marking_state =
2578  heap()->incremental_marking()->non_atomic_marking_state();
2579  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
2580  Page* new_page =
2581  heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
2582  MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
2583  NOT_EXECUTABLE);
2584  if (new_page == nullptr) {
2585  if (pages_added) RewindPages(pages_added);
2586  return false;
2587  }
2588  memory_chunk_list_.PushBack(new_page);
2589  marking_state->ClearLiveness(new_page);
2590  // Duplicate the flags that were set on the old page.
2591  new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
2592  }
2593  AccountCommitted(delta);
2594  current_capacity_ = new_capacity;
2595  return true;
2596 }
2597 
2598 void SemiSpace::RewindPages(int num_pages) {
2599  DCHECK_GT(num_pages, 0);
2600  DCHECK(last_page());
2601  while (num_pages > 0) {
2602  MemoryChunk* last = last_page();
2603  memory_chunk_list_.Remove(last);
2604  heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
2605  num_pages--;
2606  }
2607 }
2608 
2609 bool SemiSpace::ShrinkTo(size_t new_capacity) {
2610  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
2611  DCHECK_GE(new_capacity, minimum_capacity_);
2612  DCHECK_LT(new_capacity, current_capacity_);
2613  if (is_committed()) {
2614  const size_t delta = current_capacity_ - new_capacity;
2615  DCHECK(IsAligned(delta, Page::kPageSize));
2616  int delta_pages = static_cast<int>(delta / Page::kPageSize);
2617  RewindPages(delta_pages);
2618  AccountUncommitted(delta);
2619  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2620  }
2621  current_capacity_ = new_capacity;
2622  return true;
2623 }
2624 
2625 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
2626  for (Page* page : *this) {
2627  page->set_owner(this);
2628  page->SetFlags(flags, mask);
2629  if (id_ == kToSpace) {
2630  page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
2631  page->SetFlag(MemoryChunk::IN_TO_SPACE);
2632  page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2633  heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
2634  page, 0);
2635  } else {
2636  page->SetFlag(MemoryChunk::IN_FROM_SPACE);
2637  page->ClearFlag(MemoryChunk::IN_TO_SPACE);
2638  }
2639  DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
2640  page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
2641  }
2642 }
2643 
2644 
2645 void SemiSpace::Reset() {
2646  DCHECK(first_page());
2647  DCHECK(last_page());
2648  current_page_ = first_page();
2649  pages_used_ = 0;
2650 }
2651 
2652 void SemiSpace::RemovePage(Page* page) {
2653  if (current_page_ == page) {
2654  if (page->prev_page()) {
2655  current_page_ = page->prev_page();
2656  }
2657  }
2658  memory_chunk_list_.Remove(page);
2659  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
2660  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2661  DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
2662  }
2663 }
2664 
2665 void SemiSpace::PrependPage(Page* page) {
2666  page->SetFlags(current_page()->GetFlags(),
2667  static_cast<uintptr_t>(Page::kCopyAllFlags));
2668  page->set_owner(this);
2669  memory_chunk_list_.PushFront(page);
2670  pages_used_++;
2671  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
2672  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2673  IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
2674  }
2675 }
2676 
2677 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
2678  // We won't be swapping semispaces without data in them.
2679  DCHECK(from->first_page());
2680  DCHECK(to->first_page());
2681 
2682  intptr_t saved_to_space_flags = to->current_page()->GetFlags();
2683 
2684  // We swap all properties but id_.
2685  std::swap(from->current_capacity_, to->current_capacity_);
2686  std::swap(from->maximum_capacity_, to->maximum_capacity_);
2687  std::swap(from->minimum_capacity_, to->minimum_capacity_);
2688  std::swap(from->age_mark_, to->age_mark_);
2689  std::swap(from->committed_, to->committed_);
2690  std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
2691  std::swap(from->current_page_, to->current_page_);
2692  std::swap(from->external_backing_store_bytes_,
2693  to->external_backing_store_bytes_);
2694 
2695  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
2696  from->FixPagesFlags(0, 0);
2697 }
2698 
2699 void SemiSpace::set_age_mark(Address mark) {
2700  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
2701  age_mark_ = mark;
2702  // Mark all pages up to the one containing mark.
2703  for (Page* p : PageRange(space_start(), mark)) {
2704  p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2705  }
2706 }
2707 
2708 std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
2709  // Use NewSpace::GetObjectIterator() (a SemiSpaceIterator over the
2710  // to-space) instead.
2710  UNREACHABLE();
2711 }
2712 
2713 #ifdef DEBUG
2714 void SemiSpace::Print() {}
2715 #endif
2716 
2717 #ifdef VERIFY_HEAP
2718 void SemiSpace::Verify() {
2719  bool is_from_space = (id_ == kFromSpace);
2720  size_t external_backing_store_bytes[kNumTypes];
2721 
2722  for (int i = 0; i < kNumTypes; i++) {
2723  external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
2724  }
2725 
2726  for (Page* page : *this) {
2727  CHECK_EQ(page->owner(), this);
2728  CHECK(page->InNewSpace());
2729  CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
2730  : MemoryChunk::IN_TO_SPACE));
2731  CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
2732  : MemoryChunk::IN_FROM_SPACE));
2733  CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
2734  if (!is_from_space) {
2735  // The pointers-from-here-are-interesting flag isn't updated dynamically
2736  // on from-space pages, so it might be out of sync with the marking state.
2737  if (page->heap()->incremental_marking()->IsMarking()) {
2738  CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2739  } else {
2740  CHECK(
2741  !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2742  }
2743  }
2744  for (int i = 0; i < kNumTypes; i++) {
2745  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2746  external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
2747  }
2748 
2749  CHECK_IMPLIES(page->list_node().prev(),
2750  page->list_node().prev()->list_node().next() == page);
2751  }
2752  for (int i = 0; i < kNumTypes; i++) {
2753  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
2754  CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
2755  }
2756 }
2757 #endif
2758 
2759 #ifdef DEBUG
2760 void SemiSpace::AssertValidRange(Address start, Address end) {
2761  // Both addresses must belong to the same semi-space.
2762  Page* page = Page::FromAllocationAreaAddress(start);
2763  Page* end_page = Page::FromAllocationAreaAddress(end);
2764  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
2765  DCHECK_EQ(space, end_page->owner());
2766  // Start address is before end address, either on same page,
2767  // or end address is on a later page in the linked list of
2768  // semi-space pages.
2769  if (page == end_page) {
2770  DCHECK_LE(start, end);
2771  } else {
2772  while (page != end_page) {
2773  page = page->next_page();
2774  }
2775  DCHECK(page);
2776  }
2777 }
2778 #endif
2779 
2780 
2781 // -----------------------------------------------------------------------------
2782 // SemiSpaceIterator implementation.
2783 
2784 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
2785  Initialize(space->first_allocatable_address(), space->top());
2786 }
2787 
2788 
2789 void SemiSpaceIterator::Initialize(Address start, Address end) {
2790  SemiSpace::AssertValidRange(start, end);
2791  current_ = start;
2792  limit_ = end;
2793 }
2794 
2795 size_t NewSpace::CommittedPhysicalMemory() {
2796  if (!base::OS::HasLazyCommits()) return CommittedMemory();
2797  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2798  size_t size = to_space_.CommittedPhysicalMemory();
2799  if (from_space_.is_committed()) {
2800  size += from_space_.CommittedPhysicalMemory();
2801  }
2802  return size;
2803 }
2804 
2805 
2806 // -----------------------------------------------------------------------------
2807 // Free lists for old object spaces implementation
2808 
2809 
2810 void FreeListCategory::Reset() {
2811  set_top(nullptr);
2812  set_prev(nullptr);
2813  set_next(nullptr);
2814  available_ = 0;
2815 }
2816 
2817 FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
2818  size_t* node_size) {
2819  DCHECK(page()->CanAllocate());
2820  FreeSpace* node = top();
2821  if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
2822  *node_size = 0;
2823  return nullptr;
2824  }
2825  set_top(node->next());
2826  *node_size = node->Size();
2827  available_ -= *node_size;
2828  return node;
2829 }
2830 
2831 FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
2832  size_t* node_size) {
2833  DCHECK(page()->CanAllocate());
2834  FreeSpace* prev_non_evac_node = nullptr;
2835  for (FreeSpace* cur_node = top(); cur_node != nullptr;
2836  cur_node = cur_node->next()) {
2837  size_t size = cur_node->size();
2838  if (size >= minimum_size) {
2839  DCHECK_GE(available_, size);
2840  available_ -= size;
2841  if (cur_node == top()) {
2842  set_top(cur_node->next());
2843  }
2844  if (prev_non_evac_node != nullptr) {
2845  MemoryChunk* chunk =
2846  MemoryChunk::FromAddress(prev_non_evac_node->address());
2847  if (chunk->owner()->identity() == CODE_SPACE) {
2848  chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
2849  }
2850  prev_non_evac_node->set_next(cur_node->next());
2851  }
2852  *node_size = size;
2853  return cur_node;
2854  }
2855 
2856  prev_non_evac_node = cur_node;
2857  }
2858  return nullptr;
2859 }
2860 
2861 void FreeListCategory::Free(Address start, size_t size_in_bytes,
2862  FreeMode mode) {
2863  DCHECK(page()->CanAllocate());
2864  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2865  free_space->set_next(top());
2866  set_top(free_space);
2867  available_ += size_in_bytes;
2868  if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2869  owner()->AddCategory(this);
2870  }
2871 }
2872 
2873 
2874 void FreeListCategory::RepairFreeList(Heap* heap) {
2875  FreeSpace* n = top();
2876  while (n != nullptr) {
2877  ObjectSlot map_location(n->address());
2878  if (*map_location == nullptr) {
2879  map_location.store(ReadOnlyRoots(heap).free_space_map());
2880  } else {
2881  DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
2882  }
2883  n = n->next();
2884  }
2885 }
2886 
2887 void FreeListCategory::Relink() {
2888  DCHECK(!is_linked());
2889  owner()->AddCategory(this);
2890 }
2891 
2892 FreeList::FreeList() : wasted_bytes_(0) {
2893  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2894  categories_[i] = nullptr;
2895  }
2896  Reset();
2897 }
2898 
2899 
2900 void FreeList::Reset() {
2901  ForAllFreeListCategories(
2902  [](FreeListCategory* category) { category->Reset(); });
2903  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2904  categories_[i] = nullptr;
2905  }
2906  ResetStats();
2907 }
2908 
2909 size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
2910  Page* page = Page::FromAddress(start);
2911  page->DecreaseAllocatedBytes(size_in_bytes);
2912 
2913  // Blocks have to be a minimum size to hold free list items.
2914  if (size_in_bytes < kMinBlockSize) {
2915  page->add_wasted_memory(size_in_bytes);
2916  wasted_bytes_ += size_in_bytes;
2917  return size_in_bytes;
2918  }
2919 
2920  // Insert other blocks at the head of a free list of the appropriate
2921  // magnitude.
2922  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2923  page->free_list_category(type)->Free(start, size_in_bytes, mode);
2924  DCHECK_EQ(page->AvailableInFreeList(),
2925  page->AvailableInFreeListFromAllocatedBytes());
2926  return 0;
2927 }
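
// Worked example (illustrative sizes; the category boundaries are defined in
// spaces.h): freeing a block smaller than kMinBlockSize only bumps the page's
// wasted_memory and returns its full size as unusable, whereas freeing e.g. a
// 256-byte block links it into the category chosen by
// SelectFreeListCategoryType(256) and returns 0, after which it shows up in
// the page's AvailableInFreeList().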
2928 
2929 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
2930  size_t* node_size) {
2931  FreeListCategoryIterator it(this, type);
2932  FreeSpace* node = nullptr;
2933  while (it.HasNext()) {
2934  FreeListCategory* current = it.Next();
2935  node = current->PickNodeFromList(minimum_size, node_size);
2936  if (node != nullptr) {
2937  DCHECK(IsVeryLong() || Available() == SumFreeLists());
2938  return node;
2939  }
2940  RemoveCategory(current);
2941  }
2942  return node;
2943 }
2944 
2945 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
2946  size_t minimum_size, size_t* node_size) {
2947  if (categories_[type] == nullptr) return nullptr;
2948  FreeSpace* node =
2949  categories_[type]->PickNodeFromList(minimum_size, node_size);
2950  if (node != nullptr) {
2951  DCHECK(IsVeryLong() || Available() == SumFreeLists());
2952  }
2953  return node;
2954 }
2955 
2956 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2957  size_t* node_size,
2958  size_t minimum_size) {
2959  FreeListCategoryIterator it(this, type);
2960  FreeSpace* node = nullptr;
2961  while (it.HasNext()) {
2962  FreeListCategory* current = it.Next();
2963  node = current->SearchForNodeInList(minimum_size, node_size);
2964  if (node != nullptr) {
2965  DCHECK(IsVeryLong() || Available() == SumFreeLists());
2966  return node;
2967  }
2968  if (current->is_empty()) {
2969  RemoveCategory(current);
2970  }
2971  }
2972  return node;
2973 }
2974 
2975 FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
2976  DCHECK_GE(kMaxBlockSize, size_in_bytes);
2977  FreeSpace* node = nullptr;
2978  // First try the allocation fast path: try to allocate the minimum element
2979  // size of a free list category. This operation is constant time.
2980  FreeListCategoryType type =
2981  SelectFastAllocationFreeListCategoryType(size_in_bytes);
2982  for (int i = type; i < kHuge && node == nullptr; i++) {
2983  node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
2984  node_size);
2985  }
2986 
2987  if (node == nullptr) {
2988  // Next search the huge list for free list nodes. This takes linear time in
2989  // the number of huge elements.
2990  node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
2991  }
2992 
2993  if (node == nullptr && type != kHuge) {
2994  // We didn't find anything in the huge list. Now search the best fitting
2995  // free list for a node that has at least the requested size.
2996  type = SelectFreeListCategoryType(size_in_bytes);
2997  node = TryFindNodeIn(type, size_in_bytes, node_size);
2998  }
2999 
3000  if (node != nullptr) {
3001  Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
3002  }
3003 
3004  DCHECK(IsVeryLong() || Available() == SumFreeLists());
3005  return node;
3006 }
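
// Search-order sketch for the allocation above (descriptive only): first a
// constant-time attempt on categories whose smallest elements are already
// large enough for the request, then a linear scan of the kHuge list, and
// finally a best-fit attempt in the exact category for size_in_bytes. Only
// when all three fail does Allocate return nullptr, which makes
// RefillLinearAllocationAreaFromFreeList above return false and hands control
// to the slow path.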
3007 
3008 size_t FreeList::EvictFreeListItems(Page* page) {
3009  size_t sum = 0;
3010  page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
3011  DCHECK_EQ(this, category->owner());
3012  sum += category->available();
3013  RemoveCategory(category);
3014  category->Reset();
3015  });
3016  return sum;
3017 }
3018 
3019 bool FreeList::ContainsPageFreeListItems(Page* page) {
3020  bool contained = false;
3021  page->ForAllFreeListCategories(
3022  [this, &contained](FreeListCategory* category) {
3023  if (category->owner() == this && category->is_linked()) {
3024  contained = true;
3025  }
3026  });
3027  return contained;
3028 }
3029 
3030 void FreeList::RepairLists(Heap* heap) {
3031  ForAllFreeListCategories(
3032  [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
3033 }
3034 
3035 bool FreeList::AddCategory(FreeListCategory* category) {
3036  FreeListCategoryType type = category->type_;
3037  DCHECK_LT(type, kNumberOfCategories);
3038  FreeListCategory* top = categories_[type];
3039 
3040  if (category->is_empty()) return false;
3041  if (top == category) return false;
3042 
3043  // Common double-linked list insertion.
3044  if (top != nullptr) {
3045  top->set_prev(category);
3046  }
3047  category->set_next(top);
3048  categories_[type] = category;
3049  return true;
3050 }
3051 
3052 void FreeList::RemoveCategory(FreeListCategory* category) {
3053  FreeListCategoryType type = category->type_;
3054  DCHECK_LT(type, kNumberOfCategories);
3055  FreeListCategory* top = categories_[type];
3056 
3057  // Common double-linked list removal.
3058  if (top == category) {
3059  categories_[type] = category->next();
3060  }
3061  if (category->prev() != nullptr) {
3062  category->prev()->set_next(category->next());
3063  }
3064  if (category->next() != nullptr) {
3065  category->next()->set_prev(category->prev());
3066  }
3067  category->set_next(nullptr);
3068  category->set_prev(nullptr);
3069 }
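
// AddCategory and RemoveCategory above are plain intrusive doubly-linked-list
// operations on the per-type `categories_` head pointer. A self-contained
// sketch of the same pointer surgery; Node, PushFront and Unlink are
// illustrative names, not V8 types.
struct Node {
  Node* prev = nullptr;
  Node* next = nullptr;
};

// Push `n` in front of the list whose head pointer is `*head`.
inline void PushFront(Node** head, Node* n) {
  Node* top = *head;
  if (top != nullptr) top->prev = n;
  n->next = top;
  n->prev = nullptr;
  *head = n;
}

// Unlink `n`, patching the head pointer if `n` was the first element.
inline void Unlink(Node** head, Node* n) {
  if (*head == n) *head = n->next;
  if (n->prev != nullptr) n->prev->next = n->next;
  if (n->next != nullptr) n->next->prev = n->prev;
  n->prev = n->next = nullptr;
}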
3070 
3071 void FreeList::PrintCategories(FreeListCategoryType type) {
3072  FreeListCategoryIterator it(this, type);
3073  PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
3074  static_cast<void*>(categories_[type]), type);
3075  while (it.HasNext()) {
3076  FreeListCategory* current = it.Next();
3077  PrintF("%p -> ", static_cast<void*>(current));
3078  }
3079  PrintF("null\n");
3080 }
3081 
3082 
3083 #ifdef DEBUG
3084 size_t FreeListCategory::SumFreeList() {
3085  size_t sum = 0;
3086  FreeSpace* cur = top();
3087  while (cur != nullptr) {
3088  DCHECK_EQ(cur->map(),
3089  page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
3090  sum += cur->relaxed_read_size();
3091  cur = cur->next();
3092  }
3093  return sum;
3094 }
3095 
3096 int FreeListCategory::FreeListLength() {
3097  int length = 0;
3098  FreeSpace* cur = top();
3099  while (cur != nullptr) {
3100  length++;
3101  cur = cur->next();
3102  if (length == kVeryLongFreeList) return length;
3103  }
3104  return length;
3105 }
3106 
3107 bool FreeList::IsVeryLong() {
3108  int len = 0;
3109  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
3110  FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
3111  while (it.HasNext()) {
3112  len += it.Next()->FreeListLength();
3113  if (len >= FreeListCategory::kVeryLongFreeList) return true;
3114  }
3115  }
3116  return false;
3117 }
3118 
3119 
3120 // This can take a very long time because it is linear in the number of entries
3121 // on the free list, so it should not be called if FreeListLength returns
3122 // kVeryLongFreeList.
3123 size_t FreeList::SumFreeLists() {
3124  size_t sum = 0;
3125  ForAllFreeListCategories(
3126  [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
3127  return sum;
3128 }
3129 #endif
3130 
3131 
3132 // -----------------------------------------------------------------------------
3133 // OldSpace implementation
3134 
3135 void PagedSpace::PrepareForMarkCompact() {
3136  // We don't have a linear allocation area while sweeping. It will be restored
3137  // on the first allocation after the sweep.
3138  FreeLinearAllocationArea();
3139 
3140  // Clear the free list before a full GC---it will be rebuilt afterward.
3141  free_list_.Reset();
3142 }
3143 
3144 size_t PagedSpace::SizeOfObjects() {
3145  CHECK_GE(limit(), top());
3146  DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
3147  return Size() - (limit() - top());
3148 }
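
// A small sketch of the arithmetic in SizeOfObjects() above: the unused tail
// of the linear allocation area (limit - top) is committed but holds no
// objects yet, so it is subtracted from Size(). The helper and its arguments
// are illustrative only.
#include <cstddef>
#include <cstdint>

inline size_t ObjectBytes(size_t total_size, uintptr_t top, uintptr_t limit) {
  // Example: ObjectBytes(512 * 1024, 0x1000, 0x3000) == 524288 - 8192 == 516096.
  return total_size - static_cast<size_t>(limit - top);
}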
3149 
3150 bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
3151  MarkCompactCollector* collector = heap()->mark_compact_collector();
3152  if (collector->sweeping_in_progress()) {
3153  // Wait for the sweeper threads here and complete the sweeping phase.
3154  collector->EnsureSweepingCompleted();
3155 
3156  // After waiting for the sweeper threads, there may be new free-list
3157  // entries.
3158  return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
3159  }
3160  return false;
3161 }
3162 
3163 bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
3164  MarkCompactCollector* collector = heap()->mark_compact_collector();
3165  if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
3166  collector->sweeper()->ParallelSweepSpace(identity(), 0);
3167  RefillFreeList();
3168  return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
3169  }
3170  return false;
3171 }
3172 
3173 bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
3174  VMState<GC> state(heap()->isolate());
3175  RuntimeCallTimerScope runtime_timer(
3176  heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
3177  return RawSlowRefillLinearAllocationArea(size_in_bytes);
3178 }
3179 
3180 bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
3181  return RawSlowRefillLinearAllocationArea(size_in_bytes);
3182 }
3183 
3184 bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
3185  // Allocation in this space has failed.
3186  DCHECK_GE(size_in_bytes, 0);
3187  const int kMaxPagesToSweep = 1;
3188 
3189  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
3190 
3191  MarkCompactCollector* collector = heap()->mark_compact_collector();
3192  // Sweeping is still in progress.
3193  if (collector->sweeping_in_progress()) {
3194  if (FLAG_concurrent_sweeping && !is_local() &&
3195  !collector->sweeper()->AreSweeperTasksRunning()) {
3196  collector->EnsureSweepingCompleted();
3197  }
3198 
3199  // First try to refill the free-list, concurrent sweeper threads
3200  // may have freed some objects in the meantime.
3201  RefillFreeList();
3202 
3203  // Retry the free list allocation.
3204  if (RefillLinearAllocationAreaFromFreeList(
3205  static_cast<size_t>(size_in_bytes)))
3206  return true;
3207 
3208  // If sweeping is still in progress try to sweep pages.
3209  int max_freed = collector->sweeper()->ParallelSweepSpace(
3210  identity(), size_in_bytes, kMaxPagesToSweep);
3211  RefillFreeList();
3212  if (max_freed >= size_in_bytes) {
3213  if (RefillLinearAllocationAreaFromFreeList(
3214  static_cast<size_t>(size_in_bytes)))
3215  return true;
3216  }
3217  } else if (is_local()) {
3218  // Sweeping not in progress and we are on a {CompactionSpace}. This can
3219  // only happen when we are evacuating for the young generation.
3220  PagedSpace* main_space = heap()->paged_space(identity());
3221  Page* page = main_space->RemovePageSafe(size_in_bytes);
3222  if (page != nullptr) {
3223  AddPage(page);
3224  if (RefillLinearAllocationAreaFromFreeList(
3225  static_cast<size_t>(size_in_bytes)))
3226  return true;
3227  }
3228  }
3229 
3230  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
3231  DCHECK((CountTotalPages() > 1) ||
3232  (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
3233  return RefillLinearAllocationAreaFromFreeList(
3234  static_cast<size_t>(size_in_bytes));
3235  }
3236 
3237  // If sweeper threads are active, wait for them at that point and steal
3238  // elements from their free-lists. Allocation may still fail then, which
3239  // would indicate that there is not enough memory for the given allocation.
3240  return SweepAndRetryAllocation(size_in_bytes);
3241 }
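
// A compact sketch of the control flow in RawSlowRefillLinearAllocationArea:
// a fixed sequence of fallbacks, each of which may refill the linear
// allocation area, tried in order until one succeeds. The helper below and
// the step names in the usage comment are illustrative, not V8 API.
#include <functional>
#include <vector>

inline bool TryRefillFallbacks(const std::vector<std::function<bool()>>& steps) {
  for (const auto& step : steps) {
    if (step()) return true;  // linear allocation area refilled
  }
  return false;  // all fallbacks failed; the caller has to trigger a GC
}

// Usage (conceptually): TryRefillFallbacks({try_free_list, refill_from_sweeper,
// sweep_some_pages, steal_page_from_main_space, expand_space, finish_sweeping});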
3242 
3243 // -----------------------------------------------------------------------------
3244 // MapSpace implementation
3245 
3246 #ifdef VERIFY_HEAP
3247 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
3248 #endif
3249 
3250 ReadOnlySpace::ReadOnlySpace(Heap* heap)
3251  : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
3252  is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
3253 }
3254 
3255 void ReadOnlyPage::MakeHeaderRelocatable() {
3256  if (mutex_ != nullptr) {
3257  // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
3258  delete mutex_;
3259  mutex_ = nullptr;
3260  local_tracker_ = nullptr;
3261  reservation_.Reset();
3262  }
3263 }
3264 
3265 void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
3266  const size_t page_size = MemoryAllocator::GetCommitPageSize();
3267  const size_t area_start_offset =
3268  RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
3269  MemoryAllocator* memory_allocator = heap()->memory_allocator();
3270  for (Page* p : *this) {
3271  ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
3272  if (access == PageAllocator::kRead) {
3273  page->MakeHeaderRelocatable();
3274  }
3275 
3276  // Read-only pages don't have a valid reservation object, so we get the
3277  // proper page allocator manually.
3278  v8::PageAllocator* page_allocator =
3279  memory_allocator->page_allocator(page->executable());
3280  CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
3281  page->size() - area_start_offset, access));
3282  }
3283 }
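
// A sketch of the offset arithmetic used in SetPermissionsForPages: the page
// header occupies the start of the page and protections can only change at OS
// commit-page granularity, so the change starts at the first commit-page
// boundary at or after the object-area start. RoundUpTo and the numbers below
// are illustrative, not V8 helpers.
#include <cstddef>

inline size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);  // alignment: power of two
}

// With a 4 KB commit page and an object area starting 0x148 bytes into the
// page, permissions change from offset RoundUpTo(0x148, 0x1000) == 0x1000
// onward, i.e. over page->size() - 0x1000 bytes.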
3284 
3285 // After we have booted, a map that represents free space on the heap has
3286 // been created. If there was already a free list, then its elements were
3287 // created with the wrong FreeSpaceMap (normally nullptr), so we need to
3288 // fix them.
3289 void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
3290  free_list_.RepairLists(heap());
3291  // Each page may have a small free space that is not tracked by a free list.
3292  // Those free spaces still contain null as their map pointer.
3293  // Overwrite them with new fillers.
3294  for (Page* page : *this) {
3295  int size = static_cast<int>(page->wasted_memory());
3296  if (size == 0) {
3297  // If there is no wasted memory then all free space is in the free list.
3298  continue;
3299  }
3300  Address start = page->HighWaterMark();
3301  Address end = page->area_end();
3302  if (start < end - size) {
3303  // A region at the high watermark is already in free list.
3304  HeapObject* filler = HeapObject::FromAddress(start);
3305  CHECK(filler->IsFiller());
3306  start += filler->Size();
3307  }
3308  CHECK_EQ(size, static_cast<int>(end - start));
3309  heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
3310  }
3311 }
3312 
3313 void ReadOnlySpace::ClearStringPaddingIfNeeded() {
3314  if (is_string_padding_cleared_) return;
3315 
3316  WritableScope writable_scope(this);
3317  for (Page* page : *this) {
3318  HeapObjectIterator iterator(page);
3319  for (HeapObject* o = iterator.Next(); o != nullptr; o = iterator.Next()) {
3320  if (o->IsSeqOneByteString()) {
3321  SeqOneByteString::cast(o)->clear_padding();
3322  } else if (o->IsSeqTwoByteString()) {
3323  SeqTwoByteString::cast(o)->clear_padding();
3324  }
3325  }
3326  }
3327  is_string_padding_cleared_ = true;
3328 }
3329 
3330 void ReadOnlySpace::MarkAsReadOnly() {
3331  DCHECK(!is_marked_read_only_);
3332  FreeLinearAllocationArea();
3333  is_marked_read_only_ = true;
3334  SetPermissionsForPages(PageAllocator::kRead);
3335 }
3336 
3337 void ReadOnlySpace::MarkAsReadWrite() {
3338  DCHECK(is_marked_read_only_);
3339  SetPermissionsForPages(PageAllocator::kReadWrite);
3340  is_marked_read_only_ = false;
3341 }
3342 
3343 Address LargePage::GetAddressToShrink(Address object_address,
3344  size_t object_size) {
3345  if (executable() == EXECUTABLE) {
3346  return 0;
3347  }
3348  size_t used_size = ::RoundUp((object_address - address()) + object_size,
3349  MemoryAllocator::GetCommitPageSize());
3350  if (used_size < CommittedPhysicalMemory()) {
3351  return address() + used_size;
3352  }
3353  return 0;
3354 }
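
// A sketch of the shrink computation in GetAddressToShrink above: a large page
// can only be trimmed at OS commit-page granularity, so the live object's end
// is rounded up before comparing with the committed size. ShrinkTarget and its
// parameters are illustrative names, not V8 API.
#include <cstddef>
#include <cstdint>

inline uintptr_t ShrinkTarget(uintptr_t page_address, uintptr_t object_address,
                              size_t object_size, size_t commit_page_size,
                              size_t committed_size) {
  size_t used = ((object_address - page_address) + object_size +
                 commit_page_size - 1) / commit_page_size * commit_page_size;
  // Anything past `used` is committed but unreachable and can be released.
  return used < committed_size ? page_address + used : 0;
}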
3355 
3356 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
3357  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
3358  SlotSet::FREE_EMPTY_BUCKETS);
3359  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
3360  SlotSet::FREE_EMPTY_BUCKETS);
3361  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
3362  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
3363 }
3364 
3365 // -----------------------------------------------------------------------------
3366 // LargeObjectIterator
3367 
3368 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
3369  current_ = space->first_page();
3370 }
3371 
3372 
3373 HeapObject* LargeObjectIterator::Next() {
3374  if (current_ == nullptr) return nullptr;
3375 
3376  HeapObject* object = current_->GetObject();
3377  current_ = current_->next_page();
3378  return object;
3379 }
3380 
3381 
3382 // -----------------------------------------------------------------------------
3383 // LargeObjectSpace
3384 
3385 LargeObjectSpace::LargeObjectSpace(Heap* heap)
3386  : LargeObjectSpace(heap, LO_SPACE) {}
3387 
3388 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
3389  : Space(heap, id),
3390  size_(0),
3391  page_count_(0),
3392  objects_size_(0),
3393  chunk_map_(1024) {}
3394 
3395 void LargeObjectSpace::TearDown() {
3396  while (!memory_chunk_list_.Empty()) {
3397  LargePage* page = first_page();
3398  LOG(heap()->isolate(),
3399  DeleteEvent("LargeObjectChunk",
3400  reinterpret_cast<void*>(page->address())));
3401  memory_chunk_list_.Remove(page);
3402  heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
3403  }
3404 }
3405 
3406 AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
3407  return AllocateRaw(object_size, NOT_EXECUTABLE);
3408 }
3409 
3410 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
3411  Executability executable) {
3412  // Check if we want to force a GC before growing the old space further.
3413  // If so, fail the allocation.
3414  if (!heap()->CanExpandOldGeneration(object_size) ||
3415  !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
3416  return AllocationResult::Retry(identity());
3417  }
3418 
3419  LargePage* page = AllocateLargePage(object_size, executable);
3420  if (page == nullptr) return AllocationResult::Retry(identity());
3421  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
3422  HeapObject* object = page->GetObject();
3423  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
3424  heap()->GCFlagsForIncrementalMarking(),
3425  kGCCallbackScheduleIdleGarbageCollection);
3426  if (heap()->incremental_marking()->black_allocation()) {
3427  heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
3428  }
3429  DCHECK_IMPLIES(
3430  heap()->incremental_marking()->black_allocation(),
3431  heap()->incremental_marking()->marking_state()->IsBlack(object));
3432  page->InitializationMemoryFence();
3433  return object;
3434 }
3435 
3436 LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
3437  Executability executable) {
3438  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
3439  object_size, this, executable);
3440  if (page == nullptr) return nullptr;
3441  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
3442 
3443  Register(page, object_size);
3444 
3445  HeapObject* object = page->GetObject();
3446 
3447  heap()->CreateFillerObjectAt(object->address(), object_size,
3448  ClearRecordedSlots::kNo);
3449  AllocationStep(object_size, object->address(), object_size);
3450  return page;
3451 }
3452 
3453 
3454 size_t LargeObjectSpace::CommittedPhysicalMemory() {
3455  // On a platform that provides lazy committing of memory, we over-account
3456  // the actually committed memory. There is no easy way right now to support
3457  // precise accounting of committed memory in large object space.
3458  return CommittedMemory();
3459 }
3460 
3461 
3462 // GC support
3463 Object* LargeObjectSpace::FindObject(Address a) {
3464  LargePage* page = FindPage(a);
3465  if (page != nullptr) {
3466  return page->GetObject();
3467  }
3468  return Smi::kZero; // Signaling not found.
3469 }
3470 
3471 LargePage* LargeObjectSpace::FindPage(Address a) {
3472  const Address key = MemoryChunk::FromAddress(a)->address();
3473  auto it = chunk_map_.find(key);
3474  if (it != chunk_map_.end()) {
3475  LargePage* page = it->second;
3476  if (page->Contains(a)) {
3477  return page;
3478  }
3479  }
3480  return nullptr;
3481 }
3482 
3483 
3484 void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3485  IncrementalMarking::NonAtomicMarkingState* marking_state =
3486  heap()->incremental_marking()->non_atomic_marking_state();
3487  LargeObjectIterator it(this);
3488  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
3489  if (marking_state->IsBlackOrGrey(obj)) {
3490  Marking::MarkWhite(marking_state->MarkBitFrom(obj));
3491  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
3492  RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
3493  chunk->ResetProgressBar();
3494  marking_state->SetLiveBytes(chunk, 0);
3495  }
3496  DCHECK(marking_state->IsWhite(obj));
3497  }
3498 }
3499 
3500 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
3501  // There may be concurrent access to the chunk map. We have to take the lock
3502  // here.
3503  base::MutexGuard guard(&chunk_map_mutex_);
3504  for (Address current = reinterpret_cast<Address>(page);
3505  current < reinterpret_cast<Address>(page) + page->size();
3506  current += MemoryChunk::kPageSize) {
3507  chunk_map_[current] = page;
3508  }
3509 }
3510 
3511 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
3512  RemoveChunkMapEntries(page, page->address());
3513 }
3514 
3515 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
3516  Address free_start) {
3517  for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
3518  current < reinterpret_cast<Address>(page) + page->size();
3519  current += MemoryChunk::kPageSize) {
3520  chunk_map_.erase(current);
3521  }
3522 }
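
// The chunk map above registers one entry per aligned page-size slice of a
// large page, so any interior address can be mapped back to its owning page by
// aligning the address down to that stride. A self-contained sketch, assuming
// the page start is stride-aligned; LargePageInfo, kChunkSize and the helpers
// are illustrative, not V8 types.
#include <cstddef>
#include <cstdint>
#include <unordered_map>

constexpr uintptr_t kChunkSize = 256 * 1024;  // illustrative chunk alignment

struct LargePageInfo {
  uintptr_t start;  // assumed to be kChunkSize-aligned
  size_t size;
};

using ChunkMap = std::unordered_map<uintptr_t, LargePageInfo*>;

inline void RegisterLargePage(ChunkMap* map, LargePageInfo* page) {
  for (uintptr_t key = page->start; key < page->start + page->size;
       key += kChunkSize) {
    (*map)[key] = page;
  }
}

inline LargePageInfo* LookupLargePage(const ChunkMap& map, uintptr_t address) {
  auto it = map.find(address & ~(kChunkSize - 1));  // align down to the stride
  return it == map.end() ? nullptr : it->second;
}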
3523 
3524 void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
3525  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
3526  DCHECK(page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
3527  DCHECK(!page->IsFlagSet(MemoryChunk::IN_TO_SPACE));
3528  size_t object_size = static_cast<size_t>(page->GetObject()->Size());
3529  reinterpret_cast<NewLargeObjectSpace*>(page->owner())
3530  ->Unregister(page, object_size);
3531  Register(page, object_size);
3532  page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
3533  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
3534  page->set_owner(this);
3535 }
3536 
3537 void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
3538  size_ += static_cast<int>(page->size());
3539  AccountCommitted(page->size());
3540  objects_size_ += object_size;
3541  page_count_++;
3542  memory_chunk_list_.PushBack(page);
3543 
3544  InsertChunkMapEntries(page);
3545 }
3546 
3547 void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
3548  size_ -= static_cast<int>(page->size());
3549  AccountUncommitted(page->size());
3550  objects_size_ -= object_size;
3551  page_count_--;
3552  memory_chunk_list_.Remove(page);
3553 
3554  RemoveChunkMapEntries(page);
3555 }
3556 
3557 void LargeObjectSpace::FreeUnmarkedObjects() {
3558  LargePage* current = first_page();
3559  IncrementalMarking::NonAtomicMarkingState* marking_state =
3560  heap()->incremental_marking()->non_atomic_marking_state();
3561  // Right-trimming does not update the objects_size_ counter. We lazily
3562  // update it after every GC.
3563  objects_size_ = 0;
3564  while (current) {
3565  LargePage* next_current = current->next_page();
3566  HeapObject* object = current->GetObject();
3567  DCHECK(!marking_state->IsGrey(object));
3568  if (marking_state->IsBlack(object)) {
3569  Address free_start;
3570  size_t size = static_cast<size_t>(object->Size());
3571  objects_size_ += size;
3572  if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
3573  0) {
3574  DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
3575  current->ClearOutOfLiveRangeSlots(free_start);
3576  RemoveChunkMapEntries(current, free_start);
3577  const size_t bytes_to_free =
3578  current->size() - (free_start - current->address());
3579  heap()->memory_allocator()->PartialFreeMemory(
3580  current, free_start, bytes_to_free,
3581  current->area_start() + object->Size());
3582  size_ -= bytes_to_free;
3583  AccountUncommitted(bytes_to_free);
3584  }
3585  } else {
3586  memory_chunk_list_.Remove(current);
3587 
3588  // Free the chunk.
3589  size_ -= static_cast<int>(current->size());
3590  AccountUncommitted(current->size());
3591  page_count_--;
3592 
3593  RemoveChunkMapEntries(current);
3594  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
3595  current);
3596  }
3597  current = next_current;
3598  }
3599 }
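
// A sketch of the sweep decision in FreeUnmarkedObjects above: live (black)
// large objects stay and may give their committed tail back to the OS, dead
// ones are unregistered and freed. SketchLargePage and SweepLargePages are
// illustrative names, not V8 types.
#include <cstddef>
#include <vector>

struct SketchLargePage {
  bool live;         // object on this page survived marking
  size_t committed;  // bytes currently committed for the page
  size_t used;       // bytes needed by the object, rounded to commit pages
};

inline size_t SweepLargePages(std::vector<SketchLargePage>* pages) {
  size_t released = 0;
  for (auto it = pages->begin(); it != pages->end();) {
    if (it->live) {
      if (it->used < it->committed) {  // shrink the committed tail
        released += it->committed - it->used;
        it->committed = it->used;
      }
      ++it;
    } else {  // free the whole page
      released += it->committed;
      it = pages->erase(it);
    }
  }
  return released;
}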
3600 
3601 bool LargeObjectSpace::Contains(HeapObject* object) {
3602  Address address = object->address();
3603  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3604 
3605  bool owned = (chunk->owner() == this);
3606 
3607  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3608 
3609  return owned;
3610 }
3611 
3612 std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
3613  return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
3614 }
3615 
3616 #ifdef VERIFY_HEAP
3617 // We do not assume that the large object iterator works, because it depends
3618 // on the invariants we are checking during verification.
3619 void LargeObjectSpace::Verify(Isolate* isolate) {
3620  size_t external_backing_store_bytes[kNumTypes];
3621 
3622  for (int i = 0; i < kNumTypes; i++) {
3623  external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
3624  }
3625 
3626  for (LargePage* chunk = first_page(); chunk != nullptr;
3627  chunk = chunk->next_page()) {
3628  // Each chunk contains an object that starts at the large object page's
3629  // object area start.
3630  HeapObject* object = chunk->GetObject();
3631  Page* page = Page::FromAddress(object->address());
3632  CHECK(object->address() == page->area_start());
3633 
3634  // The first word should be a map, and we expect all map pointers to be
3635  // in map space or read-only space.
3636  Map map = object->map();
3637  CHECK(map->IsMap());
3638  CHECK(heap()->map_space()->Contains(map) ||
3639  heap()->read_only_space()->Contains(map));
3640 
3641  // We have only the following types in the large object space:
3642  CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3643  object->IsExternalString() || object->IsThinString() ||
3644  object->IsFixedArray() || object->IsFixedDoubleArray() ||
3645  object->IsWeakFixedArray() || object->IsWeakArrayList() ||
3646  object->IsPropertyArray() || object->IsByteArray() ||
3647  object->IsFeedbackVector() || object->IsBigInt() ||
3648  object->IsFreeSpace() || object->IsFeedbackMetadata() ||
3649  object->IsContext());
3650 
3651  // The object itself should look OK.
3652  object->ObjectVerify(isolate);
3653 
3654  if (!FLAG_verify_heap_skip_remembered_set) {
3655  heap()->VerifyRememberedSetFor(object);
3656  }
3657 
3658  // Byte arrays and strings don't have interior pointers.
3659  if (object->IsAbstractCode()) {
3660  VerifyPointersVisitor code_visitor(heap());
3661  object->IterateBody(map, object->Size(), &code_visitor);
3662  } else if (object->IsFixedArray()) {
3663  FixedArray array = FixedArray::cast(object);
3664  for (int j = 0; j < array->length(); j++) {
3665  Object* element = array->get(j);
3666  if (element->IsHeapObject()) {
3667  HeapObject* element_object = HeapObject::cast(element);
3668  CHECK(heap()->Contains(element_object));
3669  CHECK(element_object->map()->IsMap());
3670  }
3671  }
3672  } else if (object->IsPropertyArray()) {
3673  PropertyArray array = PropertyArray::cast(object);
3674  for (int j = 0; j < array->length(); j++) {
3675  Object* property = array->get(j);
3676  if (property->IsHeapObject()) {
3677  HeapObject* property_object = HeapObject::cast(property);
3678  CHECK(heap()->Contains(property_object));
3679  CHECK(property_object->map()->IsMap());
3680  }
3681  }
3682  }
3683  for (int i = 0; i < kNumTypes; i++) {
3684  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
3685  external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
3686  }
3687  }
3688  for (int i = 0; i < kNumTypes; i++) {
3689  ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
3690  CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
3691  }
3692 }
3693 #endif
3694 
3695 #ifdef DEBUG
3696 void LargeObjectSpace::Print() {
3697  StdoutStream os;
3698  LargeObjectIterator it(this);
3699  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
3700  obj->Print(os);
3701  }
3702 }
3703 
3704 void Page::Print() {
3705  // Make a best effort to print the objects in the page.
3706  PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
3707  this->owner()->name());
3708  printf(" --------------------------------------\n");
3709  HeapObjectIterator objects(this);
3710  unsigned mark_size = 0;
3711  for (HeapObject* object = objects.Next(); object != nullptr;
3712  object = objects.Next()) {
3713  bool is_marked =
3714  heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
3715  PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3716  if (is_marked) {
3717  mark_size += object->Size();
3718  }
3719  object->ShortPrint();
3720  PrintF("\n");
3721  }
3722  printf(" --------------------------------------\n");
3723  printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
3724  heap()->incremental_marking()->marking_state()->live_bytes(this));
3725 }
3726 
3727 #endif // DEBUG
3728 
3729 NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
3730  : LargeObjectSpace(heap, NEW_LO_SPACE) {}
3731 
3732 AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
3733  // TODO(hpayer): Add heap growing strategy here.
3734  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
3735  if (page == nullptr) return AllocationResult::Retry(identity());
3736  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
3737  page->SetFlag(MemoryChunk::IN_TO_SPACE);
3738  page->InitializationMemoryFence();
3739  return page->GetObject();
3740 }
3741 
3742 size_t NewLargeObjectSpace::Available() {
3743  // TODO(hpayer): Update as soon as we have a growing strategy.
3744  return 0;
3745 }
3746 
3747 void NewLargeObjectSpace::Flip() {
3748  for (LargePage* chunk = first_page(); chunk != nullptr;
3749  chunk = chunk->next_page()) {
3750  chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
3751  chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
3752  }
3753 }
3754 
3755 void NewLargeObjectSpace::FreeAllObjects() {
3756  LargePage* current = first_page();
3757  while (current) {
3758  LargePage* next_current = current->next_page();
3759  Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
3760  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
3761  current);
3762  current = next_current;
3763  }
3764  // Right-trimming does not update the objects_size_ counter. We lazily
3765  // update it after every GC.
3766  objects_size_ = 0;
3767 }
3768 
3769 CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
3770  : LargeObjectSpace(heap, CODE_LO_SPACE) {}
3771 
3772 AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
3773  return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
3774 }
3775 
3776 } // namespace internal
3777 } // namespace v8