V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
spaces.h
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7 
8 #include <list>
9 #include <map>
10 #include <memory>
11 #include <unordered_map>
12 #include <unordered_set>
13 #include <vector>
14 
15 #include "src/allocation.h"
16 #include "src/base/atomic-utils.h"
17 #include "src/base/bounded-page-allocator.h"
18 #include "src/base/export-template.h"
19 #include "src/base/iterator.h"
20 #include "src/base/list.h"
21 #include "src/base/platform/mutex.h"
22 #include "src/cancelable-task.h"
23 #include "src/flags.h"
24 #include "src/globals.h"
25 #include "src/heap/heap.h"
26 #include "src/heap/invalidated-slots.h"
27 #include "src/heap/marking.h"
28 #include "src/objects.h"
29 #include "src/objects/heap-object.h"
30 #include "src/objects/map.h"
31 #include "src/utils.h"
32 
33 namespace v8 {
34 namespace internal {
35 
36 namespace heap {
37 class HeapTester;
38 class TestCodePageAllocatorScope;
39 } // namespace heap
40 
41 class AllocationObserver;
42 class CompactionSpace;
43 class CompactionSpaceCollection;
44 class FreeList;
45 class Isolate;
46 class LinearAllocationArea;
47 class LocalArrayBufferTracker;
48 class MemoryAllocator;
49 class MemoryChunk;
50 class MemoryChunkLayout;
51 class Page;
52 class PagedSpace;
53 class SemiSpace;
54 class SkipList;
55 class SlotsBuffer;
56 class SlotSet;
57 class TypedSlotSet;
58 class Space;
59 
60 // -----------------------------------------------------------------------------
61 // Heap structures:
62 //
63 // A JS heap consists of a young generation, an old generation, and a large
64 // object space. The young generation is divided into two semispaces. A
65 // scavenger implements Cheney's copying algorithm. The old generation is
66 // separated into a map space and an old object space. The map space contains
67 // all (and only) map objects, the rest of old objects go into the old space.
68 // The old generation is collected by a mark-sweep-compact collector.
69 //
70 // The semispaces of the young generation are contiguous. The old and map
71 // spaces consist of a list of pages. A page has a page header and an object
72 // area.
73 //
74 // There is a separate large object space for objects larger than
75 // kMaxRegularHeapObjectSize, so that they do not have to move during
76 // collection. The large object space is paged. Pages in large object space
77 // may be larger than the page size.
78 //
79 // A store-buffer based write barrier is used to keep track of intergenerational
80 // references. See heap/store-buffer.h.
81 //
82 // During scavenges and mark-sweep collections we sometimes (after a store
83 // buffer overflow) iterate intergenerational pointers without decoding heap
84 // object maps, so if the page belongs to old space or large object space
85 // it is essential to guarantee that the page does not contain any
86 // garbage pointers to new space: every pointer aligned word which satisfies
87 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
88 // new space. Thus objects in old space and large object spaces should have a
89 // special layout (e.g. no bare integer fields). This requirement does not
90 // apply to map space which is iterated in a special fashion. However we still
91 // require pointer fields of dead maps to be cleaned.
92 //
93 // To enable lazy cleaning of old space pages we can mark chunks of the page
94 // as being garbage. Garbage sections are marked with a special map. These
95 // sections are skipped when scanning the page, even if we are otherwise
96 // scanning without regard for object boundaries. Garbage sections are chained
97 // together to form a free list after a GC. Garbage sections created outside
98 // of GCs by object truncation etc. may not be in the free list chain. Very
99 // small free spaces are ignored; they need only be cleaned of bogus pointers
100 // into new space.
101 //
102 // Each page may have up to one special garbage section. The start of this
103 // section is denoted by the top field in the space. The end of the section
104 // is denoted by the limit field in the space. This special garbage section
105 // is not marked with a free space map in the data. The point of this section
106 // is to enable linear allocation without having to constantly update the byte
107 // array every time the top field is updated and a new object is created. The
108 // special garbage section is not in the chain of garbage sections.
109 //
110 // Since the top and limit fields are in the space, not the page, only one page
111 // has a special garbage section, and if the top and limit are equal then there
112 // is no special garbage section.
113 
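The top/limit pair described above is, in effect, a bump-pointer allocator over the page's special garbage section. The following sketch is not part of spaces.h; DemoLinearArea and its members are hypothetical simplifications meant only to illustrate why only the top field has to move on each allocation.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for a space's linear allocation area (top/limit).
struct DemoLinearArea {
  uintptr_t top;    // start of the special garbage section
  uintptr_t limit;  // end of the special garbage section

  // Bump-pointer allocation: returns the old top, or 0 if the remaining
  // section cannot hold |size_in_bytes|. Only |top| is updated; no free-space
  // map has to be written.
  uintptr_t Allocate(size_t size_in_bytes) {
    if (limit - top < size_in_bytes) return 0;
    uintptr_t result = top;
    top += size_in_bytes;
    return result;
  }
};
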
114 // Some assertion macros used in the debugging mode.
115 
116 #define DCHECK_PAGE_ALIGNED(address) DCHECK_EQ(0, (address)&kPageAlignmentMask)
117 
118 #define DCHECK_OBJECT_ALIGNED(address) \
119  DCHECK_EQ(0, (address)&kObjectAlignmentMask)
120 
121 #define DCHECK_OBJECT_SIZE(size) \
122  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
123 
124 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
125  DCHECK((0 < size) && (size <= code_space->AreaSize()))
126 
127 enum FreeListCategoryType {
128  kTiniest,
129  kTiny,
130  kSmall,
131  kMedium,
132  kLarge,
133  kHuge,
134 
135  kFirstCategory = kTiniest,
136  kLastCategory = kHuge,
137  kNumberOfCategories = kLastCategory + 1,
138  kInvalidCategory
139 };
140 
141 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
142 
143 enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
144 
145 enum RememberedSetType {
146  OLD_TO_NEW,
147  OLD_TO_OLD,
148  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
149 };
150 
151 // A free list category maintains a linked list of free memory blocks.
152 class FreeListCategory {
153  public:
154  FreeListCategory(FreeList* free_list, Page* page)
155  : free_list_(free_list),
156  page_(page),
157  type_(kInvalidCategory),
158  available_(0),
159  top_(nullptr),
160  prev_(nullptr),
161  next_(nullptr) {}
162 
163  void Initialize(FreeListCategoryType type) {
164  type_ = type;
165  available_ = 0;
166  top_ = nullptr;
167  prev_ = nullptr;
168  next_ = nullptr;
169  }
170 
171  void Reset();
172 
173  void ResetStats() { Reset(); }
174 
175  void RepairFreeList(Heap* heap);
176 
177  // Relinks the category into the currently owning free list. Requires that the
178  // category is currently unlinked.
179  void Relink();
180 
181  void Free(Address address, size_t size_in_bytes, FreeMode mode);
182 
183  // Performs a single try to pick a node of at least |minimum_size| from the
184  // category. Stores the actual size in |node_size|. Returns nullptr if no
185  // node is found.
186  FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);
187 
188  // Picks a node of at least |minimum_size| from the category. Stores the
189  // actual size in |node_size|. Returns nullptr if no node is found.
190  FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
191 
192  inline FreeList* owner();
193  inline Page* page() const { return page_; }
194  inline bool is_linked();
195  bool is_empty() { return top() == nullptr; }
196  size_t available() const { return available_; }
197 
198  void set_free_list(FreeList* free_list) { free_list_ = free_list; }
199 
200 #ifdef DEBUG
201  size_t SumFreeList();
202  int FreeListLength();
203 #endif
204 
205  private:
206 // For debug builds we accurately compute free list lengths up until
207  // {kVeryLongFreeList} by manually walking the list.
208  static const int kVeryLongFreeList = 500;
209 
210  FreeSpace* top() { return top_; }
211  void set_top(FreeSpace* top) { top_ = top; }
212  FreeListCategory* prev() { return prev_; }
213  void set_prev(FreeListCategory* prev) { prev_ = prev; }
214  FreeListCategory* next() { return next_; }
215  void set_next(FreeListCategory* next) { next_ = next; }
216 
217  // This FreeListCategory is owned by the given free_list_.
218  FreeList* free_list_;
219 
220  // This FreeListCategory holds free list entries of the given page_.
221  Page* const page_;
222 
223  // |type_|: The type of this free list category.
224  FreeListCategoryType type_;
225 
226  // |available_|: Total available bytes in all blocks of this free list
227  // category.
228  size_t available_;
229 
230  // |top_|: Points to the top FreeSpace* in the free list category.
231  FreeSpace* top_;
232 
233  FreeListCategory* prev_;
234  FreeListCategory* next_;
235 
236  friend class FreeList;
237  friend class PagedSpace;
238 
239  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
240 };
241 
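As a rough illustration of the single-try pick that FreeListCategory::PickNodeFromList documents above, the sketch below unlinks the head node if it is large enough and reports its size. It is not part of spaces.h; FreeNode and PickNode are hypothetical simplifications of FreeSpace and the real member function.

#include <cstddef>

// Hypothetical simplification of FreeSpace: a singly linked free block.
struct FreeNode {
  size_t size;
  FreeNode* next;
};

// Single try: only the list head is examined. Returns the node and stores its
// size in |node_size|, or returns nullptr if the head is absent or too small.
FreeNode* PickNode(FreeNode** top, size_t minimum_size, size_t* node_size) {
  FreeNode* node = *top;
  if (node == nullptr || node->size < minimum_size) return nullptr;
  *top = node->next;
  *node_size = node->size;
  return node;
}
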
242 class MemoryChunkLayout {
243  public:
244  static size_t CodePageGuardStartOffset();
245  static size_t CodePageGuardSize();
246  static intptr_t ObjectStartOffsetInCodePage();
247  static intptr_t ObjectEndOffsetInCodePage();
248  static size_t AllocatableMemoryInCodePage();
249  static intptr_t ObjectStartOffsetInDataPage();
250  V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
251  static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
252  static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
253 };
254 
255 // MemoryChunk represents a memory region owned by a specific space.
256 // It is divided into the header and the body. Chunk start is always
257 // 1MB aligned. Start of the body is aligned so it can accommodate
258 // any heap object.
259 class MemoryChunk {
260  public:
261  // Use with std data structures.
262  struct Hasher {
263  size_t operator()(MemoryChunk* const chunk) const {
264  return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
265  }
266  };
267 
268  enum Flag {
269  NO_FLAGS = 0u,
270  IS_EXECUTABLE = 1u << 0,
271  POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
272  POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
273  // A page in new space has one of the next two flags set.
274  IN_FROM_SPACE = 1u << 3,
275  IN_TO_SPACE = 1u << 4,
276  NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
277  EVACUATION_CANDIDATE = 1u << 6,
278  NEVER_EVACUATE = 1u << 7,
279 
280 // Large objects can have a progress bar in their page header. These objects
281  // are scanned in increments and will be kept black while being scanned.
282  // Even if the mutator writes to them they will be kept black and a white
283  // to grey transition is performed in the value.
284  HAS_PROGRESS_BAR = 1u << 8,
285 
286  // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
287  // from new to old space during evacuation.
288  PAGE_NEW_OLD_PROMOTION = 1u << 9,
289 
290  // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
291  // within the new space during evacuation.
292  PAGE_NEW_NEW_PROMOTION = 1u << 10,
293 
294  // This flag is intended to be used for testing. Works only when both
295  // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
296 // are set. It forces the page to become an evacuation candidate at the next
297 // candidate selection cycle.
298  FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
299 
300  // This flag is intended to be used for testing.
301  NEVER_ALLOCATE_ON_PAGE = 1u << 12,
302 
303 // The memory chunk is already logically freed; however, the actual freeing
304  // still has to be performed.
305  PRE_FREED = 1u << 13,
306 
307  // |POOLED|: When actually freeing this chunk, only uncommit and do not
308  // give up the reservation as we still reuse the chunk at some point.
309  POOLED = 1u << 14,
310 
311  // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
312  // has been aborted and needs special handling by the sweeper.
313  COMPACTION_WAS_ABORTED = 1u << 15,
314 
315  // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
316  // on pages is sometimes aborted. The flag is used to avoid repeatedly
317  // triggering on the same page.
318  COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
319 
320  // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
321  // to iterate the page.
322  SWEEP_TO_ITERATE = 1u << 17,
323 
324  // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
325  // enabled.
326  INCREMENTAL_MARKING = 1u << 18
327  };
328 
329  using Flags = uintptr_t;
330 
331  static const Flags kPointersToHereAreInterestingMask =
332  POINTERS_TO_HERE_ARE_INTERESTING;
333 
334  static const Flags kPointersFromHereAreInterestingMask =
335  POINTERS_FROM_HERE_ARE_INTERESTING;
336 
337  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
338 
339  static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
340 
341  static const Flags kSkipEvacuationSlotsRecordingMask =
342  kEvacuationCandidateMask | kIsInNewSpaceMask;
343 
344  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
345  // not be performed on that page. Sweeper threads that are done with their
346  // work will set this value and not touch the page anymore.
347  // |kSweepingPending|: This page is ready for parallel sweeping.
348  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
349  enum ConcurrentSweepingState {
350  kSweepingDone,
351  kSweepingPending,
352  kSweepingInProgress,
353  };
354 
355  static const intptr_t kAlignment =
356  (static_cast<uintptr_t>(1) << kPageSizeBits);
357 
358  static const intptr_t kAlignmentMask = kAlignment - 1;
359 
360  static const intptr_t kSizeOffset = 0;
361  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
362  static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
363  static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;
364 
365  static const size_t kHeaderSize =
366  kSizeOffset // NOLINT
367  + kSizetSize // size_t size
368  + kUIntptrSize // uintptr_t flags_
369  + kPointerSize // Bitmap* marking_bitmap_
370  + 3 * kPointerSize // VirtualMemory reservation_
371  + kPointerSize // Address area_start_
372  + kPointerSize // Address area_end_
373  + kPointerSize // Address owner_
374  + kPointerSize // Heap* heap_
375  + kIntptrSize // intptr_t progress_bar_
376  + kIntptrSize // std::atomic<intptr_t> live_byte_count_
377  + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
378  + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
379  + kPointerSize // InvalidatedSlots* invalidated_slots_
380  + kPointerSize // SkipList* skip_list_
381  + kPointerSize // std::atomic<intptr_t> high_water_mark_
382  + kPointerSize // base::Mutex* mutex_
383  +
384  kPointerSize // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
385  + kPointerSize // base::Mutex* page_protection_change_mutex_
386  + kPointerSize // uintptr_t write_unprotect_counter_
387  + kSizetSize * ExternalBackingStoreType::kNumTypes
388  // std::atomic<size_t> external_backing_store_bytes_
389  + kSizetSize // size_t allocated_bytes_
390  + kSizetSize // size_t wasted_memory_
391  + kPointerSize * 2 // base::ListNode
392  + kPointerSize * kNumberOfCategories
393  // FreeListCategory categories_[kNumberOfCategories]
394  + kPointerSize // LocalArrayBufferTracker* local_tracker_
395  + kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
396  + kPointerSize; // Bitmap* young_generation_bitmap_
397 
398  // Page size in bytes. This must be a multiple of the OS page size.
399  static const int kPageSize = 1 << kPageSizeBits;
400 
401  // Maximum number of nested code memory modification scopes.
402  // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
403  static const int kMaxWriteUnprotectCounter = 4;
404 
405  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
406  static MemoryChunk* FromAddress(Address a) {
407  return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
408  }
409  // Only works if the object is in the first kPageSize of the MemoryChunk.
410  static MemoryChunk* FromHeapObject(const HeapObject* o) {
411  return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
412  ~kAlignmentMask);
413  }
414  // Only works if the object is in the first kPageSize of the MemoryChunk.
415  static MemoryChunk* FromHeapObject(const HeapObjectPtr o) {
416  return reinterpret_cast<MemoryChunk*>(o.ptr() & ~kAlignmentMask);
417  }
418 
419  void SetOldGenerationPageFlags(bool is_marking);
420  void SetYoungGenerationPageFlags(bool is_marking);
421 
422  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
423 
424  static inline void UpdateHighWaterMark(Address mark) {
425  if (mark == kNullAddress) return;
426  // Need to subtract one from the mark because when a chunk is full the
427  // top points to the next address after the chunk, which effectively belongs
428  // to another chunk. See the comment to Page::FromTopOrLimit.
429  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
430  intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
431  intptr_t old_mark = 0;
432  do {
433  old_mark = chunk->high_water_mark_;
434  } while (
435  (new_mark > old_mark) &&
436  !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
437  }
438 
439  static inline void MoveExternalBackingStoreBytes(
440  ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
441  size_t amount);
442 
443  void DiscardUnusedMemory(Address addr, size_t size);
444 
445  Address address() const {
446  return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
447  }
448 
449  base::Mutex* mutex() { return mutex_; }
450 
451  bool Contains(Address addr) {
452  return addr >= area_start() && addr < area_end();
453  }
454 
455  // Checks whether |addr| can be a limit of addresses in this page. It's a
456  // limit if it's in the page, or if it's just after the last byte of the page.
457  bool ContainsLimit(Address addr) {
458  return addr >= area_start() && addr <= area_end();
459  }
460 
461  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
462  concurrent_sweeping_ = state;
463  }
464 
465  ConcurrentSweepingState concurrent_sweeping_state() {
466  return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
467  }
468 
469  bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
470 
471  size_t size() const { return size_; }
472  void set_size(size_t size) { size_ = size; }
473 
474  inline Heap* heap() const { return heap_; }
475 
476  Heap* synchronized_heap();
477 
478  inline SkipList* skip_list() { return skip_list_; }
479 
480  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
481 
482  template <RememberedSetType type>
483  bool ContainsSlots() {
484  return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
485  invalidated_slots() != nullptr;
486  }
487 
488  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
489  SlotSet* slot_set() {
490  if (access_mode == AccessMode::ATOMIC)
491  return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
492  return slot_set_[type];
493  }
494 
495  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
496  TypedSlotSet* typed_slot_set() {
497  if (access_mode == AccessMode::ATOMIC)
498  return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
499  return typed_slot_set_[type];
500  }
501 
502  template <RememberedSetType type>
503  SlotSet* AllocateSlotSet();
504  // Not safe to be called concurrently.
505  template <RememberedSetType type>
506  void ReleaseSlotSet();
507  template <RememberedSetType type>
508  TypedSlotSet* AllocateTypedSlotSet();
509  // Not safe to be called concurrently.
510  template <RememberedSetType type>
511  void ReleaseTypedSlotSet();
512 
513  InvalidatedSlots* AllocateInvalidatedSlots();
514  void ReleaseInvalidatedSlots();
515  void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
516  // Updates invalidated_slots after array left-trimming.
517  void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
518  HeapObject* new_start);
519  bool RegisteredObjectWithInvalidatedSlots(HeapObject* object);
520  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
521 
522  void ReleaseLocalTracker();
523 
524  void AllocateYoungGenerationBitmap();
525  void ReleaseYoungGenerationBitmap();
526 
527  void AllocateMarkingBitmap();
528  void ReleaseMarkingBitmap();
529 
530  Address area_start() { return area_start_; }
531  Address area_end() { return area_end_; }
532  size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
533 
534  // Approximate amount of physical memory committed for this chunk.
535  size_t CommittedPhysicalMemory();
536 
537  Address HighWaterMark() { return address() + high_water_mark_; }
538 
539  int progress_bar() {
540  DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
541  return static_cast<int>(progress_bar_);
542  }
543 
544  void set_progress_bar(int progress_bar) {
545  DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
546  progress_bar_ = progress_bar;
547  }
548 
549  void ResetProgressBar() {
550  if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
551  set_progress_bar(0);
552  }
553  }
554 
555  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
556  size_t amount);
557 
558  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
559  size_t amount);
560 
561  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
562  return external_backing_store_bytes_[type];
563  }
564 
565  inline uint32_t AddressToMarkbitIndex(Address addr) const {
566  return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
567  }
568 
569  inline Address MarkbitIndexToAddress(uint32_t index) const {
570  return this->address() + (index << kPointerSizeLog2);
571  }
572 
573  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
574  void SetFlag(Flag flag) {
575  if (access_mode == AccessMode::NON_ATOMIC) {
576  flags_ |= flag;
577  } else {
578  base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
579  }
580  }
581 
582  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
583  bool IsFlagSet(Flag flag) {
584  return (GetFlags<access_mode>() & flag) != 0;
585  }
586 
587  void ClearFlag(Flag flag) { flags_ &= ~flag; }
588  // Set or clear multiple flags at a time. The flags in the mask are set to
589  // the value in "flags", the rest retain the current value in |flags_|.
590  void SetFlags(uintptr_t flags, uintptr_t mask) {
591  flags_ = (flags_ & ~mask) | (flags & mask);
592  }
593 
594  // Return all current flags.
595  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
596  uintptr_t GetFlags() {
597  if (access_mode == AccessMode::NON_ATOMIC) {
598  return flags_;
599  } else {
600  return base::AsAtomicWord::Relaxed_Load(&flags_);
601  }
602  }
603 
604  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
605 
606  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
607 
608  bool CanAllocate() {
609  return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
610  }
611 
612  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
613  bool IsEvacuationCandidate() {
614  DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
615  IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
616  return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
617  }
618 
619  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
620  bool ShouldSkipEvacuationSlotRecording() {
621  uintptr_t flags = GetFlags<access_mode>();
622  return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
623  ((flags & COMPACTION_WAS_ABORTED) == 0);
624  }
625 
626  Executability executable() {
627  return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
628  }
629 
630  bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
631 
632  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
633 
634  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
635 
636  bool InOldSpace() const;
637 
638  bool InLargeObjectSpace() const;
639 
640  inline bool IsInNewLargeObjectSpace() const;
641 
642  Space* owner() const { return owner_; }
643 
644  void set_owner(Space* space) { owner_ = space; }
645 
646  bool IsPagedSpace() const;
647 
648  // Emits a memory barrier. For TSAN builds the other thread needs to perform
649  // MemoryChunk::synchronized_heap() to simulate the barrier.
650  void InitializationMemoryFence();
651 
652  void SetReadAndExecutable();
653  void SetReadAndWritable();
654 
655  base::ListNode<MemoryChunk>& list_node() { return list_node_; }
656 
657  protected:
658  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
659  Address area_start, Address area_end,
660  Executability executable, Space* owner,
661  VirtualMemory reservation);
662 
663  // Should be called when memory chunk is about to be freed.
664  void ReleaseAllocatedMemory();
665 
666  VirtualMemory* reserved_memory() { return &reservation_; }
667 
668  size_t size_;
669  uintptr_t flags_;
670 
671  Bitmap* marking_bitmap_;
672 
673  // If the chunk needs to remember its memory reservation, it is stored here.
674  VirtualMemory reservation_;
675 
676  // Start and end of allocatable memory on this chunk.
677  Address area_start_;
678  Address area_end_;
679 
680  // The space owning this memory chunk.
681  std::atomic<Space*> owner_;
682 
683  Heap* heap_;
684 
685  // Used by the incremental marker to keep track of the scanning progress in
686  // large objects that have a progress bar and are scanned in increments.
687  intptr_t progress_bar_;
688 
689  // Count of bytes marked black on page.
690  std::atomic<intptr_t> live_byte_count_;
691 
692  // A single slot set for small pages (of size kPageSize) or an array of slot
693 // sets for large pages. In the latter case the number of entries in the array
694  // is ceil(size() / kPageSize).
695  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
696  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
697  InvalidatedSlots* invalidated_slots_;
698 
699  SkipList* skip_list_;
700 
701  // Assuming the initial allocation on a page is sequential,
702 // counts the highest number of bytes ever allocated on the page.
703  std::atomic<intptr_t> high_water_mark_;
704 
705  base::Mutex* mutex_;
706 
707  std::atomic<intptr_t> concurrent_sweeping_;
708 
709  base::Mutex* page_protection_change_mutex_;
710 
711  // This field is only relevant for code pages. It depicts the number of
712 // times a component requested this page to be read+writable. The
713 // counter is decremented when a component resets to read+executable.
714 // If Value() == 0 => The memory is read and executable.
715 // If Value() >= 1 => The memory is read and writable (and maybe executable).
716  // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
717  // excessive nesting of scopes.
718  // All executable MemoryChunks are allocated rw based on the assumption that
719 // they will be used immediately for an allocation. They are initialized
720  // with the number of open CodeSpaceMemoryModificationScopes. The caller
721  // that triggers the page allocation is responsible for decrementing the
722  // counter.
723  uintptr_t write_unprotect_counter_;
724 
725 // Bytes allocated on the page, which include all objects on the page
726  // and the linear allocation area.
727  size_t allocated_bytes_;
728 
729  // Tracks off-heap memory used by this memory chunk.
730  std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
731 
732  // Freed memory that was not added to the free list.
733  size_t wasted_memory_;
734 
735  base::ListNode<MemoryChunk> list_node_;
736 
737  FreeListCategory* categories_[kNumberOfCategories];
738 
739  LocalArrayBufferTracker* local_tracker_;
740 
741  std::atomic<intptr_t> young_generation_live_byte_count_;
742  Bitmap* young_generation_bitmap_;
743 
744  private:
745  void InitializeReservedMemory() { reservation_.Reset(); }
746 
747  friend class ConcurrentMarkingState;
748  friend class IncrementalMarkingState;
749  friend class MajorAtomicMarkingState;
750  friend class MajorMarkingState;
751  friend class MajorNonAtomicMarkingState;
752  friend class MemoryAllocator;
753  friend class MemoryChunkValidator;
754  friend class MinorMarkingState;
755  friend class MinorNonAtomicMarkingState;
756  friend class PagedSpace;
757 };
758 
759 static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
760  "sizeof(std::atomic<intptr_t>) == kPointerSize");
761 
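MemoryChunk::UpdateHighWaterMark above raises high_water_mark_ monotonically with a compare-and-swap loop so that concurrent updaters can never lower it. A minimal self-contained sketch of the same pattern, not part of spaces.h (UpdateMax is a hypothetical name):

#include <atomic>
#include <cstdint>

// Raises *counter to new_value if new_value is larger; concurrent writers
// that already stored an even larger value win, and this call becomes a no-op.
void UpdateMax(std::atomic<intptr_t>* counter, intptr_t new_value) {
  intptr_t observed = counter->load();
  while (new_value > observed &&
         !counter->compare_exchange_weak(observed, new_value)) {
    // On failure compare_exchange_weak refreshes |observed|; loop re-checks.
  }
}
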
762 // -----------------------------------------------------------------------------
763 // A page is a memory chunk of size 512K. Large object pages may be larger.
764 //
765 // The only way to get a page pointer is by calling factory methods:
766 // Page* p = Page::FromAddress(addr); or
767 // Page* p = Page::FromTopOrLimit(top);
768 class Page : public MemoryChunk {
769  public:
770  static const intptr_t kCopyAllFlags = ~0;
771 
772  // Page flags copied from from-space to to-space when flipping semispaces.
773  static const intptr_t kCopyOnFlipFlagsMask =
774  static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
775  static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
776  static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
777 
778  // Returns the page containing a given address. The address ranges
779  // from [page_addr .. page_addr + kPageSize[. This only works if the object
780  // is in fact in a page.
781  static Page* FromAddress(Address addr) {
782  return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
783  }
784  static Page* FromHeapObject(const HeapObject* o) {
785  return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
786  ~kAlignmentMask);
787  }
788 
789  // Returns the page containing the address provided. The address can
790 // potentially point right after the page. To also be safe for tagged values
791  // we subtract a hole word. The valid address ranges from
792  // [page_addr + area_start_ .. page_addr + kPageSize + kPointerSize].
793  static Page* FromAllocationAreaAddress(Address address) {
794  return Page::FromAddress(address - kPointerSize);
795  }
796 
797  // Checks if address1 and address2 are on the same new space page.
798  static bool OnSamePage(Address address1, Address address2) {
799  return Page::FromAddress(address1) == Page::FromAddress(address2);
800  }
801 
802  // Checks whether an address is page aligned.
803  static bool IsAlignedToPageSize(Address addr) {
804  return (addr & kPageAlignmentMask) == 0;
805  }
806 
807  static Page* ConvertNewToOld(Page* old_page);
808 
809  inline void MarkNeverAllocateForTesting();
810  inline void MarkEvacuationCandidate();
811  inline void ClearEvacuationCandidate();
812 
813  Page* next_page() { return static_cast<Page*>(list_node_.next()); }
814  Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
815 
816  template <typename Callback>
817  inline void ForAllFreeListCategories(Callback callback) {
818  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
819  callback(categories_[i]);
820  }
821  }
822 
823 // Returns the offset of a given address within this page.
824  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
825 
826 // Returns the address for a given offset into this page.
827  Address OffsetToAddress(size_t offset) {
828  Address address_in_page = address() + offset;
829  DCHECK_GE(address_in_page, area_start_);
830  DCHECK_LT(address_in_page, area_end_);
831  return address_in_page;
832  }
833 
834  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
835  // progress. In particular, when we know that right before this call a
836  // sweeper thread was sweeping this page.
837  void WaitUntilSweepingCompleted() {
838  mutex_->Lock();
839  mutex_->Unlock();
840  DCHECK(SweepingDone());
841  }
842 
843  void AllocateLocalTracker();
844  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
845  bool contains_array_buffers();
846 
847  void ResetFreeListStatistics();
848 
849  size_t AvailableInFreeList();
850 
851  size_t AvailableInFreeListFromAllocatedBytes() {
852  DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
853  return area_size() - wasted_memory() - allocated_bytes();
854  }
855 
856  FreeListCategory* free_list_category(FreeListCategoryType type) {
857  return categories_[type];
858  }
859 
860  size_t wasted_memory() { return wasted_memory_; }
861  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
862  size_t allocated_bytes() { return allocated_bytes_; }
863  void IncreaseAllocatedBytes(size_t bytes) {
864  DCHECK_LE(bytes, area_size());
865  allocated_bytes_ += bytes;
866  }
867  void DecreaseAllocatedBytes(size_t bytes) {
868  DCHECK_LE(bytes, area_size());
869  DCHECK_GE(allocated_bytes(), bytes);
870  allocated_bytes_ -= bytes;
871  }
872 
873  void ResetAllocatedBytes();
874 
875  size_t ShrinkToHighWaterMark();
876 
877  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
878  void DestroyBlackArea(Address start, Address end);
879 
880  void InitializeFreeListCategories();
881  void AllocateFreeListCategories();
882  void ReleaseFreeListCategories();
883 
884 #ifdef DEBUG
885  void Print();
886 #endif // DEBUG
887 
888  private:
889  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
890 
891  friend class MemoryAllocator;
892 };
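
A small usage sketch for Page::Offset and Page::OffsetToAddress, not part of spaces.h and assuming the surrounding V8-internal headers; |page| and |addr| are hypothetical inputs that satisfy the DCHECKs in OffsetToAddress:

void OffsetRoundTrip(Page* page, Address addr) {
  size_t offset = page->Offset(addr);            // distance from the page start
  Address back = page->OffsetToAddress(offset);  // maps the offset back
  DCHECK_EQ(addr, back);                         // the two calls are inverses
}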
893 
894 class ReadOnlyPage : public Page {
895  public:
896  // Clears any pointers in the header that point out of the page that would
897  // otherwise make the header non-relocatable.
898  void MakeHeaderRelocatable();
899 
900  private:
901  friend class ReadOnlySpace;
902 };
903 
904 class LargePage : public MemoryChunk {
905  public:
906  // A limit to guarantee that we do not overflow typed slot offset in
907  // the old to old remembered set.
908  // Note that this limit is higher than what assembler already imposes on
909  // x64 and ia32 architectures.
910  static const int kMaxCodePageSize = 512 * MB;
911 
912  static LargePage* FromHeapObject(const HeapObject* o) {
913  return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
914  }
915 
916  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
917 
918  inline LargePage* next_page() {
919  return static_cast<LargePage*>(list_node_.next());
920  }
921 
922  // Uncommit memory that is not in use anymore by the object. If the object
923 // cannot be shrunk, 0 is returned.
924  Address GetAddressToShrink(Address object_address, size_t object_size);
925 
926  void ClearOutOfLiveRangeSlots(Address free_start);
927 
928  private:
929  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
930  Executability executable);
931 
932  friend class MemoryAllocator;
933 };
934 
935 
936 // ----------------------------------------------------------------------------
937 // Space is the abstract superclass for all allocation spaces.
938 class Space : public Malloced {
939  public:
940  Space(Heap* heap, AllocationSpace id)
941  : allocation_observers_paused_(false),
942  heap_(heap),
943  id_(id),
944  committed_(0),
945  max_committed_(0) {
946  external_backing_store_bytes_ =
947  new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
948  external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
949  external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
950  0;
951  }
952 
953  static inline void MoveExternalBackingStoreBytes(
954  ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
955 
956  virtual ~Space() {
957  delete[] external_backing_store_bytes_;
958  external_backing_store_bytes_ = nullptr;
959  }
960 
961  Heap* heap() const { return heap_; }
962 
963  // Identity used in error reporting.
964  AllocationSpace identity() { return id_; }
965 
966  const char* name() { return AllocationSpaceName(id_); }
967 
968  V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
969  AllocationObserver* observer);
970 
971  V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
972  AllocationObserver* observer);
973 
974  V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
975 
976  V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
977 
978  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
979 
980  void AllocationStep(int bytes_since_last, Address soon_object, int size);
981 
982 // Return the total amount of committed memory for this space, i.e., allocatable
983  // memory and page headers.
984  virtual size_t CommittedMemory() { return committed_; }
985 
986  virtual size_t MaximumCommittedMemory() { return max_committed_; }
987 
988  // Returns allocated size.
989  virtual size_t Size() = 0;
990 
991  // Returns size of objects. Can differ from the allocated size
992  // (e.g. see LargeObjectSpace).
993  virtual size_t SizeOfObjects() { return Size(); }
994 
995  // Approximate amount of physical memory committed for this space.
996  virtual size_t CommittedPhysicalMemory() = 0;
997 
998  // Return the available bytes without growing.
999  virtual size_t Available() = 0;
1000 
1001  virtual int RoundSizeDownToObjectAlignment(int size) {
1002  if (id_ == CODE_SPACE) {
1003  return RoundDown(size, kCodeAlignment);
1004  } else {
1005  return RoundDown(size, kPointerSize);
1006  }
1007  }
1008 
1009  virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
1010 
1011  void AccountCommitted(size_t bytes) {
1012  DCHECK_GE(committed_ + bytes, committed_);
1013  committed_ += bytes;
1014  if (committed_ > max_committed_) {
1015  max_committed_ = committed_;
1016  }
1017  }
1018 
1019  void AccountUncommitted(size_t bytes) {
1020  DCHECK_GE(committed_, committed_ - bytes);
1021  committed_ -= bytes;
1022  }
1023 
1024  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1025  size_t amount);
1026 
1027  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1028  size_t amount);
1029 
1030  // Returns amount of off-heap memory in-use by objects in this Space.
1031  virtual size_t ExternalBackingStoreBytes(
1032  ExternalBackingStoreType type) const {
1033  return external_backing_store_bytes_[type];
1034  }
1035 
1036  V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
1037 
1038  MemoryChunk* first_page() { return memory_chunk_list_.front(); }
1039  MemoryChunk* last_page() { return memory_chunk_list_.back(); }
1040 
1041  base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
1042 
1043 #ifdef DEBUG
1044  virtual void Print() = 0;
1045 #endif
1046 
1047  protected:
1048  intptr_t GetNextInlineAllocationStepSize();
1049  bool AllocationObserversActive() {
1050  return !allocation_observers_paused_ && !allocation_observers_.empty();
1051  }
1052 
1053  std::vector<AllocationObserver*> allocation_observers_;
1054 
1055  // The List manages the pages that belong to the given space.
1056  base::List<MemoryChunk> memory_chunk_list_;
1057 
1058  // Tracks off-heap memory used by this space.
1059  std::atomic<size_t>* external_backing_store_bytes_;
1060 
1061  private:
1062  bool allocation_observers_paused_;
1063  Heap* heap_;
1064  AllocationSpace id_;
1065 
1066  // Keeps track of committed memory in a space.
1067  size_t committed_;
1068  size_t max_committed_;
1069 
1070  DISALLOW_COPY_AND_ASSIGN(Space);
1071 };
1072 
1073 
1074 class MemoryChunkValidator {
1075  // Computed offsets should match the compiler generated ones.
1076  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
1077 
1078  // Validate our estimates on the header size.
1079  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
1080  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
1081  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
1082 };
1083 
1084 
1085 // The process-wide singleton that keeps track of code range regions with the
1086 // intention to reuse free code range regions as a workaround for CFG memory
1087 // leaks (see crbug.com/870054).
1088 class CodeRangeAddressHint {
1089  public:
1090  // Returns the most recently freed code range start address for the given
1091  // size. If there is no such entry, then a random address is returned.
1092  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
1093 
1094  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
1095  size_t code_range_size);
1096 
1097  private:
1098  base::Mutex mutex_;
1099  // A map from code range size to an array of recently freed code range
1100  // addresses. There should be O(1) different code range sizes.
1101  // The length of each array is limited by the peak number of code ranges,
1102 // which should also be O(1).
1103  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
1104 };
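
The sketch below restates the contract documented above on a plain map: hand back the most recently recorded start address for the requested size, or a placeholder when nothing is recorded. It is not part of spaces.h; DemoCodeRangeHint and kNoHint are hypothetical, and the real class additionally serializes access with a mutex.

#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

using DemoAddress = uintptr_t;
constexpr DemoAddress kNoHint = 0;  // stand-in for "fall back to a random address"

class DemoCodeRangeHint {
 public:
  DemoAddress GetAddressHint(size_t code_range_size) {
    std::vector<DemoAddress>& freed = recently_freed_[code_range_size];
    if (freed.empty()) return kNoHint;
    DemoAddress hint = freed.back();  // most recently freed entry
    freed.pop_back();                 // consume the hint (illustrative choice)
    return hint;
  }

  void NotifyFreedCodeRange(DemoAddress start, size_t code_range_size) {
    recently_freed_[code_range_size].push_back(start);
  }

 private:
  std::unordered_map<size_t, std::vector<DemoAddress>> recently_freed_;
};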
1105 
1106 class SkipList {
1107  public:
1108  SkipList() { Clear(); }
1109 
1110  void Clear() {
1111  for (int idx = 0; idx < kSize; idx++) {
1112  starts_[idx] = static_cast<Address>(-1);
1113  }
1114  }
1115 
1116  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1117 
1118  void AddObject(Address addr, int size) {
1119  int start_region = RegionNumber(addr);
1120  int end_region = RegionNumber(addr + size - kPointerSize);
1121  for (int idx = start_region; idx <= end_region; idx++) {
1122  if (starts_[idx] > addr) {
1123  starts_[idx] = addr;
1124  } else {
1125  // In the first region, there may already be an object closer to the
1126  // start of the region. Do not change the start in that case. If this
1127  // is not the first region, you probably added overlapping objects.
1128  DCHECK_EQ(start_region, idx);
1129  }
1130  }
1131  }
1132 
1133  static inline int RegionNumber(Address addr) {
1134  return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
1135  }
1136 
1137  static void Update(Address addr, int size) {
1138  Page* page = Page::FromAddress(addr);
1139  SkipList* list = page->skip_list();
1140  if (list == nullptr) {
1141  list = new SkipList();
1142  page->set_skip_list(list);
1143  }
1144 
1145  list->AddObject(addr, size);
1146  }
1147 
1148  private:
1149  static const int kRegionSizeLog2 = 13;
1150  static const int kRegionSize = 1 << kRegionSizeLog2;
1151  static const int kSize = Page::kPageSize / kRegionSize;
1152 
1153  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1154 
1155  Address starts_[kSize];
1156 };
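
A usage sketch for SkipList, not part of spaces.h and assuming the surrounding V8-internal headers: when looking up the object that contains an inner pointer on a code page, start the linear scan from the recorded region start rather than from the beginning of the page. ConservativeScanStart is a hypothetical helper.

Address ConservativeScanStart(Page* page, Address inner_pointer) {
  Address start = page->area_start();
  if (SkipList* list = page->skip_list()) {
    Address recorded = list->StartFor(inner_pointer);
    // Entries default to Address(-1) when no object start was recorded yet.
    if (recorded >= start && recorded <= inner_pointer) start = recorded;
  }
  return start;  // scan forward from here to find the enclosing object
}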
1157 
1158 
1159 // ----------------------------------------------------------------------------
1160 // A space acquires chunks of memory from the operating system. The memory
1161 // allocator allocates and deallocates pages for the paged heap spaces and large
1162 // pages for large object space.
1163 class V8_EXPORT_PRIVATE MemoryAllocator {
1164  public:
1165  // Unmapper takes care of concurrently unmapping and uncommitting memory
1166  // chunks.
1167  class Unmapper {
1168  public:
1169  class UnmapFreeMemoryTask;
1170 
1171  Unmapper(Heap* heap, MemoryAllocator* allocator)
1172  : heap_(heap),
1173  allocator_(allocator),
1174  pending_unmapping_tasks_semaphore_(0),
1175  pending_unmapping_tasks_(0),
1176  active_unmapping_tasks_(0) {
1177  chunks_[kRegular].reserve(kReservedQueueingSlots);
1178  chunks_[kPooled].reserve(kReservedQueueingSlots);
1179  }
1180 
1181  void AddMemoryChunkSafe(MemoryChunk* chunk) {
1182  if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
1183  AddMemoryChunkSafe<kRegular>(chunk);
1184  } else {
1185  AddMemoryChunkSafe<kNonRegular>(chunk);
1186  }
1187  }
1188 
1189  MemoryChunk* TryGetPooledMemoryChunkSafe() {
1190  // Procedure:
1191  // (1) Try to get a chunk that was declared as pooled and already has
1192  // been uncommitted.
1193  // (2) Try to steal any memory chunk of kPageSize that would've been
1194  // unmapped.
1195  MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1196  if (chunk == nullptr) {
1197  chunk = GetMemoryChunkSafe<kRegular>();
1198  if (chunk != nullptr) {
1199  // For stolen chunks we need to manually free any allocated memory.
1200  chunk->ReleaseAllocatedMemory();
1201  }
1202  }
1203  return chunk;
1204  }
1205 
1206  V8_EXPORT_PRIVATE void FreeQueuedChunks();
1207  void CancelAndWaitForPendingTasks();
1208  void PrepareForMarkCompact();
1209  void EnsureUnmappingCompleted();
1210  V8_EXPORT_PRIVATE void TearDown();
1211  size_t NumberOfCommittedChunks();
1212  int NumberOfChunks();
1213  size_t CommittedBufferedMemory();
1214 
1215  private:
1216  static const int kReservedQueueingSlots = 64;
1217  static const int kMaxUnmapperTasks = 4;
1218 
1219  enum ChunkQueueType {
1220  kRegular, // Pages of kPageSize that do not live in a CodeRange and
1221  // can thus be used for stealing.
1222  kNonRegular, // Large chunks and executable chunks.
1223  kPooled, // Pooled chunks, already uncommitted and ready for reuse.
1224  kNumberOfChunkQueues,
1225  };
1226 
1227  enum class FreeMode {
1228  kUncommitPooled,
1229  kReleasePooled,
1230  };
1231 
1232  template <ChunkQueueType type>
1233  void AddMemoryChunkSafe(MemoryChunk* chunk) {
1234  base::MutexGuard guard(&mutex_);
1235  chunks_[type].push_back(chunk);
1236  }
1237 
1238  template <ChunkQueueType type>
1239  MemoryChunk* GetMemoryChunkSafe() {
1240  base::MutexGuard guard(&mutex_);
1241  if (chunks_[type].empty()) return nullptr;
1242  MemoryChunk* chunk = chunks_[type].back();
1243  chunks_[type].pop_back();
1244  return chunk;
1245  }
1246 
1247  bool MakeRoomForNewTasks();
1248 
1249  template <FreeMode mode>
1250  void PerformFreeMemoryOnQueuedChunks();
1251 
1252  void PerformFreeMemoryOnQueuedNonRegularChunks();
1253 
1254  Heap* const heap_;
1255  MemoryAllocator* const allocator_;
1256  base::Mutex mutex_;
1257  std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1258  CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
1259  base::Semaphore pending_unmapping_tasks_semaphore_;
1260  intptr_t pending_unmapping_tasks_;
1261  std::atomic<intptr_t> active_unmapping_tasks_;
1262 
1263  friend class MemoryAllocator;
1264  };
1265 
1266  enum AllocationMode {
1267  kRegular,
1268  kPooled,
1269  };
1270 
1271  enum FreeMode {
1272  kFull,
1273  kAlreadyPooled,
1274  kPreFreeAndQueue,
1275  kPooledAndQueue,
1276  };
1277 
1278  static intptr_t GetCommitPageSize();
1279 
1280  // Computes the memory area of discardable memory within a given memory area
1281  // [addr, addr+size) and returns the result as base::AddressRegion. If the
1282 // memory is not discardable, an empty base::AddressRegion is returned.
1283  static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
1284  size_t size);
1285 
1286  MemoryAllocator(Isolate* isolate, size_t max_capacity,
1287  size_t code_range_size);
1288 
1289  void TearDown();
1290 
1291  // Allocates a Page from the allocator. AllocationMode is used to indicate
1292  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1293  // should be tried first.
1294  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1295  typename SpaceType>
1296  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1297  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1298 
1299  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1300  Executability executable);
1301 
1302  template <MemoryAllocator::FreeMode mode = kFull>
1303  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
1304  void Free(MemoryChunk* chunk);
1305 
1306 // Returns the allocated space in bytes.
1307  size_t Size() { return size_; }
1308 
1309 // Returns the allocated executable space in bytes.
1310  size_t SizeExecutable() { return size_executable_; }
1311 
1312  // Returns the maximum available bytes of heaps.
1313  size_t Available() {
1314  const size_t size = Size();
1315  return capacity_ < size ? 0 : capacity_ - size;
1316  }
1317 
1318  // Returns an indication of whether a pointer is in a space that has
1319  // been allocated by this MemoryAllocator.
1320  V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
1321  return address < lowest_ever_allocated_ ||
1322  address >= highest_ever_allocated_;
1323  }
1324 
1325  // Returns a MemoryChunk in which the memory region from commit_area_size to
1326 // reserve_area_size of the chunk area is reserved but not committed; it
1327 // can be committed later by calling MemoryChunk::CommitArea.
1328  MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1329  Executability executable, Space* space);
1330 
1331  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1332  size_t alignment, Executability executable,
1333  void* hint, VirtualMemory* controller);
1334 
1335  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
1336 
1337  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
1338  // internally memory is freed from |start_free| to the end of the reservation.
1339  // Additional memory beyond the page is not accounted though, so
1340  // |bytes_to_free| is computed by the caller.
1341  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1342  size_t bytes_to_free, Address new_area_end);
1343 
1344  // Checks if an allocated MemoryChunk was intended to be used for executable
1345  // memory.
1346  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
1347  return executable_memory_.find(chunk) != executable_memory_.end();
1348  }
1349 
1350  // Commit memory region owned by given reservation object. Returns true if
1351  // it succeeded and false otherwise.
1352  bool CommitMemory(VirtualMemory* reservation);
1353 
1354  // Uncommit memory region owned by given reservation object. Returns true if
1355  // it succeeded and false otherwise.
1356  bool UncommitMemory(VirtualMemory* reservation);
1357 
1358  // Zaps a contiguous block of memory [start..(start+size)[ with
1359  // a given zap value.
1360  void ZapBlock(Address start, size_t size, uintptr_t zap_value);
1361 
1362  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1363  Address start,
1364  size_t commit_size,
1365  size_t reserved_size);
1366 
1367  // Page allocator instance for allocating non-executable pages.
1368  // Guaranteed to be a valid pointer.
1369  v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
1370 
1371  // Page allocator instance for allocating executable pages.
1372  // Guaranteed to be a valid pointer.
1373  v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
1374 
1375  // Returns page allocator suitable for allocating pages with requested
1376  // executability.
1377  v8::PageAllocator* page_allocator(Executability executable) {
1378  return executable == EXECUTABLE ? code_page_allocator_
1379  : data_page_allocator_;
1380  }
1381 
1382 // A region of memory that may contain executable code, including a reserved
1383 // OS page with read-write access at the beginning.
1384  const base::AddressRegion& code_range() const {
1385  // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
1386  DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
1387  DCHECK_IMPLIES(!code_range_.is_empty(),
1388  code_range_.contains(code_page_allocator_instance_->begin(),
1389  code_page_allocator_instance_->size()));
1390  return code_range_;
1391  }
1392 
1393  Unmapper* unmapper() { return &unmapper_; }
1394 
1395  private:
1396  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
1397  size_t requested);
1398 
1399  // PreFree logically frees the object, i.e., it takes care of the size
1400  // bookkeeping and calls the allocation callback.
1401  void PreFreeMemory(MemoryChunk* chunk);
1402 
1403  // FreeMemory can be called concurrently when PreFree was executed before.
1404  void PerformFreeMemory(MemoryChunk* chunk);
1405 
1406  // See AllocatePage for public interface. Note that currently we only support
1407  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1408  template <typename SpaceType>
1409  MemoryChunk* AllocatePagePooled(SpaceType* owner);
1410 
1411  // Initializes pages in a chunk. Returns the first page address.
1412  // This function and GetChunkId() are provided for the mark-compact
1413  // collector to rebuild page headers in the from space, which is
1414  // used as a marking stack and its page headers are destroyed.
1415  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1416  PagedSpace* owner);
1417 
1418  void UpdateAllocatedSpaceLimits(Address low, Address high) {
1419  // The use of atomic primitives does not guarantee correctness (wrt.
1420  // desired semantics) by default. The loop here ensures that we update the
1421  // values only if they did not change in between.
1422  Address ptr = kNullAddress;
1423  do {
1424  ptr = lowest_ever_allocated_;
1425  } while ((low < ptr) &&
1426  !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
1427  do {
1428  ptr = highest_ever_allocated_;
1429  } while ((high > ptr) &&
1430  !highest_ever_allocated_.compare_exchange_weak(ptr, high));
1431  }
1432 
1433  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
1434  DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
1435  DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
1436  executable_memory_.insert(chunk);
1437  }
1438 
1439  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
1440  DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
1441  executable_memory_.erase(chunk);
1442  chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
1443  }
1444 
1445  Isolate* isolate_;
1446 
1447 // This object controls the virtual space reserved for the V8 heap instance.
1448  // Depending on the configuration it may contain the following:
1449  // - no reservation (on 32-bit architectures)
1450  // - code range reservation used by bounded code page allocator (on 64-bit
1451 // architectures without pointer compression in the V8 heap)
1452 // - data + code range reservation (on 64-bit architectures with pointer
1453 // compression in the V8 heap)
1454  VirtualMemory heap_reservation_;
1455 
1456  // Page allocator used for allocating data pages. Depending on the
1457  // configuration it may be a page allocator instance provided by v8::Platform
1458  // or a BoundedPageAllocator (when pointer compression is enabled).
1459  v8::PageAllocator* data_page_allocator_;
1460 
1461  // Page allocator used for allocating code pages. Depending on the
1462  // configuration it may be a page allocator instance provided by v8::Platform
1463  // or a BoundedPageAllocator (when pointer compression is enabled or
1464  // on those 64-bit architectures where pc-relative 32-bit displacement
1465  // can be used for call and jump instructions).
1466  v8::PageAllocator* code_page_allocator_;
1467 
1468  // A part of the |heap_reservation_| that may contain executable code
1469 // including a reserved page with read-write access at the beginning.
1470  // See details below.
1471  base::AddressRegion code_range_;
1472 
1473  // This unique pointer owns the instance of bounded code allocator
1474  // that controls executable pages allocation. It does not control the
1475  // optionally existing page in the beginning of the |code_range_|.
1476  // So, summarizing all above, the following conditions hold:
1477  // 1) |heap_reservation_| >= |code_range_|
1478  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
1479  // 3) |heap_reservation_| is AllocatePageSize()-aligned
1480  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
1481  // 5) |code_range_| is CommitPageSize()-aligned
1482  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
1483 
1484  // Maximum space size in bytes.
1485  size_t capacity_;
1486 
1487  // Allocated space size in bytes.
1488  std::atomic<size_t> size_;
1489  // Allocated executable space size in bytes.
1490  std::atomic<size_t> size_executable_;
1491 
1492  // We keep the lowest and highest addresses allocated as a quick way
1493  // of determining that pointers are outside the heap. The estimate is
1494  // conservative, i.e. not all addresses in 'allocated' space are allocated
1495  // to our heap. The range is [lowest, highest[, inclusive on the low end
1496  // and exclusive on the high end.
1497  std::atomic<Address> lowest_ever_allocated_;
1498  std::atomic<Address> highest_ever_allocated_;
1499 
1500  VirtualMemory last_chunk_;
1501  Unmapper unmapper_;
1502 
1503  // Data structure to remember allocated executable memory chunks.
1504  std::unordered_set<MemoryChunk*> executable_memory_;
1505 
1506  friend class heap::TestCodePageAllocatorScope;
1507 
1508  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1509 };
1510 
1511 extern template Page*
1512 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1513  size_t size, PagedSpace* owner, Executability executable);
1514 extern template Page*
1515 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1516  size_t size, SemiSpace* owner, Executability executable);
1517 extern template Page*
1518 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1519  size_t size, SemiSpace* owner, Executability executable);
1520 
1521 // -----------------------------------------------------------------------------
1522 // Interface for heap object iterator to be implemented by all object space
1523 // object iterators.
1524 //
1525 // NOTE: The space-specific object iterators also implement their own next()
1526 // method, which is used to avoid virtual function calls when iterating over
1527 // a specific space.
1528 
1529 class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1530  public:
1531  virtual ~ObjectIterator() = default;
1532  virtual HeapObject* Next() = 0;
1533 };
1534 
1535 template <class PAGE_TYPE>
1536 class PageIteratorImpl
1537     : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1538  public:
1539  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1540  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1541  PAGE_TYPE* operator*() { return p_; }
1542  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1543  return rhs.p_ == p_;
1544  }
1545  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1546  return rhs.p_ != p_;
1547  }
1548  inline PageIteratorImpl<PAGE_TYPE>& operator++();
1549  inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1550 
1551  private:
1552  PAGE_TYPE* p_;
1553 };
1554 
1555 typedef PageIteratorImpl<Page> PageIterator;
1556 typedef PageIteratorImpl<LargePage> LargePageIterator;
1557 
1558 class PageRange {
1559  public:
1560  typedef PageIterator iterator;
1561  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1562  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1563  inline PageRange(Address start, Address limit);
1564 
1565  iterator begin() { return iterator(begin_); }
1566  iterator end() { return iterator(end_); }
1567 
1568  private:
1569  Page* begin_;
1570  Page* end_;
1571 };
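// Usage sketch (illustrative only; assumes |space| is a PagedSpace* whose page
// list is nullptr-terminated, as PagedSpace::begin()/end() below rely on):
//   for (Page* p : PageRange(space->first_page(), nullptr)) {
//     // visit |p|
//   }
// The single-argument form, PageRange(page), covers exactly that one page.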
1572 
1573 // -----------------------------------------------------------------------------
1574 // Heap object iterator in new/old/map spaces.
1575 //
1576 // A HeapObjectIterator iterates objects from the bottom of the given space
1577 // to its top or from the bottom of the given page to its top.
1578 //
1579 // If objects are allocated in the page during iteration the iterator may
1580 // or may not iterate over those objects. The caller must create a new
1581 // iterator in order to be sure to visit these new objects.
1582 class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1583  public:
1584  // Creates a new object iterator in a given space.
1585  explicit HeapObjectIterator(PagedSpace* space);
1586  explicit HeapObjectIterator(Page* page);
1587 
1588  // Advance to the next object, skipping free spaces and other fillers and
1589  // skipping the special garbage section of which there is one per space.
1590  // Returns nullptr when the iteration has ended.
1591  inline HeapObject* Next() override;
1592 
1593  private:
1594  // Fast (inlined) path of next().
1595  inline HeapObject* FromCurrentPage();
1596 
1597  // Slow path of next(), goes into the next page. Returns false if the
1598  // iteration has ended.
1599  bool AdvanceToNextPage();
1600 
1601  Address cur_addr_; // Current iteration point.
1602  Address cur_end_; // End iteration point.
1603  PagedSpace* space_;
1604  PageRange page_range_;
1605  PageRange::iterator current_page_;
1606 };
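// Usage sketch (illustrative only; assumes |space| is a fully swept
// PagedSpace*, so Next() only needs to skip fillers):
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // visit |obj|
//   }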
1607 
1608 
1609 // -----------------------------------------------------------------------------
1610 // A space has a circular list of pages. The next page can be accessed via
1611 // Page::next_page() call.
1612 
1613 // An abstraction of allocation and relocation pointers in a page-structured
1614 // space.
1615 class LinearAllocationArea {
1616  public:
1617  LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
1618  LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
1619 
1620  void Reset(Address top, Address limit) {
1621  set_top(top);
1622  set_limit(limit);
1623  }
1624 
1625  V8_INLINE void set_top(Address top) {
1626  SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
1627  top_ = top;
1628  }
1629 
1630  V8_INLINE Address top() const {
1631  SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
1632  return top_;
1633  }
1634 
1635  Address* top_address() { return &top_; }
1636 
1637  V8_INLINE void set_limit(Address limit) { limit_ = limit; }
1638 
1639  V8_INLINE Address limit() const { return limit_; }
1640 
1641  Address* limit_address() { return &limit_; }
1642 
1643 #ifdef DEBUG
1644  bool VerifyPagedAllocation() {
1645  return (Page::FromAllocationAreaAddress(top_) ==
1646  Page::FromAllocationAreaAddress(limit_)) &&
1647  (top_ <= limit_);
1648  }
1649 #endif
1650 
1651  private:
1652  // Current allocation top.
1653  Address top_;
1654  // Current allocation limit.
1655  Address limit_;
1656 };
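// Bump-pointer sketch (illustrative only; the AllocateRaw* methods of the
// spaces below implement the real fast path, including alignment handling):
//   LinearAllocationArea lab(top, limit);
//   Address result = lab.top();
//   if (result + size_in_bytes <= lab.limit()) {
//     lab.set_top(result + size_in_bytes);  // object now lives at |result|
//   } else {
//     // slow path: the linear allocation area has to be refilled
//   }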
1657 
1658 
1659 // An abstraction of the accounting statistics of a page-structured space.
1660 //
1661 // The stats are only set by functions that ensure they stay balanced. These
1662 // functions increase or decrease one of the non-capacity stats in conjunction
1663 // with capacity, or else they always balance increases and decreases to the
1664 // non-capacity stats.
1665 class AllocationStats {
1666  public:
1667  AllocationStats() { Clear(); }
1668 
1669  // Zero out all the allocation statistics (i.e., no capacity).
1670  void Clear() {
1671  capacity_ = 0;
1672  max_capacity_ = 0;
1673  ClearSize();
1674  }
1675 
1676  void ClearSize() {
1677  size_ = 0;
1678 #ifdef DEBUG
1679  allocated_on_page_.clear();
1680 #endif
1681  }
1682 
1683  // Accessors for the allocation statistics.
1684  size_t Capacity() { return capacity_; }
1685  size_t MaxCapacity() { return max_capacity_; }
1686  size_t Size() { return size_; }
1687 #ifdef DEBUG
1688  size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
1689 #endif
1690 
1691  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
1692  DCHECK_GE(size_ + bytes, size_);
1693  size_ += bytes;
1694 #ifdef DEBUG
1695  allocated_on_page_[page] += bytes;
1696 #endif
1697  }
1698 
1699  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
1700  DCHECK_GE(size_, bytes);
1701  size_ -= bytes;
1702 #ifdef DEBUG
1703  DCHECK_GE(allocated_on_page_[page], bytes);
1704  allocated_on_page_[page] -= bytes;
1705 #endif
1706  }
1707 
1708  void DecreaseCapacity(size_t bytes) {
1709  DCHECK_GE(capacity_, bytes);
1710  DCHECK_GE(capacity_ - bytes, size_);
1711  capacity_ -= bytes;
1712  }
1713 
1714  void IncreaseCapacity(size_t bytes) {
1715  DCHECK_GE(capacity_ + bytes, capacity_);
1716  capacity_ += bytes;
1717  if (capacity_ > max_capacity_) {
1718  max_capacity_ = capacity_;
1719  }
1720  }
1721 
1722  private:
1723  // |capacity_|: The number of object-area bytes (i.e., not including page
1724  // bookkeeping structures) currently in the space.
1725  // During evacuation capacity of the main spaces is accessed from multiple
1726  // threads to check the old generation hard limit.
1727  std::atomic<size_t> capacity_;
1728 
1729  // |max_capacity_|: The maximum capacity ever observed.
1730  size_t max_capacity_;
1731 
1732  // |size_|: The number of allocated bytes.
1733  size_t size_;
1734 
1735 #ifdef DEBUG
1736  std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
1737 #endif
1738 };
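// Balanced-update sketch (illustrative only; |page| contributes |page_area|
// object-area bytes, of which |bytes| get handed out and later freed again):
//   stats.IncreaseCapacity(page_area);
//   stats.IncreaseAllocatedBytes(bytes, page);
//   DCHECK_LE(stats.Size(), stats.Capacity());
//   stats.DecreaseAllocatedBytes(bytes, page);
//   stats.DecreaseCapacity(page_area);  // only valid once the size is gone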
1739 
1740 // A free list maintaining free blocks of memory. The free list is organized in
1741 // a way to encourage objects allocated around the same time to be near each
1742 // other. The normal way to allocate is intended to be by bumping a 'top'
1743 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1744 // find a new space to allocate from. This is done with the free list, which is
1745 // divided up into rough categories to cut down on waste. Having finer
1746 // categories would scatter allocation more.
1747 
1748 // The free list is organized in categories as follows:
1749 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1750 // allocation when categories >= small do not have entries anymore.
1751 // 11-31 words (tiny): The tiny blocks are only used for allocation when
1752 // categories >= small do not have entries anymore.
1753 // 32-255 words (small): Used for allocating free space between 1-31 words in
1754 // size.
1755 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1756 // in size.
1757 // 2048-16383 words (large): Used for allocating free space between 256-2047
1758 // words in size.
1759 // At least 16384 words (huge): This list is for objects of 2048 words or
1760 // larger. Empty pages are also added to this list.
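// For example (illustrative only, derived from the k*ListMax constants defined
// below): SelectFreeListCategoryType() maps a 100-word block to kSmall and a
// 300-word block to kMedium, while GuaranteedAllocatable(100 * kPointerSize)
// conservatively reports only kSmallAllocationMax (31 words), since entries in
// the small list are merely guaranteed to satisfy requests up to that size.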
1761 class V8_EXPORT_PRIVATE FreeList {
1762  public:
1763  // This method returns how much memory can be allocated after freeing
1764  // maximum_freed memory.
1765  static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1766  if (maximum_freed <= kTiniestListMax) {
1767  // Since we are not iterating over all list entries, we cannot guarantee
1768  // that we can find the maximum freed block in that free list.
1769  return 0;
1770  } else if (maximum_freed <= kTinyListMax) {
1771  return kTinyAllocationMax;
1772  } else if (maximum_freed <= kSmallListMax) {
1773  return kSmallAllocationMax;
1774  } else if (maximum_freed <= kMediumListMax) {
1775  return kMediumAllocationMax;
1776  } else if (maximum_freed <= kLargeListMax) {
1777  return kLargeAllocationMax;
1778  }
1779  return maximum_freed;
1780  }
1781 
1782  static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1783  if (size_in_bytes <= kTiniestListMax) {
1784  return kTiniest;
1785  } else if (size_in_bytes <= kTinyListMax) {
1786  return kTiny;
1787  } else if (size_in_bytes <= kSmallListMax) {
1788  return kSmall;
1789  } else if (size_in_bytes <= kMediumListMax) {
1790  return kMedium;
1791  } else if (size_in_bytes <= kLargeListMax) {
1792  return kLarge;
1793  }
1794  return kHuge;
1795  }
1796 
1797  FreeList();
1798 
1799  // Adds a node on the free list. The block of size {size_in_bytes} starting
1800  // at {start} is placed on the free list. The return value is the number of
1801  // bytes that were not added to the free list because the freed memory block
1802  // was too small. Bookkeeping information will be written to the block, i.e.,
1803  // its contents will be destroyed. The start address should be word aligned,
1804  // and the size should be a non-zero multiple of the word size.
1805  size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1806 
1807  // Allocates a free space node from the free list of at least size_in_bytes
1808  // bytes. Returns the actual node size in node_size, which can be bigger than
1809  // size_in_bytes. This method returns nullptr if the allocation request cannot be
1810  // handled by the free list.
1811  V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
1812  size_t* node_size);
1813 
1814  // Clear the free list.
1815  void Reset();
1816 
1817  void ResetStats() {
1818  wasted_bytes_ = 0;
1819  ForAllFreeListCategories(
1820  [](FreeListCategory* category) { category->ResetStats(); });
1821  }
1822 
1823  // Return the number of bytes available on the free list.
1824  size_t Available() {
1825  size_t available = 0;
1826  ForAllFreeListCategories([&available](FreeListCategory* category) {
1827  available += category->available();
1828  });
1829  return available;
1830  }
1831 
1832  bool IsEmpty() {
1833  bool empty = true;
1834  ForAllFreeListCategories([&empty](FreeListCategory* category) {
1835  if (!category->is_empty()) empty = false;
1836  });
1837  return empty;
1838  }
1839 
1840  // Used after booting the VM.
1841  void RepairLists(Heap* heap);
1842 
1843  size_t EvictFreeListItems(Page* page);
1844  bool ContainsPageFreeListItems(Page* page);
1845 
1846  size_t wasted_bytes() { return wasted_bytes_; }
1847 
1848  template <typename Callback>
1849  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1850  FreeListCategory* current = categories_[type];
1851  while (current != nullptr) {
1852  FreeListCategory* next = current->next();
1853  callback(current);
1854  current = next;
1855  }
1856  }
1857 
1858  template <typename Callback>
1859  void ForAllFreeListCategories(Callback callback) {
1860  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1861  ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1862  }
1863  }
1864 
1865  bool AddCategory(FreeListCategory* category);
1866  void RemoveCategory(FreeListCategory* category);
1867  void PrintCategories(FreeListCategoryType type);
1868 
1869  // Returns a page containing an entry for a given type, or nullptr otherwise.
1870  inline Page* GetPageForCategoryType(FreeListCategoryType type);
1871 
1872 #ifdef DEBUG
1873  size_t SumFreeLists();
1874  bool IsVeryLong();
1875 #endif
1876 
1877  private:
1878  class FreeListCategoryIterator {
1879  public:
1880  FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1881  : current_(free_list->categories_[type]) {}
1882 
1883  bool HasNext() { return current_ != nullptr; }
1884 
1885  FreeListCategory* Next() {
1886  DCHECK(HasNext());
1887  FreeListCategory* tmp = current_;
1888  current_ = current_->next();
1889  return tmp;
1890  }
1891 
1892  private:
1893  FreeListCategory* current_;
1894  };
1895 
1896  // The size range of blocks, in bytes.
1897  static const size_t kMinBlockSize = 3 * kPointerSize;
1898 
1899  // This is a conservative upper bound. The actual maximum block size takes
1900  // padding and alignment of data and code pages into account.
1901  static const size_t kMaxBlockSize = Page::kPageSize;
1902 
1903  static const size_t kTiniestListMax = 0xa * kPointerSize;
1904  static const size_t kTinyListMax = 0x1f * kPointerSize;
1905  static const size_t kSmallListMax = 0xff * kPointerSize;
1906  static const size_t kMediumListMax = 0x7ff * kPointerSize;
1907  static const size_t kLargeListMax = 0x3fff * kPointerSize;
1908  static const size_t kTinyAllocationMax = kTiniestListMax;
1909  static const size_t kSmallAllocationMax = kTinyListMax;
1910  static const size_t kMediumAllocationMax = kSmallListMax;
1911  static const size_t kLargeAllocationMax = kMediumListMax;
1912 
1913  // Walks all available categories for a given |type| and tries to retrieve
1914  // a node. Returns nullptr if the category is empty.
1915  FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
1916  size_t* node_size);
1917 
1918  // Tries to retrieve a node from the first category in a given |type|.
1919  // Returns nullptr if the category is empty or the top entry is smaller
1920  // than minimum_size.
1921  FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
1922  size_t* node_size);
1923 
1924  // Searches a given |type| for a node of at least |minimum_size|.
1925  FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1926  size_t minimum_size);
1927 
1928  // The tiny categories are not used for fast allocation.
1929  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1930  size_t size_in_bytes) {
1931  if (size_in_bytes <= kSmallAllocationMax) {
1932  return kSmall;
1933  } else if (size_in_bytes <= kMediumAllocationMax) {
1934  return kMedium;
1935  } else if (size_in_bytes <= kLargeAllocationMax) {
1936  return kLarge;
1937  }
1938  return kHuge;
1939  }
1940 
1941  FreeListCategory* top(FreeListCategoryType type) const {
1942  return categories_[type];
1943  }
1944 
1945  std::atomic<size_t> wasted_bytes_;
1946  FreeListCategory* categories_[kNumberOfCategories];
1947 
1948  friend class FreeListCategory;
1949 };
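// Usage sketch (illustrative only; |start| and |size_in_bytes| describe a
// freed, word-aligned block inside a page owned by the same space):
//   size_t wasted = free_list.Free(start, size_in_bytes, kLinkCategory);
//   size_t node_size = 0;
//   FreeSpace* node = free_list.Allocate(size_in_bytes - wasted, &node_size);
//   // |node| may be nullptr; on success, node_size >= size_in_bytes - wasted.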
1950 
1951 // LocalAllocationBuffer represents a linear allocation area that is created
1952 // from a given {AllocationResult} and can be used to allocate memory without
1953 // synchronization.
1954 //
1955 // The buffer is properly closed upon destruction and reassignment.
1956 // Example:
1957 // {
1958 // AllocationResult result = ...;
1959 // LocalAllocationBuffer a(heap, result, size);
1960 // LocalAllocationBuffer b = a;
1961 // CHECK(!a.IsValid());
1962 // CHECK(b.IsValid());
1963 // // {a} is invalid now and cannot be used for further allocations.
1964 // }
1965 // // Since {b} went out of scope, the LAB is closed, resulting in creating a
1966 // // filler object for the remaining area.
1967 class LocalAllocationBuffer {
1968  public:
1969  // Indicates that a buffer cannot be used for allocations anymore. Can result
1970  // from either reassigning a buffer, or trying to construct it from an
1971  // invalid {AllocationResult}.
1972  static LocalAllocationBuffer InvalidBuffer() {
1973  return LocalAllocationBuffer(
1974  nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
1975  }
1976 
1977  // Creates a new LAB from a given {AllocationResult}. Results in
1978  // InvalidBuffer if the result indicates a retry.
1979  static inline LocalAllocationBuffer FromResult(Heap* heap,
1980  AllocationResult result,
1981  intptr_t size);
1982 
1983  ~LocalAllocationBuffer() { Close(); }
1984 
1985  // Convert to C++11 move-semantics once allowed by the style guide.
1986  LocalAllocationBuffer(const LocalAllocationBuffer& other);
1987  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1988 
1989  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
1990  int size_in_bytes, AllocationAlignment alignment);
1991 
1992  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
1993 
1994  // Try to merge LABs, which is only possible when they are adjacent in memory.
1995  // Returns true if the merge was successful, false otherwise.
1996  inline bool TryMerge(LocalAllocationBuffer* other);
1997 
1998  inline bool TryFreeLast(HeapObject* object, int object_size);
1999 
2000  // Close a LAB, effectively invalidating it. Returns the unused area.
2001  LinearAllocationArea Close();
2002 
2003  private:
2004  LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
2005 
2006  Heap* heap_;
2007  LinearAllocationArea allocation_info_;
2008 };
2009 
2010 class SpaceWithLinearArea : public Space {
2011  public:
2012  SpaceWithLinearArea(Heap* heap, AllocationSpace id)
2013  : Space(heap, id), top_on_previous_step_(0) {
2014  allocation_info_.Reset(kNullAddress, kNullAddress);
2015  }
2016 
2017  virtual bool SupportsInlineAllocation() = 0;
2018 
2019  // Returns the allocation pointer in this space.
2020  Address top() { return allocation_info_.top(); }
2021  Address limit() { return allocation_info_.limit(); }
2022 
2023  // The allocation top address.
2024  Address* allocation_top_address() { return allocation_info_.top_address(); }
2025 
2026  // The allocation limit address.
2027  Address* allocation_limit_address() {
2028  return allocation_info_.limit_address();
2029  }
2030 
2031  V8_EXPORT_PRIVATE void AddAllocationObserver(
2032  AllocationObserver* observer) override;
2033  V8_EXPORT_PRIVATE void RemoveAllocationObserver(
2034  AllocationObserver* observer) override;
2035  V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
2036  V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
2037 
2038  // When allocation observers are active we may use a lower limit to allow the
2039  // observers to 'interrupt' earlier than the natural limit. Given a linear
2040  // area bounded by [start, end), this function computes the limit to use to
2041  // allow proper observation based on existing observers. min_size specifies
2042  // the minimum size that the limited area should have.
2043  Address ComputeLimit(Address start, Address end, size_t min_size);
2044  V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
2045  size_t min_size) = 0;
2046 
2047  protected:
2048  // If we are doing inline allocation in steps, this method performs the 'step'
2049  // operation. top is the memory address of the bump pointer at the last
2050  // inline allocation (i.e. it determines the number of bytes actually
2051  // allocated since the last step.) top_for_next_step is the address of the
2052  // bump pointer where the next byte is going to be allocated from. top and
2053  // top_for_next_step may be different when we cross a page boundary or reset
2054  // the space.
2055  // TODO(ofrobots): clarify the precise difference between this and
2056  // Space::AllocationStep.
2057  void InlineAllocationStep(Address top, Address top_for_next_step,
2058  Address soon_object, size_t size);
2059  V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
2060 
2061  // TODO(ofrobots): make these private after refactoring is complete.
2062  LinearAllocationArea allocation_info_;
2063  Address top_on_previous_step_;
2064 };
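// Observer sketch (illustrative only): with an AllocationObserver whose step
// size is 1 KB, ComputeLimit() may return a limit well below the real end of
// the linear area; an allocation that would cross that lowered limit takes the
// slow path, InlineAllocationStep() notifies the observers, and the limit is
// then advanced again via UpdateInlineAllocationLimit().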
2065 
2066 class V8_EXPORT_PRIVATE PagedSpace
2067  : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
2068  public:
2069  typedef PageIterator iterator;
2070 
2071  static const size_t kCompactionMemoryWanted = 500 * KB;
2072 
2073  // Creates a space with an id.
2074  PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2075 
2076  ~PagedSpace() override { TearDown(); }
2077 
2078  // Checks whether an object/address is in this space.
2079  inline bool Contains(Address a);
2080  inline bool Contains(Object* o);
2081  bool ContainsSlow(Address addr);
2082 
2083  // Does the space need executable memory?
2084  Executability executable() { return executable_; }
2085 
2086  // Prepares for a mark-compact GC.
2087  void PrepareForMarkCompact();
2088 
2089  // Current capacity without growing (Size() + Available()).
2090  size_t Capacity() { return accounting_stats_.Capacity(); }
2091 
2092  // Approximate amount of physical memory committed for this space.
2093  size_t CommittedPhysicalMemory() override;
2094 
2095  void ResetFreeListStatistics();
2096 
2097  // Sets the capacity, the available space and the wasted space to zero.
2098  // The stats are rebuilt during sweeping by adding each page to the
2099  // capacity and the size when it is encountered. As free spaces are
2100  // discovered during the sweeping they are subtracted from the size and added
2101  // to the available and wasted totals.
2102  void ClearStats() {
2103  accounting_stats_.ClearSize();
2104  free_list_.ResetStats();
2105  ResetFreeListStatistics();
2106  }
2107 
2108  // Available bytes without growing. These are the bytes on the free list.
2109  // The bytes in the linear allocation area are not included in this total
2110  // because updating the stats would slow down allocation. New pages are
2111  // immediately added to the free list so they show up here.
2112  size_t Available() override { return free_list_.Available(); }
2113 
2114  // Allocated bytes in this space. Garbage bytes that were not found due to
2115  // concurrent sweeping are counted as being allocated! The bytes in the
2116  // current linear allocation area (between top and limit) are also counted
2117  // here.
2118  size_t Size() override { return accounting_stats_.Size(); }
2119 
2120  // As size, but the bytes in lazily swept pages are estimated and the bytes
2121  // in the current linear allocation area are not included.
2122  size_t SizeOfObjects() override;
2123 
2124  // Wasted bytes in this space. These are just the bytes that were thrown away
2125  // due to being too small to use for allocation.
2126  virtual size_t Waste() { return free_list_.wasted_bytes(); }
2127 
2128  enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2129 
2130  // Allocate the requested number of bytes in the space if possible, return a
2131  // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2132  // to be manually updated later.
2133  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
2134  int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2135 
2136  // Allocate the requested number of bytes in the space double aligned if
2137  // possible, return a failure object if not.
2138  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2139  int size_in_bytes, AllocationAlignment alignment);
2140 
2141  // Allocate the requested number of bytes in the space and consider allocation
2142  // alignment if needed.
2143  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
2144  int size_in_bytes, AllocationAlignment alignment);
2145 
2146  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
2147  if (size_in_bytes == 0) return 0;
2148  heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2149  ClearRecordedSlots::kNo);
2150  if (mode == SpaceAccountingMode::kSpaceAccounted) {
2151  return AccountedFree(start, size_in_bytes);
2152  } else {
2153  return UnaccountedFree(start, size_in_bytes);
2154  }
2155  }
2156 
2157  // Give a block of memory to the space's free list. It might be added to
2158  // the free list or accounted as waste.
2159  // If add_to_freelist is false then only the accounting stats are updated and
2160  // no attempt is made to add the area to the free list.
2161  size_t AccountedFree(Address start, size_t size_in_bytes) {
2162  size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2163  Page* page = Page::FromAddress(start);
2164  accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
2165  DCHECK_GE(size_in_bytes, wasted);
2166  return size_in_bytes - wasted;
2167  }
2168 
2169  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2170  size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2171  DCHECK_GE(size_in_bytes, wasted);
2172  return size_in_bytes - wasted;
2173  }
2174 
2175  inline bool TryFreeLast(HeapObject* object, int object_size);
2176 
2177  void ResetFreeList();
2178 
2179  // Empty space linear allocation area, returning unused area to free list.
2180  void FreeLinearAllocationArea();
2181 
2182  void MarkLinearAllocationAreaBlack();
2183  void UnmarkLinearAllocationArea();
2184 
2185  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
2186  accounting_stats_.DecreaseAllocatedBytes(bytes, page);
2187  }
2188  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
2189  accounting_stats_.IncreaseAllocatedBytes(bytes, page);
2190  }
2191  void DecreaseCapacity(size_t bytes) {
2192  accounting_stats_.DecreaseCapacity(bytes);
2193  }
2194  void IncreaseCapacity(size_t bytes) {
2195  accounting_stats_.IncreaseCapacity(bytes);
2196  }
2197 
2198  void RefineAllocatedBytesAfterSweeping(Page* page);
2199 
2200  Page* InitializePage(MemoryChunk* chunk, Executability executable);
2201 
2202  void ReleasePage(Page* page);
2203 
2204  // Adds the page to this space and returns the number of bytes added to the
2205  // free list of the space.
2206  size_t AddPage(Page* page);
2207  void RemovePage(Page* page);
2208  // Remove a page if it has at least |size_in_bytes| bytes available that can
2209  // be used for allocation.
2210  Page* RemovePageSafe(int size_in_bytes);
2211 
2212  void SetReadAndExecutable();
2213  void SetReadAndWritable();
2214 
2215 #ifdef VERIFY_HEAP
2216  // Verify integrity of this space.
2217  virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
2218 
2219  void VerifyLiveBytes();
2220 
2221  // Overridden by subclasses to verify space-specific object
2222  // properties (e.g., only maps or free-list nodes are in map space).
2223  virtual void VerifyObject(HeapObject* obj) {}
2224 #endif
2225 
2226 #ifdef DEBUG
2227  void VerifyCountersAfterSweeping();
2228  void VerifyCountersBeforeConcurrentSweeping();
2229  // Print meta info and objects in this space.
2230  void Print() override;
2231 
2232  // Report code object related statistics
2233  static void ReportCodeStatistics(Isolate* isolate);
2234  static void ResetCodeStatistics(Isolate* isolate);
2235 #endif
2236 
2237  bool CanExpand(size_t size);
2238 
2239  // Returns the number of total pages in this space.
2240  int CountTotalPages();
2241 
2242  // Return size of allocatable area on a page in this space.
2243  inline int AreaSize() { return static_cast<int>(area_size_); }
2244 
2245  virtual bool is_local() { return false; }
2246 
2247  // Merges {other} into the current space. Note that this modifies {other},
2248  // e.g., removes its bump pointer area and resets statistics.
2249  void MergeCompactionSpace(CompactionSpace* other);
2250 
2251  // Refills the free list from the corresponding free list filled by the
2252  // sweeper.
2253  virtual void RefillFreeList();
2254 
2255  FreeList* free_list() { return &free_list_; }
2256 
2257  base::Mutex* mutex() { return &space_mutex_; }
2258 
2259  inline void UnlinkFreeListCategories(Page* page);
2260  inline size_t RelinkFreeListCategories(Page* page);
2261 
2262  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2263 
2264  iterator begin() { return iterator(first_page()); }
2265  iterator end() { return iterator(nullptr); }
2266 
2267  // Shrink immortal immovable pages of the space to be exactly the size needed
2268  // using the high water mark.
2269  void ShrinkImmortalImmovablePages();
2270 
2271  size_t ShrinkPageToHighWaterMark(Page* page);
2272 
2273  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2274 
2275  void SetLinearAllocationArea(Address top, Address limit);
2276 
2277  private:
2278  // Set space linear allocation area.
2279  void SetTopAndLimit(Address top, Address limit) {
2280  DCHECK(top == limit ||
2281  Page::FromAddress(top) == Page::FromAddress(limit - 1));
2282  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2283  allocation_info_.Reset(top, limit);
2284  }
2285  void DecreaseLimit(Address new_limit);
2286  void UpdateInlineAllocationLimit(size_t min_size) override;
2287  bool SupportsInlineAllocation() override {
2288  return identity() == OLD_SPACE && !is_local();
2289  }
2290 
2291  protected:
2292  // PagedSpaces that should be included in snapshots have different, i.e.,
2293  // smaller, initial pages.
2294  virtual bool snapshotable() { return true; }
2295 
2296  bool HasPages() { return first_page() != nullptr; }
2297 
2298  // Cleans up the space, frees all pages in this space except those belonging
2299  // to the initial chunk, uncommits addresses in the initial chunk.
2300  void TearDown();
2301 
2302  // Expands the space by allocating a fixed number of pages. Returns false if
2303  // it cannot allocate the requested number of pages from the OS, or if the
2304  // hard heap size limit has been hit.
2305  bool Expand();
2306 
2307  // Sets up a linear allocation area that fits the given number of bytes.
2308  // Returns false if there is not enough space and the caller has to retry
2309  // after collecting garbage.
2310  inline bool EnsureLinearAllocationArea(int size_in_bytes);
2311  // Allocates an object from the linear allocation area. Assumes that the
2312  // linear allocation area is large enough to fit the object.
2313  inline HeapObject* AllocateLinearly(int size_in_bytes);
2314  // Tries to allocate an aligned object from the linear allocation area.
2315  // Returns nullptr if the linear allocation area does not fit the object.
2316  // Otherwise, returns the object pointer and writes the allocation size
2317  // (object size + alignment filler size) to the size_in_bytes.
2318  inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
2319  AllocationAlignment alignment);
2320 
2321  V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
2322  size_t size_in_bytes);
2323 
2324  // If sweeping is still in progress try to sweep unswept pages. If that is
2325  // not successful, wait for the sweeper threads and retry free-list
2326  // allocation. Returns false if there is not enough space and the caller
2327  // has to retry after collecting garbage.
2328  V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
2329 
2330  // Slow path of AllocateRaw. This function is space-dependent. Returns false
2331  // if there is not enough space and the caller has to retry after
2332  // collecting garbage.
2333  V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
2334  int size_in_bytes);
2335 
2336  // Implementation of SlowRefillLinearAllocationArea. Returns false if there
2337  // is not enough space and the caller has to retry after collecting garbage.
2338  V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
2339  int size_in_bytes);
2340 
2341  Executability executable_;
2342 
2343  size_t area_size_;
2344 
2345  // Accounting information for this space.
2346  AllocationStats accounting_stats_;
2347 
2348  // The space's free list.
2349  FreeList free_list_;
2350 
2351  // Mutex guarding any concurrent access to the space.
2352  base::Mutex space_mutex_;
2353 
2354  friend class IncrementalMarking;
2355  friend class MarkCompactCollector;
2356 
2357  // Used in cctest.
2358  friend class heap::HeapTester;
2359 };
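// Usage sketch (illustrative only; error handling elided and |size_in_bytes|
// assumed to be word-aligned):
//   AllocationResult r = old_space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* obj;
//   if (r.To(&obj)) {
//     // |obj| is uninitialized memory inside |old_space|; the caller must
//     // install a map before the GC can observe the object.
//   } else {
//     // Not enough space: collect garbage and retry.
//   }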
2360 
2361 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2362 
2363 // -----------------------------------------------------------------------------
2364 // SemiSpace in young generation
2365 //
2366 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2367 // The mark-compact collector uses the memory of the first page in the from
2368 // space as a marking stack when tracing live objects.
2369 class SemiSpace : public Space {
2370  public:
2371  typedef PageIterator iterator;
2372 
2373  static void Swap(SemiSpace* from, SemiSpace* to);
2374 
2375  SemiSpace(Heap* heap, SemiSpaceId semispace)
2376  : Space(heap, NEW_SPACE),
2377  current_capacity_(0),
2378  maximum_capacity_(0),
2379  minimum_capacity_(0),
2380  age_mark_(kNullAddress),
2381  committed_(false),
2382  id_(semispace),
2383  current_page_(nullptr),
2384  pages_used_(0) {}
2385 
2386  inline bool Contains(HeapObject* o);
2387  inline bool Contains(Object* o);
2388  inline bool ContainsSlow(Address a);
2389 
2390  void SetUp(size_t initial_capacity, size_t maximum_capacity);
2391  void TearDown();
2392 
2393  bool Commit();
2394  bool Uncommit();
2395  bool is_committed() { return committed_; }
2396 
2397  // Grow the semispace to the new capacity. The new capacity requested must
2398  // be larger than the current capacity and less than the maximum capacity.
2399  bool GrowTo(size_t new_capacity);
2400 
2401  // Shrinks the semispace to the new capacity. The new capacity requested
2402  // must be more than the amount of used memory in the semispace and less
2403  // than the current capacity.
2404  bool ShrinkTo(size_t new_capacity);
2405 
2406  bool EnsureCurrentCapacity();
2407 
2408  Address space_end() { return memory_chunk_list_.back()->area_end(); }
2409 
2410  // Returns the start address of the first page of the space.
2411  Address space_start() {
2412  DCHECK_NE(memory_chunk_list_.front(), nullptr);
2413  return memory_chunk_list_.front()->area_start();
2414  }
2415 
2416  Page* current_page() { return current_page_; }
2417  int pages_used() { return pages_used_; }
2418 
2419  // Returns the start address of the current page of the space.
2420  Address page_low() { return current_page_->area_start(); }
2421 
2422  // Returns one past the end address of the current page of the space.
2423  Address page_high() { return current_page_->area_end(); }
2424 
2425  bool AdvancePage() {
2426  Page* next_page = current_page_->next_page();
2427  // We cannot expand if we have already reached the maximum number of pages.
2428  // Note that this check already has to account for the next page, as we
2429  // could potentially fill the whole page after advancing.
2430  const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2431  if (next_page == nullptr || reached_max_pages) {
2432  return false;
2433  }
2434  current_page_ = next_page;
2435  pages_used_++;
2436  return true;
2437  }
2438 
2439  // Resets the space to using the first page.
2440  void Reset();
2441 
2442  void RemovePage(Page* page);
2443  void PrependPage(Page* page);
2444 
2445  Page* InitializePage(MemoryChunk* chunk, Executability executable);
2446 
2447  // Age mark accessors.
2448  Address age_mark() { return age_mark_; }
2449  void set_age_mark(Address mark);
2450 
2451  // Returns the current capacity of the semispace.
2452  size_t current_capacity() { return current_capacity_; }
2453 
2454  // Returns the maximum capacity of the semispace.
2455  size_t maximum_capacity() { return maximum_capacity_; }
2456 
2457  // Returns the initial capacity of the semispace.
2458  size_t minimum_capacity() { return minimum_capacity_; }
2459 
2460  SemiSpaceId id() { return id_; }
2461 
2462  // Approximate amount of physical memory committed for this space.
2463  size_t CommittedPhysicalMemory() override;
2464 
2465  // If we don't have these here then SemiSpace will be abstract. However
2466  // they should never be called:
2467 
2468  size_t Size() override {
2469  UNREACHABLE();
2470  }
2471 
2472  size_t SizeOfObjects() override { return Size(); }
2473 
2474  size_t Available() override {
2475  UNREACHABLE();
2476  }
2477 
2478  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2479  Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
2480 
2481  iterator begin() { return iterator(first_page()); }
2482  iterator end() { return iterator(nullptr); }
2483 
2484  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2485 
2486 #ifdef DEBUG
2487  void Print() override;
2488  // Validates a range of addresses in a SemiSpace.
2489  // The "from" address must be on a page prior to the "to" address,
2490  // in the linked page order, or it must be earlier on the same page.
2491  static void AssertValidRange(Address from, Address to);
2492 #else
2493  // Do nothing.
2494  inline static void AssertValidRange(Address from, Address to) {}
2495 #endif
2496 
2497 #ifdef VERIFY_HEAP
2498  virtual void Verify();
2499 #endif
2500 
2501  private:
2502  void RewindPages(int num_pages);
2503 
2504  inline int max_pages() {
2505  return static_cast<int>(current_capacity_ / Page::kPageSize);
2506  }
2507 
2508  // Copies the flags into the masked positions on all pages in the space.
2509  void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2510 
2511  // The currently committed space capacity.
2512  size_t current_capacity_;
2513 
2514  // The maximum capacity that can be used by this space. A space cannot grow
2515  // beyond that size.
2516  size_t maximum_capacity_;
2517 
2518  // The minimum capacity for the space. A space cannot shrink below this size.
2519  size_t minimum_capacity_;
2520 
2521  // Used to govern object promotion during mark-compact collection.
2522  Address age_mark_;
2523 
2524  bool committed_;
2525  SemiSpaceId id_;
2526 
2527  Page* current_page_;
2528 
2529  int pages_used_;
2530 
2531  friend class NewSpace;
2532  friend class SemiSpaceIterator;
2533 };
2534 
2535 
2536 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2537 // semispace of the heap's new space. It iterates over the objects in the
2538 // semispace from a given start address (defaulting to the bottom of the
2539 // semispace) to the top of the semispace. New objects allocated after the
2540 // iterator is created are not iterated.
2541 class SemiSpaceIterator : public ObjectIterator {
2542  public:
2543  // Create an iterator over the allocated objects in the given to-space.
2544  explicit SemiSpaceIterator(NewSpace* space);
2545 
2546  inline HeapObject* Next() override;
2547 
2548  private:
2549  void Initialize(Address start, Address end);
2550 
2551  // The current iteration point.
2552  Address current_;
2553  // The end of iteration.
2554  Address limit_;
2555 };
2556 
2557 // -----------------------------------------------------------------------------
2558 // The young generation space.
2559 //
2560 // The new space consists of a contiguous pair of semispaces. It simply
2561 // forwards most functions to the appropriate semispace.
2562 
2563 class NewSpace : public SpaceWithLinearArea {
2564  public:
2565  typedef PageIterator iterator;
2566 
2567  NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
2568  size_t initial_semispace_capacity, size_t max_semispace_capacity);
2569 
2570  ~NewSpace() override { TearDown(); }
2571 
2572  inline bool Contains(HeapObject* o);
2573  inline bool ContainsSlow(Address a);
2574  inline bool Contains(Object* o);
2575 
2576  // Tears down the space. Heap memory was not allocated by the space, so it
2577  // is not deallocated here.
2578  void TearDown();
2579 
2580  // Flip the pair of spaces.
2581  void Flip();
2582 
2583  // Grow the capacity of the semispaces. Assumes that they are not at
2584  // their maximum capacity.
2585  void Grow();
2586 
2587  // Shrink the capacity of the semispaces.
2588  void Shrink();
2589 
2590  // Return the allocated bytes in the active semispace.
2591  size_t Size() override {
2592  DCHECK_GE(top(), to_space_.page_low());
2593  return to_space_.pages_used() *
2594  MemoryChunkLayout::AllocatableMemoryInDataPage() +
2595  static_cast<size_t>(top() - to_space_.page_low());
2596  }
2597 
2598  size_t SizeOfObjects() override { return Size(); }
2599 
2600  // Return the allocatable capacity of a semispace.
2601  size_t Capacity() {
2602  SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2603  return (to_space_.current_capacity() / Page::kPageSize) *
2604  MemoryChunkLayout::AllocatableMemoryInDataPage();
2605  }
2606 
2607  // Return the current size of a semispace, allocatable and non-allocatable
2608  // memory.
2609  size_t TotalCapacity() {
2610  DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2611  return to_space_.current_capacity();
2612  }
2613 
2614  // Committed memory for NewSpace is the committed memory of both semi-spaces
2615  // combined.
2616  size_t CommittedMemory() override {
2617  return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2618  }
2619 
2620  size_t MaximumCommittedMemory() override {
2621  return from_space_.MaximumCommittedMemory() +
2622  to_space_.MaximumCommittedMemory();
2623  }
2624 
2625  // Approximate amount of physical memory committed for this space.
2626  size_t CommittedPhysicalMemory() override;
2627 
2628  // Return the available bytes without growing.
2629  size_t Available() override {
2630  DCHECK_GE(Capacity(), Size());
2631  return Capacity() - Size();
2632  }
2633 
2634  size_t ExternalBackingStoreBytes(
2635  ExternalBackingStoreType type) const override {
2636  DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
2637  return to_space_.ExternalBackingStoreBytes(type);
2638  }
2639 
2640  size_t AllocatedSinceLastGC() {
2641  const Address age_mark = to_space_.age_mark();
2642  DCHECK_NE(age_mark, kNullAddress);
2643  DCHECK_NE(top(), kNullAddress);
2644  Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2645  Page* const last_page = Page::FromAllocationAreaAddress(top());
2646  Page* current_page = age_mark_page;
2647  size_t allocated = 0;
2648  if (current_page != last_page) {
2649  DCHECK_EQ(current_page, age_mark_page);
2650  DCHECK_GE(age_mark_page->area_end(), age_mark);
2651  allocated += age_mark_page->area_end() - age_mark;
2652  current_page = current_page->next_page();
2653  } else {
2654  DCHECK_GE(top(), age_mark);
2655  return top() - age_mark;
2656  }
2657  while (current_page != last_page) {
2658  DCHECK_NE(current_page, age_mark_page);
2659  allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
2660  current_page = current_page->next_page();
2661  }
2662  DCHECK_GE(top(), current_page->area_start());
2663  allocated += top() - current_page->area_start();
2664  DCHECK_LE(allocated, Size());
2665  return allocated;
2666  }
2667 
2668  void MovePageFromSpaceToSpace(Page* page) {
2669  DCHECK(page->InFromSpace());
2670  from_space_.RemovePage(page);
2671  to_space_.PrependPage(page);
2672  }
2673 
2674  bool Rebalance();
2675 
2676  // Return the maximum capacity of a semispace.
2677  size_t MaximumCapacity() {
2678  DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2679  return to_space_.maximum_capacity();
2680  }
2681 
2682  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2683 
2684  // Returns the initial capacity of a semispace.
2685  size_t InitialTotalCapacity() {
2686  DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2687  return to_space_.minimum_capacity();
2688  }
2689 
2690  void ResetOriginalTop() {
2691  DCHECK_GE(top(), original_top_);
2692  DCHECK_LE(top(), original_limit_);
2693  original_top_.store(top(), std::memory_order_release);
2694  }
2695 
2696  Address original_top_acquire() {
2697  return original_top_.load(std::memory_order_acquire);
2698  }
2699  Address original_limit_relaxed() {
2700  return original_limit_.load(std::memory_order_relaxed);
2701  }
2702 
2703  // Return the address of the first allocatable address in the active
2704  // semispace. This may be the address where the first object resides.
2705  Address first_allocatable_address() { return to_space_.space_start(); }
2706 
2707  // Get the age mark of the inactive semispace.
2708  Address age_mark() { return from_space_.age_mark(); }
2709  // Set the age mark in the active semispace.
2710  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2711 
2712  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2713  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
2714 
2715  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2716  AllocateRawUnaligned(int size_in_bytes);
2717 
2718  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2719  AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
2720 
2721  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
2722  int size_in_bytes, AllocationAlignment alignment);
2723 
2724  // Reset the allocation pointer to the beginning of the active semispace.
2725  void ResetLinearAllocationArea();
2726 
2727  // When inline allocation stepping is active, either because of incremental
2728  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2729  // inline allocation every once in a while. This is done by setting
2730  // allocation_info_.limit to be lower than the actual limit and increasing
2731  // it in steps to guarantee that the observers are notified periodically.
2732  void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
2733 
2734  inline bool ToSpaceContainsSlow(Address a);
2735  inline bool ToSpaceContains(Object* o);
2736  inline bool FromSpaceContains(Object* o);
2737 
2738  // Try to switch the active semispace to a new, empty, page.
2739  // Returns false if this isn't possible or reasonable (i.e., there
2740  // are no pages, or the current page is already empty), or true
2741  // if successful.
2742  bool AddFreshPage();
2743  bool AddFreshPageSynchronized();
2744 
2745 #ifdef VERIFY_HEAP
2746  // Verify the active semispace.
2747  virtual void Verify(Isolate* isolate);
2748 #endif
2749 
2750 #ifdef DEBUG
2751  // Print the active semispace.
2752  void Print() override { to_space_.Print(); }
2753 #endif
2754 
2755  // Return whether the operation succeeded.
2756  bool CommitFromSpaceIfNeeded() {
2757  if (from_space_.is_committed()) return true;
2758  return from_space_.Commit();
2759  }
2760 
2761  bool UncommitFromSpace() {
2762  if (!from_space_.is_committed()) return true;
2763  return from_space_.Uncommit();
2764  }
2765 
2766  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2767 
2768  SemiSpace* active_space() { return &to_space_; }
2769 
2770  Page* first_page() { return to_space_.first_page(); }
2771  Page* last_page() { return to_space_.last_page(); }
2772 
2773  iterator begin() { return to_space_.begin(); }
2774  iterator end() { return to_space_.end(); }
2775 
2776  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2777 
2778  SemiSpace& from_space() { return from_space_; }
2779  SemiSpace& to_space() { return to_space_; }
2780 
2781  private:
2782  // Update linear allocation area to match the current to-space page.
2783  void UpdateLinearAllocationArea();
2784 
2785  base::Mutex mutex_;
2786 
2787  // The top and the limit at the time of setting the linear allocation area.
2788  // These values can be accessed by background tasks.
2789  std::atomic<Address> original_top_;
2790  std::atomic<Address> original_limit_;
2791 
2792  // The semispaces.
2793  SemiSpace to_space_;
2794  SemiSpace from_space_;
2795  VirtualMemory reservation_;
2796 
2797  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2798  bool SupportsInlineAllocation() override { return true; }
2799 
2800  friend class SemiSpaceIterator;
2801 };
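// Usage sketch (illustrative only; same AllocationResult protocol as for the
// paged spaces above, but served from the active semispace):
//   AllocationResult r = new_space->AllocateRaw(size_in_bytes, kWordAligned);
//   HeapObject* obj;
//   if (!r.To(&obj)) {
//     // The current to-space page is exhausted; a scavenge (or AddFreshPage)
//     // is needed before retrying.
//   }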
2802 
2803 class PauseAllocationObserversScope {
2804  public:
2805  explicit PauseAllocationObserversScope(Heap* heap);
2806  ~PauseAllocationObserversScope();
2807 
2808  private:
2809  Heap* heap_;
2810  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2811 };
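// Usage sketch (illustrative only): observers on all spaces are paused for the
// lifetime of the scope and resumed when it is destroyed.
//   {
//     PauseAllocationObserversScope pause(heap);
//     // allocations performed here do not trigger observer steps
//   }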
2812 
2813 // -----------------------------------------------------------------------------
2814 // Compaction space that is used temporarily during compaction.
2815 
2816 class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
2817  public:
2818  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2819  : PagedSpace(heap, id, executable) {}
2820 
2821  bool is_local() override { return true; }
2822 
2823  protected:
2824  // The space is temporary and not included in any snapshots.
2825  bool snapshotable() override { return false; }
2826 
2827  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
2828  int size_in_bytes) override;
2829 
2830  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
2831  int size_in_bytes) override;
2832 };
2833 
2834 
2835 // A collection of |CompactionSpace|s used by a single compaction task.
2836 class CompactionSpaceCollection {
2837  public:
2838  explicit CompactionSpaceCollection(Heap* heap)
2839  : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2840  code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2841 
2842  CompactionSpace* Get(AllocationSpace space) {
2843  switch (space) {
2844  case OLD_SPACE:
2845  return &old_space_;
2846  case CODE_SPACE:
2847  return &code_space_;
2848  default:
2849  UNREACHABLE();
2850  }
2851  UNREACHABLE();
2852  }
2853 
2854  private:
2855  CompactionSpace old_space_;
2856  CompactionSpace code_space_;
2857 };
2858 
2859 // -----------------------------------------------------------------------------
2860 // Old generation regular object space.
2861 
2862 class OldSpace : public PagedSpace {
2863  public:
2864  // Creates an old space object. The constructor does not allocate pages
2865  // from OS.
2866  explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
2867 
2868  static bool IsAtPageStart(Address addr) {
2869  return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
2870  MemoryChunkLayout::ObjectStartOffsetInDataPage();
2871  }
2872 };
2873 
2874 // -----------------------------------------------------------------------------
2875 // Old generation code object space.
2876 
2877 class CodeSpace : public PagedSpace {
2878  public:
2879  // Creates a code space object. The constructor does not allocate pages
2880  // from OS.
2881  explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
2882 };
2883 
2884 // For contiguous spaces, top should be in the space (or at the end) and limit
2885 // should be the end of the space.
2886 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2887  SLOW_DCHECK((space).page_low() <= (info).top() && \
2888  (info).top() <= (space).page_high() && \
2889  (info).limit() <= (space).page_high())
2890 
2891 
2892 // -----------------------------------------------------------------------------
2893 // Old space for all map objects
2894 
2895 class MapSpace : public PagedSpace {
2896  public:
2897  // Creates a map space object.
2898  explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
2899 
2900  int RoundSizeDownToObjectAlignment(int size) override {
2901  if (base::bits::IsPowerOfTwo(Map::kSize)) {
2902  return RoundDown(size, Map::kSize);
2903  } else {
2904  return (size / Map::kSize) * Map::kSize;
2905  }
2906  }
2907 
2908 #ifdef VERIFY_HEAP
2909  void VerifyObject(HeapObject* obj) override;
2910 #endif
2911 };
2912 
2913 // -----------------------------------------------------------------------------
2914 // Read Only space for all Immortal Immovable and Immutable objects
2915 
2916 class ReadOnlySpace : public PagedSpace {
2917  public:
2918  class WritableScope {
2919  public:
2920  explicit WritableScope(ReadOnlySpace* space) : space_(space) {
2921  space_->MarkAsReadWrite();
2922  }
2923 
2924  ~WritableScope() { space_->MarkAsReadOnly(); }
2925 
2926  private:
2927  ReadOnlySpace* space_;
2928  };
2929 
2930  explicit ReadOnlySpace(Heap* heap);
2931 
2932  bool writable() const { return !is_marked_read_only_; }
2933 
2934  void ClearStringPaddingIfNeeded();
2935  void MarkAsReadOnly();
2936 
2937  // During boot the free_space_map is created, and afterwards we may need
2938  // to write it into the free list nodes that were already created.
2939  void RepairFreeListsAfterDeserialization();
2940 
2941  private:
2942  void MarkAsReadWrite();
2943  void SetPermissionsForPages(PageAllocator::Permission access);
2944 
2945  bool is_marked_read_only_ = false;
2946  //
2947  // String padding must be cleared just before serialization and therefore the
2948  // string padding in the space will already have been cleared if the space was
2949  // deserialized.
2950  bool is_string_padding_cleared_;
2951 };
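// Usage sketch (illustrative only): WritableScope temporarily lifts the
// read-only protection, e.g. while the deserializer patches objects.
//   {
//     ReadOnlySpace::WritableScope scope(read_only_space);
//     // pages are read-write here
//   }  // the destructor marks the pages read-only again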
2952 
2953 // -----------------------------------------------------------------------------
2954 // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
2955 // managed by the large object space.
2956 // Large objects do not move during garbage collections.
2957 
2958 class LargeObjectSpace : public Space {
2959  public:
2960  typedef LargePageIterator iterator;
2961 
2962  explicit LargeObjectSpace(Heap* heap);
2963  LargeObjectSpace(Heap* heap, AllocationSpace id);
2964 
2965  ~LargeObjectSpace() override { TearDown(); }
2966 
2967  // Releases internal resources, frees objects in this space.
2968  void TearDown();
2969 
2970  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
2971  AllocateRaw(int object_size);
2972 
2973  // Available bytes for objects in this space.
2974  size_t Available() override;
2975 
2976  size_t Size() override { return size_; }
2977  size_t SizeOfObjects() override { return objects_size_; }
2978 
2979  // Approximate amount of physical memory committed for this space.
2980  size_t CommittedPhysicalMemory() override;
2981 
2982  int PageCount() { return page_count_; }
2983 
2984  // Finds an object for a given address, returns a Smi if it is not found.
2985  // The function iterates through all objects in this space and may be slow.
2986  Object* FindObject(Address a);
2987 
2988  // Finds a large object page containing the given address, returns nullptr
2989  // if such a page doesn't exist.
2990  LargePage* FindPage(Address a);
2991 
2992  // Clears the marking state of live objects.
2993  void ClearMarkingStateOfLiveObjects();
2994 
2995  // Frees unmarked objects.
2996  void FreeUnmarkedObjects();
2997 
2998  void InsertChunkMapEntries(LargePage* page);
2999  void RemoveChunkMapEntries(LargePage* page);
3000  void RemoveChunkMapEntries(LargePage* page, Address free_start);
3001 
3002  void PromoteNewLargeObject(LargePage* page);
3003 
3004  // Checks whether a heap object is in this space; O(1).
3005  bool Contains(HeapObject* obj);
3006  // Checks whether an address is in the object area in this space. Iterates
3007  // all objects in the space. May be slow.
3008  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
3009 
3010  // Checks whether the space is empty.
3011  bool IsEmpty() { return first_page() == nullptr; }
3012 
3013  void Register(LargePage* page, size_t object_size);
3014  void Unregister(LargePage* page, size_t object_size);
3015 
3016  LargePage* first_page() {
3017  return reinterpret_cast<LargePage*>(Space::first_page());
3018  }
3019 
3020  // Collect code statistics.
3021  void CollectCodeStatistics();
3022 
3023  iterator begin() { return iterator(first_page()); }
3024  iterator end() { return iterator(nullptr); }
3025 
3026  std::unique_ptr<ObjectIterator> GetObjectIterator() override;
3027 
3028  base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }
3029 
3030 #ifdef VERIFY_HEAP
3031  virtual void Verify(Isolate* isolate);
3032 #endif
3033 
3034 #ifdef DEBUG
3035  void Print() override;
3036 #endif
3037 
3038  protected:
3039  LargePage* AllocateLargePage(int object_size, Executability executable);
3040  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
3041  Executability executable);
3042 
3043  size_t size_; // allocated bytes
3044  int page_count_; // number of chunks
3045  size_t objects_size_; // size of objects
3046 
3047  private:
3048  // The chunk_map_mutex_ has to be used when the chunk map is accessed
3049  // concurrently.
3050  base::Mutex chunk_map_mutex_;
3051 
3052  // Page-aligned addresses to their corresponding LargePage.
3053  std::unordered_map<Address, LargePage*> chunk_map_;
3054 
3055  friend class LargeObjectIterator;
3056 };
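// Usage sketch (illustrative only; |object_size| is assumed to exceed
// kMaxRegularHeapObjectSize):
//   AllocationResult r = lo_space->AllocateRaw(object_size);
//   HeapObject* obj;
//   if (r.To(&obj)) {
//     LargePage* page = lo_space->FindPage(obj->address());
//     DCHECK_NOT_NULL(page);  // every large object lives on its own LargePage
//   }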
3057 
3058 class NewLargeObjectSpace : public LargeObjectSpace {
3059  public:
3060  explicit NewLargeObjectSpace(Heap* heap);
3061 
3062  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
3063 
3064  // Available bytes for objects in this space.
3065  size_t Available() override;
3066 
3067  void Flip();
3068 
3069  void FreeAllObjects();
3070 };
3071 
3072 class CodeLargeObjectSpace : public LargeObjectSpace {
3073  public:
3074  explicit CodeLargeObjectSpace(Heap* heap);
3075 
3076  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
3077  AllocateRaw(int object_size);
3078 };
3079 
3080 class LargeObjectIterator : public ObjectIterator {
3081  public:
3082  explicit LargeObjectIterator(LargeObjectSpace* space);
3083 
3084  HeapObject* Next() override;
3085 
3086  private:
3087  LargePage* current_;
3088 };
3089 
3090 // Iterates over the chunks (pages and large object pages) that can contain
3091 // pointers to new space or to evacuation candidates.
3092 class OldGenerationMemoryChunkIterator {
3093  public:
3094  inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
3095 
3096  // Return nullptr when the iterator is done.
3097  inline MemoryChunk* next();
3098 
3099  private:
3100  enum State {
3101  kOldSpaceState,
3102  kMapState,
3103  kCodeState,
3104  kLargeObjectState,
3105  kCodeLargeObjectState,
3106  kFinishedState
3107  };
3108  Heap* heap_;
3109  State state_;
3110  PageIterator old_iterator_;
3111  PageIterator code_iterator_;
3112  PageIterator map_iterator_;
3113  LargePageIterator lo_iterator_;
3114  LargePageIterator code_lo_iterator_;
3115 };
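// Usage sketch (illustrative only):
//   OldGenerationMemoryChunkIterator it(heap);
//   while (MemoryChunk* chunk = it.next()) {
//     // |chunk| belongs to the old, code, map or (code) large object space
//   }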
3116 
3117 } // namespace internal
3118 } // namespace v8
3119 
3120 #endif // V8_HEAP_SPACES_H_