#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <unordered_map>
#include <unordered_set>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/objects.h"
#include "src/objects/heap-object.h"
#include "src/objects/map.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodePageAllocatorScope;
}  // namespace heap
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunkLayout;
#define DCHECK_PAGE_ALIGNED(address) DCHECK_EQ(0, (address)&kPageAlignmentMask)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK_EQ(0, (address)&kObjectAlignmentMask)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };

enum RememberedSetType {
  OLD_TO_NEW,
  OLD_TO_OLD,
  NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
};
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory(FreeList* free_list, Page* page)
      : free_list_(free_list),
        page_(page),
        type_(kInvalidCategory),
        available_(0),
        top_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
  }

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Performs a single free operation on this category.
  void Free(Address address, size_t size_in_bytes, FreeMode mode);

  // Picks a node of at least |minimum_size| from the category and stores its
  // actual size in |node_size|. Returns nullptr if no such node is found.
  FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);

  // Searches the whole category for a node of at least |minimum_size|.
  FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);

  inline Page* page() const { return page_; }
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  size_t available() const { return available_; }

  void set_free_list(FreeList* free_list) { free_list_ = free_list; }

  size_t SumFreeList();
  int FreeListLength();

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }

  // The free list that owns this category.
  FreeList* free_list_;

  // The page this category belongs to.
  Page* page_;

  // The type (size class) of this free list category.
  FreeListCategoryType type_;

  // Number of available bytes in this category.
  size_t available_;

  // Points to the top FreeSpace* in the category.
  FreeSpace* top_;
};
class MemoryChunkLayout {
 public:
  static size_t CodePageGuardStartOffset();
  static size_t CodePageGuardSize();
  static intptr_t ObjectStartOffsetInCodePage();
  static intptr_t ObjectEndOffsetInCodePage();
  static size_t AllocatableMemoryInCodePage();
  static intptr_t ObjectStartOffsetInDataPage();
  V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
  static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
  static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body.
class MemoryChunk {
 public:
  // Use with std data structures.
  struct Hasher {
    size_t operator()(MemoryChunk* const chunk) const {
      return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
    }
  };
  enum Flag {
    IS_EXECUTABLE = 1u << 0,
    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
    // A page in new space has one of the next two flags set.
    IN_FROM_SPACE = 1u << 3,
    IN_TO_SPACE = 1u << 4,
    NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
    EVACUATION_CANDIDATE = 1u << 6,
    NEVER_EVACUATE = 1u << 7,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments.
    HAS_PROGRESS_BAR = 1u << 8,

    // A page tagged with this flag has been promoted from new to old space
    // during evacuation.
    PAGE_NEW_OLD_PROMOTION = 1u << 9,

    // A page tagged with this flag has been moved within new space during
    // evacuation.
    PAGE_NEW_NEW_PROMOTION = 1u << 10,

    // Forces the page to become an evacuation candidate at the next candidate
    // selection cycle. Testing only.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,

    NEVER_ALLOCATE_ON_PAGE = 1u << 12,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED = 1u << 13,

    // Indicates that the compaction of this page has been aborted and needs
    // special handling by the sweeper.
    COMPACTION_WAS_ABORTED = 1u << 15,

    // During stress testing, evacuation of a page may be aborted; this flag
    // avoids repeatedly triggering on the same page.
    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,

    // The page requires sweeping using external markbits to iterate it.
    SWEEP_TO_ITERATE = 1u << 17,

    // Indicates whether incremental marking is currently enabled.
    INCREMENTAL_MARKING = 1u << 18
  };

  typedef uintptr_t Flags;

  static const Flags kPointersToHereAreInterestingMask =
      POINTERS_TO_HERE_ARE_INTERESTING;

  static const Flags kPointersFromHereAreInterestingMask =
      POINTERS_FROM_HERE_ARE_INTERESTING;

  static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;

  static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;

  static const Flags kSkipEvacuationSlotsRecordingMask =
      kEvacuationCandidateMask | kIsInNewSpaceMask;
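  // Usage sketch (illustrative, not part of the original header): the
  // composite masks above let callers test several page properties with a
  // single read of the flag word, e.g.:
  //
  //   uintptr_t flags = chunk->GetFlags();
  //   bool in_new_space =
  //       (flags & MemoryChunk::kIsInNewSpaceMask) != 0;
  //   bool record_slots =
  //       (flags & MemoryChunk::kSkipEvacuationSlotsRecordingMask) == 0;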
  enum ConcurrentSweepingState {
    // Sweeping is complete or must not be performed on this page.
    kSweepingDone,
    // This page is ready for parallel sweeping.
    kSweepingPending,
    // This page is currently swept by a sweeper thread.
    kSweepingInProgress,
  };

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;
  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
  static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
  static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;

  static const size_t kHeaderSize =
      +kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES   // SlotSet* slot_set_[]
      + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet*
      + kSizetSize * ExternalBackingStoreType::kNumTypes
      + kPointerSize * kNumberOfCategories;  // FreeListCategory* categories_[]

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum number of nested code memory modification scopes.
  static const int kMaxWriteUnprotectCounter = 4;
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(a & ~kAlignmentMask);
  }

  static MemoryChunk* FromHeapObject(const HeapObject* o) {
    return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
                                          ~kAlignmentMask);
  }

  static MemoryChunk* FromHeapObject(const HeapObjectPtr o) {
    return reinterpret_cast<MemoryChunk*>(o.ptr() & ~kAlignmentMask);
  }

  void SetOldGenerationPageFlags(bool is_marking);
  void SetYoungGenerationPageFlags(bool is_marking);

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == kNullAddress) return;
    // Subtract one from the mark because when a chunk is full the top points
    // to the next address after the chunk, which effectively belongs to
    // another chunk.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_;
    } while (
        (new_mark > old_mark) &&
        !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
  }
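  // The loop above is the usual lock-free "monotonic max" pattern: reload the
  // current value, give up once it is already >= the new mark, otherwise retry
  // the compare-exchange until it succeeds. A minimal stand-alone sketch of
  // the same pattern (names here are illustrative only):
  //
  //   void UpdateMax(std::atomic<intptr_t>* cell, intptr_t candidate) {
  //     intptr_t current = cell->load();
  //     while (candidate > current &&
  //            !cell->compare_exchange_weak(current, candidate)) {
  //       // |current| was refreshed by compare_exchange_weak; retry.
  //     }
  //   }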
  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
      size_t amount);

  void DiscardUnusedMemory(Address addr, size_t size);

  Address address() const {
    return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
  }
  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It is a
  // limit if it is in the page, or just after the last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
    concurrent_sweeping_ = state;
  }

  ConcurrentSweepingState concurrent_sweeping_state() {
    return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
  }

  bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }

  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }

  inline Heap* heap() const { return heap_; }

  Heap* synchronized_heap();

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  template <RememberedSetType type>
  bool ContainsSlots() {
    return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
           invalidated_slots() != nullptr;
  }
  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  SlotSet* slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
    return slot_set_[type];
  }

  template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
  TypedSlotSet* typed_slot_set() {
    if (access_mode == AccessMode::ATOMIC)
      return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
    return typed_slot_set_[type];
  }
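  // Access-mode sketch (illustrative, not part of the original header):
  // concurrent readers should use the default ATOMIC mode, whose acquire load
  // is intended to pair with the release store performed when a slot set is
  // installed; strictly single-threaded callers may opt into NON_ATOMIC:
  //
  //   SlotSet* slots = chunk->slot_set<OLD_TO_NEW>();  // acquire load
  //   SlotSet* fast =
  //       chunk->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>();
  //   if (slots != nullptr) {
  //     // iterate recorded slots
  //   }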
  template <RememberedSetType type>
  SlotSet* AllocateSlotSet();
  template <RememberedSetType type>
  void ReleaseSlotSet();
  template <RememberedSetType type>
  TypedSlotSet* AllocateTypedSlotSet();
  template <RememberedSetType type>
  void ReleaseTypedSlotSet();

  InvalidatedSlots* AllocateInvalidatedSlots();
  void ReleaseInvalidatedSlots();
  void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
  void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
                                      HeapObject* new_start);
  bool RegisteredObjectWithInvalidatedSlots(HeapObject* object);
  InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }

  void ReleaseLocalTracker();

  void AllocateYoungGenerationBitmap();
  void ReleaseYoungGenerationBitmap();

  void AllocateMarkingBitmap();
  void ReleaseMarkingBitmap();
  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  size_t area_size() {
    return static_cast<size_t>(area_end() - area_start());
  }

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory();

  Address HighWaterMark() { return address() + high_water_mark_; }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return static_cast<int>(progress_bar_);
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
    }
  }
  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
    return external_backing_store_bytes_[type];
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) const {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) const {
    return this->address() + (index << kPointerSizeLog2);
  }
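  // Worked example (illustrative): with 64-bit pointers, kPointerSizeLog2 is
  // 3, so markbit indices are word offsets from the chunk start. An address
  // 256 bytes into the chunk maps to bit 256 >> 3 == 32, and
  // MarkbitIndexToAddress(32) == address() + (32 << 3) recovers the original
  // address, i.e. the two conversions are inverses for word-aligned addresses.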
  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  void SetFlag(Flag flag) {
    if (access_mode == AccessMode::NON_ATOMIC) {
      flags_ |= flag;
    } else {
      base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
    }
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsFlagSet(Flag flag) {
    return (GetFlags<access_mode>() & flag) != 0;
  }

  void ClearFlag(Flag flag) { flags_ &= ~flag; }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in |flags|, the rest retain the current value in |flags_|.
  void SetFlags(uintptr_t flags, uintptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  uintptr_t GetFlags() {
    if (access_mode == AccessMode::NON_ATOMIC) {
      return flags_;
    } else {
      return base::AsAtomicWord::Relaxed_Load(&flags_);
    }
  }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
             IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
    return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
  }

  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  bool ShouldSkipEvacuationSlotRecording() {
    uintptr_t flags = GetFlags<access_mode>();
    return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
           ((flags & COMPACTION_WAS_ABORTED) == 0);
  }
  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  bool InOldSpace() const;

  bool InLargeObjectSpace() const;

  inline bool IsInNewLargeObjectSpace() const;

  Space* owner() const { return owner_; }

  void set_owner(Space* space) { owner_ = space; }

  bool IsPagedSpace() const;

  void InitializationMemoryFence();

  void SetReadAndExecutable();
  void SetReadAndWritable();

  base::ListNode<MemoryChunk>& list_node() { return list_node_; }

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 VirtualMemory reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  VirtualMemory* reserved_memory() { return &reservation_; }
  size_t size_;
  uintptr_t flags_;

  Bitmap* marking_bitmap_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;

  Heap* heap_;

  // Start and end of the allocatable area on this chunk.
  Address area_start_;
  Address area_end_;

  // The space owning this memory chunk.
  std::atomic<Space*> owner_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  intptr_t progress_bar_;

  // Count of bytes marked black on page.
  std::atomic<intptr_t> live_byte_count_;

  SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
  InvalidatedSlots* invalidated_slots_;

  SkipList* skip_list_;

  // Assuming the initial allocation on a page is sequential, count the highest
  // number of bytes ever allocated on the page.
  std::atomic<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  std::atomic<intptr_t> concurrent_sweeping_;

  base::Mutex* page_protection_change_mutex_;

  // Bytes allocated on the page, which includes all objects on the page and
  // the linear allocation area.
  size_t allocated_bytes_;

  // Tracks off-heap memory used by this memory chunk.
  std::atomic<size_t> external_backing_store_bytes_[kNumTypes];

  // Freed memory that was not added to the free list.
  size_t wasted_memory_;

  base::ListNode<MemoryChunk> list_node_;

  FreeListCategory* categories_[kNumberOfCategories];

  LocalArrayBufferTracker* local_tracker_;

  std::atomic<intptr_t> young_generation_live_byte_count_;
  Bitmap* young_generation_bitmap_;
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class ConcurrentMarkingState;
  friend class IncrementalMarkingState;
  friend class MajorAtomicMarkingState;
  friend class MajorMarkingState;
  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
  friend class MinorMarkingState;
  friend class MinorNonAtomicMarkingState;
  friend class PagedSpace;
};

static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
              "sizeof(std::atomic<intptr_t>) == kPointerSize");
// -----------------------------------------------------------------------------
// A page is a memory chunk of fixed size. Large object pages may be larger.
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);

  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
  }

  static Page* FromHeapObject(const HeapObject* o) {
    return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
                                   ~kAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page; to also be safe for such values we
  // subtract one word.
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kPointerSize);
  }

  // Checks if two addresses are on the same page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (addr & kPageAlignmentMask) == 0;
  }

  static Page* ConvertNewToOld(Page* old_page);

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(list_node_.next()); }
  Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(categories_[i]);
    }
  }

  // Returns the offset of a given address to this page.
  inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(size_t offset) {
    Address address_in_page = address() + offset;
    DCHECK_GE(address_in_page, area_start_);
    DCHECK_LT(address_in_page, area_end_);
    return address_in_page;
  }
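  // Round-trip sketch (illustrative, not part of the original header):
  // Offset() and OffsetToAddress() are inverses for addresses inside the
  // object area of the page:
  //
  //   Page* p = Page::FromHeapObject(obj);
  //   size_t offset = p->Offset(obj->address());
  //   DCHECK_EQ(obj->address(), p->OffsetToAddress(offset));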
  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress, in particular when a sweeper thread was sweeping this page right
  // before the call.
  void WaitUntilSweepingCompleted() {
    // Blocks on the page state until the sweeper is done.
    DCHECK(SweepingDone());
  }

  void AllocateLocalTracker();
  bool contains_array_buffers();

  void ResetFreeListStatistics();

  size_t AvailableInFreeList();

  size_t AvailableInFreeListFromAllocatedBytes() {
    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
    return area_size() - wasted_memory() - allocated_bytes();
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return categories_[type];
  }

  size_t wasted_memory() { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() { return allocated_bytes_; }
  void IncreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    allocated_bytes_ += bytes;
  }
  void DecreaseAllocatedBytes(size_t bytes) {
    DCHECK_LE(bytes, area_size());
    DCHECK_GE(allocated_bytes(), bytes);
    allocated_bytes_ -= bytes;
  }

  void ResetAllocatedBytes();

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);

  void InitializeFreeListCategories();
  void AllocateFreeListCategories();
  void ReleaseFreeListCategories();

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
};
class ReadOnlyPage : public Page {
 public:
  // Clears any pointers in the header that point out of the page that would
  // otherwise make the header non-relocatable.
  void MakeHeaderRelocatable();
};

class LargePage : public MemoryChunk {
 public:
  // A limit to guarantee that we do not overflow typed slot offsets in the
  // old-to-old remembered set.
  static const int kMaxCodePageSize = 512 * MB;

  static LargePage* FromHeapObject(const HeapObject* o) {
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
  }

  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  LargePage* next_page() {
    return static_cast<LargePage*>(list_node_.next());
  }

  // Uncommit memory that is not in use anymore by the object. If the object
  // cannot be shrunk, 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};
// -----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id)
      : allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        committed_(0),
        max_committed_(0) {
    external_backing_store_bytes_ =
        new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
    external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
    external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
        0;
  }

  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, Space* from, Space* to, size_t amount);

  virtual ~Space() {
    delete[] external_backing_store_bytes_;
    external_backing_store_bytes_ = nullptr;
  }

  Heap* heap() const { return heap_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  const char* name() { return AllocationSpaceName(id_); }

  V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
      AllocationObserver* observer);

  V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
      AllocationObserver* observer);

  V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();

  V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();

  V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}

  void AllocationStep(int bytes_since_last, Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual size_t CommittedMemory() { return committed_; }

  virtual size_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual size_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. due to lazy sweeping).
  virtual size_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual size_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }
  virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;

  void AccountCommitted(size_t bytes) {
    DCHECK_GE(committed_ + bytes, committed_);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(size_t bytes) {
    DCHECK_GE(committed_, committed_ - bytes);
    committed_ -= bytes;
  }

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  // Returns amount of off-heap memory in use by objects in this Space.
  virtual size_t ExternalBackingStoreBytes(
      ExternalBackingStoreType type) const {
    return external_backing_store_bytes_[type];
  }

  V8_EXPORT_PRIVATE void* GetRandomMmapAddr();

  MemoryChunk* first_page() { return memory_chunk_list_.front(); }
  MemoryChunk* last_page() { return memory_chunk_list_.back(); }

  virtual void Print() = 0;

 protected:
  intptr_t GetNextInlineAllocationStepSize();
  bool AllocationObserversActive() {
    return !allocation_observers_paused_ && !allocation_observers_.empty();
  }

  std::vector<AllocationObserver*> allocation_observers_;

  // The list of pages that belong to this space.
  base::List<MemoryChunk> memory_chunk_list_;

  // Tracks off-heap memory used by this space.
  std::atomic<size_t>* external_backing_store_bytes_;

  bool allocation_observers_paused_;

  Heap* heap_;
  AllocationSpace id_;

  // Keeps track of committed memory in this space.
  size_t committed_;
  size_t max_committed_;

 private:
  DISALLOW_COPY_AND_ASSIGN(Space);
};
class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};
// Keeps track of recently freed code range regions so they can be reused.
class CodeRangeAddressHint {
 public:
  // Returns the most recently freed code range start address for the given
  // size. If there is no such entry, a random address is returned.
  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);

  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
                                              size_t code_range_size);

 private:
  // A map from code range size to recently freed code range addresses.
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
// -----------------------------------------------------------------------------
// SkipList records, per fixed-size region of a page, the lowest object start
// overlapping that region, so iteration can skip into the middle of a page.
class SkipList {
 public:
  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = static_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) {
        starts_[idx] = addr;
      } else {
        // Only the first region may already record an earlier object start.
        DCHECK_EQ(start_region, idx);
      }
    }
  }

  static inline int RegionNumber(Address addr) {
    return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == nullptr) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
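// Region arithmetic sketch (illustrative, not part of the original header):
// with kRegionSizeLog2 == 13 each region covers 8 KB, so a 512 KB page has
// kSize == 64 regions. An object at page offset 0x4010 of size 0x100 touches
// only region 0x4010 >> 13 == 2, so starts_[2] is lowered to that address if
// it was larger. Typical use:
//
//   SkipList* list = page->skip_list();
//   if (list != nullptr) {
//     Address start = list->StartFor(some_addr_in_page);  // iteration start
//   }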
// -----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages the chunks on behalf of the spaces.
class V8_EXPORT_PRIVATE MemoryAllocator {
 public:
  // Unmapper takes care of concurrently unmapping and uncommitting memory
  // chunks.
  class Unmapper {
   public:
    Unmapper(Heap* heap, MemoryAllocator* allocator)
        : heap_(heap),
          allocator_(allocator),
          pending_unmapping_tasks_semaphore_(0),
          pending_unmapping_tasks_(0),
          active_unmapping_tasks_(0) {
      chunks_[kRegular].reserve(kReservedQueueingSlots);
      chunks_[kPooled].reserve(kReservedQueueingSlots);
    }

    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
        AddMemoryChunkSafe<kRegular>(chunk);
      } else {
        AddMemoryChunkSafe<kNonRegular>(chunk);
      }
    }

    MemoryChunk* TryGetPooledMemoryChunkSafe() {
      // Prefer a chunk that was declared as pooled and is already
      // uncommitted; otherwise steal a regular chunk of kPageSize.
      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
      if (chunk == nullptr) {
        chunk = GetMemoryChunkSafe<kRegular>();
        if (chunk != nullptr) {
          // For stolen chunks we need to manually free any allocated memory.
          chunk->ReleaseAllocatedMemory();
        }
      }
      return chunk;
    }

    V8_EXPORT_PRIVATE void FreeQueuedChunks();
    void CancelAndWaitForPendingTasks();
    void PrepareForMarkCompact();
    void EnsureUnmappingCompleted();
    V8_EXPORT_PRIVATE void TearDown();
    size_t NumberOfCommittedChunks();
    int NumberOfChunks();
    size_t CommittedBufferedMemory();

   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 4;

    enum ChunkQueueType {
      kRegular,     // Pages of kPageSize that can be reused.
      kNonRegular,  // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
      kNumberOfChunkQueues,
    };

    enum class FreeMode {
      kUncommitPooled,
      kReleasePooled,
    };

    template <ChunkQueueType type>
    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      base::MutexGuard guard(&mutex_);
      chunks_[type].push_back(chunk);
    }

    template <ChunkQueueType type>
    MemoryChunk* GetMemoryChunkSafe() {
      base::MutexGuard guard(&mutex_);
      if (chunks_[type].empty()) return nullptr;
      MemoryChunk* chunk = chunks_[type].back();
      chunks_[type].pop_back();
      return chunk;
    }

    bool MakeRoomForNewTasks();

    template <FreeMode mode>
    void PerformFreeMemoryOnQueuedChunks();

    void PerformFreeMemoryOnQueuedNonRegularChunks();

    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    base::Semaphore pending_unmapping_tasks_semaphore_;
    intptr_t pending_unmapping_tasks_;
    std::atomic<intptr_t> active_unmapping_tasks_;
  };
  enum AllocationMode {
    kRegular,
    kPooled,
  };

  enum FreeMode {
    kFull,
    kAlreadyPooled,
    kPreFreeAndQueue,
    kPooledAndQueue,
  };

  static intptr_t GetCommitPageSize();

  MemoryAllocator(Isolate* isolate, size_t max_capacity,
                  size_t code_range_size);

  // Allocates a Page from the allocator. AllocationMode is used to indicate
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
            typename SpaceType>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);

  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);

  template <MemoryAllocator::FreeMode mode = kFull>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  void Free(MemoryChunk* chunk);

  // Returns allocated space in bytes.
  size_t Size() { return size_; }

  // Returns allocated executable space in bytes.
  size_t SizeExecutable() { return size_executable_; }

  // Returns the maximum available bytes of heaps.
  size_t Available() {
    const size_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed.
  MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
                             Executability executable, Space* space);

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);
  // Partially releases |bytes_to_free| bytes starting at |start_free|.
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Checks if an allocated MemoryChunk was intended to be used for executable
  // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    return executable_memory_.find(chunk) != executable_memory_.end();
  }

  // Commits the memory region owned by the given reservation object.
  bool CommitMemory(VirtualMemory* reservation);

  // Uncommits the memory region owned by the given reservation object.
  bool UncommitMemory(VirtualMemory* reservation);

  // Zaps a contiguous block of memory [start..(start+size)[ with |zap_value|.
  void ZapBlock(Address start, size_t size, uintptr_t zap_value);

  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                    Address start,
                                                    size_t commit_size,
                                                    size_t reserved_size);

  // Returns the page allocator suitable for allocating pages with the
  // requested executability.
  v8::PageAllocator* page_allocator(Executability executable) {
    return executable == EXECUTABLE ? code_page_allocator_
                                    : data_page_allocator_;
  }

  // A region of memory that may contain executable code, including a reserved
  // OS page with read-write access in the beginning.
  const base::AddressRegion& code_range() const {
    DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    DCHECK_IMPLIES(!code_range_.is_empty(),
                   code_range_.contains(code_page_allocator_instance_->begin(),
                                        code_page_allocator_instance_->size()));
    return code_range_;
  }

  Unmapper* unmapper() { return &unmapper_; }

 private:
  // PreFreeMemory logically frees the object, i.e., it unregisters the
  // memory, logs a delete event and queues the chunk for unmapping.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFreeMemory was
  // executed before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for the public interface. Note that currently we only
  // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

  // Initializes pages in a chunk. Returns the first page address.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The loops below ensure the limits are only updated if they did not
    // change in between the load and the compare-exchange.
    Address ptr = kNullAddress;
    do {
      ptr = lowest_ever_allocated_;
    } while ((low < ptr) &&
             !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
    do {
      ptr = highest_ever_allocated_;
    } while ((high > ptr) &&
             !highest_ever_allocated_.compare_exchange_weak(ptr, high));
  }
  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
    chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
  }

  // This object controls virtual space reserved for the V8 heap instance.
  VirtualMemory heap_reservation_;

  // A part of the |heap_reservation_| that may contain executable code,
  // including a reserved page with read-write access in the beginning.
  base::AddressRegion code_range_;

  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  std::atomic<size_t> size_;
  // Allocated executable space size in bytes.
  std::atomic<size_t> size_executable_;

  // The lowest and highest addresses ever allocated, used as a quick,
  // conservative way of determining whether a pointer is outside the heap.
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  // Data structure to remember allocated executable memory chunks.
  std::unordered_set<MemoryChunk*> executable_memory_;

  friend class heap::TestCodePageAllocatorScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
extern template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    size_t size, PagedSpace* owner, Executability executable);
extern template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);
extern template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);
template <class PAGE_TYPE>
class PageIteratorImpl
    : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
 public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PAGE_TYPE* operator*() { return p_; }
  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ == p_;
  }
  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ != p_;
  }

 private:
  PAGE_TYPE* p_;
};

// Heap object iterator for paged spaces.
class HeapObjectIterator {
 private:
  // Slow path of next(); advances the iteration to the next page.
  bool AdvanceToNextPage();
};
// An abstraction of allocation and relocation pointers in a page-structured
// space.
class LinearAllocationArea {
 public:
  void Reset(Address top, Address limit) {
    set_top(top);
    set_limit(limit);
  }

  V8_INLINE void set_top(Address top) {
    SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  V8_INLINE Address top() const {
    SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  V8_INLINE void set_limit(Address limit) { limit_ = limit; }

  V8_INLINE Address limit() const { return limit_; }

  Address* limit_address() { return &limit_; }

  bool VerifyPagedAllocation() {
    return (Page::FromAllocationAreaAddress(top_) ==
            Page::FromAllocationAreaAddress(limit_)) &&
           (top_ <= limit_);
  }

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
// An abstraction of the accounting statistics of a page-structured space.
class AllocationStats {
 public:
  void ClearSize() {
    size_ = 0;
    allocated_on_page_.clear();
  }

  // Accessors for the allocation statistics.
  size_t Capacity() { return capacity_; }
  size_t MaxCapacity() { return max_capacity_; }
  size_t Size() { return size_; }
  size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }

  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_ + bytes, size_);
    size_ += bytes;
    allocated_on_page_[page] += bytes;
  }

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    DCHECK_GE(size_, bytes);
    size_ -= bytes;
    DCHECK_GE(allocated_on_page_[page], bytes);
    allocated_on_page_[page] -= bytes;
  }

  void DecreaseCapacity(size_t bytes) {
    DCHECK_GE(capacity_, bytes);
    DCHECK_GE(capacity_ - bytes, size_);
    capacity_ -= bytes;
  }

  void IncreaseCapacity(size_t bytes) {
    DCHECK_GE(capacity_ + bytes, capacity_);
    capacity_ += bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
  }

 private:
  // The number of object-area bytes (i.e., not including page bookkeeping
  // structures) currently in the space.
  std::atomic<size_t> capacity_;

  // The maximum capacity ever observed.
  size_t max_capacity_;

  // The number of allocated bytes.
  size_t size_;

  std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
};
// A free list maintaining free blocks of memory, organized in a fixed number
// of categories for blocks of different sizes.
class V8_EXPORT_PRIVATE FreeList {
 public:
  // Returns how much memory can be allocated after freeing maximum_freed
  // memory.
  static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
    if (maximum_freed <= kTiniestListMax) {
      // Since we are not iterating over all list entries, we cannot guarantee
      // that we can find the maximum freed block in that free list.
      return 0;
    } else if (maximum_freed <= kTinyListMax) {
      return kTinyAllocationMax;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }

  static FreeListCategoryType SelectFreeListCategoryType(
      size_t size_in_bytes) {
    if (size_in_bytes <= kTiniestListMax) {
      return kTiniest;
    } else if (size_in_bytes <= kTinyListMax) {
      return kTiny;
    } else if (size_in_bytes <= kSmallListMax) {
      return kSmall;
    } else if (size_in_bytes <= kMediumListMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeListMax) {
      return kLarge;
    }
    return kHuge;
  }

  // Adds a block of size {size_in_bytes} starting at {start} to the free
  // list. Returns the number of bytes that were not added to the free list
  // because the block was too small.
  size_t Free(Address start, size_t size_in_bytes, FreeMode mode);

  // Allocates a free space node of at least size_in_bytes bytes. Stores the
  // actual node size in node_size and returns nullptr if the request cannot
  // be handled by the free list.
  V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
                                            size_t* node_size);

  void ResetStats() {
    ForAllFreeListCategories(
        [](FreeListCategory* category) { category->ResetStats(); });
  }

  // Returns the number of bytes available on the free list.
  size_t Available() {
    size_t available = 0;
    ForAllFreeListCategories([&available](FreeListCategory* category) {
      available += category->available();
    });
    return available;
  }

  bool IsEmpty() {
    bool empty = true;
    ForAllFreeListCategories([&empty](FreeListCategory* category) {
      if (!category->is_empty()) empty = false;
    });
    return empty;
  }

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  size_t EvictFreeListItems(Page* page);
  bool ContainsPageFreeListItems(Page* page);

  size_t wasted_bytes() { return wasted_bytes_; }

  template <typename Callback>
  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
    FreeListCategory* current = categories_[type];
    while (current != nullptr) {
      FreeListCategory* next = current->next();
      callback(current);
      current = next;
    }
  }

  template <typename Callback>
  void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
    }
  }

  void PrintCategories(FreeListCategoryType type);

  // Returns a page containing an entry for a given type, or nullptr otherwise.
  inline Page* GetPageForCategoryType(FreeListCategoryType type);

  size_t SumFreeLists();

 private:
  class FreeListCategoryIterator {
   public:
    FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
        : current_(free_list->categories_[type]) {}

    bool HasNext() { return current_ != nullptr; }

    FreeListCategory* Next() {
      DCHECK(HasNext());
      FreeListCategory* tmp = current_;
      current_ = current_->next();
      return tmp;
    }

   private:
    FreeListCategory* current_;
  };

  // The size range of blocks, in bytes.
  static const size_t kMinBlockSize = 3 * kPointerSize;

  // This is a conservative upper bound. The actual maximum block size takes
  // padding and alignment of data and code pages into account.
  static const size_t kMaxBlockSize = Page::kPageSize;

  static const size_t kTiniestListMax = 0xa * kPointerSize;
  static const size_t kTinyListMax = 0x1f * kPointerSize;
  static const size_t kSmallListMax = 0xff * kPointerSize;
  static const size_t kMediumListMax = 0x7ff * kPointerSize;
  static const size_t kLargeListMax = 0x3fff * kPointerSize;
  static const size_t kTinyAllocationMax = kTiniestListMax;
  static const size_t kSmallAllocationMax = kTinyListMax;
  static const size_t kMediumAllocationMax = kSmallListMax;
  static const size_t kLargeAllocationMax = kMediumListMax;
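  // Size-class arithmetic sketch (illustrative): on a 64-bit build
  // (kPointerSize == 8) the boundaries above evaluate to
  //   kTiniestListMax = 0xa    * 8 =     80 bytes
  //   kTinyListMax    = 0x1f   * 8 =    248 bytes
  //   kSmallListMax   = 0xff   * 8 =   2040 bytes
  //   kMediumListMax  = 0x7ff  * 8 =  16376 bytes
  //   kLargeListMax   = 0x3fff * 8 = 131064 bytes
  // so SelectFreeListCategoryType(100) returns kTiny and anything larger than
  // kLargeListMax falls into kHuge.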
  // Walks all available categories for a given |type| and tries to retrieve
  // a node. Returns nullptr if the category is empty.
  FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
                        size_t* node_size);

  // Tries to retrieve a node from the first category in a given |type|.
  FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
                           size_t* node_size);

  // Searches a given |type| for a node of at least |minimum_size|.
  FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
                                 size_t minimum_size);

  // The tiny categories are not used for fast allocation.
  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
      size_t size_in_bytes) {
    if (size_in_bytes <= kSmallAllocationMax) {
      return kSmall;
    } else if (size_in_bytes <= kMediumAllocationMax) {
      return kMedium;
    } else if (size_in_bytes <= kLargeAllocationMax) {
      return kLarge;
    }
    return kHuge;
  }

  FreeListCategory* top(FreeListCategoryType type) const {
    return categories_[type];
  }

  std::atomic<size_t> wasted_bytes_;
  FreeListCategory* categories_[kNumberOfCategories];
};
// A linear allocation area carved out of a space that can be used for
// unsynchronized allocation.
class LocalAllocationBuffer {
 public:
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }

  inline bool TryFreeLast(HeapObject* object, int object_size);

 private:
  LinearAllocationArea allocation_info_;
};
class SpaceWithLinearArea : public Space {
 public:
  SpaceWithLinearArea(Heap* heap, AllocationSpace id)
      : Space(heap, id), top_on_previous_step_(0) {
    allocation_info_.Reset(kNullAddress, kNullAddress);
  }

  virtual bool SupportsInlineAllocation() = 0;

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  V8_EXPORT_PRIVATE void AddAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void RemoveAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
  V8_EXPORT_PRIVATE void PauseAllocationObservers() override;

  // When allocation observers are active, a lower limit may be used so the
  // observers can interrupt earlier than the natural limit. min_size is the
  // minimum size the limited area must have.
  V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
      size_t min_size) = 0;

 protected:
  // Performs an inline allocation step for the observers. |top| is the bump
  // pointer at the last step, |top_for_next_step| is where the next byte will
  // be allocated from.
  void InlineAllocationStep(Address top, Address top_for_next_step,
                            Address soon_object, size_t size);
  V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;

  LinearAllocationArea allocation_info_;
  Address top_on_previous_step_;
};
class V8_EXPORT_PRIVATE PagedSpace : public SpaceWithLinearArea {
 public:
  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(Heap* heap, AllocationSpace id, Executability executable);

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  inline bool Contains(Object* o);
  bool ContainsSlow(Address addr);

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  void ClearStats() {
    accounting_stats_.ClearSize();
    free_list_.ResetStats();
    ResetFreeListStatistics();
  }

  // Available bytes without growing. These are the bytes on the free list.
  size_t Available() override { return free_list_.Available(); }

  // Allocated bytes in this space. The bytes in the linear allocation area
  // are excluded, same as in Available().
  size_t Size() override { return accounting_stats_.Size(); }

  size_t SizeOfObjects() override;

  // Wasted bytes in this space: bytes that were thrown away because they were
  // too small to be used for allocation.
  virtual size_t Waste() { return free_list_.wasted_bytes(); }

  enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };

  // Allocate the requested number of bytes in the space if possible.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);

  // Allocate the requested number of bytes in the space, aligned if possible.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  // Allocate the requested number of bytes, taking alignment into account if
  // needed.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment);

  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
                                 ClearRecordedSlots::kNo);
    if (mode == SpaceAccountingMode::kSpaceAccounted) {
      return AccountedFree(start, size_in_bytes);
    } else {
      return UnaccountedFree(start, size_in_bytes);
    }
  }

  // Gives a block of memory to the space's free list. Returns the number of
  // bytes that were actually added to the free list.
  size_t AccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }
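  // Accounting sketch (illustrative): FreeList::Free() returns the number of
  // bytes it could not put on a free list, so the value returned by
  // AccountedFree()/UnaccountedFree() is the number of bytes that became
  // reusable. E.g. on a 64-bit build (kMinBlockSize == 24), freeing a 40-byte
  // block adds it to a category and AccountedFree() returns 40, while freeing
  // 16 bytes puts nothing on the list, so those bytes count as waste and
  // AccountedFree() returns 0.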
  inline bool TryFreeLast(HeapObject* object, int object_size);

  void ResetFreeList();

  // Empties the linear allocation area, returning unused memory to the free
  // list.
  void FreeLinearAllocationArea();

  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  void ReleasePage(Page* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Removes a page if it has at least |size_in_bytes| bytes available for
  // allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadAndExecutable();
  void SetReadAndWritable();

  void VerifyLiveBytes();

  // Overridden by subclasses to verify space-specific object properties
  // (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  void VerifyCountersAfterSweeping();
  void VerifyCountersBeforeConcurrentSweeping();

  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics.
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Returns the size of the allocatable area on a page in this space.
  inline int AreaSize() { return static_cast<int>(area_size_); }

  virtual bool is_local() { return false; }

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  FreeList* free_list() { return &free_list_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

 protected:
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.Reset(top, limit);
  }
  void DecreaseLimit(Address new_limit);
  void UpdateInlineAllocationLimit(size_t min_size) override;
  bool SupportsInlineAllocation() override {
    return identity() == OLD_SPACE && !is_local();
  }

  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return first_page() != nullptr; }

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  inline bool EnsureLinearAllocationArea(int size_in_bytes);
  // Allocates an object from the linear allocation area. Assumes the area is
  // large enough to fit the object.
  inline HeapObject* AllocateLinearly(int size_in_bytes);
  // Tries to allocate an aligned object from the linear allocation area.
  // Returns nullptr if it does not fit; otherwise writes the allocation size
  // (object size plus alignment filler) to |size_in_bytes|.
  inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
      size_t size_in_bytes);

  // If sweeping is still in progress, tries to sweep unswept pages; otherwise
  // waits for the sweeper threads and retries free-list allocation.
  V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent.
  V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
      int size_in_bytes);

  V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
      int size_in_bytes);

  Executability executable_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The space's free list.
  FreeList free_list_;

  friend class heap::HeapTester;
};
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };

// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
class SemiSpace : public Space {
 public:
  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE),
        current_capacity_(0),
        maximum_capacity_(0),
        minimum_capacity_(0),
        age_mark_(kNullAddress),
        committed_(false),
        id_(semispace),
        current_page_(nullptr),
        pages_used_(0) {}

  inline bool Contains(Object* o);
  inline bool ContainsSlow(Address a);

  void SetUp(size_t initial_capacity, size_t maximum_capacity);

  bool is_committed() { return committed_; }

  // Grow the semispace to the new capacity. The new capacity requested must
  // be larger than the current capacity.
  bool GrowTo(size_t new_capacity);

  // Shrinks the semispace to the new capacity, which must be smaller than the
  // current capacity.
  bool ShrinkTo(size_t new_capacity);

  bool EnsureCurrentCapacity();

  Address space_end() { return memory_chunk_list_.back()->area_end(); }

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK_NE(memory_chunk_list_.front(), nullptr);
    return memory_chunk_list_.front()->area_start();
  }

  Page* current_page() { return current_page_; }
  int pages_used() { return pages_used_; }

  // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
    Page* next_page = current_page_->next_page();
    // We cannot expand if we reached the maximum number of pages already.
    // Note that we need to account for the next page already for this check
    // as we could potentially fill the whole page after advancing.
    const bool reached_max_pages = (pages_used_ + 1) == max_pages();
    if (next_page == nullptr || reached_max_pages) {
      return false;
    }
    current_page_ = next_page;
    pages_used_++;
    return true;
  }

  void RemovePage(Page* page);
  void PrependPage(Page* page);

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // Returns the current committed capacity of the semispace.
  size_t current_capacity() { return current_capacity_; }

  // Returns the maximum capacity of the semispace.
  size_t maximum_capacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  size_t minimum_capacity() { return minimum_capacity_; }

  SemiSpaceId id() { return id_; }

  size_t CommittedPhysicalMemory() override;

  // If we don't have these here then SemiSpace will be abstract. However,
  // they should never be called:
  size_t Size() override { UNREACHABLE(); }

  size_t SizeOfObjects() override { return Size(); }

  size_t Available() override { UNREACHABLE(); }

  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
  Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  void Print() override;

  virtual void Verify();

 private:
  void RewindPages(int num_pages);

  inline int max_pages() {
    return static_cast<int>(current_capacity_ / Page::kPageSize);
  }
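  // Capacity arithmetic sketch (illustrative): capacities are multiples of
  // Page::kPageSize (512 KB with kPageSizeBits == 19), so a semispace with
  // current_capacity_ == 8 MB has max_pages() == 16; AdvancePage() above
  // returns false rather than move onto the page that would make pages_used_
  // reach that limit.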
  // Copies the flags into the masked positions on all pages in the space.
  void FixPagesFlags(intptr_t flags, intptr_t flag_mask);

  // The currently committed space capacity.
  size_t current_capacity_;

  // The maximum capacity that can be used by this space. A space cannot grow
  // beyond that size.
  size_t maximum_capacity_;

  // The minimum capacity for the space. A space cannot shrink below this size.
  size_t minimum_capacity_;

  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  bool committed_;
  SemiSpaceId id_;
  Page* current_page_;
  int pages_used_;
};
// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
class NewSpace : public SpaceWithLinearArea {
 public:
  NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
           size_t initial_semispace_capacity, size_t max_semispace_capacity);

  ~NewSpace() override { TearDown(); }

  inline bool ContainsSlow(Address a);
  inline bool Contains(Object* o);

  // Return the allocated bytes in the active semispace.
  size_t Size() override {
    DCHECK_GE(top(), to_space_.page_low());
    return to_space_.pages_used() *
               MemoryChunkLayout::AllocatableMemoryInDataPage() +
           static_cast<size_t>(top() - to_space_.page_low());
  }

  size_t SizeOfObjects() override { return Size(); }

  // Return the allocatable capacity of a semispace.
  size_t Capacity() {
    SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return (to_space_.current_capacity() / Page::kPageSize) *
           MemoryChunkLayout::AllocatableMemoryInDataPage();
  }

  // Return the current size of a semispace, allocatable and non-allocatable
  // memory.
  size_t TotalCapacity() {
    DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return to_space_.current_capacity();
  }

  // Committed memory for NewSpace is the committed memory of both semispaces.
  size_t CommittedMemory() override {
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
  }

  size_t MaximumCommittedMemory() override {
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // Return the available bytes without growing.
  size_t Available() override {
    DCHECK_GE(Capacity(), Size());
    return Capacity() - Size();
  }

  size_t ExternalBackingStoreBytes(
      ExternalBackingStoreType type) const override {
    DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
    return to_space_.ExternalBackingStoreBytes(type);
  }

  size_t AllocatedSinceLastGC() {
    const Address age_mark = to_space_.age_mark();
    DCHECK_NE(age_mark, kNullAddress);
    DCHECK_NE(top(), kNullAddress);
    Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
    Page* const last_page = Page::FromAllocationAreaAddress(top());
    Page* current_page = age_mark_page;
    size_t allocated = 0;
    if (current_page != last_page) {
      DCHECK_EQ(current_page, age_mark_page);
      DCHECK_GE(age_mark_page->area_end(), age_mark);
      allocated += age_mark_page->area_end() - age_mark;
      current_page = current_page->next_page();
    } else {
      DCHECK_GE(top(), age_mark);
      return top() - age_mark;
    }
    while (current_page != last_page) {
      DCHECK_NE(current_page, age_mark_page);
      allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
      current_page = current_page->next_page();
    }
    DCHECK_GE(top(), current_page->area_start());
    allocated += top() - current_page->area_start();
    DCHECK_LE(allocated, Size());
    return allocated;
  }
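  // Worked example (illustrative): if the age mark sits 100 KB before the end
  // of its page, two full pages follow it, and top() is 50 KB into the current
  // page, the loop above sums 100 KB + 2 * AllocatableMemoryInDataPage() +
  // 50 KB. In the common case where top() is still on the age-mark page, the
  // early return simply yields top() - age_mark.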
  void MovePageFromSpaceToSpace(Page* page) {
    DCHECK(page->InFromSpace());
    from_space_.RemovePage(page);
    to_space_.PrependPage(page);
  }

  // Return the maximum capacity of a semispace.
  size_t MaximumCapacity() {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Returns the initial capacity of a semispace.
  size_t InitialTotalCapacity() {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    return to_space_.minimum_capacity();
  }

  void ResetOriginalTop() {
    DCHECK_GE(top(), original_top_);
    DCHECK_LE(top(), original_limit_);
    original_top_.store(top(), std::memory_order_release);
  }

  Address original_top_acquire() {
    return original_top_.load(std::memory_order_acquire);
  }
  Address original_limit_relaxed() {
    return original_limit_.load(std::memory_order_relaxed);
  }

  // Return the address of the first allocatable address in the active
  // semispace.
  Address first_allocatable_address() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes);

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment);

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetLinearAllocationArea();

  // When inline allocation stepping is active, either because of incremental
  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
  // inline allocation every once in a while.
  void UpdateInlineAllocationLimit(size_t size_in_bytes) override;

  inline bool ToSpaceContainsSlow(Address a);
  inline bool ToSpaceContains(Object* o);
  inline bool FromSpaceContains(Object* o);

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

  virtual void Verify(Isolate* isolate);

  void Print() override { to_space_.Print(); }

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

  Page* first_page() { return to_space_.first_page(); }
  Page* last_page() { return to_space_.last_page(); }

  iterator begin() { return to_space_.begin(); }
  iterator end() { return to_space_.end(); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  SemiSpace& from_space() { return from_space_; }
  SemiSpace& to_space() { return to_space_; }

 private:
  // Update linear allocation area to match the current to-space page.
  void UpdateLinearAllocationArea();

  // The top and the limit at the time of setting the linear allocation area.
  // These values can be accessed by background tasks.
  std::atomic<Address> original_top_;
  std::atomic<Address> original_limit_;

  SemiSpace to_space_;
  SemiSpace from_space_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
  bool SupportsInlineAllocation() override { return true; }
};
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 public:
  bool is_local() override { return true; }

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
      int size_in_bytes) override;

  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
      int size_in_bytes) override;
};

// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};
// -----------------------------------------------------------------------------
// Old generation regular object space.
class OldSpace : public PagedSpace {
 public:
  static bool IsAtPageStart(Address addr) {
    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
           MemoryChunkLayout::ObjectStartOffsetInDataPage();
  }
};

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())

// -----------------------------------------------------------------------------
// Old generation map object space.
class MapSpace : public PagedSpace {
 public:
  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
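  // Worked example (illustrative): if Map::kSize were 80 bytes, the slow path
  // rounds a request of 200 bytes down to (200 / 80) * 80 == 160; when
  // Map::kSize happens to be a power of two, e.g. 64, the fast path computes
  // RoundDown(200, 64) == 192 instead.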
};

// -----------------------------------------------------------------------------
// Read-only space for all immortal, immovable and immutable objects.
class ReadOnlySpace : public PagedSpace {
 public:
  class WritableScope {
   public:
    explicit WritableScope(ReadOnlySpace* space) : space_(space) {
      space_->MarkAsReadWrite();
    }

    ~WritableScope() { space_->MarkAsReadOnly(); }

   private:
    ReadOnlySpace* space_;
  };

  bool writable() const { return !is_marked_read_only_; }

  void ClearStringPaddingIfNeeded();
  void MarkAsReadOnly();

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterDeserialization();

 private:
  void MarkAsReadWrite();

  bool is_marked_read_only_ = false;

  bool is_string_padding_cleared_;
};
// -----------------------------------------------------------------------------
// Large objects (larger than kMaxRegularHeapObjectSize) are allocated and
// managed by the large object space.
class LargeObjectSpace : public Space {
 public:
  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(
      int object_size);

  // Available bytes for objects in this space.
  size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);

  void PromoteNewLargeObject(LargePage* page);

  // Checks whether a heap object is in this space; O(1).
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page() == nullptr; }

  void Unregister(LargePage* page, size_t object_size);

  LargePage* first_page() {
    return reinterpret_cast<LargePage*>(Space::first_page());
  }

  // Collect code statistics.
  void CollectCodeStatistics();

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }

  virtual void Verify(Isolate* isolate);

  void Print() override;

 protected:
  LargePage* AllocateLargePage(int object_size, Executability executable);
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);

  size_t size_;           // allocated bytes
  int page_count_;        // number of chunks
  size_t objects_size_;   // size of objects

 private:
  // The chunk_map_mutex_ has to be used when the chunk map is accessed
  // concurrently.
  base::Mutex chunk_map_mutex_;

  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;
};
class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  // Available bytes for objects in this space.
  size_t Available() override;

  void FreeAllObjects();
};

class CodeLargeObjectSpace : public LargeObjectSpace {
 public:
  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kCodeLargeObjectState,
    kFinishedState
  };
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_
3120 #endif // V8_HEAP_SPACES_H_