5 #include "src/zone/accounting-allocator.h" 13 #include "src/allocation.h" 18 AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
19 static const size_t kDefaultBucketMaxSize = 5;
21 memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
22 std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
24 std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
25 std::fill(unused_segments_max_sizes_,
26 unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
29 AccountingAllocator::~AccountingAllocator() { ClearPool(); }

void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);

  // Under memory pressure, drop everything that is currently pooled.
  if (level != MemoryPressureLevel::kNone) {
    ClearPool();
  }
}

void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
  // full_size is the sum of the sizes of one segment from each bucket, i.e.
  // 2^kMinSegmentSizePower + ... + 2^kMaxSegmentSizePower.
  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                                  (size_t(1) << kMinSegmentSizePower);
  size_t fits_fully = max_pool_size / full_size;

  base::MutexGuard lock_guard(&unused_segments_mutex_);

  // Give every bucket room for 'fits_fully' segments, then hand out one extra
  // slot per bucket, starting with the smallest segment size, for as long as
  // the total still fits into max_pool_size.
  size_t total_size = fits_fully * full_size;

  for (size_t power = 0; power < kNumberBuckets; ++power) {
    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
        max_pool_size) {
      unused_segments_max_sizes_[power] = fits_fully + 1;
      total_size += size_t(1) << (power + kMinSegmentSizePower);
    } else {
      unused_segments_max_sizes_[power] = fits_fully;
    }
  }
}
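
// Illustrative example (the constants below are assumptions for the sake of
// the example, not values taken from the header): with
// kMinSegmentSizePower == 13 and kMaxSegmentSizePower == 18,
// full_size == 2^19 - 2^13 == 516096 bytes. For max_pool_size == 1 MB,
// fits_fully == 2, so every bucket may cache two segments, and the remaining
// ~16 KB grants one extra slot to the 8 KB bucket only.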

Segment* AccountingAllocator::GetSegment(size_t bytes) {
  // Try to reuse a pooled segment first; fall back to a fresh allocation.
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    if (result != nullptr) {
      result->Initialize(bytes);
    }
  }

  return result;
}

Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  void* memory = AllocWithRetry(bytes);
  if (memory != nullptr) {
    base::AtomicWord current =
        base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
    // Keep max_memory_usage_ up to date; retry the CAS until it succeeds or
    // another thread has already published a larger value.
    base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
    while (current > max) {
      max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
    }
  }
  return reinterpret_cast<Segment*>(memory);
}

void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();

  // Under memory pressure, or when the target bucket is already full, release
  // the segment instead of pooling it.
  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    FreeSegment(segment);
  } else if (!AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}

void AccountingAllocator::FreeSegment(Segment* memory) {
  base::Relaxed_AtomicIncrement(
      &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
  memory->ZapHeader();
  free(memory);
}

size_t AccountingAllocator::GetCurrentMemoryUsage() const {
  return base::Relaxed_Load(&current_memory_usage_);
}

size_t AccountingAllocator::GetMaxMemoryUsage() const {
  return base::Relaxed_Load(&max_memory_usage_);
}

size_t AccountingAllocator::GetCurrentPoolSize() const {
  return base::Relaxed_Load(&current_pool_size_);
}

Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  if (requested_size > (1 << kMaxSegmentSizePower)) {
    return nullptr;
  }

  // Find the bucket with the smallest power-of-two size that still fits the
  // request.
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  Segment* segment;
  {
    base::MutexGuard lock_guard(&unused_segments_mutex_);

    segment = unused_segments_heads_[power];
    if (segment != nullptr) {
      // Pop the segment off the bucket's free list and account for it.
      unused_segments_heads_[power] = segment->next();
      segment->set_next(nullptr);

      unused_segments_sizes_[power]--;
      base::Relaxed_AtomicIncrement(
          &current_pool_size_,
          -static_cast<base::AtomicWord>(segment->size()));
    }
  }

  if (segment != nullptr) {
    DCHECK_GE(segment->size(), requested_size);
  }

  return segment;
}
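
// For instance, in GetSegmentFromPool above (constants assumed for
// illustration only): with kMinSegmentSizePower == 13, a request for 5000
// bytes already fits the first bucket (2^13 == 8192), so it is served, if
// available, from the 8 KB bucket.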

bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  size_t size = segment->size();

  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;

  if (size < (1 << kMinSegmentSizePower)) return false;

  // Find the bucket with the largest power-of-two size that is not larger
  // than the segment.
  size_t power = kMaxSegmentSizePower;
  while (size < (static_cast<size_t>(1) << power)) power--;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  {
    base::MutexGuard lock_guard(&unused_segments_mutex_);

    // Respect the per-bucket limit configured via ConfigureSegmentPool().
    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
      return false;
    }

    segment->set_next(unused_segments_heads_[power]);
    unused_segments_heads_[power] = segment;
    base::Relaxed_AtomicIncrement(&current_pool_size_, size);
    unused_segments_sizes_[power]++;
  }

  return true;
}

void AccountingAllocator::ClearPool() {
  base::MutexGuard lock_guard(&unused_segments_mutex_);

  // Free every pooled segment in every bucket.
  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
       power++) {
    Segment* current = unused_segments_heads_[power];
    while (current != nullptr) {
      Segment* next = current->next();
      FreeSegment(current);
      current = next;
    }
    unused_segments_heads_[power] = nullptr;
  }
}

}  // namespace internal
}  // namespace v8
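
// Minimal usage sketch (illustrative only; the call sites, sizes, and the
// pressure level below are assumptions, not code from this file):
//
//   AccountingAllocator allocator;
//   allocator.ConfigureSegmentPool(1024 * 1024);    // cap the pool at ~1 MB
//   Segment* segment = allocator.GetSegment(8192);  // pooled or freshly allocated
//   if (segment != nullptr) {
//     // ... carve zone allocations out of the segment ...
//     allocator.ReturnSegment(segment);             // back to the pool, or freed
//   }
//   // Under memory pressure the pool is cleared and bypassed:
//   allocator.MemoryPressureNotification(MemoryPressureLevel::kModerate);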