#include "src/allocation.h"

#include <stdlib.h>  // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#include "src/v8.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {

namespace {

void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back
  // to memalign.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

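// Lazily constructs the process-wide page allocator: prefer the embedder's
// allocator from the current v8::Platform and fall back to V8's default one;
// under LeakSanitizer, wrap whichever was chosen so LSan sees page
// allocations.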
struct InitializePageAllocator {
  static void Construct(void* page_allocator_ptr_arg) {
    auto page_allocator_ptr =
        reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
    v8::PageAllocator* page_allocator =
        V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator == nullptr) {
      // On the heap and leaked so that no destructor has to run at exit time.
      static auto* default_allocator = new v8::base::PageAllocator();
      page_allocator = default_allocator;
    }
#if defined(LEAK_SANITIZER)
    // On the heap and leaked, as above.
    static auto* lsan_allocator =
        new v8::base::LsanPageAllocator(page_allocator);
    page_allocator = lsan_allocator;
#endif
    *page_allocator_ptr = page_allocator;
  }
};

static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
    page_allocator = LAZY_INSTANCE_INITIALIZER;

// An allocation is attempted at most this many times; between attempts the
// embedder is notified via OnCriticalMemoryPressure().
const int kAllocationTries = 2;

}  // namespace

v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(page_allocator.Get());
  return page_allocator.Get();
}

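// Swaps in a replacement page allocator (intended for tests) and returns the
// previous one so the caller can restore it afterwards.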
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  *page_allocator.Pointer() = new_page_allocator;
  return old_page_allocator;
}

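// Allocation for Malloced subclasses: retry under memory pressure and treat
// final failure as a fatal out-of-memory condition instead of returning
// nullptr.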
void* Malloced::New(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::Delete(void* p) {
  free(p);
}

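// Copies a NUL-terminated string into a buffer from NewArray<char>; the
// caller releases it with DeleteArray.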
char* StrDup(const char* str) {
  int length = StrLength(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, int n) {
  int length = StrLength(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

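// malloc() with retry: if an attempt fails, report memory pressure to the
// embedder and try again, up to kAllocationTries times in total.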
void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

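// Aligned counterpart of AllocWithRetry. On pressure it reports
// size + alignment, the worst case the underlying aligned allocator may
// need; final failure is fatal.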
void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(V8_ALIGNOF(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  free(ptr);  // Pairs with memalign() above.
#else
  free(ptr);  // Pairs with posix_memalign() above.
#endif
}

size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

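// Page-granular allocation with the same retry-on-pressure policy as
// AllocWithRetry. The pressure hint subtracts one page because a perfectly
// aligned allocation needs that much less slack than size + alignment.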
void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
                    size_t size, size_t alignment,
                    PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(address, AlignedAddress(address, alignment));
  DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(address, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size =
        size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

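// Returns the pages in [address, address + size) to the allocator; size must
// cover whole allocation-granularity pages.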
bool FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
  return page_allocator->FreePages(address, size);
}

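// Shrinks a previous allocation from size to new_size, releasing the tail
// pages while keeping the reservation's base address.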
bool ReleasePages(v8::PageAllocator* page_allocator, void* address,
                  size_t size, size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  return page_allocator->ReleasePages(address, size, new_size);
}

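// Thin, DCHECK-guarded pass-through to PageAllocator::SetPermissions.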
bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
                   size_t* allocated) {
  DCHECK_NOT_NULL(page_allocator);
  size_t page_size = page_allocator->AllocatePageSize();
  void* result = AllocatePages(page_allocator, address, page_size, page_size,
                               PageAllocator::kReadWrite);
  if (result != nullptr) *allocated = page_size;
  return static_cast<byte*>(result);
}

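// Forwards a memory-pressure notification to the embedder, preferring the
// length-aware overload and falling back to the parameterless one.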
bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

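// Reserves (but does not commit) a page-aligned region of address space.
// A minimal usage sketch follows; the call site is hypothetical and `size`
// stands for a caller-chosen, commit-page-aligned value:
//
//   VirtualMemory reservation(GetPlatformPageAllocator(), size,
//                             GetRandomMmapAddr(), AllocatePageSize());
//   if (reservation.IsReserved()) {
//     reservation.SetPermissions(reservation.address(), size,
//                                PageAllocator::kReadWrite);
//   }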
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  Address address = reinterpret_cast<Address>(
      AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
                    PageAllocator::kNoAccess));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

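// Releases the tail of the reservation starting at free_start and returns
// the number of bytes given back.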
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Note: order is important here. The VirtualMemory object might live inside
  // the allocated region, so the fields are updated before releasing pages.
  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Note: order is important here. The VirtualMemory object might live inside
  // the allocated region, so reset the fields before freeing the pages.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects the size to be aligned to allocation granularity, while
  // Release() may have trimmed it to commit granularity only.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

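// Moves ownership of another VirtualMemory's reservation into this (empty)
// object, leaving `from` reset.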
void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  page_allocator_ = from->page_allocator_;
  region_ = from->region_;
  from->Reset();
}

}  // namespace internal
}  // namespace v8

// For reference, the v8::PageAllocator interface (declared in
// include/v8-platform.h) that the helpers above delegate to:
//
//   virtual size_t AllocatePageSize() = 0;
//   virtual size_t CommitPageSize() = 0;
//   virtual void SetRandomMmapSeed(int64_t seed) = 0;
//   virtual void* GetRandomMmapAddr() = 0;
//   virtual void* AllocatePages(void* address, size_t length,
//                               size_t alignment, Permission permissions) = 0;
//   virtual bool FreePages(void* address, size_t length) = 0;
//   virtual bool ReleasePages(void* address, size_t length,
//                             size_t new_length) = 0;
//   virtual bool SetPermissions(void* address, size_t length,
//                               Permission permissions) = 0;