V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
zone.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/zone/zone.h"
6 
7 #include <cstring>
8 
9 #include "src/asan.h"
10 #include "src/utils.h"
11 #include "src/v8.h"
12 
13 namespace v8 {
14 namespace internal {
15 
namespace {

#ifdef V8_USE_ADDRESS_SANITIZER

// Number of unaddressable "redzone" bytes appended to every Zone allocation
// so AddressSanitizer can detect overruns past the allocated block (see
// Zone::AsanNew, which poisons this region).
constexpr size_t kASanRedzoneBytes = 24;  // Must be a multiple of 8.

#else  // !V8_USE_ADDRESS_SANITIZER

// Without ASan instrumentation no redzone is needed; allocations are packed
// back to back.
constexpr size_t kASanRedzoneBytes = 0;

#endif  // V8_USE_ADDRESS_SANITIZER

}  // namespace
29 
// Creates an empty zone: no segments are allocated up front; the first
// allocation request triggers NewExpand.  The allocator is notified so it
// can track live zones.
// NOTE: the initializer order below must match the member declaration order
// in zone.h -- do not reorder.
Zone::Zone(AccountingAllocator* allocator, const char* name,
           SegmentSize segment_size)
    : allocation_size_(0),
      segment_bytes_allocated_(0),
      position_(0),            // bump pointer: next free byte (0 = no segment)
      limit_(0),               // end of the current segment's usable space
      allocator_(allocator),
      segment_head_(nullptr),  // front of the singly-linked segment chain
      name_(name),
      sealed_(false),          // once sealed, AsanNew CHECK-fails
      segment_size_(segment_size) {
  allocator_->ZoneCreation(this);
}
43 
Zone::~Zone() {
  // Notify the allocator first, then return all segments; DeleteAll
  // decrements segment_bytes_allocated_ for each returned segment, so it
  // must reach exactly zero afterwards.
  allocator_->ZoneDestruction(this);
  DeleteAll();

  DCHECK_EQ(segment_bytes_allocated_, 0);
}
50 
51 void* Zone::AsanNew(size_t size) {
52  CHECK(!sealed_);
53 
54  // Round up the requested size to fit the alignment.
55  size = RoundUp(size, kAlignmentInBytes);
56 
57  // Check if the requested size is available without expanding.
58  Address result = position_;
59 
60  const size_t size_with_redzone = size + kASanRedzoneBytes;
61  DCHECK_LE(position_, limit_);
62  if (size_with_redzone > limit_ - position_) {
63  result = NewExpand(size_with_redzone);
64  } else {
65  position_ += size_with_redzone;
66  }
67 
68  Address redzone_position = result + size;
69  DCHECK_EQ(redzone_position + kASanRedzoneBytes, position_);
70  ASAN_POISON_MEMORY_REGION(reinterpret_cast<void*>(redzone_position),
71  kASanRedzoneBytes);
72 
73  // Check that the result has the proper alignment and return it.
74  DCHECK(IsAligned(result, kAlignmentInBytes));
75  return reinterpret_cast<void*>(result);
76 }
77 
// Frees all segments but keeps the zone itself usable for further
// allocation, as if freshly constructed.  The destruction/creation
// notifications bracket DeleteAll so the allocator's zone accounting stays
// balanced.
void Zone::ReleaseMemory() {
  allocator_->ZoneDestruction(this);
  DeleteAll();
  allocator_->ZoneCreation(this);
}
83 
84 void Zone::DeleteAll() {
85  // Traverse the chained list of segments and return them all to the allocator.
86  for (Segment* current = segment_head_; current;) {
87  Segment* next = current->next();
88  size_t size = current->size();
89 
90  // Un-poison the segment content so we can re-use or zap it later.
91  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(current->start()),
92  current->capacity());
93 
94  segment_bytes_allocated_ -= size;
95  allocator_->ReturnSegment(current);
96  current = next;
97  }
98 
99  position_ = limit_ = 0;
100  allocation_size_ = 0;
101  segment_head_ = nullptr;
102 }
103 
104 // Creates a new segment, sets it size, and pushes it to the front
105 // of the segment chain. Returns the new segment.
106 Segment* Zone::NewSegment(size_t requested_size) {
107  Segment* result = allocator_->GetSegment(requested_size);
108  if (result != nullptr) {
109  DCHECK_GE(result->size(), requested_size);
110  segment_bytes_allocated_ += result->size();
111  result->set_zone(this);
112  result->set_next(segment_head_);
113  segment_head_ = result;
114  }
115  return result;
116 }
117 
// Slow path of allocation: grows the zone with a new segment sized by a
// high-water-mark policy and returns an aligned block of `size` bytes from
// it.  On failure (overflow, oversized request, or allocator exhaustion)
// this calls FatalProcessOutOfMemory and does not return normally.
Address Zone::NewExpand(size_t size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
  DCHECK(limit_ - position_ < size);

  // Commit the allocation_size_ of segment_head_ if any.
  allocation_size_ = allocation_size();
  // Compute the new segment size. We use a 'high water mark'
  // strategy, where we increase the segment size every time we expand
  // except that we employ a maximum segment size when we delete. This
  // is to avoid excessive malloc() and free() overhead.
  Segment* head = segment_head_;
  const size_t old_size = (head == nullptr) ? 0 : head->size();
  // Overhead = the Segment header itself plus worst-case alignment padding
  // in front of the returned block (see the RoundUp of segment->start()).
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
  // Geometric growth: double the previous segment size on top of `size`.
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + size;
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }
  if (segment_size_ == SegmentSize::kLarge) {
    // Large-segment zones always request the maximum segment size.
    // NOTE(review): this overrides the computed size unconditionally and the
    // clamp below does not re-check it, so it appears to assume min_new_size
    // never exceeds kMaximumSegmentSize on this path -- an oversized request
    // would under-allocate and trip the position_/limit_ DCHECK at the end.
    // Confirm against callers that use SegmentSize::kLarge.
    new_size = kMaximumSegmentSize;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, thus putting pressure on contiguous virtual address
    // space. All the while making sure to allocate a segment large enough
    // to hold the requested size.
    new_size = Max(min_new_size, kMaximumSegmentSize);
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }
  Segment* segment = NewSegment(new_size);
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }

  // Recompute 'top' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignmentInBytes);
  position_ = result + size;
  // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
  // size bytes + header and alignment padding)
  DCHECK(position_ >= result);
  limit_ = segment->end();
  DCHECK(position_ <= limit_);
  return result;
}
174 
175 } // namespace internal
176 } // namespace v8
Definition: libplatform.h:13