V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
deserializer-allocator.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer-allocator.h"

#include "src/heap/heap-inl.h"  // crbug.com/v8/8499
#include "src/snapshot/deserializer.h"
#include "src/snapshot/startup-deserializer.h"

namespace v8 {
namespace internal {

DeserializerAllocator::DeserializerAllocator(Deserializer* deserializer)
    : deserializer_(deserializer) {}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
  if (space == LO_SPACE) {
    AlwaysAllocateScope scope(isolate());
    // Note that we currently do not support deserialization of large code
    // objects.
    LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
    AllocationResult result = lo_space->AllocateRaw(size);
    HeapObject* obj = result.ToObjectChecked();
    deserialized_large_objects_.push_back(obj);
    return obj->address();
  } else if (space == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK_LT(space, kNumberOfPreallocatedSpaces);
    Address address = high_water_[space];
    DCHECK_NE(address, kNullAddress);
    high_water_[space] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space];
    int chunk_index = current_chunk_[space];
    DCHECK_LE(high_water_[space], reservation[chunk_index].end);
#endif
    if (space == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}
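
// Illustrative sketch, not part of the V8 source (standalone snippet): a
// minimal model of the bump-pointer path above. ExampleChunk and
// BumpAllocate are hypothetical stand-ins for reservation[chunk_index] and
// the high_water_ bookkeeping in AllocateRaw.
#include <cassert>
#include <cstdint>

struct ExampleChunk {
  uintptr_t start;
  uintptr_t end;
};

static uintptr_t BumpAllocate(const ExampleChunk& chunk,
                              uintptr_t* high_water, int size) {
  uintptr_t address = *high_water;   // next free byte in the current chunk
  *high_water += size;               // bump the mark past the new object
  assert(*high_water <= chunk.end);  // the serializer sized the chunk for us
  return address;
}
// The serializer recorded every object's size up front, so overflowing a
// chunk can only mean a corrupted snapshot; that is why the in-bounds check
// in the real code is a DCHECK rather than a runtime branch.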

Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
  Address address;
  HeapObject* obj;

  if (next_alignment_ != kWordAligned) {
    const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = AllocateRaw(space, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate()->heap();
    DCHECK(ReadOnlyRoots(heap).free_space_map()->IsMap());
    DCHECK(ReadOnlyRoots(heap).one_pointer_filler_map()->IsMap());
    DCHECK(ReadOnlyRoots(heap).two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
    return address;
  } else {
    return AllocateRaw(space, size);
  }
}
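
// Illustrative sketch, not part of the V8 source (standalone snippet): what
// the over-reservation in Allocate buys. For a non-word alignment the
// allocator grabs size + maximum-fill bytes and then carves a filler object
// out of the unused slack. AlignUpExample is a hypothetical stand-in for the
// arithmetic behind Heap::GetFillToAlign / AlignWithFiller.
#include <cstdint>

static uintptr_t AlignUpExample(uintptr_t address, uintptr_t alignment) {
  // Round up to the next multiple of a power-of-two alignment.
  return (address + alignment - 1) & ~(alignment - 1);
}
// E.g. with 4-byte words and 8-byte (double) alignment, a raw address of
// 0x1004 yields an object at AlignUpExample(0x1004, 8) == 0x1008, and the
// word at 0x1004 becomes a one-pointer filler. This is why the filler maps
// must already be deserialized, as the DCHECKs above verify.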

void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
  uint32_t chunk_index = current_chunk_[space];
  const Heap::Reservation& reservation = reservations_[space];
  // Make sure the current chunk is indeed exhausted.
  CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
  // Move to next reserved chunk.
  chunk_index = ++current_chunk_[space];
  CHECK_LT(chunk_index, reservation.size());
  high_water_[space] = reservation[chunk_index].start;
}

HeapObject* DeserializerAllocator::GetMap(uint32_t index) {
  DCHECK_LT(index, next_map_index_);
  return HeapObject::FromAddress(allocated_maps_[index]);
}

HeapObject* DeserializerAllocator::GetLargeObject(uint32_t index) {
  DCHECK_LT(index, deserialized_large_objects_.size());
  return deserialized_large_objects_[index];
}

HeapObject* DeserializerAllocator::GetObject(AllocationSpace space,
                                             uint32_t chunk_index,
                                             uint32_t chunk_offset) {
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
  DCHECK_LE(chunk_index, current_chunk_[space]);
  Address address = reservations_[space][chunk_index].start + chunk_offset;
  if (next_alignment_ != kWordAligned) {
    int padding = Heap::GetFillToAlign(address, next_alignment_);
    next_alignment_ = kWordAligned;
    DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
    address += padding;
  }
  return HeapObject::FromAddress(address);
}
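
// Illustrative sketch, not part of the V8 source (standalone snippet): how a
// back reference resolves through GetObject. The snapshot encodes
// (space, chunk_index, chunk_offset) instead of a raw pointer, so the
// deserializer can rebase it onto wherever the reserved chunks actually
// landed in this heap. ExampleBackRef and ResolveExample are hypothetical.
#include <cstdint>
#include <vector>

struct ExampleBackRef {
  int space;
  uint32_t chunk_index;
  uint32_t chunk_offset;
};

static uintptr_t ResolveExample(
    const std::vector<std::vector<uintptr_t>>& chunk_starts,
    const ExampleBackRef& ref) {
  // Same arithmetic as GetObject: chunk base address plus recorded offset.
  return chunk_starts[ref.space][ref.chunk_index] + ref.chunk_offset;
}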

void DeserializerAllocator::DecodeReservation(
    const std::vector<SerializedData::Reservation>& res) {
  DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
  int current_space = FIRST_SPACE;
  for (auto& r : res) {
    reservations_[current_space].push_back(
        {r.chunk_size(), kNullAddress, kNullAddress});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}
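
// Illustrative sketch, not part of the V8 source (standalone snippet): the
// reservation stream consumed by DecodeReservation is a flat list of chunk
// sizes in which each entry's is_last bit closes out the current space, so
// decoding {8192, 4096*, 16384*} (asterisk marking is_last) assigns two
// chunks to the first space and one to the next. ExampleReservation and
// DecodeExample are hypothetical.
#include <cstdint>
#include <vector>

struct ExampleReservation {
  uint32_t chunk_size;
  bool is_last;
};

static std::vector<std::vector<uint32_t>> DecodeExample(
    const std::vector<ExampleReservation>& res, int num_spaces) {
  std::vector<std::vector<uint32_t>> per_space(num_spaces);
  int current_space = 0;
  for (const auto& r : res) {
    per_space[current_space].push_back(r.chunk_size);
    if (r.is_last) current_space++;  // is_last delimits consecutive spaces
  }
  return per_space;
}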

bool DeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
  for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
    DCHECK_GT(reservations_[i].size(), 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.empty());
  if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
    return false;
  }
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

bool DeserializerAllocator::ReservationsAreFullyUsed() const {
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    const uint32_t chunk_index = current_chunk_[space];
    if (reservations_[space].size() != chunk_index + 1) {
      return false;
    }
    if (reservations_[space][chunk_index].end != high_water_[space]) {
      return false;
    }
  }
  return (allocated_maps_.size() == next_map_index_);
}

void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
  isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
      reservations_, deserialized_large_objects_, allocated_maps_);
}

Isolate* DeserializerAllocator::isolate() const {
  return deserializer_->isolate();
}

}  // namespace internal
}  // namespace v8