V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
serializer.cc
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/snapshot/serializer.h"
6 
7 #include "src/assembler-inl.h"
8 #include "src/heap/heap.h"
9 #include "src/interpreter/interpreter.h"
10 #include "src/objects/code.h"
11 #include "src/objects/js-array-buffer-inl.h"
12 #include "src/objects/js-array-inl.h"
13 #include "src/objects/map.h"
14 #include "src/objects/slots-inl.h"
15 #include "src/objects/smi.h"
16 #include "src/snapshot/natives.h"
17 #include "src/snapshot/snapshot.h"
18 
19 namespace v8 {
20 namespace internal {
21 
22 Serializer::Serializer(Isolate* isolate)
23  : isolate_(isolate),
24  external_reference_encoder_(isolate),
25  root_index_map_(isolate),
26  allocator_(this) {
27 #ifdef OBJECT_PRINT
28  if (FLAG_serialization_statistics) {
29  for (int space = 0; space < LAST_SPACE; ++space) {
30  instance_type_count_[space] = NewArray<int>(kInstanceTypes);
31  instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
32  for (int i = 0; i < kInstanceTypes; i++) {
33  instance_type_count_[space][i] = 0;
34  instance_type_size_[space][i] = 0;
35  }
36  }
37  } else {
38  for (int space = 0; space < LAST_SPACE; ++space) {
39  instance_type_count_[space] = nullptr;
40  instance_type_size_[space] = nullptr;
41  }
42  }
43 #endif // OBJECT_PRINT
44 }
45 
46 Serializer::~Serializer() {
47  if (code_address_map_ != nullptr) delete code_address_map_;
48 #ifdef OBJECT_PRINT
49  for (int space = 0; space < LAST_SPACE; ++space) {
50  if (instance_type_count_[space] != nullptr) {
51  DeleteArray(instance_type_count_[space]);
52  DeleteArray(instance_type_size_[space]);
53  }
54  }
55 #endif // OBJECT_PRINT
56 }
57 
58 #ifdef OBJECT_PRINT
59 void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
60  int instance_type = map->instance_type();
61  instance_type_count_[space][instance_type]++;
62  instance_type_size_[space][instance_type] += size;
63 }
64 #endif // OBJECT_PRINT
65 
66 void Serializer::OutputStatistics(const char* name) {
67  if (!FLAG_serialization_statistics) return;
68 
69  PrintF("%s:\n", name);
70  allocator()->OutputStatistics();
71 
72 #ifdef OBJECT_PRINT
73  PrintF(" Instance types (count and bytes):\n");
74 #define PRINT_INSTANCE_TYPE(Name) \
75  for (int space = 0; space < LAST_SPACE; ++space) { \
76  if (instance_type_count_[space][Name]) { \
77  PrintF("%10d %10" PRIuS " %-10s %s\n", \
78  instance_type_count_[space][Name], \
79  instance_type_size_[space][Name], \
80  AllocationSpaceName(static_cast<AllocationSpace>(space)), #Name); \
81  } \
82  }
83  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
84 #undef PRINT_INSTANCE_TYPE
85 
86  PrintF("\n");
87 #endif // OBJECT_PRINT
88 }
89 
90 void Serializer::SerializeDeferredObjects() {
91  while (!deferred_objects_.empty()) {
92  HeapObject* obj = deferred_objects_.back();
93  deferred_objects_.pop_back();
94  ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
95  obj_serializer.SerializeDeferred();
96  }
97  sink_.Put(kSynchronize, "Finished with deferred objects");
98 }
99 
100 bool Serializer::MustBeDeferred(HeapObject* object) { return false; }
101 
102 void Serializer::VisitRootPointers(Root root, const char* description,
103  ObjectSlot start, ObjectSlot end) {
104  for (ObjectSlot current = start; current < end; ++current) {
105  SerializeRootObject(*current);
106  }
107 }
108 
109 void Serializer::SerializeRootObject(Object* object) {
110  if (object->IsSmi()) {
111  PutSmi(Smi::cast(object));
112  } else {
113  SerializeObject(HeapObject::cast(object), kPlain, kStartOfObject, 0);
114  }
115 }
116 
117 #ifdef DEBUG
118 void Serializer::PrintStack() {
119  for (const auto o : stack_) {
120  o->Print();
121  PrintF("\n");
122  }
123 }
124 #endif // DEBUG
125 
126 bool Serializer::SerializeRoot(HeapObject* obj, HowToCode how_to_code,
127  WhereToPoint where_to_point, int skip) {
128  RootIndex root_index;
129  // Derived serializers are responsible for determining if the root has
130  // actually been serialized before calling this.
131  if (root_index_map()->Lookup(obj, &root_index)) {
132  PutRoot(root_index, obj, how_to_code, where_to_point, skip);
133  return true;
134  }
135  return false;
136 }
137 
138 bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
139  WhereToPoint where_to_point, int skip) {
140  if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
141  // Encode a reference to a hot object by its index in the working set.
142  int index = hot_objects_.Find(obj);
143  if (index == HotObjectsList::kNotFound) return false;
144  DCHECK(index >= 0 && index < kNumberOfHotObjects);
145  if (FLAG_trace_serializer) {
146  PrintF(" Encoding hot object %d:", index);
147  obj->ShortPrint();
148  PrintF("\n");
149  }
150  if (skip != 0) {
151  sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
152  sink_.PutInt(skip, "HotObjectSkipDistance");
153  } else {
154  sink_.Put(kHotObject + index, "HotObject");
155  }
156  return true;
157 }
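// Illustrative encoding (derived from the branches above): for an object at
// working-set index 3 with skip == 0, the stream receives the single opcode
// byte kHotObject + 3; with a nonzero skip it receives kHotObjectWithSkip + 3
// followed by the skip distance as a varint.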
158 
159 bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
160  WhereToPoint where_to_point, int skip) {
161  SerializerReference reference = reference_map_.LookupReference(obj);
162  if (!reference.is_valid()) return false;
163  // Encode the location of an already deserialized object in order to write
164  // its location into a later object. We can encode the location as an
165  // offset from the start of the deserialized objects or as an offset
166  // backwards from the current allocation pointer.
167  if (reference.is_attached_reference()) {
168  FlushSkip(skip);
169  if (FLAG_trace_serializer) {
170  PrintF(" Encoding attached reference %d\n",
171  reference.attached_reference_index());
172  }
173  PutAttachedReference(reference, how_to_code, where_to_point);
174  } else {
175  DCHECK(reference.is_back_reference());
176  if (FLAG_trace_serializer) {
177  PrintF(" Encoding back reference to: ");
178  obj->ShortPrint();
179  PrintF("\n");
180  }
181 
182  PutAlignmentPrefix(obj);
183  AllocationSpace space = reference.space();
184  if (skip == 0) {
185  sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
186  } else {
187  sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
188  "BackRefWithSkip");
189  sink_.PutInt(skip, "BackRefSkipDistance");
190  }
191  PutBackReference(obj, reference);
192  }
193  return true;
194 }
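// Illustrative encoding: a back reference into OLD_SPACE with skip == 0 is the
// single opcode kBackref + kPlain + kStartOfObject + OLD_SPACE, followed by
// the payload PutBackReference (below) writes for that space: a map index for
// MAP_SPACE, a large-object index for LO_SPACE, or a chunk index plus chunk
// offset otherwise.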
195 
196 bool Serializer::ObjectIsBytecodeHandler(HeapObject* obj) const {
197  if (!obj->IsCode()) return false;
198  return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
199 }
200 
201 void Serializer::PutRoot(RootIndex root, HeapObject* object,
202  SerializerDeserializer::HowToCode how_to_code,
203  SerializerDeserializer::WhereToPoint where_to_point,
204  int skip) {
205  int root_index = static_cast<int>(root);
206  if (FLAG_trace_serializer) {
207  PrintF(" Encoding root %d:", root_index);
208  object->ShortPrint();
209  PrintF("\n");
210  }
211 
212  // Assert that the first 32 root array items are a conscious choice. They are
213  // chosen so that the most common ones can be encoded more efficiently.
214  STATIC_ASSERT(static_cast<int>(RootIndex::kArgumentsMarker) ==
215  kNumberOfRootArrayConstants - 1);
216 
217  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
218  root_index < kNumberOfRootArrayConstants && !Heap::InNewSpace(object)) {
219  if (skip == 0) {
220  sink_.Put(kRootArrayConstants + root_index, "RootConstant");
221  } else {
222  sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
223  sink_.PutInt(skip, "SkipInPutRoot");
224  }
225  } else {
226  FlushSkip(skip);
227  sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
228  sink_.PutInt(root_index, "root_index");
229  hot_objects_.Add(object);
230  }
231 }
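// Illustrative encoding: a kPlain/kStartOfObject reference to a frequently
// used root such as undefined_value (assuming it sits within the first 32
// entries guarded by the static assert above) costs a single byte,
// kRootArrayConstants + root_index; roots past that window fall back to
// kRootArray plus a root_index varint and are also added to the hot-object
// working set.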
232 
233 void Serializer::PutSmi(Smi smi) {
234  sink_.Put(kOnePointerRawData, "Smi");
235  Address raw_value = smi.ptr();
236  byte bytes[kPointerSize];
237  memcpy(bytes, &raw_value, kPointerSize);
238  for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
239 }
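// Illustrative byte layout (assuming a 64-bit build where a Smi carries its
// payload in the upper 32 bits): Smi::FromInt(42).ptr() == 42ull << 32, so the
// sink receives kOnePointerRawData followed by the little-endian bytes
// 00 00 00 00 2a 00 00 00.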
240 
241 void Serializer::PutBackReference(HeapObject* object,
242  SerializerReference reference) {
243  DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
244  switch (reference.space()) {
245  case MAP_SPACE:
246  sink_.PutInt(reference.map_index(), "BackRefMapIndex");
247  break;
248 
249  case LO_SPACE:
250  sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
251  break;
252 
253  default:
254  sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
255  sink_.PutInt(reference.chunk_offset(), "BackRefChunkOffset");
256  break;
257  }
258 
259  hot_objects_.Add(object);
260 }
261 
262 void Serializer::PutAttachedReference(SerializerReference reference,
263  HowToCode how_to_code,
264  WhereToPoint where_to_point) {
265  DCHECK(reference.is_attached_reference());
266  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
267  (how_to_code == kFromCode && where_to_point == kStartOfObject) ||
268  (how_to_code == kFromCode && where_to_point == kInnerPointer));
269  sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
270  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
271 }
272 
273 int Serializer::PutAlignmentPrefix(HeapObject* object) {
274  AllocationAlignment alignment = HeapObject::RequiredAlignment(object->map());
275  if (alignment != kWordAligned) {
276  DCHECK(1 <= alignment && alignment <= 3);
277  byte prefix = (kAlignmentPrefix - 1) + alignment;
278  sink_.Put(prefix, "Alignment");
279  return Heap::GetMaximumFillToAlign(alignment);
280  }
281  return 0;
282 }
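// Illustrative encoding (assuming kDoubleAligned == 1 as in the
// AllocationAlignment enum): a double-aligned object emits the single byte
// (kAlignmentPrefix - 1) + 1 == kAlignmentPrefix, and the caller reserves
// Heap::GetMaximumFillToAlign(kDoubleAligned) bytes of fill; word-aligned
// objects emit nothing and reserve nothing.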
283 
284 void Serializer::PutNextChunk(int space) {
285  sink_.Put(kNextChunk, "NextChunk");
286  sink_.Put(space, "NextChunkSpace");
287 }
288 
289 void Serializer::Pad(int padding_offset) {
290  // The non-branching GetInt will read up to 3 bytes too far, so we need
291  // to pad the snapshot to make sure we don't read over the end.
292  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
293  sink_.Put(kNop, "Padding");
294  }
295  // Pad up to pointer size for checksum.
296  while (!IsAligned(sink_.Position() + padding_offset, kPointerAlignment)) {
297  sink_.Put(kNop, "Padding");
298  }
299 }
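// Worked example: the first loop always emits sizeof(int32_t) - 1 == 3 kNop
// bytes; if sink_.Position() + padding_offset then stands at 13 with an 8-byte
// kPointerAlignment, the second loop emits three more kNops to reach 16.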
300 
301 void Serializer::InitializeCodeAddressMap() {
302  isolate_->InitializeLoggingAndCounters();
303  code_address_map_ = new CodeAddressMap(isolate_);
304 }
305 
306 Code Serializer::CopyCode(Code code) {
307  code_buffer_.clear(); // Clear buffer without deleting backing store.
308  int size = code->CodeSize();
309  code_buffer_.insert(code_buffer_.end(),
310  reinterpret_cast<byte*>(code->address()),
311  reinterpret_cast<byte*>(code->address() + size));
312  return Code::cast(HeapObject::FromAddress(
313  reinterpret_cast<Address>(&code_buffer_.front())));
314 }
315 
316 void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
317  int size, Map map) {
318  if (serializer_->code_address_map_) {
319  const char* code_name =
320  serializer_->code_address_map_->Lookup(object_->address());
321  LOG(serializer_->isolate_,
322  CodeNameEvent(object_->address(), sink_->Position(), code_name));
323  }
324 
325  SerializerReference back_reference;
326  if (space == LO_SPACE) {
327  sink_->Put(kNewObject + reference_representation_ + space,
328  "NewLargeObject");
329  sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
330  CHECK(!object_->IsCode());
331  back_reference = serializer_->allocator()->AllocateLargeObject(size);
332  } else if (space == MAP_SPACE) {
333  DCHECK_EQ(Map::kSize, size);
334  back_reference = serializer_->allocator()->AllocateMap();
335  sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
336  // This is redundant, but we include it anyway.
337  sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
338  } else {
339  int fill = serializer_->PutAlignmentPrefix(object_);
340  back_reference = serializer_->allocator()->Allocate(space, size + fill);
341  sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
342  sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
343  }
344 
345 #ifdef OBJECT_PRINT
346  if (FLAG_serialization_statistics) {
347  serializer_->CountInstanceType(map, size, space);
348  }
349 #endif // OBJECT_PRINT
350 
351  // Mark this object as already serialized.
352  serializer_->reference_map()->Add(object_, back_reference);
353 
354  // Serialize the map (first word of the object).
355  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
356 }
357 
358 int32_t Serializer::ObjectSerializer::SerializeBackingStore(
359  void* backing_store, int32_t byte_length) {
360  SerializerReference reference =
361  serializer_->reference_map()->LookupReference(backing_store);
362 
363  // Serialize the off-heap backing store.
364  if (!reference.is_valid()) {
365  sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
366  sink_->PutInt(byte_length, "length");
367  sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
368  "BackingStore");
369  reference = serializer_->allocator()->AllocateOffHeapBackingStore();
370  // Mark this backing store as already serialized.
371  serializer_->reference_map()->Add(backing_store, reference);
372  }
373 
374  return static_cast<int32_t>(reference.off_heap_backing_store_index());
375 }
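// Usage note (derived from the lookup above): the first call for a given
// backing_store emits the raw bytes and registers a reference; later calls
// with the same pointer skip the payload and reuse the existing index, so a
// buffer shared by several views is written into the snapshot only once.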
376 
377 void Serializer::ObjectSerializer::SerializeJSTypedArray() {
378  JSTypedArray* typed_array = JSTypedArray::cast(object_);
379  FixedTypedArrayBase elements =
380  FixedTypedArrayBase::cast(typed_array->elements());
381 
382  if (!typed_array->WasNeutered()) {
383  if (!typed_array->is_on_heap()) {
384  // Explicitly serialize the backing store now.
385  JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
386  CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
387  CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
388  int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
389  int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
390 
391  // We need to calculate the backing store from the external pointer
392  // because the ArrayBuffer may already have been serialized.
393  void* backing_store = reinterpret_cast<void*>(
394  reinterpret_cast<intptr_t>(elements->external_pointer()) -
395  byte_offset);
396  int32_t ref = SerializeBackingStore(backing_store, byte_length);
397 
398  // The external_pointer is the backing_store + typed_array->byte_offset.
399  // To properly share the buffer, we set the backing store ref here. On
400  // deserialization we re-add the byte_offset to external_pointer.
401  elements->set_external_pointer(Smi::FromInt(ref));
402  }
403  } else {
404  // When a JSArrayBuffer is neutered, the FixedTypedArray that points to the
405  // same backing store does not know anything about it. This fixup step finds
406  // neutered TypedArrays and clears the values in the FixedTypedArray so that
407  // we don't try to serialize the now invalid backing store.
408  elements->set_external_pointer(Smi::kZero);
409  elements->set_length(0);
410  }
411  SerializeObject();
412 }
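// Illustrative arithmetic for the off-heap branch above: with byte_offset ==
// 16 and external_pointer == backing_store + 16, the subtraction recovers the
// buffer start; if SerializeBackingStore returns index 2, the elements'
// external_pointer is rewritten to Smi::FromInt(2) before serialization, and
// deserialization re-adds the byte_offset.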
413 
414 void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
415  JSArrayBuffer* buffer = JSArrayBuffer::cast(object_);
416  void* backing_store = buffer->backing_store();
417  // We cannot store byte_length larger than Smi range in the snapshot.
418  CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
419  int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
420 
421  // The embedder-allocated backing store only exists for the off-heap case.
422  if (backing_store != nullptr) {
423  int32_t ref = SerializeBackingStore(backing_store, byte_length);
424  buffer->set_backing_store(Smi::FromInt(ref));
425  }
426  SerializeObject();
427  buffer->set_backing_store(backing_store);
428 }
429 
430 void Serializer::ObjectSerializer::SerializeExternalString() {
431  Heap* heap = serializer_->isolate()->heap();
432  // For external strings with known resources, we replace the resource field
433  // with the encoded external reference, which we restore upon deserialization.
434  // For native source code strings, we replace the resource field
435  // with the native source id.
436  // For the rest we serialize them to look like ordinary sequential strings.
437  if (object_->map() != ReadOnlyRoots(heap).native_source_string_map()) {
438  ExternalString string = ExternalString::cast(object_);
439  Address resource = string->resource_as_address();
440  ExternalReferenceEncoder::Value reference;
441  if (serializer_->external_reference_encoder_.TryEncode(resource).To(
442  &reference)) {
443  DCHECK(reference.is_from_api());
444  string->set_uint32_as_resource(reference.index());
445  SerializeObject();
446  string->set_address_as_resource(resource);
447  } else {
448  SerializeExternalStringAsSequentialString();
449  }
450  } else {
451  ExternalOneByteString string = ExternalOneByteString::cast(object_);
452  DCHECK(string->is_uncached());
453  const NativesExternalStringResource* resource =
454  reinterpret_cast<const NativesExternalStringResource*>(
455  string->resource());
456  // Replace the resource field with the type and index of the native source.
457  string->set_resource(resource->EncodeForSerialization());
458  SerializeObject();
459  // Restore the resource field.
460  string->set_resource(resource);
461  }
462 }
463 
464 void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
465  // Instead of serializing this as an external string, we serialize
466  // an imaginary sequential string with the same content.
467  ReadOnlyRoots roots(serializer_->isolate());
468  DCHECK(object_->IsExternalString());
469  DCHECK(object_->map() != roots.native_source_string_map());
470  ExternalString string = ExternalString::cast(object_);
471  int length = string->length();
472  Map map;
473  int content_size;
474  int allocation_size;
475  const byte* resource;
476  // Find the map and size for the imaginary sequential string.
477  bool internalized = object_->IsInternalizedString();
478  if (object_->IsExternalOneByteString()) {
479  map = internalized ? roots.one_byte_internalized_string_map()
480  : roots.one_byte_string_map();
481  allocation_size = SeqOneByteString::SizeFor(length);
482  content_size = length * kCharSize;
483  resource = reinterpret_cast<const byte*>(
484  ExternalOneByteString::cast(string)->resource()->data());
485  } else {
486  map = internalized ? roots.internalized_string_map() : roots.string_map();
487  allocation_size = SeqTwoByteString::SizeFor(length);
488  content_size = length * kShortSize;
489  resource = reinterpret_cast<const byte*>(
490  ExternalTwoByteString::cast(string)->resource()->data());
491  }
492 
493  AllocationSpace space =
494  (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
495  SerializePrologue(space, allocation_size, map);
496 
497  // Output the rest of the imaginary string.
498  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
499 
500  // Output raw data header. Do not bother with common raw length cases here.
501  sink_->Put(kVariableRawData, "RawDataForString");
502  sink_->PutInt(bytes_to_output, "length");
503 
504  // Serialize string header (except for map).
505  uint8_t* string_start = reinterpret_cast<uint8_t*>(string->address());
506  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
507  sink_->PutSection(string_start[i], "StringHeader");
508  }
509 
510  // Serialize string content.
511  sink_->PutRaw(resource, content_size, "StringContent");
512 
513  // Since the allocation size is rounded up to object alignment, there
514  // may be left-over bytes that need to be padded.
515  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
516  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
517  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
518 }
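// Illustrative stream layout produced above: the prologue (including the map),
// then kVariableRawData with a length varint, the string header minus the map
// word emitted byte by byte, content_size bytes copied from the external
// resource, and finally zero bytes up to object alignment.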
519 
520 // Clear and later restore the next link in the weak cell or allocation site.
521 // TODO(all): replace this with proper iteration of weak slots in serializer.
522 class UnlinkWeakNextScope {
523  public:
524  explicit UnlinkWeakNextScope(Heap* heap, HeapObject* object)
525  : object_(nullptr) {
526  if (object->IsAllocationSite() &&
527  AllocationSite::cast(object)->HasWeakNext()) {
528  object_ = object;
529  next_ = AllocationSite::cast(object)->weak_next();
530  AllocationSite::cast(object)->set_weak_next(
531  ReadOnlyRoots(heap).undefined_value());
532  }
533  }
534 
535  ~UnlinkWeakNextScope() {
536  if (object_ != nullptr) {
537  AllocationSite::cast(object_)->set_weak_next(next_,
538  UPDATE_WEAK_WRITE_BARRIER);
539  }
540  }
541 
542  private:
543  HeapObject* object_;
544  Object* next_;
545  DISALLOW_HEAP_ALLOCATION(no_gc_);
546 };
547 
548 void Serializer::ObjectSerializer::Serialize() {
549  if (FLAG_trace_serializer) {
550  PrintF(" Encoding heap object: ");
551  object_->ShortPrint();
552  PrintF("\n");
553  }
554 
555  if (object_->IsExternalString()) {
556  SerializeExternalString();
557  return;
558  } else if (!serializer_->isolate()->heap()->InReadOnlySpace(object_)) {
559  // Only clear padding for strings outside RO_SPACE. RO_SPACE should have
560  // been cleared elsewhere.
561  if (object_->IsSeqOneByteString()) {
562  // Clear padding bytes at the end. Done here to avoid having to do this
563  // at allocation sites in generated code.
564  SeqOneByteString::cast(object_)->clear_padding();
565  } else if (object_->IsSeqTwoByteString()) {
566  SeqTwoByteString::cast(object_)->clear_padding();
567  }
568  }
569  if (object_->IsJSTypedArray()) {
570  SerializeJSTypedArray();
571  return;
572  }
573  if (object_->IsJSArrayBuffer()) {
574  SerializeJSArrayBuffer();
575  return;
576  }
577 
578  // We don't expect fillers.
579  DCHECK(!object_->IsFiller());
580 
581  if (object_->IsScript()) {
582  // Clear cached line ends.
583  Object* undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
584  Script::cast(object_)->set_line_ends(undefined);
585  }
586 
587  SerializeObject();
588 }
589 
590 void Serializer::ObjectSerializer::SerializeObject() {
591  int size = object_->Size();
592  Map map = object_->map();
593  AllocationSpace space =
594  MemoryChunk::FromAddress(object_->address())->owner()->identity();
595  // Young generation large objects are tenured.
596  if (space == NEW_LO_SPACE) {
597  space = LO_SPACE;
598  }
599  SerializePrologue(space, size, map);
600 
601  // Serialize the rest of the object.
602  CHECK_EQ(0, bytes_processed_so_far_);
603  bytes_processed_so_far_ = kPointerSize;
604 
605  RecursionScope recursion(serializer_);
606  // Objects that are immediately post processed during deserialization
607  // cannot be deferred, since post processing requires the object content.
608  if ((recursion.ExceedsMaximum() && CanBeDeferred(object_)) ||
609  serializer_->MustBeDeferred(object_)) {
610  serializer_->QueueDeferredObject(object_);
611  sink_->Put(kDeferred, "Deferring object content");
612  return;
613  }
614 
615  SerializeContent(map, size);
616 }
617 
618 void Serializer::ObjectSerializer::SerializeDeferred() {
619  if (FLAG_trace_serializer) {
620  PrintF(" Encoding deferred heap object: ");
621  object_->ShortPrint();
622  PrintF("\n");
623  }
624 
625  int size = object_->Size();
626  Map map = object_->map();
627  SerializerReference back_reference =
628  serializer_->reference_map()->LookupReference(object_);
629  DCHECK(back_reference.is_back_reference());
630 
631  // Serialize the rest of the object.
632  CHECK_EQ(0, bytes_processed_so_far_);
633  bytes_processed_so_far_ = kPointerSize;
634 
635  serializer_->PutAlignmentPrefix(object_);
636  sink_->Put(kNewObject + back_reference.space(), "deferred object");
637  serializer_->PutBackReference(object_, back_reference);
638  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
639 
640  SerializeContent(map, size);
641 }
642 
643 void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
644  UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
645  if (object_->IsCode()) {
646  // For code objects, output raw bytes first.
647  OutputCode(size);
648  // Then iterate references via reloc info.
649  object_->IterateBody(map, size, this);
650  // Finally skip to the end.
651  serializer_->FlushSkip(SkipTo(object_->address() + size));
652  } else {
653  // For other objects, iterate references first.
654  object_->IterateBody(map, size, this);
655  // Then output data payload, if any.
656  OutputRawData(object_->address() + size);
657  }
658 }
659 
660 void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
661  ObjectSlot start,
662  ObjectSlot end) {
663  VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
664 }
665 
666 void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
667  MaybeObjectSlot start,
668  MaybeObjectSlot end) {
669  MaybeObjectSlot current = start;
670  while (current < end) {
671  while (current < end && (*current)->IsSmi()) {
672  ++current;
673  }
674  if (current < end) {
675  OutputRawData(current.address());
676  }
677  // TODO(ishell): Revisit this change once we stick to 32-bit compressed
678  // tagged values.
679  while (current < end && (*current)->IsCleared()) {
680  sink_->Put(kClearedWeakReference, "ClearedWeakReference");
681  bytes_processed_so_far_ += kPointerSize;
682  ++current;
683  }
684  HeapObject* current_contents;
685  HeapObjectReferenceType reference_type;
686  while (current < end &&
687  (*current)->GetHeapObject(&current_contents, &reference_type)) {
688  RootIndex root_index;
689  // Repeats are not subject to the write barrier so we can only use
690  // immortal immovable root members. They are never in new space.
691  if (current != start &&
692  serializer_->root_index_map()->Lookup(current_contents,
693  &root_index) &&
694  RootsTable::IsImmortalImmovable(root_index) &&
695  *current == *(current - 1)) {
696  DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
697  DCHECK(!Heap::InNewSpace(current_contents));
698  int repeat_count = 1;
699  while (current + repeat_count < end - 1 &&
700  *(current + repeat_count) == *current) {
701  repeat_count++;
702  }
703  current += repeat_count;
704  bytes_processed_so_far_ += repeat_count * kPointerSize;
705  if (repeat_count > kNumberOfFixedRepeat) {
706  sink_->Put(kVariableRepeat, "VariableRepeat");
707  sink_->PutInt(repeat_count, "repeat count");
708  } else {
709  sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
710  }
711  } else {
712  if (reference_type == HeapObjectReferenceType::WEAK) {
713  sink_->Put(kWeakPrefix, "WeakReference");
714  }
715  serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
716  0);
717  bytes_processed_so_far_ += kPointerSize;
718  ++current;
719  }
720  }
721  }
722 }
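// Illustrative encoding of the repeat optimization: a FixedArray whose slots
// all hold the same immortal immovable root (e.g. undefined) serializes the
// first slot normally; the following identical slots collapse into one
// kFixedRepeatStart + count opcode, or kVariableRepeat plus a varint for runs
// longer than kNumberOfFixedRepeat.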
723 
724 void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
725  RelocInfo* rinfo) {
726  int skip = SkipTo(rinfo->target_address_address());
727  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
728  Object* object = rinfo->target_object();
729  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
730  kStartOfObject, skip);
731  bytes_processed_so_far_ += rinfo->target_address_size();
732 }
733 
734 void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
735  Address* p) {
736  int skip = SkipTo(reinterpret_cast<Address>(p));
737  Address target = *p;
738  auto encoded_reference = serializer_->EncodeExternalReference(target);
739  if (encoded_reference.is_from_api()) {
740  sink_->Put(kApiReference, "ApiRef");
741  } else {
742  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
743  }
744  sink_->PutInt(skip, "SkipB4ExternalRef");
745  sink_->PutInt(encoded_reference.index(), "reference index");
746  bytes_processed_so_far_ += kPointerSize;
747 }
748 
749 void Serializer::ObjectSerializer::VisitExternalReference(Code host,
750  RelocInfo* rinfo) {
751  int skip = SkipTo(rinfo->target_address_address());
752  Address target = rinfo->target_external_reference();
753  auto encoded_reference = serializer_->EncodeExternalReference(target);
754  if (encoded_reference.is_from_api()) {
755  DCHECK(!rinfo->IsCodedSpecially());
756  sink_->Put(kApiReference, "ApiRef");
757  } else {
758  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
759  sink_->Put(kExternalReference + how_to_code + kStartOfObject,
760  "ExternalRef");
761  }
762  sink_->PutInt(skip, "SkipB4ExternalRef");
763  DCHECK_NE(target, kNullAddress); // Code does not reference null.
764  sink_->PutInt(encoded_reference.index(), "reference index");
765  bytes_processed_so_far_ += rinfo->target_address_size();
766 }
767 
768 void Serializer::ObjectSerializer::VisitInternalReference(Code host,
769  RelocInfo* rinfo) {
770  // We do not use skip from last patched pc to find the pc to patch, since
771  // target_address_address may not return addresses in ascending order when
772  // used for internal references. External references may be stored at the
773  // end of the code in the constant pool, whereas internal references are
774  // inline. That would cause the skip to be negative. Instead, we store the
775  // offset from code entry.
776  Address entry = Code::cast(object_)->entry();
777  DCHECK_GE(rinfo->target_internal_reference_address(), entry);
778  uintptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
779  DCHECK_LE(pc_offset, Code::cast(object_)->raw_instruction_size());
780  DCHECK_GE(rinfo->target_internal_reference(), entry);
781  uintptr_t target_offset = rinfo->target_internal_reference() - entry;
782  DCHECK_LE(target_offset, Code::cast(object_)->raw_instruction_size());
783  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
784  ? kInternalReference
785  : kInternalReferenceEncoded,
786  "InternalRef");
787  sink_->PutInt(pc_offset, "internal ref address");
788  sink_->PutInt(target_offset, "internal ref value");
789 }
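// Illustrative encoding: an internal reference patched at entry + 0x40 that
// points to entry + 0x80 becomes kInternalReference (or the encoded variant)
// followed by the varints 0x40 and 0x80, both measured from the code entry
// rather than from the previous patch site.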
790 
791 void Serializer::ObjectSerializer::VisitRuntimeEntry(Code host,
792  RelocInfo* rinfo) {
793  int skip = SkipTo(rinfo->target_address_address());
794  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
795  Address target = rinfo->target_address();
796  auto encoded_reference = serializer_->EncodeExternalReference(target);
797  DCHECK(!encoded_reference.is_from_api());
798  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
799  sink_->PutInt(skip, "SkipB4ExternalRef");
800  sink_->PutInt(encoded_reference.index(), "reference index");
801  bytes_processed_so_far_ += rinfo->target_address_size();
802 }
803 
804 void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
805  RelocInfo* rinfo) {
806  DCHECK(FLAG_embedded_builtins);
807  {
808  STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
809  CHECK(Builtins::IsIsolateIndependentBuiltin(host));
810  Address addr = rinfo->target_off_heap_target();
811  CHECK_NE(kNullAddress, addr);
812  CHECK(!InstructionStream::TryLookupCode(serializer_->isolate(), addr)
813  .is_null());
814  }
815 
816  int skip = SkipTo(rinfo->target_address_address());
817  sink_->Put(kOffHeapTarget, "OffHeapTarget");
818  sink_->PutInt(skip, "SkipB4OffHeapTarget");
819  sink_->PutInt(host->builtin_index(), "builtin index");
820  bytes_processed_so_far_ += rinfo->target_address_size();
821 }
822 
823 namespace {
824 class CompareRelocInfo {
825  public:
826  bool operator()(RelocInfo x, RelocInfo y) {
827  // Everything that does not use target_address_address will compare equal.
828  Address x_num = 0;
829  Address y_num = 0;
830  if (HasTargetAddressAddress(x.rmode())) {
831  x_num = x.target_address_address();
832  }
833  if (HasTargetAddressAddress(y.rmode())) {
834  y_num = y.target_address_address();
835  }
836  return x_num > y_num;
837  }
838 
839  private:
840  static bool HasTargetAddressAddress(RelocInfo::Mode mode) {
841  return RelocInfo::IsEmbeddedObject(mode) || RelocInfo::IsCodeTarget(mode) ||
842  RelocInfo::IsExternalReference(mode) ||
843  RelocInfo::IsRuntimeEntry(mode);
844  }
845 };
846 } // namespace
847 
848 void Serializer::ObjectSerializer::VisitRelocInfo(RelocIterator* it) {
849  std::priority_queue<RelocInfo, std::vector<RelocInfo>, CompareRelocInfo>
850  reloc_queue;
851  for (; !it->done(); it->next()) {
852  reloc_queue.push(*it->rinfo());
853  }
854  while (!reloc_queue.empty()) {
855  RelocInfo rinfo = reloc_queue.top();
856  reloc_queue.pop();
857  rinfo.Visit(this);
858  }
859 }
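// Note on ordering: CompareRelocInfo returns x_num > y_num, so the
// std::priority_queue pops the smallest target_address_address first. The
// relocations are therefore visited in ascending address order, which keeps
// the skips computed by SkipTo non-negative.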
860 
861 void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
862  RelocInfo* rinfo) {
863  int skip = SkipTo(rinfo->target_address_address());
864  Code object = Code::GetCodeFromTargetAddress(rinfo->target_address());
865  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
866  bytes_processed_so_far_ += rinfo->target_address_size();
867 }
868 
869 void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
870  Address object_start = object_->address();
871  int base = bytes_processed_so_far_;
872  int up_to_offset = static_cast<int>(up_to - object_start);
873  int to_skip = up_to_offset - bytes_processed_so_far_;
874  int bytes_to_output = to_skip;
875  bytes_processed_so_far_ += to_skip;
876  DCHECK_GE(to_skip, 0);
877  if (bytes_to_output != 0) {
878  DCHECK(to_skip == bytes_to_output);
879  if (IsAligned(bytes_to_output, kPointerAlignment) &&
880  bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
881  int size_in_words = bytes_to_output >> kPointerSizeLog2;
882  sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
883  } else {
884  sink_->Put(kVariableRawData, "VariableRawData");
885  sink_->PutInt(bytes_to_output, "length");
886  }
887 #ifdef MEMORY_SANITIZER
888  // Check that we do not serialize uninitialized memory.
889  __msan_check_mem_is_initialized(
890  reinterpret_cast<void*>(object_start + base), bytes_to_output);
891 #endif // MEMORY_SANITIZER
892  if (object_->IsBytecodeArray()) {
893  // The code age byte can be changed concurrently by GC.
894  const int bytes_to_age_byte = BytecodeArray::kBytecodeAgeOffset - base;
895  if (0 <= bytes_to_age_byte && bytes_to_age_byte < bytes_to_output) {
896  sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
897  bytes_to_age_byte, "Bytes");
898  byte bytecode_age = BytecodeArray::kNoAgeBytecodeAge;
899  sink_->PutRaw(&bytecode_age, 1, "Bytes");
900  const int bytes_written = bytes_to_age_byte + 1;
901  sink_->PutRaw(
902  reinterpret_cast<byte*>(object_start + base + bytes_written),
903  bytes_to_output - bytes_written, "Bytes");
904  } else {
905  sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
906  bytes_to_output, "Bytes");
907  }
908  } else {
909  sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
910  bytes_to_output, "Bytes");
911  }
912  }
913 }
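// Illustrative encoding: a payload of three pointer-sized words (assuming it
// is within kNumberOfFixedRawData) is emitted as the single opcode
// kFixedRawDataStart + 3 followed by the raw bytes; longer or unaligned runs
// use kVariableRawData plus an explicit length varint.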
914 
915 int Serializer::ObjectSerializer::SkipTo(Address to) {
916  Address object_start = object_->address();
917  int up_to_offset = static_cast<int>(to - object_start);
918  int to_skip = up_to_offset - bytes_processed_so_far_;
919  bytes_processed_so_far_ += to_skip;
920  // This assert will fail if the reloc info gives us the target_address_address
921  // locations in non-ascending order. We make sure this doesn't happen by
922  // sorting the relocation info.
923  DCHECK_GE(to_skip, 0);
924  return to_skip;
925 }
926 
927 void Serializer::ObjectSerializer::OutputCode(int size) {
928  DCHECK_EQ(kPointerSize, bytes_processed_so_far_);
929  Code on_heap_code = Code::cast(object_);
930  // To make snapshots reproducible, we make a copy of the code object
931  // and wipe all pointers in the copy, which we then serialize.
932  Code off_heap_code = serializer_->CopyCode(on_heap_code);
933  int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
934  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
935  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
936  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
937  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
938  RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
939  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
940  // With enabled pointer compression normal accessors no longer work for
941  // off-heap objects, so we have to get the relocation info data via the
942  // on-heap code object.
943  ByteArray relocation_info = on_heap_code->unchecked_relocation_info();
944  for (RelocIterator it(off_heap_code, relocation_info, mode_mask); !it.done();
945  it.next()) {
946  RelocInfo* rinfo = it.rinfo();
947  rinfo->WipeOut();
948  }
949  // We need to wipe out the header fields *after* wiping out the
950  // relocations, because some of these fields are needed for the latter.
951  off_heap_code->WipeOutHeader();
952 
953  Address start = off_heap_code->address() + Code::kDataStart;
954  int bytes_to_output = size - Code::kDataStart;
955 
956  sink_->Put(kVariableRawCode, "VariableRawCode");
957  sink_->PutInt(bytes_to_output, "length");
958 
959 #ifdef MEMORY_SANITIZER
960  // Check that we do not serialize uninitialized memory.
961  __msan_check_mem_is_initialized(reinterpret_cast<void*>(start),
962  bytes_to_output);
963 #endif // MEMORY_SANITIZER
964  sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");
965 }
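// Note (derived from SerializeContent above): only the wiped copy's body is
// emitted here; the pointers cleared by WipeOut are re-serialized afterwards
// when IterateBody walks the reloc info, which keeps the raw code bytes
// deterministic across builds.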
966 
967 } // namespace internal
968 } // namespace v8