V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
deserializer.cc
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/snapshot/deserializer.h"
6 
7 #include "src/assembler-inl.h"
8 #include "src/heap/heap-write-barrier-inl.h"
9 #include "src/interpreter/interpreter.h"
10 #include "src/isolate.h"
11 #include "src/log.h"
12 #include "src/objects/api-callbacks.h"
13 #include "src/objects/hash-table.h"
14 #include "src/objects/js-array-buffer-inl.h"
15 #include "src/objects/js-array-inl.h"
16 #include "src/objects/maybe-object.h"
17 #include "src/objects/slots.h"
18 #include "src/objects/smi.h"
19 #include "src/objects/string.h"
20 #include "src/snapshot/natives.h"
21 #include "src/snapshot/snapshot.h"
22 
23 namespace v8 {
24 namespace internal {
25 
26 // This is like a MaybeObjectSlot, except it doesn't enforce alignment.
27 // Most slots used below are aligned, but when writing into Code objects,
28 // they might not be, hence the use of UnalignedSlot and UnalignedCopy.
29 class UnalignedSlot {
30  public:
31  explicit UnalignedSlot(ObjectSlot slot) : ptr_(slot.address()) {}
32  explicit UnalignedSlot(Address address) : ptr_(address) {}
33  explicit UnalignedSlot(MaybeObject* slot)
34  : ptr_(reinterpret_cast<Address>(slot)) {}
35  explicit UnalignedSlot(Object** slot)
36  : ptr_(reinterpret_cast<Address>(slot)) {}
37 
38  inline bool operator<(const UnalignedSlot& other) const {
39  return ptr_ < other.ptr_;
40  }
41  inline bool operator==(const UnalignedSlot& other) const {
42  return ptr_ == other.ptr_;
43  }
44 
45  inline void Advance(int bytes = kPointerSize) { ptr_ += bytes; }
46 
47  MaybeObject Read() {
48  Address result;
49  memcpy(&result, reinterpret_cast<void*>(ptr_), sizeof(result));
50  return MaybeObject(result);
51  }
52  MaybeObject ReadPrevious() {
53  Address result;
54  memcpy(&result, reinterpret_cast<void*>(ptr_ - kPointerSize),
55  sizeof(result));
56  return MaybeObject(result);
57  }
58  inline void Write(Address value) {
59  memcpy(reinterpret_cast<void*>(ptr_), &value, sizeof(value));
60  }
61  MaybeObjectSlot Slot() { return MaybeObjectSlot(ptr_); }
62 
63  Address address() { return ptr_; }
64 
65  private:
66  Address ptr_;
67 };
68 
69 void Deserializer::UnalignedCopy(UnalignedSlot dest, MaybeObject value) {
70  DCHECK(!allocator()->next_reference_is_weak());
71  dest.Write(value.ptr());
72 }
73 
74 void Deserializer::UnalignedCopy(UnalignedSlot dest, Address value) {
75  DCHECK(!allocator()->next_reference_is_weak());
76  dest.Write(value);
77 }
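The comment above the class notes that slots inside Code objects may not be pointer-aligned, which is why every read and write above goes through memcpy instead of a direct dereference. A minimal standalone sketch of that pattern (hypothetical helper names, not part of V8):

#include <cstdint>
#include <cstring>

using Address = uintptr_t;

// Copying through memcpy avoids undefined behaviour when ptr is misaligned.
inline Address ReadUnaligned(Address ptr) {
  Address result;
  std::memcpy(&result, reinterpret_cast<const void*>(ptr), sizeof(result));
  return result;
}

inline void WriteUnaligned(Address ptr, Address value) {
  std::memcpy(reinterpret_cast<void*>(ptr), &value, sizeof(value));
}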
78 
79 void Deserializer::Initialize(Isolate* isolate) {
80  DCHECK_NULL(isolate_);
81  DCHECK_NOT_NULL(isolate);
82  isolate_ = isolate;
83  DCHECK_NULL(external_reference_table_);
84  external_reference_table_ = isolate->external_reference_table();
85 #ifdef DEBUG
86  // Count the number of external references registered through the API.
87  num_api_references_ = 0;
88  if (isolate_->api_external_references() != nullptr) {
89  while (isolate_->api_external_references()[num_api_references_] != 0) {
90  num_api_references_++;
91  }
92  }
93 #endif // DEBUG
94  CHECK_EQ(magic_number_,
95  SerializedData::ComputeMagicNumber(external_reference_table_));
96 }
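The DEBUG block above counts the external references an embedder registered through the API by walking a zero-terminated array. A hedged sketch of the embedder side, using the public v8::Isolate::CreateParams::external_references field (the callback and array names are illustrative):

#include "v8.h"

static void MyNativeCallback(const v8::FunctionCallbackInfo<v8::Value>&) {}

// The array must end with a 0 entry; the counting loop above stops there.
static const intptr_t kExternalRefs[] = {
    reinterpret_cast<intptr_t>(MyNativeCallback),
    0};

void ConfigureIsolate(v8::Isolate::CreateParams* params) {
  params->external_references = kExternalRefs;
}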
97 
98 void Deserializer::Rehash() {
99  DCHECK(can_rehash() || deserializing_user_code());
100  for (const auto& item : to_rehash_) item->RehashBasedOnMap(isolate());
101 }
102 
103 Deserializer::~Deserializer() {
104 #ifdef DEBUG
105  // Do not perform checks if we aborted deserialization.
106  if (source_.position() == 0) return;
107  // Check that we only have padding bytes remaining.
108  while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
109  // Check that we've fully used all reserved space.
110  DCHECK(allocator()->ReservationsAreFullyUsed());
111 #endif // DEBUG
112 }
113 
114 // This is called on the roots. It is the driver of the deserialization
115 // process. It is also called on the body of each function.
116 void Deserializer::VisitRootPointers(Root root, const char* description,
117  ObjectSlot start, ObjectSlot end) {
118  // The space must be new space. Any other space would cause ReadChunk to try
119  // to update the remembered set using nullptr as the address.
120  ReadData(UnalignedSlot(start), UnalignedSlot(end), NEW_SPACE, kNullAddress);
121 }
122 
123 void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
124  static const byte expected = kSynchronize;
125  CHECK_EQ(expected, source_.Get());
126 }
127 
128 void Deserializer::DeserializeDeferredObjects() {
129  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
130  switch (code) {
131  case kAlignmentPrefix:
132  case kAlignmentPrefix + 1:
133  case kAlignmentPrefix + 2: {
134  int alignment = code - (SerializerDeserializer::kAlignmentPrefix - 1);
135  allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
136  break;
137  }
138  default: {
139  int space = code & kSpaceMask;
140  DCHECK_LE(space, kNumberOfSpaces);
141  DCHECK_EQ(code - space, kNewObject);
142  HeapObject* object = GetBackReferencedObject(space);
143  int size = source_.GetInt() << kPointerSizeLog2;
144  Address obj_address = object->address();
145  UnalignedSlot start(obj_address + kPointerSize);
146  UnalignedSlot end(obj_address + size);
147  bool filled = ReadData(start, end, space, obj_address);
148  CHECK(filled);
149  DCHECK(CanBeDeferred(object));
150  PostProcessNewObject(object, space);
151  }
152  }
153  }
154 }
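Note the size decoding in the default case above: the stream stores object sizes in words, so the value returned by GetInt() is shifted left by kPointerSizeLog2 to obtain bytes. A worked example, assuming a 64-bit target where kPointerSizeLog2 is 3:

#include <cassert>

int main() {
  const int kPointerSizeLog2 = 3;  // assumption: 8-byte pointers
  int words_from_stream = 5;       // value returned by source_.GetInt()
  int size_in_bytes = words_from_stream << kPointerSizeLog2;
  assert(size_in_bytes == 40);     // 5 words * 8 bytes each
  return 0;
}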
155 
156 void Deserializer::LogNewObjectEvents() {
157  {
158  // {new_maps_} and {new_code_objects_} are vectors containing raw
159  // pointers, hence there should be no GC happening.
160  DisallowHeapAllocation no_gc;
161  // Issue code events for newly deserialized code objects.
162  LOG_CODE_EVENT(isolate_, LogCodeObjects());
163  }
164  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
165  LogNewMapEvents();
166 }
167 
168 void Deserializer::LogNewMapEvents() {
169  DisallowHeapAllocation no_gc;
170  for (Map map : new_maps()) {
171  DCHECK(FLAG_trace_maps);
172  LOG(isolate_, MapCreate(map));
173  LOG(isolate_, MapDetails(map));
174  }
175 }
176 
177 void Deserializer::LogScriptEvents(Script* script) {
178  DisallowHeapAllocation no_gc;
179  LOG(isolate_,
180  ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
181  LOG(isolate_, ScriptDetails(script));
182 }
183 
184 StringTableInsertionKey::StringTableInsertionKey(String string)
185  : StringTableKey(ComputeHashField(string)), string_(string) {
186  DCHECK(string->IsInternalizedString());
187 }
188 
189 bool StringTableInsertionKey::IsMatch(Object* string) {
190  // We know that all entries in a hash table had their hash keys created.
191  // Use that knowledge to have fast failure.
192  if (Hash() != String::cast(string)->Hash()) return false;
193  // We want to compare the content of two internalized strings here.
194  return string_->SlowEquals(String::cast(string));
195 }
196 
197 Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
198  return handle(string_, isolate);
199 }
200 
201 uint32_t StringTableInsertionKey::ComputeHashField(String string) {
202  // Make sure hash_field() is computed.
203  string->Hash();
204  return string->hash_field();
205 }
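IsMatch() above relies on every string table entry already having a computed hash, so a hash mismatch rejects most candidates before the expensive content comparison. The same pattern in isolation, with standalone types rather than V8's:

#include <cstdint>
#include <string>

struct InternedString {
  std::string contents;
  uint32_t hash;  // assumed precomputed, as in the string table
};

bool Matches(const InternedString& key, const InternedString& candidate) {
  if (key.hash != candidate.hash) return false;  // cheap fast-fail
  return key.contents == candidate.contents;     // slow content comparison
}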
206 
207 HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
208  if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
209  if (obj->IsString()) {
210  // Uninitialize hash field as we need to recompute the hash.
211  String string = String::cast(obj);
212  string->set_hash_field(String::kEmptyHashField);
213  } else if (obj->NeedsRehashing()) {
214  to_rehash_.push_back(obj);
215  }
216  }
217 
218  if (deserializing_user_code()) {
219  if (obj->IsString()) {
220  String string = String::cast(obj);
221  if (string->IsInternalizedString()) {
222  // Canonicalize the internalized string. If it already exists in the
223  // string table, set it to forward to the existing one.
224  StringTableInsertionKey key(string);
225  String canonical =
226  StringTable::ForwardStringIfExists(isolate_, &key, string);
227 
228  if (!canonical.is_null()) return canonical;
229 
230  new_internalized_strings_.push_back(handle(string, isolate_));
231  return string;
232  }
233  } else if (obj->IsScript()) {
234  new_scripts_.push_back(handle(Script::cast(obj), isolate_));
235  } else if (obj->IsAllocationSite()) {
236  // We should link new allocation sites, but we can't do this immediately
237  // because |AllocationSite::HasWeakNext()| internally accesses
238  // |Heap::roots_| that may not have been initialized yet. So defer this to
239  // |ObjectDeserializer::CommitPostProcessedObjects()|.
240  new_allocation_sites_.push_back(AllocationSite::cast(obj));
241  } else {
242  DCHECK(CanBeDeferred(obj));
243  }
244  }
245  if (obj->IsScript()) {
246  LogScriptEvents(Script::cast(obj));
247  } else if (obj->IsCode()) {
248  // We flush all code pages after deserializing the startup snapshot.
249  // Hence we only remember each individual code object when deserializing
250  // user code.
251  if (deserializing_user_code() || space == LO_SPACE) {
252  new_code_objects_.push_back(Code::cast(obj));
253  }
254  } else if (FLAG_trace_maps && obj->IsMap()) {
255  // Keep track of all seen Maps to log them later since they might be only
256  // partially initialized at this point.
257  new_maps_.push_back(Map::cast(obj));
258  } else if (obj->IsAccessorInfo()) {
259 #ifdef USE_SIMULATOR
260  accessor_infos_.push_back(AccessorInfo::cast(obj));
261 #endif
262  } else if (obj->IsCallHandlerInfo()) {
263 #ifdef USE_SIMULATOR
264  call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
265 #endif
266  } else if (obj->IsExternalString()) {
267  if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
268  ExternalOneByteString string = ExternalOneByteString::cast(obj);
269  DCHECK(string->is_uncached());
270  string->SetResource(
271  isolate_, NativesExternalStringResource::DecodeForDeserialization(
272  string->resource()));
273  } else {
274  ExternalString string = ExternalString::cast(obj);
275  uint32_t index = string->resource_as_uint32();
276  Address address =
277  static_cast<Address>(isolate_->api_external_references()[index]);
278  string->set_address_as_resource(address);
279  isolate_->heap()->UpdateExternalString(string, 0,
280  string->ExternalPayloadSize());
281  }
282  isolate_->heap()->RegisterExternalString(String::cast(obj));
283  } else if (obj->IsJSTypedArray()) {
284  JSTypedArray* typed_array = JSTypedArray::cast(obj);
285  CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
286  int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
287  if (byte_offset > 0) {
288  FixedTypedArrayBase elements =
289  FixedTypedArrayBase::cast(typed_array->elements());
290  // Must be off-heap layout.
291  DCHECK(!typed_array->is_on_heap());
292 
293  void* pointer_with_offset = reinterpret_cast<void*>(
294  reinterpret_cast<intptr_t>(elements->external_pointer()) +
295  byte_offset);
296  elements->set_external_pointer(pointer_with_offset);
297  }
298  } else if (obj->IsJSArrayBuffer()) {
299  JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
300  // Only fixup for the off-heap case.
301  if (buffer->backing_store() != nullptr) {
302  Smi store_index(reinterpret_cast<Address>(buffer->backing_store()));
303  void* backing_store = off_heap_backing_stores_[store_index->value()];
304 
305  buffer->set_backing_store(backing_store);
306  isolate_->heap()->RegisterNewArrayBuffer(buffer);
307  }
308  } else if (obj->IsFixedTypedArrayBase()) {
309  FixedTypedArrayBase fta = FixedTypedArrayBase::cast(obj);
310  // Only fixup for the off-heap case.
311  if (fta->base_pointer() == Smi::kZero) {
312  Smi store_index(reinterpret_cast<Address>(fta->external_pointer()));
313  void* backing_store = off_heap_backing_stores_[store_index->value()];
314  fta->set_external_pointer(backing_store);
315  }
316  } else if (obj->IsBytecodeArray()) {
317  // TODO(mythria): Remove these once we store the default values for these
318  // fields in the serializer.
319  BytecodeArray bytecode_array = BytecodeArray::cast(obj);
320  bytecode_array->set_interrupt_budget(
321  interpreter::Interpreter::InterruptBudget());
322  bytecode_array->set_osr_loop_nesting_level(0);
323  }
324 
325  // Check alignment.
326  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
327  HeapObject::RequiredAlignment(obj->map())));
328  return obj;
329 }
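Several branches above repair off-heap pointers: a JSArrayBuffer or FixedTypedArrayBase serialized its backing store as a small index into off_heap_backing_stores_, and a JSTypedArray's element pointer must have its byte_offset re-applied. A simplified sketch of that index-to-pointer swap, using hypothetical types rather than V8's object layout:

#include <cstdint>
#include <vector>

struct DeserializedBufferView {
  void* external_pointer;  // holds a store index right after deserialization
  uint32_t byte_offset;
};

void FixUpBufferView(DeserializedBufferView* view,
                     const std::vector<void*>& backing_stores) {
  // Decode the index that was smuggled through the pointer field, then
  // rebuild the real address from the allocated backing store plus offset.
  auto index = reinterpret_cast<std::uintptr_t>(view->external_pointer);
  char* base = static_cast<char*>(backing_stores[index]);
  view->external_pointer = base + view->byte_offset;
}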
330 
331 HeapObject* Deserializer::GetBackReferencedObject(int space) {
332  HeapObject* obj;
333  switch (space) {
334  case LO_SPACE:
335  obj = allocator()->GetLargeObject(source_.GetInt());
336  break;
337  case MAP_SPACE:
338  obj = allocator()->GetMap(source_.GetInt());
339  break;
340  case RO_SPACE: {
341  uint32_t chunk_index = source_.GetInt();
342  uint32_t chunk_offset = source_.GetInt();
343  if (isolate()->heap()->deserialization_complete()) {
344  PagedSpace* read_only_space = isolate()->heap()->read_only_space();
345  Page* page = read_only_space->first_page();
346  for (uint32_t i = 0; i < chunk_index; ++i) {
347  page = page->next_page();
348  }
349  Address address = page->OffsetToAddress(chunk_offset);
350  obj = HeapObject::FromAddress(address);
351  } else {
352  obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
353  chunk_index, chunk_offset);
354  }
355  break;
356  }
357  default: {
358  uint32_t chunk_index = source_.GetInt();
359  uint32_t chunk_offset = source_.GetInt();
360  obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
361  chunk_index, chunk_offset);
362  break;
363  }
364  }
365 
366  if (deserializing_user_code() && obj->IsThinString()) {
367  obj = ThinString::cast(obj)->actual();
368  }
369 
370  hot_objects_.Add(obj);
371  DCHECK(!HasWeakHeapObjectTag(obj));
372  return obj;
373 }
374 
375 // This routine writes the new object into the pointer provided.
376 // The reason for this strange interface is that otherwise the object is
377 // written very late, which means the FreeSpace map is not set up by the
378 // time we need to use it to mark the space at the end of a page free.
379 void Deserializer::ReadObject(int space_number, UnalignedSlot write_back,
380  HeapObjectReferenceType reference_type) {
381  const int size = source_.GetInt() << kObjectAlignmentBits;
382 
383  Address address =
384  allocator()->Allocate(static_cast<AllocationSpace>(space_number), size);
385  HeapObject* obj = HeapObject::FromAddress(address);
386 
387  isolate_->heap()->OnAllocationEvent(obj, size);
388  UnalignedSlot current(address);
389  UnalignedSlot limit(address + size);
390 
391  if (ReadData(current, limit, space_number, address)) {
392  // Only post process if object content has not been deferred.
393  obj = PostProcessNewObject(obj, space_number);
394  }
395 
396  MaybeObject write_back_obj = reference_type == HeapObjectReferenceType::STRONG
397  ? HeapObjectReference::Strong(obj)
398  : HeapObjectReference::Weak(obj);
399  UnalignedCopy(write_back, write_back_obj);
400 #ifdef DEBUG
401  if (obj->IsCode()) {
402  DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
403  } else {
404  DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
405  }
406 #endif // DEBUG
407 }
408 
409 static void NoExternalReferencesCallback() {
410  // The following check will trigger if a function or object template
411  // with references to native functions has been deserialized from a
412  // snapshot, but no actual external references were provided when the
413  // isolate was created.
414  CHECK_WITH_MSG(false, "No external references provided via API");
415 }
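Rather than leaving a missing external reference as a null pointer, the deserializer writes the address of the callback above into the slot (see the kApiReference fallback in ReadData()), so a later call fails with a clear message instead of a bare crash. The same "loud sentinel" pattern in isolation:

#include <cstdio>
#include <cstdlib>

static void MissingExternalReference() {
  std::fprintf(stderr, "No external references provided via API\n");
  std::abort();
}

// A deserializer following this pattern would store
// reinterpret_cast<Address>(MissingExternalReference) in the slot
// instead of nullptr.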
416 
417 bool Deserializer::ReadData(UnalignedSlot current, UnalignedSlot limit,
418  int source_space, Address current_object_address) {
419  Isolate* const isolate = isolate_;
420  // Write barrier support costs around 1% in startup time. In fact there
421  // are no new space objects in current boot snapshots, so it's not needed,
422  // but that may change.
423  bool write_barrier_needed =
424  (current_object_address != kNullAddress && source_space != NEW_SPACE &&
425  source_space != CODE_SPACE);
426  while (current < limit) {
427  byte data = source_.Get();
428  switch (data) {
429 #define CASE_STATEMENT(where, how, within, space_number) \
430  case where + how + within + space_number: \
431  STATIC_ASSERT((where & ~kWhereMask) == 0); \
432  STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
433  STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
434  STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
435 
436 #define CASE_BODY(where, how, within, space_number_if_any) \
437  current = ReadDataCase<where, how, within, space_number_if_any>( \
438  isolate, current, current_object_address, data, write_barrier_needed); \
439  break;
440 
441 // This generates a case and a body for the new space (which has to do extra
442 // write barrier handling) and handles the other spaces with fall-through cases
443 // and one body.
444 #define ALL_SPACES(where, how, within) \
445  CASE_STATEMENT(where, how, within, NEW_SPACE) \
446  CASE_BODY(where, how, within, NEW_SPACE) \
447  CASE_STATEMENT(where, how, within, OLD_SPACE) \
448  V8_FALLTHROUGH; \
449  CASE_STATEMENT(where, how, within, CODE_SPACE) \
450  V8_FALLTHROUGH; \
451  CASE_STATEMENT(where, how, within, MAP_SPACE) \
452  V8_FALLTHROUGH; \
453  CASE_STATEMENT(where, how, within, LO_SPACE) \
454  V8_FALLTHROUGH; \
455  CASE_STATEMENT(where, how, within, RO_SPACE) \
456  CASE_BODY(where, how, within, kAnyOldSpace)
457 
458 #define FOUR_CASES(byte_code) \
459  case byte_code: \
460  case byte_code + 1: \
461  case byte_code + 2: \
462  case byte_code + 3:
463 
464 #define SIXTEEN_CASES(byte_code) \
465  FOUR_CASES(byte_code) \
466  FOUR_CASES(byte_code + 4) \
467  FOUR_CASES(byte_code + 8) \
468  FOUR_CASES(byte_code + 12)
469 
470 #define SINGLE_CASE(where, how, within, space) \
471  CASE_STATEMENT(where, how, within, space) \
472  CASE_BODY(where, how, within, space)
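 // For orientation, an approximate expansion (static asserts omitted) of
 //   ALL_SPACES(kNewObject, kPlain, kStartOfObject)
 // is:
 //   case kNewObject + kPlain + kStartOfObject + NEW_SPACE:
 //     current = ReadDataCase<kNewObject, kPlain, kStartOfObject, NEW_SPACE>(
 //         isolate, current, current_object_address, data,
 //         write_barrier_needed);
 //     break;
 //   case kNewObject + kPlain + kStartOfObject + OLD_SPACE:   // fall through
 //   case kNewObject + kPlain + kStartOfObject + CODE_SPACE:  // fall through
 //   case kNewObject + kPlain + kStartOfObject + MAP_SPACE:   // fall through
 //   case kNewObject + kPlain + kStartOfObject + LO_SPACE:    // fall through
 //   case kNewObject + kPlain + kStartOfObject + RO_SPACE:
 //     current = ReadDataCase<kNewObject, kPlain, kStartOfObject,
 //                            kAnyOldSpace>(
 //         isolate, current, current_object_address, data,
 //         write_barrier_needed);
 //     break;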
473 
474  // Deserialize a new object and write a pointer to it to the current
475  // object.
476  ALL_SPACES(kNewObject, kPlain, kStartOfObject)
477  // Deserialize a new code object and write a pointer to its first
478  // instruction to the current code object.
479  ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
480  // Find a recently deserialized object using its offset from the current
481  // allocation point and write a pointer to it to the current object.
482  ALL_SPACES(kBackref, kPlain, kStartOfObject)
483  ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
484 #if V8_CODE_EMBEDS_OBJECT_POINTER
485  // Deserialize a new object from a pointer found in code and write
486  // a pointer to it to the current object. Required only for MIPS, PPC, ARM
487  // or S390 with embedded constant pool, and omitted on the other
488  // architectures because it is fully unrolled and would cause bloat.
489  ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
490  // Find a recently deserialized code object using its offset from the
491  // current allocation point and write a pointer to it to the current
492  // object. Required only for MIPS, PPC, ARM or S390 with embedded
493  // constant pool.
494  ALL_SPACES(kBackref, kFromCode, kStartOfObject)
495  ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
496 #endif
497  // Find a recently deserialized code object using its offset from the
498  // current allocation point and write a pointer to its first instruction
499  // to the current code object or the instruction pointer in a function
500  // object.
501  ALL_SPACES(kBackref, kFromCode, kInnerPointer)
502  ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
503  // Find an object in the roots array and write a pointer to it to the
504  // current object.
505  SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
506 #if V8_CODE_EMBEDS_OBJECT_POINTER
507  // Find an object in the roots array and write a pointer to it in code.
508  SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
509 #endif
510  // Find an object in the partial snapshots cache and write a pointer to it
511  // to the current object.
512  SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
513  SINGLE_CASE(kPartialSnapshotCache, kFromCode, kStartOfObject, 0)
514  SINGLE_CASE(kPartialSnapshotCache, kFromCode, kInnerPointer, 0)
515  // Find an object in the read-only object cache and write a pointer to it
516  // to the current object.
517  SINGLE_CASE(kReadOnlyObjectCache, kPlain, kStartOfObject, 0)
518  SINGLE_CASE(kReadOnlyObjectCache, kFromCode, kStartOfObject, 0)
519  SINGLE_CASE(kReadOnlyObjectCache, kFromCode, kInnerPointer, 0)
520  // Find an object in the attached references and write a pointer to it to
521  // the current object.
522  SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
523  SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
524  SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
525 
526 #undef CASE_STATEMENT
527 #undef CASE_BODY
528 #undef ALL_SPACES
529 
530  case kSkip: {
531  int size = source_.GetInt();
532  current.Advance(size);
533  break;
534  }
535 
536  // Find an external reference and write a pointer to it to the current
537  // object.
538  case kExternalReference + kPlain + kStartOfObject:
539  current =
540  ReadExternalReferenceCase(kPlain, current, current_object_address);
541  break;
542  // Find an external reference and write a pointer to it in the current
543  // code object.
544  case kExternalReference + kFromCode + kStartOfObject:
545  current = ReadExternalReferenceCase(kFromCode, current,
546  current_object_address);
547  break;
548 
549  case kInternalReferenceEncoded:
550  case kInternalReference: {
551  // Internal reference address is not encoded via skip, but by offset
552  // from code entry.
553  int pc_offset = source_.GetInt();
554  int target_offset = source_.GetInt();
555  Code code = Code::cast(HeapObject::FromAddress(current_object_address));
556  DCHECK(0 <= pc_offset && pc_offset <= code->raw_instruction_size());
557  DCHECK(0 <= target_offset &&
558  target_offset <= code->raw_instruction_size());
559  Address pc = code->entry() + pc_offset;
560  Address target = code->entry() + target_offset;
561  Assembler::deserialization_set_target_internal_reference_at(
562  pc, target,
563  data == kInternalReference ? RelocInfo::INTERNAL_REFERENCE
564  : RelocInfo::INTERNAL_REFERENCE_ENCODED);
565  break;
566  }
567 
568  case kOffHeapTarget: {
569  DCHECK(FLAG_embedded_builtins);
570  int skip = source_.GetInt();
571  int builtin_index = source_.GetInt();
572  DCHECK(Builtins::IsBuiltinId(builtin_index));
573 
574  current.Advance(skip);
575 
576  CHECK_NOT_NULL(isolate->embedded_blob());
577  EmbeddedData d = EmbeddedData::FromBlob();
578  Address address = d.InstructionStartOfBuiltin(builtin_index);
579  CHECK_NE(kNullAddress, address);
580 
581  if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
582  Address location_of_branch_data = current.address();
583  int skip = Assembler::deserialization_special_target_size(
584  location_of_branch_data);
585  Assembler::deserialization_set_special_target_at(
586  location_of_branch_data,
587  Code::cast(HeapObject::FromAddress(current_object_address)),
588  address);
589  current.Advance(skip);
590  } else {
591  UnalignedCopy(current, address);
592  current.Advance();
593  }
594  break;
595  }
596 
597  case kNop:
598  break;
599 
600  case kNextChunk: {
601  int space = source_.Get();
602  allocator()->MoveToNextChunk(static_cast<AllocationSpace>(space));
603  break;
604  }
605 
606  case kDeferred: {
607  // Deferred can only occur right after the heap object header.
608  DCHECK_EQ(current.address(), current_object_address + kPointerSize);
609  HeapObject* obj = HeapObject::FromAddress(current_object_address);
610  // If the deferred object is a map, its instance type may be used
611  // during deserialization. Initialize it with a temporary value.
612  if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
613  current = limit;
614  return false;
615  }
616 
617  case kSynchronize:
618  // If we get here, it indicates a mismatch between the number of GC
619  // roots used when serializing and when deserializing.
620  UNREACHABLE();
621 
622  // Deserialize raw data of variable length.
623  case kVariableRawData: {
624  int size_in_bytes = source_.GetInt();
625  byte* raw_data_out = reinterpret_cast<byte*>(current.address());
626  source_.CopyRaw(raw_data_out, size_in_bytes);
627  current.Advance(size_in_bytes);
628  break;
629  }
630 
631  // Deserialize raw code directly into the body of the code object.
632  // Do not move current.
633  case kVariableRawCode: {
634  int size_in_bytes = source_.GetInt();
635  source_.CopyRaw(
636  reinterpret_cast<byte*>(current_object_address + Code::kDataStart),
637  size_in_bytes);
638  break;
639  }
640 
641  case kVariableRepeat: {
642  int repeats = source_.GetInt();
643  MaybeObject object = current.ReadPrevious();
644  DCHECK(!Heap::InNewSpace(object));
645  for (int i = 0; i < repeats; i++) {
646  UnalignedCopy(current, object);
647  current.Advance();
648  }
649  break;
650  }
651 
652  case kOffHeapBackingStore: {
653  int byte_length = source_.GetInt();
654  byte* backing_store = static_cast<byte*>(
655  isolate->array_buffer_allocator()->AllocateUninitialized(
656  byte_length));
657  CHECK_NOT_NULL(backing_store);
658  source_.CopyRaw(backing_store, byte_length);
659  off_heap_backing_stores_.push_back(backing_store);
660  break;
661  }
662 
663  case kApiReference: {
664  int skip = source_.GetInt();
665  current.Advance(skip);
666  uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
667  Address address;
668  if (isolate->api_external_references()) {
669  DCHECK_WITH_MSG(
670  reference_id < num_api_references_,
671  "too few external references provided through the API");
672  address = static_cast<Address>(
673  isolate->api_external_references()[reference_id]);
674  } else {
675  address = reinterpret_cast<Address>(NoExternalReferencesCallback);
676  }
677  UnalignedCopy(current, address);
678  current.Advance();
679  break;
680  }
681 
682  case kClearedWeakReference:
683  UnalignedCopy(current, HeapObjectReference::ClearedValue(isolate_));
684  current.Advance();
685  break;
686 
687  case kWeakPrefix:
688  DCHECK(!allocator()->next_reference_is_weak());
689  allocator()->set_next_reference_is_weak(true);
690  break;
691 
692  case kAlignmentPrefix:
693  case kAlignmentPrefix + 1:
694  case kAlignmentPrefix + 2: {
695  int alignment = data - (SerializerDeserializer::kAlignmentPrefix - 1);
696  allocator()->SetAlignment(static_cast<AllocationAlignment>(alignment));
697  break;
698  }
699 
700  // First kNumberOfRootArrayConstants roots are guaranteed to be in
701  // the old space.
702  STATIC_ASSERT(
703  static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) == 0);
704  STATIC_ASSERT(kNumberOfRootArrayConstants <=
705  static_cast<int>(RootIndex::kLastImmortalImmovableRoot));
706  STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
707  SIXTEEN_CASES(kRootArrayConstantsWithSkip)
708  SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
709  int skip = source_.GetInt();
710  current.Advance(skip);
711  V8_FALLTHROUGH;
712  }
713 
714  SIXTEEN_CASES(kRootArrayConstants)
715  SIXTEEN_CASES(kRootArrayConstants + 16) {
716  int id = data & kRootArrayConstantsMask;
717  RootIndex root_index = static_cast<RootIndex>(id);
718  MaybeObject object = MaybeObject::FromObject(isolate->root(root_index));
719  DCHECK(!Heap::InNewSpace(object));
720  UnalignedCopy(current, object);
721  current.Advance();
722  break;
723  }
724 
725  STATIC_ASSERT(kNumberOfHotObjects == 8);
726  FOUR_CASES(kHotObjectWithSkip)
727  FOUR_CASES(kHotObjectWithSkip + 4) {
728  int skip = source_.GetInt();
729  current.Advance(skip);
730  V8_FALLTHROUGH;
731  }
732 
733  FOUR_CASES(kHotObject)
734  FOUR_CASES(kHotObject + 4) {
735  int index = data & kHotObjectMask;
736  Object* hot_object = hot_objects_.Get(index);
737  MaybeObject hot_maybe_object = MaybeObject::FromObject(hot_object);
738  if (allocator()->GetAndClearNextReferenceIsWeak()) {
739  hot_maybe_object = MaybeObject::MakeWeak(hot_maybe_object);
740  }
741 
742  UnalignedCopy(current, hot_maybe_object);
743  if (write_barrier_needed && Heap::InNewSpace(hot_object)) {
744  GenerationalBarrier(HeapObject::FromAddress(current_object_address),
745  current.Slot(), hot_maybe_object);
746  }
747  current.Advance();
748  break;
749  }
750 
751  // Deserialize raw data of fixed length from 1 to 32 words.
752  STATIC_ASSERT(kNumberOfFixedRawData == 32);
753  SIXTEEN_CASES(kFixedRawData)
754  SIXTEEN_CASES(kFixedRawData + 16) {
755  byte* raw_data_out = reinterpret_cast<byte*>(current.address());
756  int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
757  source_.CopyRaw(raw_data_out, size_in_bytes);
758  current.Advance(size_in_bytes);
759  break;
760  }
761 
762  STATIC_ASSERT(kNumberOfFixedRepeat == 16);
763  SIXTEEN_CASES(kFixedRepeat) {
764  int repeats = data - kFixedRepeatStart;
765  MaybeObject object = current.ReadPrevious();
766  DCHECK(!Heap::InNewSpace(object));
767  for (int i = 0; i < repeats; i++) {
768  UnalignedCopy(current, object);
769  current.Advance();
770  }
771  break;
772  }
773 
774 #ifdef DEBUG
775 #define UNUSED_CASE(byte_code) \
776  case byte_code: \
777  UNREACHABLE();
778  UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
779 #endif
780 #undef UNUSED_CASE
781 
782 #undef SIXTEEN_CASES
783 #undef FOUR_CASES
784 #undef SINGLE_CASE
785  }
786  }
787  CHECK_EQ(limit, current);
788  return true;
789 }
790 
791 UnalignedSlot Deserializer::ReadExternalReferenceCase(
792  HowToCode how, UnalignedSlot current, Address current_object_address) {
793  int skip = source_.GetInt();
794  current.Advance(skip);
795  uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
796  Address address = external_reference_table_->address(reference_id);
797 
798  if (how == kFromCode) {
799  Address location_of_branch_data = current.address();
800  int skip =
801  Assembler::deserialization_special_target_size(location_of_branch_data);
802  Assembler::deserialization_set_special_target_at(
803  location_of_branch_data,
804  Code::cast(HeapObject::FromAddress(current_object_address)), address);
805  current.Advance(skip);
806  } else {
807  UnalignedCopy(current, address);
808  current.Advance();
809  }
810  return current;
811 }
812 
813 template <int where, int how, int within, int space_number_if_any>
814 UnalignedSlot Deserializer::ReadDataCase(Isolate* isolate,
815  UnalignedSlot current,
816  Address current_object_address,
817  byte data, bool write_barrier_needed) {
818  bool emit_write_barrier = false;
819  bool current_was_incremented = false;
820  int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
821  : space_number_if_any;
822  HeapObjectReferenceType reference_type = HeapObjectReferenceType::STRONG;
823  if (where == kNewObject && how == kPlain && within == kStartOfObject) {
824  if (allocator()->GetAndClearNextReferenceIsWeak()) {
825  reference_type = HeapObjectReferenceType::WEAK;
826  }
827  ReadObject(space_number, current, reference_type);
828  emit_write_barrier = (space_number == NEW_SPACE);
829  } else {
830  Object* new_object = nullptr; /* May not be a real Object pointer. */
831  if (where == kNewObject) {
832  ReadObject(space_number, UnalignedSlot(&new_object),
833  HeapObjectReferenceType::STRONG);
834  } else if (where == kBackref) {
835  emit_write_barrier = (space_number == NEW_SPACE);
836  new_object = GetBackReferencedObject(data & kSpaceMask);
837  } else if (where == kBackrefWithSkip) {
838  int skip = source_.GetInt();
839  current.Advance(skip);
840  emit_write_barrier = (space_number == NEW_SPACE);
841  new_object = GetBackReferencedObject(data & kSpaceMask);
842  } else if (where == kRootArray) {
843  int id = source_.GetInt();
844  RootIndex root_index = static_cast<RootIndex>(id);
845  new_object = isolate->root(root_index);
846  emit_write_barrier = Heap::InNewSpace(new_object);
847  hot_objects_.Add(HeapObject::cast(new_object));
848  } else if (where == kReadOnlyObjectCache) {
849  int cache_index = source_.GetInt();
850  new_object = isolate->read_only_object_cache()->at(cache_index);
851  DCHECK(!Heap::InNewSpace(new_object));
852  emit_write_barrier = false;
853  } else if (where == kPartialSnapshotCache) {
854  int cache_index = source_.GetInt();
855  new_object = isolate->partial_snapshot_cache()->at(cache_index);
856  emit_write_barrier = Heap::InNewSpace(new_object);
857  } else {
858  DCHECK_EQ(where, kAttachedReference);
859  int index = source_.GetInt();
860  new_object = *attached_objects_[index];
861  emit_write_barrier = Heap::InNewSpace(new_object);
862  }
863  if (within == kInnerPointer) {
864  DCHECK_EQ(how, kFromCode);
865  if (new_object->IsCode()) {
866  new_object = reinterpret_cast<Object*>(
867  Code::cast(new_object)->raw_instruction_start());
868  } else {
869  Cell* cell = Cell::cast(new_object);
870  new_object = reinterpret_cast<Object*>(cell->ValueAddress());
871  }
872  }
873  if (how == kFromCode) {
874  DCHECK(!allocator()->next_reference_is_weak());
875  Address location_of_branch_data = current.address();
876  int skip = Assembler::deserialization_special_target_size(
877  location_of_branch_data);
878  Assembler::deserialization_set_special_target_at(
879  location_of_branch_data,
880  Code::cast(HeapObject::FromAddress(current_object_address)),
881  reinterpret_cast<Address>(new_object));
882  current.Advance(skip);
883  current_was_incremented = true;
884  } else {
885  MaybeObject new_maybe_object = MaybeObject::FromObject(new_object);
886  if (allocator()->GetAndClearNextReferenceIsWeak()) {
887  new_maybe_object = MaybeObject::MakeWeak(new_maybe_object);
888  }
889  UnalignedCopy(current, new_maybe_object);
890  }
891  }
892  if (emit_write_barrier && write_barrier_needed) {
893  HeapObject* object = HeapObject::FromAddress(current_object_address);
894  SLOW_DCHECK(isolate->heap()->Contains(object));
895  GenerationalBarrier(object, current.Slot(), current.Read());
896  }
897  if (!current_was_incremented) {
898  current.Advance();
899  }
900 
901  return current;
902 }
903 
904 } // namespace internal
905 } // namespace v8