V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
isolate.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include <cstddef>
#include <functional>
#include <memory>
#include <queue>
#include <unordered_map>
#include <vector>

#include "include/v8-inspector.h"
#include "include/v8-internal.h"
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/isolate-allocator.h"
#include "src/isolate-data.h"
#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/thread-id.h"
#include "src/unicode.h"

#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h"  // Define U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {
class UObject;
}  // namespace U_ICU_NAMESPACE
#endif  // V8_INTL_SUPPORT

45 
46 namespace v8 {
47 
48 namespace base {
49 class RandomNumberGenerator;
50 }
51 
52 namespace debug {
53 class ConsoleDelegate;
54 }
55 
56 namespace internal {
57 
58 namespace heap {
59 class HeapTester;
60 } // namespace heap
61 
62 class AddressToIndexHashMap;
63 class AstStringConstants;
64 class Bootstrapper;
65 class BuiltinsConstantsTableBuilder;
66 class CancelableTaskManager;
67 class CodeEventDispatcher;
68 class CodeTracer;
69 class CompilationCache;
70 class CompilationStatistics;
71 class CompilerDispatcher;
72 class ContextSlotCache;
73 class Counters;
74 class Debug;
75 class DeoptimizerData;
76 class DescriptorLookupCache;
77 class EternalHandles;
78 class ExternalCallbackScope;
79 class HandleScopeImplementer;
80 class HeapObjectToIndexHashMap;
81 class HeapProfiler;
82 class InnerPointerToCodeCache;
83 class Logger;
84 class MaterializedObjectStore;
85 class Microtask;
86 class MicrotaskQueue;
87 class OptimizingCompileDispatcher;
88 class PromiseOnStack;
89 class RegExpStack;
90 class RootVisitor;
91 class RuntimeProfiler;
92 class SaveContext;
93 class SetupIsolateDelegate;
94 class Simulator;
95 class StartupDeserializer;
96 class StandardFrame;
97 class StubCache;
98 class ThreadManager;
99 class ThreadState;
100 class ThreadVisitor; // Defined in v8threads.h
101 class TracingCpuProfilerImpl;
102 class UnicodeCache;
103 struct ManagedPtrDestructor;
104 
105 template <StateTag Tag> class VMState;
106 
107 namespace interpreter {
108 class Interpreter;
109 }
110 
111 namespace compiler {
112 class PerIsolateCompilerCache;
113 }
114 
115 namespace wasm {
116 class WasmEngine;
117 }
118 
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    DCHECK(!__isolate__->has_pending_exception());     \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)

// Macros for MaybeHandle.

#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    DCHECK(!__isolate__->has_pending_exception());          \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

#define ASSIGN_RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                                        \
    Isolate* __isolate__ = (isolate);                                         \
    if (!(call).ToLocal(&dst)) {                                              \
      DCHECK(__isolate__->has_scheduled_exception());                         \
      __isolate__->PromoteScheduledException();                               \
      return value;                                                           \
    }                                                                         \
  } while (false)

#define RETURN_ON_SCHEDULED_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                            \
    Isolate* __isolate__ = (isolate);                             \
    if ((call).IsNothing()) {                                     \
      DCHECK(__isolate__->has_scheduled_exception());             \
      __isolate__->PromoteScheduledException();                   \
      return value;                                               \
    }                                                             \
  } while (false)

#define RETURN_RESULT_OR_FAILURE(isolate, call)      \
  do {                                               \
    Handle<Object> __result__;                       \
    Isolate* __isolate__ = (isolate);                \
    if (!(call).ToHandle(&__result__)) {             \
      DCHECK(__isolate__->has_pending_exception());  \
      return ReadOnlyRoots(__isolate__).exception(); \
    }                                                \
    DCHECK(!__isolate__->has_pending_exception());   \
    return *__result__;                              \
  } while (false)

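// Editorial example (not part of the original header): a minimal sketch of
// how RETURN_RESULT_OR_FAILURE is typically used from a runtime function.
// Runtime_Example and DoOperation are hypothetical names.
//
//   RUNTIME_FUNCTION(Runtime_Example) {
//     HandleScope scope(isolate);
//     // On failure, the pending-exception sentinel is returned; on success,
//     // the unwrapped result object.
//     RETURN_RESULT_OR_FAILURE(isolate, DoOperation(isolate));
//   }
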
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                              \
    if (!(call).ToHandle(&dst)) {                                   \
      DCHECK((isolate)->has_pending_exception());                   \
      return value;                                                 \
    }                                                               \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)                 \
  do {                                                                         \
    Isolate* __isolate__ = (isolate);                                          \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,                   \
                                     ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())

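// Editorial example (hypothetical callee): ASSIGN_RETURN_ON_EXCEPTION binds
// the result on success and propagates an empty MaybeHandle<T> on failure.
//
//   MaybeHandle<String> Concat(Isolate* isolate, Handle<String> a,
//                              Handle<String> b) {
//     Handle<String> result;
//     ASSIGN_RETURN_ON_EXCEPTION(
//         isolate, result, isolate->factory()->NewConsString(a, b), String);
//     return result;
//   }
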
#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_VALUE(isolate, call, value) \
  do {                                                     \
    Isolate* __isolate__ = (isolate);                      \
    __isolate__->Throw(*__isolate__->factory()->call);     \
    return value;                                          \
  } while (false)

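// Editorial example: THROW_NEW_ERROR splices its second argument after
// "factory()->", so the argument is a factory error-constructor call. A
// sketch with a hypothetical message template:
//
//   MaybeHandle<Object> Fail(Isolate* isolate) {
//     THROW_NEW_ERROR(isolate,
//                     NewTypeError(MessageTemplate::kInvalidArgument),
//                     Object);
//   }
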
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                  \
    if ((call).is_null()) {                             \
      DCHECK((isolate)->has_pending_exception());       \
      return value;                                     \
    }                                                   \
  } while (false)

#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)                     \
  do {                                                                 \
    Isolate* __isolate__ = (isolate);                                  \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                       \
                              ReadOnlyRoots(__isolate__).exception()); \
  } while (false)

#define RETURN_ON_EXCEPTION(isolate, call, T) \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())


#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)

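// Editorial example: FOR_WITH_HANDLE_SCOPE behaves like a plain for loop but
// opens a fresh HandleScope every 1024 iterations, so handles created in the
// body do not accumulate for the entire loop (Process is hypothetical):
//
//   FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < n, i++, {
//     Handle<Object> element(array->get(i), isolate);
//     Process(element);
//   });
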
#define FIELD_ACCESSOR(type, name)                \
  inline void set_##name(type v) { name##_ = v; } \
  inline type name() const { return name##_; }

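// Editorial note: FIELD_ACCESSOR(uintptr_t, stack_limit), for example,
// expands to the accessor pair
//
//   inline void set_stack_limit(uintptr_t v) { stack_limit_ = v; }
//   inline uintptr_t stack_limit() const { return stack_limit_; }
//
// backed by a stack_limit_ member that the enclosing class must declare.
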
// Controls for manual embedded blob lifecycle management, used by tests and
// mksnapshot.
V8_EXPORT_PRIVATE void DisableEmbeddedBlobRefcounting();
V8_EXPORT_PRIVATE void FreeCurrentEmbeddedBlob();

class ThreadLocalTop {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop() = default;

  // Initialize the thread data.
  void Initialize(Isolate*);

  // Get the top C++ try catch handler or nullptr if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack. If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or nullptr if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack. When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer. When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  void Free();

  Isolate* isolate_ = nullptr;
  // The context where the current execution method is created and for
  // variable lookups.
  // TODO(3770): This field is read/written from generated code, so it would
  // be cleaner to make it an "Address raw_context_", and construct a Context
  // object in the getter. Same for {pending_handler_context_} below. In the
  // meantime, assert that the memory layout is the same.
  STATIC_ASSERT(sizeof(Context) == kPointerSize);
  Context context_;
  ThreadId thread_id_ = ThreadId::Invalid();
  Object* pending_exception_ = nullptr;

  // Communication channel between Isolate::FindHandler and the CEntry.
  Context pending_handler_context_;
  Address pending_handler_entrypoint_ = kNullAddress;
  Address pending_handler_constant_pool_ = kNullAddress;
  Address pending_handler_fp_ = kNullAddress;
  Address pending_handler_sp_ = kNullAddress;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_ = false;
  Object* pending_message_obj_ = nullptr;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception. We may want to
  // unify them later.
  Object* scheduled_exception_ = nullptr;
  bool external_caught_exception_ = false;
  SaveContext* save_context_ = nullptr;

  // Stack.
  // The frame pointer of the top c entry frame.
  Address c_entry_fp_ = kNullAddress;
  // Try-blocks are chained through the stack.
  Address handler_ = kNullAddress;
  // C function that was called at c entry.
  Address c_function_ = kNullAddress;

  // Throwing an exception may cause a Promise rejection. For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_ = nullptr;

#ifdef USE_SIMULATOR
  Simulator* simulator_ = nullptr;
#endif

  // The stack pointer of the bottom JS entry frame.
  Address js_entry_sp_ = kNullAddress;
  // The external callback we're currently in.
  ExternalCallbackScope* external_callback_scope_ = nullptr;
  StateTag current_vm_state_ = EXTERNAL;

  // Callback function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_ = nullptr;

  // Address of the thread-local "thread in wasm" flag.
  Address thread_in_wasm_flag_address_ = kNullAddress;

 private:
  v8::TryCatch* try_catch_handler_ = nullptr;
};

#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)               \
  V(CommentStatistic, paged_space_comments_statistics, \
    CommentStatistic::kMaxComments + 1)                \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)

#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

typedef std::vector<HeapObject*> DebugObjectCache;

#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr)   \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(ApiImplementationCallback, wasm_compile_streaming_callback, nullptr)      \
  V(WasmStreamingCallback, wasm_streaming_callback, nullptr)                  \
  V(WasmThreadsEnabledCallback, wasm_threads_enabled_callback, nullptr)       \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object*, string_stream_current_security_token, nullptr)                   \
  V(const intptr_t*, api_external_references, nullptr)                        \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(MicrotaskQueue*, default_microtask_queue, nullptr)                        \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  V(int, external_script_source_size, 0)                                      \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints)  \
  /* Current code coverage mode */                                            \
  V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort)  \
  V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone)   \
  V(int, last_stack_frame_info_id, 0)                                         \
  V(int, last_console_context_id, 0)                                          \
  V(v8_inspector::V8Inspector*, inspector, nullptr)                           \
  V(bool, next_v8_call_is_safe_for_termination, false)                        \
  V(bool, only_terminate_in_safe_scope, false)                                \
  V(bool, detailed_source_positions_for_profiling, FLAG_detailed_line_info)

#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; }  \
  inline type name() const { return thread_local_top_.name##_; }

#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }

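// Editorial note: these two macros generate forwarding accessors into
// thread_local_top_. For example, THREAD_LOCAL_TOP_ACCESSOR(ThreadId,
// thread_id) expands to
//
//   inline void set_thread_id(ThreadId v) { thread_local_top_.thread_id_ = v; }
//   inline ThreadId thread_id() const { return thread_local_top_.thread_id_; }
//
// and THREAD_LOCAL_TOP_ADDRESS(ThreadId, thread_id) expands to
//
//   ThreadId* thread_id_address() { return &thread_local_top_.thread_id_; }
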
// HiddenFactory exists so Isolate can privately inherit from it without making
// Factory's members available to Isolate directly.
class V8_EXPORT_PRIVATE HiddenFactory : private Factory {};

class Isolate final : private HiddenFactory {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;

 public:
  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(nullptr),
#if USE_SIMULATOR
          simulator_(nullptr),
#endif
          next_(nullptr),
          prev_(nullptr) {}

    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };

  static void InitializeOncePerProcess();

  // Creates an Isolate object. Must be used instead of constructing an
  // Isolate with the new operator.
  static V8_EXPORT_PRIVATE Isolate* New(
      IsolateAllocationMode mode = IsolateAllocationMode::kDefault);

  // Deletes an Isolate object. Must be used instead of the delete operator.
  // Destroys non-default isolates. The default isolate is put into a
  // "has_been_disposed" state rather than destroyed, for legacy API reasons.
  static void Delete(Isolate* isolate);

  // Returns the allocation mode of this isolate.
  V8_INLINE IsolateAllocationMode isolate_allocation_mode();

  // Page allocator that must be used for allocating V8 heap pages.
  v8::PageAllocator* page_allocator();

  // Returns the PerIsolateThreadData for the current thread (or nullptr if one
  // is not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running, or
  // nullptr.
  V8_INLINE static Isolate* TryGetCurrent() {
    DCHECK_EQ(base::Relaxed_Load(&isolate_key_created_), 1);
    return reinterpret_cast<Isolate*>(
        base::Thread::GetExistingThreadLocal(isolate_key_));
  }

  // Returns the isolate inside which the current thread is running.
  V8_INLINE static Isolate* Current() {
    Isolate* isolate = TryGetCurrent();
    DCHECK_NOT_NULL(isolate);
    return isolate;
  }

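  // Editorial note: a sketch of the difference between the two getters above.
  // TryGetCurrent() is the tolerant form for threads that may never have
  // entered an isolate (UseIsolate is hypothetical):
  //
  //   if (Isolate* isolate = Isolate::TryGetCurrent()) {
  //     UseIsolate(isolate);
  //   }
  //
  // Current() instead DCHECKs that an isolate is set for the current thread.
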
  // Get the isolate that the given HeapObject lives in, returning true on
  // success. If the object is not writable (i.e. lives in read-only space),
  // return false.
  inline static bool FromWritableHeapObject(HeapObject* obj, Isolate** isolate);

  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();
  bool InitializeCounters();  // Returns false if already initialized.

  bool Init(StartupDeserializer* des);

  // True if at least one thread has Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != nullptr; }

  void ReleaseSharedPtrs();

  void ClearSerializerData();

  bool LogObjectRelocation();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();

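  // Editorial note: a sketch of the Enter/Exit pairing described above. The
  // embedder must serialize access itself; embedder_mutex and RunJavaScript
  // are hypothetical:
  //
  //   embedder_mutex.Lock();
  //   isolate->Enter();
  //   RunJavaScript(isolate);
  //   isolate->Exit();  // Restores the previously entered isolate, if any.
  //   embedder_mutex.Unlock();
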
  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

  // Find the PerThread for the given (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);

  // Discard the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, this is a no-op.
  void DiscardPerThreadDataForThisThread();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static base::Thread::LocalStorageKey isolate_key() { return isolate_key_; }

  static base::Thread::LocalStorageKey per_isolate_thread_data_key();

  // Mutex for serializing access to break control structures.
  base::RecursiveMutex* break_access() { return &break_access_; }

  Address get_address_from_id(IsolateAddressId id);

  // Access to top context (where the current function object was created).
  Context context() { return thread_local_top_.context_; }
  inline void set_context(Context context);
  Context* context_address() { return &thread_local_top_.context_; }

  THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)

  // Access to current thread id.
  THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)

  // Interface to pending exception.
  inline Object* pending_exception();
  inline void set_pending_exception(Object* exception_obj);
  inline void clear_pending_exception();

  bool AreWasmThreadsEnabled(Handle<Context> context);

  THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)

  inline bool has_pending_exception();

  THREAD_LOCAL_TOP_ADDRESS(Context, pending_handler_context)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_entrypoint)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_constant_pool)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
  THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)

  THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)

  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.try_catch_handler();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }

  THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)

  inline void clear_pending_message();
  Address pending_message_obj_address() {
    return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
  }

  inline Object* scheduled_exception();
  inline bool has_scheduled_exception();
  inline void clear_scheduled_exception();

  bool IsJavaScriptHandlerOnTop(Object* exception);
  bool IsExternalHandlerOnTop(Object* exception);

  inline bool is_catchable_by_javascript(Object* exception);

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
  Address c_function() { return thread_local_top_.c_function_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }
  inline Address* c_function_address() {
    return &thread_local_top_.c_function_;
  }

  // Bottom JS entry.
  Address js_entry_sp() { return thread_local_top_.js_entry_sp_; }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  inline Handle<JSGlobalObject> global_object();

  // Returns the global proxy object of the current context.
  inline Handle<JSObject> global_proxy();

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the API after operations that may throw
  // exceptions. If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code. If an exception is scheduled, true is returned.
  V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool is_bottom_call);

  // Push and pop a promise and the current try-catch handler.
  void PushPromise(Handle<JSObject> promise);
  void PopPromise();

  // Return the relevant Promise that a throw/rejection pertains to, based
  // on the contents of the Promise stack.
  Handle<Object> GetPromiseOnStackOnThrow();

  // Heuristically guess whether a Promise is handled by a user catch handler.
  bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);

  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not termination exceptions.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };

  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture, int frame_limit, StackTrace::StackTraceOptions options);

  void SetAbortOnUncaughtExceptionCallback(
      v8::Isolate::AbortOnUncaughtExceptionCallback callback);

  enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
  void PrintCurrentStackTrace(FILE* out);
  void PrintStack(StringStream* accumulator,
                  PrintStackMode mode = kPrintStackVerbose);
  V8_EXPORT_PRIVATE void PrintStack(FILE* out,
                                    PrintStackMode mode = kPrintStackVerbose);
  Handle<String> StackTraceString();
  // Stores a stack trace in a stack-allocated temporary buffer which will
  // end up in the minidump for debugging purposes.
  V8_NOINLINE void PushStackTraceAndDie(void* ptr1 = nullptr,
                                        void* ptr2 = nullptr,
                                        void* ptr3 = nullptr,
                                        void* ptr4 = nullptr);
  Handle<FixedArray> CaptureCurrentStackTrace(
      int frame_limit, StackTrace::StackTraceOptions options);
  Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
                                         FrameSkipMode mode,
                                         Handle<Object> caller);
  MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
      Handle<JSReceiver> error_object);
  MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
      Handle<JSReceiver> error_object, FrameSkipMode mode,
      Handle<Object> caller);
  Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);

  Address GetAbstractPC(int* line, int* column);

  // Returns whether the given context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(Handle<JSObject> receiver);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Object* Throw(Object* exception, MessageLocation* location = nullptr);
  Object* ThrowIllegalOperation();

  template <typename T>
  V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
      Handle<Object> exception, MessageLocation* location = nullptr) {
    Throw(*exception, location);
    return MaybeHandle<T>();
  }

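  // Editorial note: the templated Throw<T> lets a MaybeHandle-returning
  // function throw and bail out in one statement. A sketch:
  //
  //   MaybeHandle<JSReceiver> ToReceiver(Isolate* isolate,
  //                                      Handle<Object> exception) {
  //     return isolate->Throw<JSReceiver>(exception);  // Empty MaybeHandle.
  //   }
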
  void set_console_delegate(debug::ConsoleDelegate* delegate) {
    console_delegate_ = delegate;
  }
  debug::ConsoleDelegate* console_delegate() { return console_delegate_; }

  void set_async_event_delegate(debug::AsyncEventDelegate* delegate) {
    async_event_delegate_ = delegate;
    PromiseHookStateUpdated();
  }
  void OnAsyncFunctionStateChanged(Handle<JSPromise> promise,
                                   debug::DebugAsyncActionType);

  // Re-throw an exception. This involves no error reporting since error
  // reporting was handled when the exception was thrown originally.
  Object* ReThrow(Object* exception);

  // Find the correct handler for the current pending exception. This also
  // clears and returns the current pending exception.
  Object* UnwindAndFindHandler();

  // Tries to predict whether an exception will be caught. Note that this can
  // only produce an estimate, because it is undecidable whether a finally
  // clause will consume or re-throw an exception.
  enum CatchType {
    NOT_CAUGHT,
    CAUGHT_BY_JAVASCRIPT,
    CAUGHT_BY_EXTERNAL,
    CAUGHT_BY_DESUGARING,
    CAUGHT_BY_PROMISE,
    CAUGHT_BY_ASYNC_AWAIT
  };
  CatchType PredictExceptionCatcher();

  V8_EXPORT_PRIVATE void ScheduleThrow(Object* exception);
  // Re-set pending message, script and positions reported to the TryCatch
  // back to the TLS for re-use when rethrowing.
  void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
  // Un-schedule an exception that was caught by a TryCatch handler.
  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
  void ReportPendingMessages();
  void ReportPendingMessagesFromJavaScript();

  // Implements code shared between the two methods above.
  void ReportPendingMessagesImpl(bool report_externally);

  // Returns the pending location if any, or an unfilled structure otherwise.
  MessageLocation GetMessageLocation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Object* PromoteScheduledException();

  // Attempts to compute the current source location, storing the
  // result in the target out parameter. The source location is attached to a
  // Message object as the location which should be shown to the user. It's
  // typically the top-most meaningful location on the stack.
  bool ComputeLocation(MessageLocation* target);
  bool ComputeLocationFromException(MessageLocation* target,
                                    Handle<Object> exception);
  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                     Handle<Object> exception);

  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
                                        MessageLocation* location);

  // Out of resource exception helpers.
  Object* StackOverflow();
  Object* TerminateExecution();
  void CancelTerminateExecution();

  void RequestInterrupt(InterruptCallback callback, void* data);
  void InvokeApiInterruptCallbacks();

  // Administration
  void Iterate(RootVisitor* v);
  void Iterate(RootVisitor* v, ThreadLocalTop* t);
  char* Iterate(RootVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v, char* t);

  // Returns the current native context.
  inline Handle<NativeContext> native_context();
  inline NativeContext raw_native_context();

  Handle<Context> GetIncumbentContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                \
  inline type name() const {                                     \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return name##_;                                              \
  }                                                              \
  inline void set_##name(type value) {                           \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    name##_ = value;                                             \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                \
  inline type* name() {                                          \
    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return &(name##_)[0];                                        \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
  inline Handle<type> name();                            \
  inline bool is_##name(type##ArgType value);
  NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  // Use for updating counters on a foreground thread.
  Counters* counters() { return async_counters().get(); }
  // Use for updating counters on a background thread.
  const std::shared_ptr<Counters>& async_counters() {
    // Make sure InitializeCounters() has been called.
    DCHECK_NOT_NULL(async_counters_.get());
    return async_counters_;
  }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    DCHECK_NOT_NULL(logger_);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }

  const IsolateData* isolate_data() const { return &isolate_data_; }
  IsolateData* isolate_data() { return &isolate_data_; }

  // Generated code can embed this address to get access to the
  // isolate-specific data (for example, roots, external references, builtins,
  // etc.). The kRootRegister is set to this value.
  Address isolate_root() const { return isolate_data()->isolate_root(); }
  static size_t isolate_root_bias() {
    return OFFSET_OF(Isolate, isolate_data_) + IsolateData::kIsolateRootBias;
  }

  RootsTable& roots_table() { return isolate_data()->roots(); }

  // A sub-region of the Isolate object that has "predictable" layout which
  // depends only on the pointer size and therefore it's guaranteed that there
  // will be no compatibility issues because of different compilers used for
  // snapshot generator and actual V8 code.
  // Thus, kRootRegister may be used to address any location that falls into
  // this region.
  // See IsolateData::AssertPredictableLayout() for details.
  base::AddressRegion root_register_addressable_region() const {
    return base::AddressRegion(reinterpret_cast<Address>(&isolate_data_),
                               sizeof(IsolateData));
  }

  Object* root(RootIndex index) { return roots_table()[index]; }

  Handle<Object> root_handle(RootIndex index) {
    return Handle<Object>(&roots_table()[index]);
  }

  ExternalReferenceTable* external_reference_table() {
    DCHECK(isolate_data()->external_reference_table()->is_initialized());
    return isolate_data()->external_reference_table();
  }

  V8_INLINE Address* builtins_table() { return isolate_data_.builtins(); }

  StubCache* load_stub_cache() { return load_stub_cache_; }
  StubCache* store_stub_cache() { return store_stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
  void set_deoptimizer_lazy_throw(bool value) {
    deoptimizer_lazy_throw_ = value;
  }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }

  static uint32_t thread_in_wasm_flag_address_offset() {
    // For WebAssembly trap handlers there is a flag in thread-local storage
    // which indicates that the executing thread executes WebAssembly code. To
    // access this flag directly from generated code, we store a pointer to the
    // flag in ThreadLocalTop in thread_in_wasm_flag_address_. This function
    // here returns the offset of that member from {isolate_root()}.
    return static_cast<uint32_t>(
        OFFSET_OF(Isolate, thread_local_top_.thread_in_wasm_flag_address_) -
        isolate_root_bias());
  }

  MaterializedObjectStore* materialized_object_store() {
    return materialized_object_store_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  HandleScopeData* handle_scope_data() { return &handle_scope_data_; }

  HandleScopeImplementer* handle_scope_implementer() {
    DCHECK(handle_scope_implementer_);
    return handle_scope_implementer_;
  }

  UnicodeCache* unicode_cache() { return unicode_cache_; }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  GlobalHandles* global_handles() { return global_handles_; }

  EternalHandles* eternal_handles() { return eternal_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  Builtins* builtins() { return &builtins_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  std::vector<int>* regexp_indices() { return &regexp_indices_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &regexp_macro_assembler_canonicalize_;
  }

  Debug* debug() { return debug_; }

  bool* is_profiling_address() { return &is_profiling_; }
  CodeEventDispatcher* code_event_dispatcher() const {
    return code_event_dispatcher_.get();
  }
  HeapProfiler* heap_profiler() const { return heap_profiler_; }

#ifdef DEBUG
  static size_t non_disposed_isolates() { return non_disposed_isolates_; }
#endif

  v8::internal::Factory* factory() {
    // Upcast to the privately inherited base class using a C-style cast to
    // avoid undefined behavior (as static_cast cannot cast across private
    // bases).
    // NOLINTNEXTLINE (google-readability-casting)
    return (v8::internal::Factory*)this;  // NOLINT(readability/casting)
  }

  static const int kJSRegexpStaticOffsetsVectorSize = 128;

  THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)

  THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)

  void SetData(uint32_t slot, void* data) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    isolate_data_.embedder_data_[slot] = data;
  }
  void* GetData(uint32_t slot) {
    DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
    return isolate_data_.embedder_data_[slot];
  }

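  // Editorial note: slots below Internals::kNumIsolateDataSlots are reserved
  // for the embedder. A sketch (EmbedderState is a hypothetical
  // embedder-defined type):
  //
  //   isolate->SetData(0, new EmbedderState());
  //   auto* state = static_cast<EmbedderState*>(isolate->GetData(0));
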
  bool serializer_enabled() const { return serializer_enabled_; }

  void enable_serializer() { serializer_enabled_ = true; }

  bool snapshot_available() const {
    return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
  }

  bool IsDead() { return has_fatal_error_; }
  void SignalFatalError() { has_fatal_error_ = true; }

  bool use_optimizer();

  bool initialized_from_snapshot() { return initialized_from_snapshot_; }

  bool NeedsSourcePositionsForProfiling() const;

  bool NeedsDetailedOptimizedCodeLineInfo() const;

  bool is_best_effort_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBestEffort;
  }

  bool is_precise_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseCount;
  }

  bool is_precise_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kPreciseBinary;
  }

  bool is_block_count_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockCount;
  }

  bool is_block_binary_code_coverage() const {
    return code_coverage_mode() == debug::Coverage::kBlockBinary;
  }

  bool is_block_code_coverage() const {
    return is_block_count_code_coverage() || is_block_binary_code_coverage();
  }

  bool is_collecting_type_profile() const {
    return type_profile_mode() == debug::TypeProfile::kCollect;
  }

  // Collect feedback vectors with data for code coverage or type profile.
  // Reset the list when both code coverage and type profile are no longer
  // needed. This keeps many feedback vectors alive, but code coverage and
  // type profile are used for debugging only, and an increase in memory
  // usage is expected.
  void SetFeedbackVectorsForProfilingTools(Object* value);

  void MaybeInitializeVectorListFromHeap();

  double time_millis_since_init() {
    return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
  }

  DateCache* date_cache() { return date_cache_; }

  void set_date_cache(DateCache* date_cache);

#ifdef V8_INTL_SUPPORT

  const std::string& default_locale() { return default_locale_; }

  void set_default_locale(const std::string& locale) {
    DCHECK_EQ(default_locale_.length(), 0);
    default_locale_ = locale;
  }

  // Enum to access the ICU object cache.
  enum class ICUObjectCacheType {
    kDefaultCollator,
    kDefaultNumberFormat,
    kDefaultSimpleDateFormat,
    kDefaultSimpleDateFormatForTime,
    kDefaultSimpleDateFormatForDate
  };

  icu::UObject* get_cached_icu_object(ICUObjectCacheType cache_type);
  void set_icu_object_in_cache(ICUObjectCacheType cache_type,
                               std::shared_ptr<icu::UObject> obj);
  void clear_cached_icu_object(ICUObjectCacheType cache_type);

#endif  // V8_INTL_SUPPORT

  static const int kProtectorValid = 1;
  static const int kProtectorInvalid = 0;

  inline bool IsArrayConstructorIntact();

  // The version with an explicit context parameter can be used when
  // Isolate::context is not set up, e.g. when calling directly into C++ from
  // CSA.
  bool IsNoElementsProtectorIntact(Context context);
  bool IsNoElementsProtectorIntact();

  bool IsArrayOrObjectOrStringPrototype(Object* object);

  inline bool IsArraySpeciesLookupChainIntact();
  inline bool IsTypedArraySpeciesLookupChainIntact();
  inline bool IsRegExpSpeciesLookupChainIntact();
  inline bool IsPromiseSpeciesLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact();
  bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
  inline bool IsStringLengthOverflowIntact();
  inline bool IsArrayIteratorLookupChainIntact();

  // The MapIterator protector protects the original iteration behaviors of
  // Map.prototype.keys(), Map.prototype.values(), and Set.prototype.entries().
  // It does not protect the original iteration behavior of
  // Map.prototype[Symbol.iterator](). The protector is invalidated when:
  // * The 'next' property is set on an object where the property holder is the
  //   %MapIteratorPrototype% (e.g. because the object is that very prototype).
  // * The 'Symbol.iterator' property is set on an object where the property
  //   holder is the %IteratorPrototype%. Note that this also invalidates the
  //   SetIterator protector (see below).
  inline bool IsMapIteratorLookupChainIntact();

  // The SetIterator protector protects the original iteration behavior of
  // Set.prototype.keys(), Set.prototype.values(), Set.prototype.entries(),
  // and Set.prototype[Symbol.iterator](). The protector is invalidated when:
  // * The 'next' property is set on an object where the property holder is the
  //   %SetIteratorPrototype% (e.g. because the object is that very prototype).
  // * The 'Symbol.iterator' property is set on an object where the property
  //   holder is the %SetPrototype% OR %IteratorPrototype%. This means that
  //   setting Symbol.iterator on a MapIterator object can also invalidate the
  //   SetIterator protector, and vice versa, setting Symbol.iterator on a
  //   SetIterator object can also invalidate the MapIterator. This is an over-
  //   approximation for the sake of simplicity.
  inline bool IsSetIteratorLookupChainIntact();

  // The StringIteratorProtector protects the original string iteration
  // behavior for primitive strings. As long as the StringIteratorProtector is
  // valid, iterating over a primitive string is guaranteed to be unobservable
  // from user code and can thus be cut short. More specifically, the protector
  // gets invalidated as soon as either String.prototype[Symbol.iterator] or
  // String.prototype[Symbol.iterator]().next is modified. This guarantee does
  // not apply to string objects (as opposed to primitives), since they could
  // define their own Symbol.iterator.
  // String.prototype itself does not need to be protected, since it is
  // non-configurable and non-writable.
  inline bool IsStringIteratorLookupChainIntact();

  // Make sure we do check for neutered array buffers.
  inline bool IsArrayBufferNeuteringIntact();

  // Disable promise optimizations if promise (debug) hooks have ever been
  // active.
  bool IsPromiseHookProtectorIntact();

  // Make sure a lookup of "resolve" on the %Promise% intrinsic object
  // yields the initial Promise.resolve method.
  bool IsPromiseResolveLookupChainIntact();

  // Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
  // initial %PromisePrototype% yields the initial method. In addition this
  // protector also guards the negative lookup of "then" on the intrinsic
  // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
  // undefined without triggering any side-effects.
  bool IsPromiseThenLookupChainIntact();
  bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);

  // On intent to set an element in an object, make sure that appropriate
  // notifications occur if the set is on the elements of the array or
  // object prototype. Also ensure that changes to the prototype chain between
  // Array and Object fire notifications.
  void UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object);
  void UpdateNoElementsProtectorOnSetLength(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnSetPrototype(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void UpdateNoElementsProtectorOnNormalizeElements(Handle<JSObject> object) {
    UpdateNoElementsProtectorOnSetElement(object);
  }
  void InvalidateArrayConstructorProtector();
  void InvalidateArraySpeciesProtector();
  void InvalidateTypedArraySpeciesProtector();
  void InvalidateRegExpSpeciesProtector();
  void InvalidatePromiseSpeciesProtector();
  void InvalidateIsConcatSpreadableProtector();
  void InvalidateStringLengthOverflowProtector();
  void InvalidateArrayIteratorProtector();
  void InvalidateMapIteratorProtector();
  void InvalidateSetIteratorProtector();
  void InvalidateStringIteratorProtector();
  void InvalidateArrayBufferNeuteringProtector();
  V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
  void InvalidatePromiseResolveProtector();
  void InvalidatePromiseThenProtector();

  // Returns true if array is the initial array prototype in any native
  // context.
  bool IsAnyInitialArrayPrototype(Handle<JSArray> array);

  void IterateDeferredHandles(RootVisitor* visitor);
  void LinkDeferredHandles(DeferredHandles* deferred_handles);
  void UnlinkDeferredHandles(DeferredHandles* deferred_handles);

#ifdef DEBUG
  bool IsDeferredHandle(Address* location);
#endif  // DEBUG

  bool concurrent_recompilation_enabled() {
    // Thread is only available with flag enabled.
    DCHECK(optimizing_compile_dispatcher_ == nullptr ||
           FLAG_concurrent_recompilation);
    return optimizing_compile_dispatcher_ != nullptr;
  }

  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
    return optimizing_compile_dispatcher_;
  }
  // Flushes all pending concurrent optimization jobs from the optimizing
  // compile dispatcher's queue.
  void AbortConcurrentOptimization(BlockingBehavior blocking_behavior);

  int id() const { return static_cast<int>(id_); }

  CompilationStatistics* GetTurboStatistics();
  CodeTracer* GetCodeTracer();

  void DumpAndResetStats();

  void* stress_deopt_count_address() { return &stress_deopt_count_; }

  void set_force_slow_path(bool v) { force_slow_path_ = v; }
  bool force_slow_path() const { return force_slow_path_; }
  bool* force_slow_path_address() { return &force_slow_path_; }

  DebugInfo::ExecutionMode* debug_execution_mode_address() {
    return &debug_execution_mode_;
  }

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();

  V8_EXPORT_PRIVATE base::RandomNumberGenerator* fuzzer_rng();

  // Generates a random number that is non-zero when masked
  // with the provided mask.
  int GenerateIdentityHash(uint32_t mask);

  // Given an address occupied by a live code object, return that object.
  Code FindCodeObject(Address a);

  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }

  void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
  void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
                                   size_t heap_limit);
  void AddCallCompletedCallback(CallCompletedCallback callback);
  void RemoveCallCompletedCallback(CallCompletedCallback callback);
  void FireCallCompletedCallback();

  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
  inline void FireBeforeCallEnteredCallback();

  void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
  inline void FireMicrotasksCompletedCallback();

  void SetPromiseRejectCallback(PromiseRejectCallback callback);
  void ReportPromiseReject(Handle<JSPromise> promise, Handle<Object> value,
                           v8::PromiseRejectEvent event);

  void EnqueueMicrotask(Handle<Microtask> microtask);
  void RunMicrotasks();
  bool IsRunningMicrotasks() const { return is_running_microtasks_; }

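  // Editorial note: a sketch of the microtask queue API above, assuming the
  // factory's NewCallableTask constructor; treat the names as illustrative:
  //
  //   Handle<Microtask> task =
  //       isolate->factory()->NewCallableTask(callable, context);
  //   isolate->EnqueueMicrotask(task);
  //   if (!isolate->IsRunningMicrotasks()) isolate->RunMicrotasks();
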
  Handle<Symbol> SymbolFor(RootIndex dictionary_index, Handle<String> name,
                           bool private_symbol);

  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
  void CountUsage(v8::Isolate::UseCounterFeature feature);

  static std::string GetTurboCfgFileName(Isolate* isolate);

#if V8_SFI_HAS_UNIQUE_ID
  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif

  Address promise_hook_address() {
    return reinterpret_cast<Address>(&promise_hook_);
  }

  Address async_event_delegate_address() {
    return reinterpret_cast<Address>(&async_event_delegate_);
  }

  Address promise_hook_or_async_event_delegate_address() {
    return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
  }

  Address default_microtask_queue_address() {
    return reinterpret_cast<Address>(&default_microtask_queue_);
  }

  Address promise_hook_or_debug_is_active_or_async_event_delegate_address() {
    return reinterpret_cast<Address>(
        &promise_hook_or_debug_is_active_or_async_event_delegate_);
  }

  Address handle_scope_implementer_address() {
    return reinterpret_cast<Address>(&handle_scope_implementer_);
  }

  void SetAtomicsWaitCallback(v8::Isolate::AtomicsWaitCallback callback,
                              void* data);
  void RunAtomicsWaitCallback(v8::Isolate::AtomicsWaitEvent event,
                              Handle<JSArrayBuffer> array_buffer,
                              size_t offset_in_bytes, int32_t value,
                              double timeout_in_ms,
                              AtomicsWaitWakeHandle* stop_handle);

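  // Editorial note: a sketch of the AtomicsWaitCallback hook above, which is
  // invoked around Atomics.wait operations. The callback body is hypothetical;
  // the signature follows the public v8::Isolate::AtomicsWaitCallback:
  //
  //   void OnAtomicsWait(v8::Isolate::AtomicsWaitEvent event,
  //                      v8::Local<v8::SharedArrayBuffer> buffer,
  //                      size_t offset_in_bytes, int64_t value,
  //                      double timeout_in_ms,
  //                      v8::Isolate::AtomicsWaitWakeHandle* stop_handle,
  //                      void* data) {
  //     if (event == v8::Isolate::AtomicsWaitEvent::kStartWait) { /* ... */ }
  //   }
  //
  //   v8_isolate->SetAtomicsWaitCallback(OnAtomicsWait, nullptr);
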
  void SetPromiseHook(PromiseHook hook);
  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
                      Handle<Object> parent);
  void PromiseHookStateUpdated();

  void AddDetachedContext(Handle<Context> context);
  void CheckDetachedContextsAfterGC();

  std::vector<Object*>* read_only_object_cache() {
    return &read_only_object_cache_;
  }

  std::vector<Object*>* partial_snapshot_cache() {
    return &partial_snapshot_cache_;
  }

  // Off-heap builtins cannot embed constants within the code object itself,
  // and thus need to load them from the root list.
  // TODO(jgruber): Rename to IsGeneratingEmbeddedBuiltins().
  bool ShouldLoadConstantsFromRootList() const {
    return FLAG_embedded_builtins &&
           builtins_constants_table_builder() != nullptr;
  }

  BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
    return builtins_constants_table_builder_;
  }

  static const uint8_t* CurrentEmbeddedBlob();
  static uint32_t CurrentEmbeddedBlobSize();

  // These always return the same result as the static methods above, but
  // don't access the global atomic variable (and thus *might be* slightly
  // faster).
  const uint8_t* embedded_blob() const;
  uint32_t embedded_blob_size() const;

  void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
    array_buffer_allocator_ = allocator;
  }
  v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
    return array_buffer_allocator_;
  }
1504 
1505  FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
1506 
1507  CancelableTaskManager* cancelable_task_manager() {
1508  return cancelable_task_manager_;
1509  }
1510 
1511  const AstStringConstants* ast_string_constants() const {
1512  return ast_string_constants_;
1513  }
1514 
1515  interpreter::Interpreter* interpreter() const { return interpreter_; }
1516 
1517  compiler::PerIsolateCompilerCache* compiler_cache() const {
1518  return compiler_cache_;
1519  }
1520  void set_compiler_utils(compiler::PerIsolateCompilerCache* cache,
1521  Zone* zone) {
1522  compiler_cache_ = cache;
1523  compiler_zone_ = zone;
1524  }
1525 
1526  AccountingAllocator* allocator() { return allocator_; }
1527 
1528  CompilerDispatcher* compiler_dispatcher() const {
1529  return compiler_dispatcher_;
1530  }
1531 
1532  bool IsInAnyContext(Object* object, uint32_t index);
1533 
1534  void SetHostImportModuleDynamicallyCallback(
1535  HostImportModuleDynamicallyCallback callback);
1536  MaybeHandle<JSPromise> RunHostImportModuleDynamicallyCallback(
1537  Handle<Script> referrer, Handle<Object> specifier);
1538 
1539  void SetHostInitializeImportMetaObjectCallback(
1540  HostInitializeImportMetaObjectCallback callback);
1541  Handle<JSObject> RunHostInitializeImportMetaObjectCallback(
1542  Handle<Module> module);
1543 
1544  void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
1545  MaybeHandle<Object> RunPrepareStackTraceCallback(Handle<Context>,
1546  Handle<JSObject> Error,
1547  Handle<JSArray> sites);
1548  bool HasPrepareStackTraceCallback() const;
1549 
1550  void SetRAILMode(RAILMode rail_mode);
1551 
1552  RAILMode rail_mode() { return rail_mode_.Value(); }
1553 
1554  double LoadStartTimeMs();
1555 
1556  void IsolateInForegroundNotification();
1557 
1558  void IsolateInBackgroundNotification();
1559 
1560  bool IsIsolateInBackground() { return is_isolate_in_background_; }
1561 
1562  void EnableMemorySavingsMode() { memory_savings_mode_active_ = true; }
1563 
1564  void DisableMemorySavingsMode() { memory_savings_mode_active_ = false; }
1565 
1566  bool IsMemorySavingsModeActive() { return memory_savings_mode_active_; }
1567 
1568  PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
1569 
1570  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
1571  bool allow_atomics_wait() { return allow_atomics_wait_; }
1572 
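This flag backs the public v8::Isolate::SetAllowAtomicsWait() API; browsers use it to forbid blocking waits on their main thread. A one-line sketch, assuming a live v8::Isolate* isolate:

    isolate->SetAllowAtomicsWait(false);  // Atomics.wait() in JS now throws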
1573  // Register a finalizer to be called at isolate teardown.
1574  void RegisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
1575 
1576  // Removes a previously-registered shared object finalizer.
1577  void UnregisterManagedPtrDestructor(ManagedPtrDestructor* finalizer);
1578 
1579  size_t elements_deletion_counter() { return elements_deletion_counter_; }
1580  void set_elements_deletion_counter(size_t value) {
1581  elements_deletion_counter_ = value;
1582  }
1583 
1584  wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
1585  void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);
1586 
1587  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope() const {
1588  return top_backup_incumbent_scope_;
1589  }
1590  void set_top_backup_incumbent_scope(
1591  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope) {
1592  top_backup_incumbent_scope_ = top_backup_incumbent_scope;
1593  }
1594 
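The incumbent scope stack is pushed from the embedder via v8::Context::BackupIncumbentScope, which V8 consults when the HTML spec's incumbent realm matters. A hedged sketch (CallWithIncumbent is an illustrative name; error handling is elided):

    #include "include/v8.h"

    void CallWithIncumbent(v8::Local<v8::Context> incumbent,
                           v8::Local<v8::Context> current,
                           v8::Local<v8::Function> fn) {
      v8::Context::BackupIncumbentScope scope(incumbent);
      // top_backup_incumbent_scope() reports |scope| during this call.
      v8::MaybeLocal<v8::Value> result =
          fn->Call(current, current->Global(), 0, nullptr);
      (void)result;  // error handling elided in this sketch
    }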
1595  void SetIdle(bool is_idle);
1596 
1597  private:
1598  explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
1599  ~Isolate();
1600 
1601  void CheckIsolateLayout();
1602 
1603  class ThreadDataTable {
1604  public:
1605  ThreadDataTable() = default;
1606 
1607  PerIsolateThreadData* Lookup(ThreadId thread_id);
1608  void Insert(PerIsolateThreadData* data);
1609  void Remove(PerIsolateThreadData* data);
1610  void RemoveAllThreads();
1611 
1612  private:
1613  struct Hasher {
1614  std::size_t operator()(const ThreadId& t) const {
1615  return std::hash<int>()(t.ToInteger());
1616  }
1617  };
1618 
1619  std::unordered_map<ThreadId, PerIsolateThreadData*, Hasher> table_;
1620  };
1621 
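ThreadDataTable keys its unordered_map on ThreadId by supplying the Hasher functor above. The same standard-library pattern in a self-contained form (MyId and MyIdHasher are invented for illustration):

    #include <cstddef>
    #include <functional>
    #include <unordered_map>

    struct MyId {
      int value;
      bool operator==(const MyId& other) const { return value == other.value; }
    };

    struct MyIdHasher {
      std::size_t operator()(const MyId& id) const {
        return std::hash<int>()(id.value);  // delegate to the int hash
      }
    };

    // unordered_map needs both the hasher and operator== on the key type.
    std::unordered_map<MyId, const char*, MyIdHasher> table;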
1622  // These items form a stack, pushed and popped as threads Enter and Exit
1623  // the Isolate. The top of the stack points to the thread that is currently
1624  // running the Isolate. When the stack is empty, the Isolate is considered
1625  // not entered by any thread and can be Disposed.
1626  // If the same thread enters the Isolate more than once, the entry_count_
1627  // is incremented rather than a new item being pushed onto the stack.
1628  class EntryStackItem {
1629  public:
1630  EntryStackItem(PerIsolateThreadData* previous_thread_data,
1631  Isolate* previous_isolate,
1632  EntryStackItem* previous_item)
1633  : entry_count(1),
1634  previous_thread_data(previous_thread_data),
1635  previous_isolate(previous_isolate),
1636  previous_item(previous_item) { }
1637 
1638  int entry_count;
1639  PerIsolateThreadData* previous_thread_data;
1640  Isolate* previous_isolate;
1641  EntryStackItem* previous_item;
1642 
1643  private:
1644  DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
1645  };
1646 
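From the embedder's perspective, this entry_count bookkeeping is what makes v8::Isolate::Scope cheaply re-entrant on a single thread. A minimal sketch (NestedEntry is an illustrative name):

    #include "include/v8.h"

    void NestedEntry(v8::Isolate* isolate) {
      v8::Isolate::Scope outer(isolate);    // pushes an EntryStackItem
      {
        v8::Isolate::Scope inner(isolate);  // same thread: entry_count++
      }                                     // entry_count--, no pop yet
    }                                       // outermost exit pops the item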
1647  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
1648  static base::Thread::LocalStorageKey isolate_key_;
1649 
1650  // A global counter for all generated Isolates; it might overflow.
1651  static base::Atomic32 isolate_counter_;
1652 
1653 #if DEBUG
1654  static base::Atomic32 isolate_key_created_;
1655 #endif
1656 
1657  void Deinit();
1658 
1659  static void SetIsolateThreadLocals(Isolate* isolate,
1660  PerIsolateThreadData* data);
1661 
1662  void InitializeThreadLocal();
1663 
1664  void MarkCompactPrologue(bool is_compacting,
1665  ThreadLocalTop* archived_thread_data);
1666  void MarkCompactEpilogue(bool is_compacting,
1667  ThreadLocalTop* archived_thread_data);
1668 
1669  void FillCache();
1670 
1671  // Propagate pending exception message to the v8::TryCatch.
1672  // Returns true if there is no external try-catch or if the message was
1673  // successfully propagated to it.
1674  bool PropagatePendingExceptionToExternalTryCatch();
1675 
1676  void SetTerminationOnExternalTryCatch();
1677 
1678  void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
1679  Handle<JSPromise> promise);
1680 
1681  const char* RAILModeName(RAILMode rail_mode) const {
1682  switch (rail_mode) {
1683  case PERFORMANCE_RESPONSE:
1684  return "RESPONSE";
1685  case PERFORMANCE_ANIMATION:
1686  return "ANIMATION";
1687  case PERFORMANCE_IDLE:
1688  return "IDLE";
1689  case PERFORMANCE_LOAD:
1690  return "LOAD";
1691  }
1692  return "";
1693  }
1694 
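SetRAILMode is also exposed on the public v8::Isolate; embedders report the current RAIL phase so V8 can trade latency against throughput (for example, deferring heavy GC work during LOAD). A hedged sketch with illustrative function names:

    #include "include/v8.h"

    void OnPageLoadStarted(v8::Isolate* isolate) {
      isolate->SetRAILMode(v8::PERFORMANCE_LOAD);
    }

    void OnPageBecameIdle(v8::Isolate* isolate) {
      isolate->SetRAILMode(v8::PERFORMANCE_IDLE);
    }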
1695  // This class contains a collection of data accessible from both the C++
1696  // runtime and compiled code (including assembly stubs, builtins,
1697  // interpreter bytecode handlers, and optimized code).
1698  IsolateData isolate_data_;
1699 
1700  std::unique_ptr<IsolateAllocator> isolate_allocator_;
1701  Heap heap_;
1702 
1703  base::Atomic32 id_;
1704  EntryStackItem* entry_stack_ = nullptr;
1705  int stack_trace_nesting_level_ = 0;
1706  StringStream* incomplete_message_ = nullptr;
1707  Address isolate_addresses_[kIsolateAddressCount + 1] = {};
1708  Bootstrapper* bootstrapper_ = nullptr;
1709  RuntimeProfiler* runtime_profiler_ = nullptr;
1710  CompilationCache* compilation_cache_ = nullptr;
1711  std::shared_ptr<Counters> async_counters_;
1712  base::RecursiveMutex break_access_;
1713  Logger* logger_ = nullptr;
1714  StackGuard stack_guard_;
1715  StubCache* load_stub_cache_ = nullptr;
1716  StubCache* store_stub_cache_ = nullptr;
1717  DeoptimizerData* deoptimizer_data_ = nullptr;
1718  bool deoptimizer_lazy_throw_ = false;
1719  MaterializedObjectStore* materialized_object_store_ = nullptr;
1720  ThreadLocalTop thread_local_top_;
1721  bool capture_stack_trace_for_uncaught_exceptions_ = false;
1722  int stack_trace_for_uncaught_exceptions_frame_limit_ = 0;
1723  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_ =
1724  StackTrace::kOverview;
1725  DescriptorLookupCache* descriptor_lookup_cache_ = nullptr;
1726  HandleScopeData handle_scope_data_;
1727  HandleScopeImplementer* handle_scope_implementer_ = nullptr;
1728  UnicodeCache* unicode_cache_ = nullptr;
1729  AccountingAllocator* allocator_ = nullptr;
1730  InnerPointerToCodeCache* inner_pointer_to_code_cache_ = nullptr;
1731  GlobalHandles* global_handles_ = nullptr;
1732  EternalHandles* eternal_handles_ = nullptr;
1733  ThreadManager* thread_manager_ = nullptr;
1734  RuntimeState runtime_state_;
1735  Builtins builtins_;
1736  SetupIsolateDelegate* setup_delegate_ = nullptr;
1737  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
1738  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
1739  unibrow::Mapping<unibrow::Ecma262Canonicalize>
1740  regexp_macro_assembler_canonicalize_;
1741  RegExpStack* regexp_stack_ = nullptr;
1742  std::vector<int> regexp_indices_;
1743  DateCache* date_cache_ = nullptr;
1744  base::RandomNumberGenerator* random_number_generator_ = nullptr;
1745  base::RandomNumberGenerator* fuzzer_rng_ = nullptr;
1746  base::AtomicValue<RAILMode> rail_mode_;
1747  v8::Isolate::AtomicsWaitCallback atomics_wait_callback_ = nullptr;
1748  void* atomics_wait_callback_data_ = nullptr;
1749  PromiseHook promise_hook_ = nullptr;
1750  HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_ =
1751  nullptr;
1752  HostInitializeImportMetaObjectCallback
1753  host_initialize_import_meta_object_callback_ = nullptr;
1754  base::Mutex rail_mutex_;
1755  double load_start_time_ms_ = 0;
1756 
1757 #ifdef V8_INTL_SUPPORT
1758  std::string default_locale_;
1759 
1760  struct ICUObjectCacheTypeHash {
1761  std::size_t operator()(ICUObjectCacheType a) const {
1762  return static_cast<std::size_t>(a);
1763  }
1764  };
1765  std::unordered_map<ICUObjectCacheType, std::shared_ptr<icu::UObject>,
1766  ICUObjectCacheTypeHash>
1767  icu_object_cache_;
1768 
1769 #endif // V8_INTL_SUPPORT
1770 
1771  // Whether the isolate has been created for snapshotting.
1772  bool serializer_enabled_ = false;
1773 
1774  // True if fatal error has been signaled for this isolate.
1775  bool has_fatal_error_ = false;
1776 
1777  // True if this isolate was initialized from a snapshot.
1778  bool initialized_from_snapshot_ = false;
1779 
1780  // TODO(ishell): remove
1781  // True if ES2015 tail call elimination feature is enabled.
1782  bool is_tail_call_elimination_enabled_ = true;
1783 
1784  // True if the isolate is in the background. This flag is used
1785  // to prioritize between memory usage and latency.
1786  bool is_isolate_in_background_ = false;
1787 
1788  // True if the isolate is in memory savings mode. This flag is used to
1789  // favor memory over runtime performance.
1790  bool memory_savings_mode_active_ = false;
1791 
1792  // Time stamp at initialization.
1793  double time_millis_at_init_ = 0;
1794 
1795 #ifdef DEBUG
1796  static std::atomic<size_t> non_disposed_isolates_;
1797 
1798  JSObject::SpillInformation js_spill_information_;
1799 #endif
1800 
1801  Debug* debug_ = nullptr;
1802  HeapProfiler* heap_profiler_ = nullptr;
1803  std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
1804 
1805  const AstStringConstants* ast_string_constants_ = nullptr;
1806 
1807  interpreter::Interpreter* interpreter_ = nullptr;
1808 
1809  compiler::PerIsolateCompilerCache* compiler_cache_ = nullptr;
1810  Zone* compiler_zone_ = nullptr;
1811 
1812  CompilerDispatcher* compiler_dispatcher_ = nullptr;
1813 
1814  typedef std::pair<InterruptCallback, void*> InterruptEntry;
1815  std::queue<InterruptEntry> api_interrupts_queue_;
1816 
1817 #define GLOBAL_BACKING_STORE(type, name, initialvalue) \
1818  type name##_;
1819  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
1820 #undef GLOBAL_BACKING_STORE
1821 
1822 #define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
1823  type name##_[length];
1824  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
1825 #undef GLOBAL_ARRAY_BACKING_STORE
1826 
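GLOBAL_BACKING_STORE and GLOBAL_ARRAY_BACKING_STORE are X-macros: ISOLATE_INIT_LIST invokes the macro passed to it once per list entry, stamping out one member per entry. A self-contained illustration of the technique (MY_FIELD_LIST, DECLARE_FIELD, and Fields are invented for this example):

    // Each list entry supplies (type, name, initial_value).
    #define MY_FIELD_LIST(V)   \
      V(int, answer, 42)       \
      V(bool, verbose, false)

    struct Fields {
    #define DECLARE_FIELD(type, name, initial) type name##_ = initial;
      MY_FIELD_LIST(DECLARE_FIELD)
    #undef DECLARE_FIELD
    };
    // Expands to:  int answer_ = 42;  bool verbose_ = false;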
1827 #ifdef DEBUG
1828  // This class is huge and has a number of fields controlled by
1829  // preprocessor defines. Make sure the offsets of these fields agree
1830  // between compilation units.
1831 #define ISOLATE_FIELD_OFFSET(type, name, ignored) \
1832  V8_EXPORT_PRIVATE static const intptr_t name##_debug_offset_;
1833  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
1834  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
1835 #undef ISOLATE_FIELD_OFFSET
1836 #endif
1837 
1838  DeferredHandles* deferred_handles_head_ = nullptr;
1839  OptimizingCompileDispatcher* optimizing_compile_dispatcher_ = nullptr;
1840 
1841  // Counts deopt points if deopt_every_n_times is enabled.
1842  unsigned int stress_deopt_count_ = 0;
1843 
1844  bool force_slow_path_ = false;
1845 
1846  int next_optimization_id_ = 0;
1847 
1848 #if V8_SFI_HAS_UNIQUE_ID
1849  int next_unique_sfi_id_ = 0;
1850 #endif
1851 
1852  // Vector of callbacks before a Call starts execution.
1853  std::vector<BeforeCallEnteredCallback> before_call_entered_callbacks_;
1854 
1855  // Vector of callbacks when a Call completes.
1856  std::vector<CallCompletedCallback> call_completed_callbacks_;
1857 
1858  // Vector of callbacks after microtasks were run.
1859  std::vector<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
1860  bool is_running_microtasks_ = false;
1861 
1862  v8::Isolate::UseCounterCallback use_counter_callback_ = nullptr;
1863 
1864  std::vector<Object*> read_only_object_cache_;
1865  std::vector<Object*> partial_snapshot_cache_;
1866 
1867  // Used during builtins compilation to build the builtins constants table,
1868  // which is stored on the root list prior to serialization.
1869  BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
1870 
1871  void InitializeDefaultEmbeddedBlob();
1872  void CreateAndSetEmbeddedBlob();
1873  void TearDownEmbeddedBlob();
1874 
1875  void SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size);
1876  void ClearEmbeddedBlob();
1877 
1878  const uint8_t* embedded_blob_ = nullptr;
1879  uint32_t embedded_blob_size_ = 0;
1880 
1881  v8::ArrayBuffer::Allocator* array_buffer_allocator_ = nullptr;
1882 
1883  FutexWaitListNode futex_wait_list_node_;
1884 
1885  CancelableTaskManager* cancelable_task_manager_ = nullptr;
1886 
1887  debug::ConsoleDelegate* console_delegate_ = nullptr;
1888 
1889  debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
1890  bool promise_hook_or_async_event_delegate_ = false;
1891  bool promise_hook_or_debug_is_active_or_async_event_delegate_ = false;
1892  int async_task_count_ = 0;
1893 
1894  v8::Isolate::AbortOnUncaughtExceptionCallback
1895  abort_on_uncaught_exception_callback_ = nullptr;
1896 
1897  bool allow_atomics_wait_ = true;
1898 
1899  base::Mutex managed_ptr_destructors_mutex_;
1900  ManagedPtrDestructor* managed_ptr_destructors_head_ = nullptr;
1901 
1902  size_t total_regexp_code_generated_ = 0;
1903 
1904  size_t elements_deletion_counter_ = 0;
1905 
1906  std::shared_ptr<wasm::WasmEngine> wasm_engine_;
1907 
1908  std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;
1909 
1910  // The top entry of the v8::Context::BackupIncumbentScope stack.
1911  const v8::Context::BackupIncumbentScope* top_backup_incumbent_scope_ =
1912  nullptr;
1913 
1914  PrepareStackTraceCallback prepare_stack_trace_callback_ = nullptr;
1915 
1916  // TODO(kenton@cloudflare.com): This mutex can be removed if
1917  // thread_data_table_ is always accessed under the isolate lock. I do not
1918  // know if this is the case, so I'm preserving it for now.
1919  base::Mutex thread_data_table_mutex_;
1920  ThreadDataTable thread_data_table_;
1921 
1922  // Delete the regular new/delete operators (keeping placement new) so that
1923  // Isolate::New() and Isolate::Delete() are used for creation and deletion.
1924  void* operator new(size_t, void* ptr) { return ptr; }
1925  void* operator new(size_t) = delete;
1926  void operator delete(void*) = delete;
1927 
1928  friend class heap::HeapTester;
1929  friend class TestSerializer;
1930 
1931  DISALLOW_COPY_AND_ASSIGN(Isolate);
1932 };
1933 
1934 
1935 #undef FIELD_ACCESSOR
1936 #undef THREAD_LOCAL_TOP_ACCESSOR
1937 
1938 
1939 class PromiseOnStack {
1940  public:
1941  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
1942  : promise_(promise), prev_(prev) {}
1943  Handle<JSObject> promise() { return promise_; }
1944  PromiseOnStack* prev() { return prev_; }
1945 
1946  private:
1947  Handle<JSObject> promise_;
1948  PromiseOnStack* prev_;
1949 };
1950 
1951 
1952 // If the GCC version is 4.1.x or 4.2.x, an additional field is added to
1953 // the class as a workaround for a bug in the generated code found with
1954 // these versions of GCC. See V8 issue 122 for details.
1955 class V8_EXPORT_PRIVATE SaveContext {
1956  public:
1957  explicit SaveContext(Isolate* isolate);
1958  ~SaveContext();
1959 
1960  Handle<Context> context() { return context_; }
1961  SaveContext* prev() { return prev_; }
1962 
1963  // Returns true if this save context is below a given JavaScript frame.
1964  bool IsBelowFrame(StandardFrame* frame);
1965 
1966  private:
1967  Isolate* const isolate_;
1968  Handle<Context> context_;
1969  SaveContext* const prev_;
1970  Address c_entry_fp_;
1971 };
1972 
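SaveContext is a RAII guard: it records the isolate's current context (and C entry frame pointer) and restores them on destruction. An illustrative internal-style usage sketch (WithTemporaryContext is an invented name):

    void WithTemporaryContext(Isolate* isolate, Handle<Context> temp) {
      SaveContext save(isolate);    // remembers the current context
      isolate->set_context(*temp);  // switch to |temp|
      // ... run code that requires |temp| to be the current context ...
    }                               // destructor restores the saved context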
1973 class AssertNoContextChange {
1974 #ifdef DEBUG
1975  public:
1976  explicit AssertNoContextChange(Isolate* isolate);
1977  ~AssertNoContextChange() {
1978  DCHECK(isolate_->context() == *context_);
1979  }
1980 
1981  private:
1982  Isolate* isolate_;
1983  Handle<Context> context_;
1984 #else
1985  public:
1986  explicit AssertNoContextChange(Isolate* isolate) { }
1987 #endif
1988 };
1989 
1990 class ExecutionAccess {
1991  public:
1992  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
1993  Lock(isolate);
1994  }
1995  ~ExecutionAccess() { Unlock(isolate_); }
1996 
1997  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
1998  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
1999 
2000  static bool TryLock(Isolate* isolate) {
2001  return isolate->break_access()->TryLock();
2002  }
2003 
2004  private:
2005  Isolate* isolate_;
2006 };
2007 
2008 
2009 // Support for checking for stack-overflows.
2010 class StackLimitCheck {
2011  public:
2012  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
2013 
2014  // Use this to check for stack-overflows in C++ code.
2015  bool HasOverflowed() const {
2016  StackGuard* stack_guard = isolate_->stack_guard();
2017  return GetCurrentStackPosition() < stack_guard->real_climit();
2018  }
2019 
2020  // Use this to check for interrupt request in C++ code.
2021  bool InterruptRequested() {
2022  StackGuard* stack_guard = isolate_->stack_guard();
2023  return GetCurrentStackPosition() < stack_guard->climit();
2024  }
2025 
2026  // Use this to check for stack-overflow when entering runtime from JS code.
2027  bool JsHasOverflowed(uintptr_t gap = 0) const;
2028 
2029  private:
2030  Isolate* isolate_;
2031 };
2032 
2033 #define STACK_CHECK(isolate, result_value) \
2034  do { \
2035  StackLimitCheck stack_check(isolate); \
2036  if (stack_check.HasOverflowed()) { \
2037  isolate->StackOverflow(); \
2038  return result_value; \
2039  } \
2040  } while (false)
2041 
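STACK_CHECK is the usual guard at the top of recursive C++ runtime paths. A hedged sketch in the style of V8's runtime functions (RecursiveHelper is invented; V8's own code typically returns ReadOnlyRoots(isolate).exception() as the failure value):

    Object* RecursiveHelper(Isolate* isolate) {
      STACK_CHECK(isolate, ReadOnlyRoots(isolate).exception());
      // ... work that may recurse deeply ...
      return ReadOnlyRoots(isolate).undefined_value();
    }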
2042 // A scope intercepts only those interrupts that are part of its
2043 // intercept_mask and does not affect other interrupts.
2044 class InterruptsScope {
2045  public:
2046  enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
2047 
2048  virtual ~InterruptsScope() {
2049  if (mode_ != kNoop) stack_guard_->PopInterruptsScope();
2050  }
2051 
2052  // Find the scope that intercepts this interrupt: the outermost
2053  // PostponeInterruptsScope or the innermost SafeForInterruptsScope,
2054  // if one exists.
2055  // Returns whether the interrupt has been intercepted.
2056  bool Intercept(StackGuard::InterruptFlag flag);
2057 
2058  InterruptsScope(Isolate* isolate, int intercept_mask, Mode mode)
2059  : stack_guard_(isolate->stack_guard()),
2060  intercept_mask_(intercept_mask),
2061  intercepted_flags_(0),
2062  mode_(mode) {
2063  if (mode_ != kNoop) stack_guard_->PushInterruptsScope(this);
2064  }
2065 
2066  private:
2067  StackGuard* stack_guard_;
2068  int intercept_mask_;
2069  int intercepted_flags_;
2070  Mode mode_;
2071  InterruptsScope* prev_;
2072 
2073  friend class StackGuard;
2074 };
2075 
2076 // Support for temporarily postponing interrupts. When the outermost
2077 // postpone scope is left, interrupts are re-enabled and any
2078 // interrupts that occurred while in the scope are taken into
2079 // account.
2080 class PostponeInterruptsScope : public InterruptsScope {
2081  public:
2082  PostponeInterruptsScope(Isolate* isolate,
2083  int intercept_mask = StackGuard::ALL_INTERRUPTS)
2084  : InterruptsScope(isolate, intercept_mask,
2085  InterruptsScope::kPostponeInterrupts) {}
2086  ~PostponeInterruptsScope() override = default;
2087 };
2088 
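An illustrative usage sketch: interrupts (for example termination requests) raised inside the scope are deferred and serviced once the outermost scope unwinds (DoNonReentrantWork is an invented name):

    void DoNonReentrantWork(Isolate* isolate) {
      PostponeInterruptsScope postpone(isolate);
      // ... section that must not observe interrupts ...
    }  // pending interrupts are taken into account here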
2089 // Support for overriding PostponeInterruptsScope. An interrupt is not
2090 // ignored if the innermost scope is a SafeForInterruptsScope, regardless
2091 // of any outer PostponeInterruptsScopes.
2092 class SafeForInterruptsScope : public InterruptsScope {
2093  public:
2094  SafeForInterruptsScope(Isolate* isolate,
2095  int intercept_mask = StackGuard::ALL_INTERRUPTS)
2096  : InterruptsScope(isolate, intercept_mask,
2097  InterruptsScope::kRunInterrupts) {}
2098  ~SafeForInterruptsScope() override = default;
2099 };
2100 
2101 class StackTraceFailureMessage {
2102  public:
2103  explicit StackTraceFailureMessage(Isolate* isolate, void* ptr1 = nullptr,
2104  void* ptr2 = nullptr, void* ptr3 = nullptr,
2105  void* ptr4 = nullptr);
2106 
2107  V8_NOINLINE void Print() volatile;
2108 
2109  static const uintptr_t kStartMarker = 0xdecade30;
2110  static const uintptr_t kEndMarker = 0xdecade31;
2111  static const int kStacktraceBufferSize = 32 * KB;
2112 
2113  uintptr_t start_marker_ = kStartMarker;
2114  void* isolate_;
2115  void* ptr1_;
2116  void* ptr2_;
2117  void* ptr3_;
2118  void* ptr4_;
2119  void* code_objects_[4];
2120  char js_stack_trace_[kStacktraceBufferSize];
2121  uintptr_t end_marker_ = kEndMarker;
2122 };
2123 
2124 } // namespace internal
2125 } // namespace v8
2126 
2127 #endif // V8_ISOLATE_H_