V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
optimizing-compile-dispatcher.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
#include "src/compiler.h"
#include "src/counters.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

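// Disposes a compilation job. When restore_function_code is true, the
// function is reset to its unoptimized code from the SharedFunctionInfo and
// any pending optimization marker is cleared.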
void DisposeCompilationJob(OptimizedCompilationJob* job,
                           bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->compilation_info()->closure();
    function->set_code(function->shared()->GetCode());
    if (function->IsInOptimizationQueue()) {
      function->ClearOptimizationMarker();
    }
    // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
    // allocation, but we probably shouldn't call set_code either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureFeedbackVector(function);
  }
  delete job;
}

}  // namespace

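// Background task that pulls one job off the dispatcher's input queue and
// runs its parallel compilation phase on a worker thread. Each live task
// holds a reference on the dispatcher's ref_count_ so that Flush() and
// Stop() can wait for all in-flight tasks to drain.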
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
 public:
  explicit CompileTask(Isolate* isolate,
                       OptimizingCompileDispatcher* dispatcher)
      : CancelableTask(isolate),
        isolate_(isolate),
        worker_thread_runtime_call_stats_(
            isolate->counters()->worker_thread_runtime_call_stats()),
        dispatcher_(dispatcher) {
    base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
    ++dispatcher_->ref_count_;
  }

  ~CompileTask() override = default;

 private:
  // v8::Task overrides.
  void RunInternal() override {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    {
      WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
          worker_thread_runtime_call_stats_);
      RuntimeCallTimerScope runtimeTimer(
          runtime_call_stats_scope.Get(),
          RuntimeCallCounterId::kRecompileConcurrent);

      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.RecompileConcurrent");

      if (dispatcher_->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher_->recompilation_delay_));
      }

      dispatcher_->CompileNext(dispatcher_->NextInput(true));
    }
    {
      base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
      if (--dispatcher_->ref_count_ == 0) {
        dispatcher_->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;
  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
  OptimizingCompileDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

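// The dispatcher must only be destroyed once no CompileTask is in flight and
// the input queue is empty.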
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

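// Pops the next job from the front of the circular input queue. If the
// dispatcher is flushing and check_if_flushing is set, the job is disposed
// (restoring the unoptimized code) and nullptr is returned instead.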
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::MutexGuard access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return nullptr;
  OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (mode_ == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return nullptr;
    }
  }
  return job;
}

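// Runs the job's background compilation phase, then pushes the finished job
// onto the output queue and asks the main thread to install it via a
// stack-guard interrupt.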
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR. Simply continue.
  CompilationJob::Status status = job->ExecuteJob();
  USE(status);  // Prevent an unused-variable error.

  // The function may have already been optimized by OSR. Simply continue.
  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::MutexGuard access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}

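// Drains the output queue, disposing every queued job instead of installing
// it.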
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}

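// Discards all queued jobs. In the non-blocking case only the queues are
// emptied; otherwise the dispatcher switches to FLUSH mode and waits for all
// in-flight CompileTasks to finish before draining the output queue.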
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  if (blocking_behavior == BlockingBehavior::kDontBlock) {
    if (FLAG_block_concurrent_recompilation) Unblock();
    base::MutexGuard access_input_queue_(&input_queue_mutex_);
    while (input_queue_length_ > 0) {
      OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
      DCHECK_NOT_NULL(job);
      input_queue_shift_ = InputQueueIndex(1);
      input_queue_length_--;
      DisposeCompilationJob(job, true);
    }
    FlushOutputQueue(true);
    if (FLAG_trace_concurrent_recompilation) {
      PrintF(" ** Flushed concurrent recompilation queues (not blocking).\n");
    }
    return;
  }
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF(" ** Flushed concurrent recompilation queues.\n");
  }
}

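// Shuts the dispatcher down: waits for in-flight tasks, then either compiles
// and installs the remaining queued jobs (when an artificial recompilation
// delay is configured) or simply discards them.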
void OptimizingCompileDispatcher::Stop() {
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}

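// Runs on the main thread to finalize every job in the output queue,
// installing the optimized code unless the function has already received
// optimized code (e.g. via OSR) in the meantime.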
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    OptimizedCompilationInfo* info = job->compilation_info();
    Handle<JSFunction> function(*info->closure(), isolate_);
    if (function->HasOptimizedCode()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF(" ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeOptimizedCompilationJob(job, isolate_);
    }
  }
}

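// Appends a job to the input queue and schedules a CompileTask on a worker
// thread, unless --block-concurrent-recompilation is set, in which case the
// job is held back until Unblock() is called.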
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::MutexGuard access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<CompileTask>(isolate_, this));
  }
}

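// Schedules one CompileTask for each job that was held back by
// --block-concurrent-recompilation.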
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<CompileTask>(isolate_, this));
    blocked_jobs_--;
  }
}

}  // namespace internal
}  // namespace v8