V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
runtime-profiler.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/runtime-profiler.h"

#include "src/assembler.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"

namespace v8 {
namespace internal {

// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;

// The number of ticks required for optimizing a function increases with
// the size of the bytecode. This is in addition to the
// kProfilerTicksBeforeOptimization required for any function.
static const int kBytecodeSizeAllowancePerTick = 1200;

// Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRBytecodeSizeAllowanceBase = 180;

static const int kOSRBytecodeSizeAllowancePerTick = 48;

// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxBytecodeSizeForEarlyOpt = 90;

// Certain functions are simply too big to be worth optimizing.
static const int kMaxBytecodeSizeForOpt = 60 * KB;

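// Worked example of the thresholds above (illustrative, not part of the
// original source): a function with 2400 bytes of bytecode needs
// kProfilerTicksBeforeOptimization + 2400 / kBytecodeSizeAllowancePerTick
// = 2 + 2 = 4 profiler ticks before ShouldOptimize() considers it hot and
// stable, while any function of 90 bytes or more is excluded from the
// early "small function" shortcut.
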
#define OPTIMIZATION_REASON_LIST(V)   \
  V(DoNotOptimize, "do not optimize") \
  V(HotAndStable, "hot and stable")   \
  V(SmallFunction, "small function")

enum class OptimizationReason : uint8_t {
#define OPTIMIZATION_REASON_CONSTANTS(Constant, message) k##Constant,
  OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_CONSTANTS)
#undef OPTIMIZATION_REASON_CONSTANTS
};

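// The X-macro above expands to the following enumerators (shown here for
// illustration):
//
//   enum class OptimizationReason : uint8_t {
//     kDoNotOptimize,
//     kHotAndStable,
//     kSmallFunction,
//   };
//
// Reusing OPTIMIZATION_REASON_LIST for the string table below keeps the
// enumerators and their trace messages in sync by construction.
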
char const* OptimizationReasonToString(OptimizationReason reason) {
  static char const* reasons[] = {
#define OPTIMIZATION_REASON_TEXTS(Constant, message) message,
      OPTIMIZATION_REASON_LIST(OPTIMIZATION_REASON_TEXTS)
#undef OPTIMIZATION_REASON_TEXTS
  };
  size_t const index = static_cast<size_t>(reason);
  DCHECK_LT(index, arraysize(reasons));
  return reasons[index];
}

std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
  return os << OptimizationReasonToString(reason);
}

RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
    : isolate_(isolate), any_ic_changed_(false) {}

static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
                        int* ic_generic_count, int* ic_total_count,
                        int* type_info_percentage, int* generic_percentage) {
  // Harvest vector-ics.
  FeedbackVector* vector = function->feedback_vector();
  vector->ComputeCounts(ic_with_type_info_count, ic_generic_count,
                        ic_total_count);

  if (*ic_total_count > 0) {
    *type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
    *generic_percentage = 100 * *ic_generic_count / *ic_total_count;
  } else {
    *type_info_percentage = 100;  // Compared against lower bound.
    *generic_percentage = 0;      // Compared against upper bound.
  }
}

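// Illustrative numbers (not from the original source): a feedback vector
// with 8 type-feedback ICs and 1 generic IC out of 10 total slots yields
// 80% typeinfo and 10% generic. The 100%/0% defaults for a function with
// no ICs at all make it look maximally stable, so it never fails a
// threshold comparison on this data.
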
static void TraceRecompile(JSFunction* function, const char* reason,
                           const char* type) {
  if (FLAG_trace_opt) {
    PrintF("[marking ");
    function->ShortPrint();
    PrintF(" for %s recompilation, reason: %s", type, reason);
    if (FLAG_type_info_threshold > 0) {
      int typeinfo, generic, total, type_percentage, generic_percentage;
      GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                  &generic_percentage);
      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
             type_percentage);
      PrintF(", generic ICs: %d/%d (%d%%)", generic, total,
             generic_percentage);
    }
    PrintF("]\n");
  }
}

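// With --trace-opt enabled, the calls above print a line of this shape
// (function name and counts are illustrative placeholders):
//
//   [marking <function> for optimized recompilation, reason: hot and stable,
//    ICs with typeinfo: 8/10 (80%), generic ICs: 1/10 (10%)]
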
void RuntimeProfiler::Optimize(JSFunction* function,
                               OptimizationReason reason) {
  DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
  TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
  function->MarkForOptimization(ConcurrencyMode::kConcurrent);
}

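// Note that Optimize() only marks the function: with
// ConcurrencyMode::kConcurrent the actual optimizing compile is picked up
// later, off the main thread, rather than being performed here.
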
void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
                                                int loop_nesting_levels) {
  JSFunction* function = frame->function();
  SharedFunctionInfo* shared = function->shared();
  if (!FLAG_use_osr || !shared->IsUserJavaScript()) {
    return;
  }

  // If the code is not optimizable, don't try OSR.
  if (shared->optimization_disabled()) return;

  // We're using on-stack replacement: Store new loop nesting level in
  // BytecodeArray header so that certain back edges in any interpreter frame
  // for this bytecode will trigger on-stack replacement for that frame.
  if (FLAG_trace_osr) {
    PrintF("[OSR - arming back edges in ");
    function->PrintName();
    PrintF("]\n");
  }

  DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
  int level = frame->GetBytecodeArray()->osr_loop_nesting_level();
  frame->GetBytecodeArray()->set_osr_loop_nesting_level(
      Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}

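// Roughly, the "arming" above works as follows (sketch, not normative):
// each JumpLoop back edge carries the static nesting depth of its loop, and
// the interpreter triggers an OSR check when that depth is covered by the
// osr_loop_nesting_level stored in the BytecodeArray header. Each call here
// raises the level by loop_nesting_levels, saturating at
// AbstractCode::kMaxLoopNestingMarker, so repeated ticks arm progressively
// more deeply nested loops.
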
void RuntimeProfiler::MaybeOptimize(JSFunction* function,
                                    InterpretedFrame* frame) {
  if (function->IsInOptimizationQueue()) {
    if (FLAG_trace_opt_verbose) {
      PrintF("[function ");
      function->PrintName();
      PrintF(" is already in optimization queue]\n");
    }
    return;
  }

  if (FLAG_always_osr) {
    AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
    // Fall through and do a normal optimized compile as well.
  } else if (MaybeOSR(function, frame)) {
    return;
  }

  if (function->shared()->optimization_disabled()) return;

  OptimizationReason reason =
      ShouldOptimize(function, function->shared()->GetBytecodeArray());

  if (reason != OptimizationReason::kDoNotOptimize) {
    Optimize(function, reason);
  }
}

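// Summary of the decision order above: (1) skip functions already in the
// optimization queue; (2) attempt OSR, either unconditionally under
// --always-osr or, via MaybeOSR(), only for functions that are already
// marked or optimized; (3) bail out if optimization is disabled for the
// function; (4) otherwise let ShouldOptimize() pick a reason.
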
bool RuntimeProfiler::MaybeOSR(JSFunction* function, InterpretedFrame* frame) {
  int ticks = function->feedback_vector()->profiler_ticks();
  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
  // than kMaxToplevelSourceSize.

  if (function->IsMarkedForOptimization() ||
      function->IsMarkedForConcurrentOptimization() ||
      function->HasOptimizedCode()) {
    // Attempt OSR if we are still running interpreted code even though the
    // function has long been marked or even already been optimized.
    int64_t allowance =
        kOSRBytecodeSizeAllowanceBase +
        static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
    if (function->shared()->GetBytecodeArray()->length() <= allowance) {
      AttemptOnStackReplacement(frame);
    }
    return true;
  }
  return false;
}

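// Worked example of the OSR allowance (illustrative figures): after 5
// profiler ticks the budget is kOSRBytecodeSizeAllowanceBase +
// 5 * kOSRBytecodeSizeAllowancePerTick = 180 + 240 = 420 bytes of bytecode,
// so the longer a marked function keeps running interpreted, the larger the
// function body that is still eligible for on-stack replacement.
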
OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
                                                   BytecodeArray bytecode) {
  int ticks = function->feedback_vector()->profiler_ticks();
  if (bytecode->length() > kMaxBytecodeSizeForOpt) {
    return OptimizationReason::kDoNotOptimize;
  }

  int ticks_for_optimization =
      kProfilerTicksBeforeOptimization +
      (bytecode->length() / kBytecodeSizeAllowancePerTick);
  if (ticks >= ticks_for_optimization) {
    return OptimizationReason::kHotAndStable;
  } else if (!any_ic_changed_ &&
             bytecode->length() < kMaxBytecodeSizeForEarlyOpt) {
    // If no IC was patched since the last tick and this function is very
    // small, optimistically optimize it now.
    return OptimizationReason::kSmallFunction;
  } else if (FLAG_trace_opt_verbose) {
    PrintF("[not yet optimizing ");
    function->PrintName();
    PrintF(", not enough ticks: %d/%d and ", ticks,
           kProfilerTicksBeforeOptimization);
    if (any_ic_changed_) {
      PrintF("ICs changed]\n");
    } else {
      PrintF(" too large for small function optimization: %d/%d]\n",
             bytecode->length(), kMaxBytecodeSizeForEarlyOpt);
    }
  }
  return OptimizationReason::kDoNotOptimize;
}

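// A minimal standalone sketch of the hot-and-stable rule above (assumed
// simplification: plain ints instead of V8 heap objects):
//
//   bool IsHotAndStable(int ticks, int bytecode_length) {
//     return ticks >= kProfilerTicksBeforeOptimization +
//                         bytecode_length / kBytecodeSizeAllowancePerTick;
//   }
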
void RuntimeProfiler::MarkCandidatesForOptimization() {
  HandleScope scope(isolate_);

  if (!isolate_->use_optimizer()) return;

  DisallowHeapAllocation no_gc;

  // Run through the JavaScript frames and collect them. If we already
  // have a sample of the function, we mark it for optimizations
  // (eagerly or lazily).
  int frame_count = 0;
  int frame_count_limit = FLAG_frame_count;
  for (JavaScriptFrameIterator it(isolate_);
       frame_count++ < frame_count_limit && !it.done(); it.Advance()) {
    JavaScriptFrame* frame = it.frame();
    if (!frame->is_interpreted()) continue;

    JSFunction* function = frame->function();
    DCHECK(function->shared()->is_compiled());
    if (!function->shared()->IsInterpreted()) continue;

    if (!function->has_feedback_vector()) continue;

    MaybeOptimize(function, InterpretedFrame::cast(frame));

    // TODO(leszeks): Move this increment to before the maybe optimize checks,
    // and update the tests to assume the increment has already happened.
    int ticks = function->feedback_vector()->profiler_ticks();
    if (ticks < Smi::kMaxValue) {
      function->feedback_vector()->set_profiler_ticks(ticks + 1);
    }
  }
  any_ic_changed_ = false;
}

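// MarkCandidatesForOptimization() is the profiler's per-tick entry point:
// it walks at most --frame-count interpreted JavaScript frames on the
// current stack, runs the MaybeOptimize() decision for each, bumps that
// function's profiler_ticks counter (saturating at Smi::kMaxValue), and
// finally clears any_ic_changed_ so the "small function" fast path in
// ShouldOptimize() only fires when a whole tick passes without IC movement.
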
}  // namespace internal
}  // namespace v8