V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
cpu-profiler.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/profiler/cpu-profiler.h"
6 
7 #include <unordered_map>
8 #include <utility>
9 
10 #include "src/base/lazy-instance.h"
11 #include "src/base/platform/mutex.h"
12 #include "src/base/template-utils.h"
13 #include "src/debug/debug.h"
14 #include "src/deoptimizer.h"
15 #include "src/frames-inl.h"
16 #include "src/locked-queue-inl.h"
17 #include "src/log-inl.h"
18 #include "src/profiler/cpu-profiler-inl.h"
19 #include "src/vm-state-inl.h"
20 
21 namespace v8 {
22 namespace internal {
23 
// Stack size for the profiler-events processor thread (see Thread::Options
// in the ProfilerEventsProcessor constructor).
static const int kProfilerStackSize = 64 * KB;
25 
// Bridges the platform sampler to the profiler: each time the platform
// sampler fires, captures a TickSample of the interrupted thread's registers
// into the SamplingEventsProcessor's sample buffer.
class CpuSampler : public sampler::Sampler {
 public:
  CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
      : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
        processor_(processor) {}

  // Invoked by the base sampler with the register state of the interrupted
  // thread. Fills a pre-allocated slot obtained from the processor.
  void SampleStack(const v8::RegisterState& regs) override {
    TickSample* sample = processor_->StartTickSample();
    // A null slot means no buffer space is available; the tick is dropped.
    if (sample == nullptr) return;
    Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
    sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
    // Optional bookkeeping (enabled via is_counting_samples_ on the base
    // class): count JS vs. external-state samples that carry a timestamp.
    if (is_counting_samples_ && !sample->timestamp.IsNull()) {
      if (sample->state == JS) ++js_sample_count_;
      if (sample->state == EXTERNAL) ++external_sample_count_;
    }
    // Publish the filled slot back to the processor's queue.
    processor_->FinishTickSample();
  }

 private:
  SamplingEventsProcessor* processor_;  // Not owned.
};
47 
// Base processor thread: owns the code-event queue and the id counters that
// keep tick samples in sync with code events.
ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
                                                 ProfileGenerator* generator)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      running_(1),  // Considered running until StopSynchronously() clears it.
      last_code_event_id_(0),
      last_processed_code_event_id_(0),
      isolate_(isolate) {}
56 
// Sampling processor: adds a CpuSampler that fires every |period|.
SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
                                                 ProfileGenerator* generator,
                                                 base::TimeDelta period)
    : ProfilerEventsProcessor(isolate, generator),
      sampler_(new CpuSampler(isolate, this)),
      period_(period) {
  // Activates the sampler; balanced by DecreaseProfilingDepth() in the dtor.
  sampler_->IncreaseProfilingDepth();
}
65 
SamplingEventsProcessor::~SamplingEventsProcessor() {
  // Undo the IncreaseProfilingDepth() from the constructor, then detach the
  // sampler from the platform's sampler registry if it was registered.
  sampler_->DecreaseProfilingDepth();
  sampler_->UnregisterIfRegistered();
}
70 
// Out-of-line so the vtable/anchor lives in this translation unit.
ProfilerEventsProcessor::~ProfilerEventsProcessor() = default;
72 
// Queues a code event for the processor thread, stamping it with a
// monotonically increasing order id so tick samples can be matched against
// the code state that was current when they were taken.
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = ++last_code_event_id_;
  events_buffer_.Enqueue(event);
}
77 
// Records a synthetic tick at a deoptimization site. The register state is
// reconstructed from the saved C-entry frame pointer and the reported
// fp-to-sp delta, with |from| as the pc.
void ProfilerEventsProcessor::AddDeoptStack(Address from, int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
  regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
  regs.fp = reinterpret_cast<void*>(fp);
  regs.pc = reinterpret_cast<void*>(from);
  // kSkipCEntryFrame: the C entry frame is an artifact of reaching the
  // deoptimizer, not part of the sampled JS stack. update_stats=false.
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, false,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}
89 
// Records a synthetic tick for the current VM stack (used e.g. when
// profiling starts or when an explicit sample is requested).
void ProfilerEventsProcessor::AddCurrentStack(bool update_stats) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  StackFrameIterator it(isolate_);
  // If there is no frame (e.g. pure C++ stack), regs keeps its default
  // state and the sample carries no stack.
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = reinterpret_cast<void*>(frame->sp());
    regs.fp = reinterpret_cast<void*>(frame->fp());
    regs.pc = reinterpret_cast<void*>(frame->pc());
  }
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}
104 
// Queues an externally produced TickSample, tagged with the current code
// event id so it is processed after all code events it may reference.
void ProfilerEventsProcessor::AddSample(TickSample sample) {
  TickSampleEventRecord record(last_code_event_id_);
  record.sample = sample;
  ticks_from_vm_buffer_.Enqueue(record);
}
110 
void ProfilerEventsProcessor::StopSynchronously() {
  // Atomically clear the running flag; if it was already 0, another caller
  // has stopped (or is stopping) the thread, so do not Join() twice.
  if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
  Join();  // Wait for Run() to drain remaining events and exit.
}
115 
116 
// Dequeues one code event, applies it to the generator's code map, and
// advances last_processed_code_event_id_. Returns false only when the
// queue is empty; unknown record types are skipped (still returns true).
bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    switch (record.generic.type) {
// Expands to one case per code-event type, dispatching to the matching
// record member's UpdateCodeMap().
#define PROFILER_TYPE_CASE(type, clss)                        \
      case CodeEventRecord::type:                             \
        record.clss##_.UpdateCodeMap(generator_->code_map()); \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}
136 
// Entry point for code events coming from the listener: forwards them to
// the processor thread's queue. Deopt events additionally record a
// synthetic tick at the deopt site.
void ProfilerEventsProcessor::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::CODE_CREATION:
    case CodeEventRecord::CODE_MOVE:
    case CodeEventRecord::CODE_DISABLE_OPT:
      Enqueue(evt_rec);
      break;
    case CodeEventRecord::CODE_DEOPT: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      // Copy pc/delta before Enqueue: the deopt stack must be recorded
      // after the deopt event itself has been queued, so the tick sees the
      // post-deopt code state.
      Address pc = rec->pc;
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      Enqueue(evt_rec);
      AddDeoptStack(pc, fp_to_sp_delta);
      break;
    }
    case CodeEventRecord::NONE:
    case CodeEventRecord::REPORT_BUILTIN:
      // These types never arrive through this handler (builtin reports are
      // enqueued directly; see LogBuiltins()).
      UNREACHABLE();
  }
}
158 
// Consumes at most one tick sample. VM-generated ticks
// (ticks_from_vm_buffer_) take priority over sampler ticks (ticks_buffer_).
// A tick is only consumed when its order stamp equals
// last_processed_code_event_id_, i.e. all code events it may reference have
// already been applied to the code map; otherwise the caller is told to
// process the next code event first.
ProfilerEventsProcessor::SampleProcessingResult
SamplingEventsProcessor::ProcessOneSample() {
  TickSampleEventRecord record1;
  if (ticks_from_vm_buffer_.Peek(&record1) &&
      (record1.order == last_processed_code_event_id_)) {
    TickSampleEventRecord record;
    ticks_from_vm_buffer_.Dequeue(&record);
    generator_->RecordTickSample(record.sample);
    return OneSampleProcessed;
  }

  const TickSampleEventRecord* record = ticks_buffer_.Peek();
  if (record == nullptr) {
    // Both queues empty -> nothing to do; only the VM queue pending ->
    // its head is waiting on a newer code event.
    if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
    return FoundSampleForNextCodeEvent;
  }
  if (record->order != last_processed_code_event_id_) {
    return FoundSampleForNextCodeEvent;
  }
  generator_->RecordTickSample(record->sample);
  ticks_buffer_.Remove();
  return OneSampleProcessed;
}
182 
// Main loop of the processor thread: alternates between draining queued
// ticks/code events and triggering the next stack sample, aiming for one
// sample per period_. After running_ is cleared, drains everything left.
void SamplingEventsProcessor::Run() {
  while (!!base::Relaxed_Load(&running_)) {
    base::TimeTicks nextSampleTime =
        base::TimeTicks::HighResolutionNow() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to do next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ are
        // processed, proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::HighResolutionNow();
    } while (result != NoSamplesInQueue && now < nextSampleTime);

    if (nextSampleTime > now) {
#if V8_OS_WIN
      if (nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
        // Do not use Sleep on Windows as it is very imprecise, with up to 16ms
        // jitter, which is unacceptable for short profile intervals.
        while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
        }
      } else  // NOLINT
#endif
      {
        base::OS::Sleep(nextSampleTime - now);
      }
    }

    // Schedule next sample.
    sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}
227 
// Allocates with the class's declared alignment, which may exceed what a
// plain global operator new guarantees; paired with AlignedFree in
// operator delete.
void* SamplingEventsProcessor::operator new(size_t size) {
  return AlignedAlloc(size, V8_ALIGNOF(SamplingEventsProcessor));
}
231 
// Releases storage obtained via the AlignedAlloc-based operator new above.
void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }
233 
234 int CpuProfiler::GetProfilesCount() {
235  // The count of profiles doesn't depend on a security token.
236  return static_cast<int>(profiles_->profiles()->size());
237 }
238 
239 
240 CpuProfile* CpuProfiler::GetProfile(int index) {
241  return profiles_->profiles()->at(index).get();
242 }
243 
244 
// Drops every collected profile. Sampling is stopped first so no thread is
// writing into the collection while it is replaced.
void CpuProfiler::DeleteAllProfiles() {
  if (is_profiling_) StopProcessor();
  ResetProfiles();
}
249 
250 
251 void CpuProfiler::DeleteProfile(CpuProfile* profile) {
252  profiles_->RemoveProfile(profile);
253  if (profiles_->profiles()->empty() && !is_profiling_) {
254  // If this was the last profile, clean up all accessory data as well.
255  ResetProfiles();
256  }
257 }
258 
namespace {

// Process-wide registry mapping each Isolate to the CpuProfilers attached
// to it. Lets the static CpuProfiler::CollectSample(isolate) entry point
// fan a sample request out to every profiler on that isolate. All access
// is serialized by mutex_.
class CpuProfilersManager {
 public:
  void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    profilers_.emplace(isolate, profiler);
  }

  // Removes exactly one (isolate, profiler) pair; a matching AddProfiler
  // call must have happened, otherwise this hits UNREACHABLE().
  void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      if (it->second != profiler) continue;
      profilers_.erase(it);
      return;
    }
    UNREACHABLE();
  }

  // Requests a stack sample from every profiler registered for |isolate|.
  void CallCollectSample(Isolate* isolate) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      it->second->CollectSample();
    }
  }

 private:
  std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;  // Guarded by mutex_.
  base::Mutex mutex_;
};

// Lazily initialized singleton shared by all isolates in the process.
base::LazyInstance<CpuProfilersManager>::type g_profilers_manager =
    LAZY_INSTANCE_INITIALIZER;

}  // namespace
296 
// Production constructor: delegates to the testing constructor with a fresh
// profiles collection and no injected generator/processor (created lazily
// in StartProcessorIfNotStarted()).
CpuProfiler::CpuProfiler(Isolate* isolate)
    : CpuProfiler(isolate, new CpuProfilesCollection(isolate), nullptr,
                  nullptr) {}
300 
// Testing constructor: takes ownership of the injected collection,
// generator, and processor, and registers this profiler with the global
// per-isolate manager.
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
                         ProfileGenerator* test_generator,
                         ProfilerEventsProcessor* test_processor)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(test_profiles),
      generator_(test_generator),
      processor_(test_processor),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
  g_profilers_manager.Pointer()->AddProfiler(isolate, this);
}
314 
CpuProfiler::~CpuProfiler() {
  // Callers must stop profiling before destruction; the processor thread
  // would otherwise outlive this object.
  DCHECK(!is_profiling_);
  g_profilers_manager.Pointer()->RemoveProfiler(isolate_, this);
}
319 
// Sets the period used for the next profiling session. Only legal while no
// session is active (the running processor caches the interval).
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
  DCHECK(!is_profiling_);
  sampling_interval_ = value;
}
324 
// Replaces the profiles collection and drops the listener and generator;
// both are recreated on the next StartProcessorIfNotStarted().
void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
  profiler_listener_.reset();
  generator_.reset();
}
331 
// Seeds the code map with one entry per runtime-call-stats counter, keyed
// by the counter's address, so ticks attributed to counters resolve to a
// named "native V8Runtime" entry.
void CpuProfiler::CreateEntriesForRuntimeCallStats() {
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  CodeMap* code_map = generator_->code_map();
  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
    RuntimeCallCounter* counter = rcs->GetCounter(i);
    DCHECK(counter->name());
    // NOTE(review): raw new — ownership presumably transfers to code_map
    // via AddCode; confirm CodeMap's ownership contract before changing.
    auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
                               "native V8Runtime");
    code_map->AddCode(reinterpret_cast<Address>(counter), entry, 1);
  }
}
343 
// static
// Fans a sample request out to every profiler registered for |isolate|
// through the process-wide manager.
void CpuProfiler::CollectSample(Isolate* isolate) {
  g_profilers_manager.Pointer()->CallCollectSample(isolate);
}
348 
349 void CpuProfiler::CollectSample() {
350  if (processor_) {
351  processor_->AddCurrentStack();
352  }
353 }
354 
// Begins a profile named |title|. The processor thread is only started when
// the collection accepts the new profile (e.g. the title is not a duplicate
// and the profile limit is not exceeded).
void CpuProfiler::StartProfiling(const char* title, bool record_samples,
                                 ProfilingMode mode) {
  if (profiles_->StartProfiling(title, record_samples, mode)) {
    TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
    StartProcessorIfNotStarted();
  }
}
362 
// Overload taking a V8 String title; also notes profiler usage in the debug
// feature tracker.
void CpuProfiler::StartProfiling(String title, bool record_samples,
                                 ProfilingMode mode) {
  StartProfiling(profiles_->GetName(title), record_samples, mode);
  isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
}
368 
// Spins up the sampling machinery for a new session, or just records the
// current stack if a processor is already running. The setup order below
// (generator -> processor -> listener -> logging existing code -> start
// thread) is deliberate; reordering would lose or misattribute events.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack();
    return;
  }
  Logger* logger = isolate_->logger();
  // Disable logging when using the new implementation.
  saved_is_logging_ = logger->is_logging_;
  logger->is_logging_ = false;

  bool codemap_needs_initialization = false;
  if (!generator_) {
    generator_.reset(new ProfileGenerator(profiles_.get()));
    codemap_needs_initialization = true;
    CreateEntriesForRuntimeCallStats();
  }
  processor_.reset(new SamplingEventsProcessor(isolate_, generator_.get(),
                                               sampling_interval_));
  if (!profiler_listener_) {
    profiler_listener_.reset(new ProfilerListener(isolate_, processor_.get()));
  }
  logger->AddCodeEventListener(profiler_listener_.get());
  is_profiling_ = true;
  isolate_->set_is_profiling(true);
  // Enumerate stuff we already have in the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());
  if (codemap_needs_initialization) {
    if (!FLAG_prof_browser_mode) {
      logger->LogCodeObjects();
    }
    logger->LogCompiledFunctions();
    logger->LogAccessorCallbacks();
    LogBuiltins();
  }
  // Enable stack sampling.
  processor_->AddCurrentStack();
  processor_->StartSynchronously();
}
407 
// Finishes the profile named |title| and returns it (owned by the
// collection), or nullptr if no session is active. The processor is stopped
// first — but only when this is the last outstanding profile.
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  if (!is_profiling_) return nullptr;
  StopProcessorIfLastProfile(title);
  return profiles_->StopProfiling(title);
}
413 
414 CpuProfile* CpuProfiler::StopProfiling(String title) {
415  return StopProfiling(profiles_->GetName(title));
416 }
417 
418 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
419  if (!profiles_->IsLastProfile(title)) return;
420  StopProcessor();
421 }
422 
// Tears down the sampling session: clears the profiling flags, detaches the
// code-event listener, then stops and destroys the processor thread before
// restoring the logger state saved in StartProcessorIfNotStarted().
void CpuProfiler::StopProcessor() {
  Logger* logger = isolate_->logger();
  is_profiling_ = false;
  isolate_->set_is_profiling(false);
  // Detach the listener before stopping so no new events are queued while
  // the processor drains.
  logger->RemoveCodeEventListener(profiler_listener_.get());
  processor_->StopSynchronously();
  processor_.reset();
  logger->is_logging_ = saved_is_logging_;
}
432 
433 
// Enqueues one REPORT_BUILTIN record per builtin directly onto the
// processor's code-event queue (bypassing CodeEventHandler, which treats
// REPORT_BUILTIN as unreachable), so ticks landing in builtin code can be
// resolved by instruction start address.
void CpuProfiler::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (int i = 0; i < Builtins::builtin_count; i++) {
    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Builtins::Name id = static_cast<Builtins::Name>(i);
    rec->instruction_start = builtins->builtin(id)->InstructionStart();
    rec->builtin_id = id;
    processor_->Enqueue(evt_rec);
  }
}
446 
447 } // namespace internal
448 } // namespace v8
Definition: libplatform.h:13