5 #include "src/profiler/cpu-profiler.h" 7 #include <unordered_map> 10 #include "src/base/lazy-instance.h" 11 #include "src/base/platform/mutex.h" 12 #include "src/base/template-utils.h" 13 #include "src/debug/debug.h" 14 #include "src/deoptimizer.h" 15 #include "src/frames-inl.h" 16 #include "src/locked-queue-inl.h" 17 #include "src/log-inl.h" 18 #include "src/profiler/cpu-profiler-inl.h" 19 #include "src/vm-state-inl.h" 24 static const int kProfilerStackSize = 64 * KB;
// NOTE(review): extraction artifact — the leading integers below are residue
// of the original file's line numbering, and several original lines (the
// CpuSampler class header, the SampleStack() signature, an `isolate` local,
// and closing braces) were dropped from this chunk. Code is left
// byte-identical; comments only.
//
// Fragment: tail of the CpuSampler constructor initializer list, then the
// body of its per-tick sampling callback.
30 processor_(processor) {}
// Reserve a sample slot from the owning processor; a null result means no
// slot is available right now, so this tick is dropped rather than blocking.
33 TickSample* sample = processor_->StartTickSample();
34 if (sample ==
nullptr)
return;
// Fill the sample from the captured register state, including the C entry
// frame. (`isolate` is defined on a line missing from this chunk — TODO
// confirm against the original file.)
36 sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame,
true);
// When sample counting is enabled (presumably for tests — confirm), tally
// samples taken in JS vs. EXTERNAL VM state; only timestamped samples count.
37 if (is_counting_samples_ && !sample->timestamp.IsNull()) {
38 if (sample->state == JS) ++js_sample_count_;
39 if (sample->state == EXTERNAL) ++external_sample_count_;
// Publish the filled slot back to the processor.
41 processor_->FinishTickSample();
// Constructor for the code-event processing thread. Runs on a dedicated
// OS thread named "v8:ProfEvntProc" with a fixed stack size
// (kProfilerStackSize). Both event-id counters start at zero so the first
// enqueued code event gets order 1.
// NOTE(review): extraction dropped some original lines here (at least the
// second parameter line and trailing initializers such as the running flag
// and isolate_ — verify against the original file). Leading integers are
// line-number residue; code kept byte-identical.
48 ProfilerEventsProcessor::ProfilerEventsProcessor(
Isolate* isolate,
50 : Thread(Thread::Options(
"v8:ProfEvntProc", kProfilerStackSize)),
51 generator_(generator),
53 last_code_event_id_(0),
54 last_processed_code_event_id_(0),
// Constructor for the sampling flavor of the events processor: chains to the
// base class, owns a CpuSampler bound to this processor, and bumps the
// sampler's profiling depth so sampling is active for this processor's
// lifetime (undone in the destructor).
// NOTE(review): extraction dropped the `period` parameter/initializer lines
// and the closing brace — confirm against the original file. Leading
// integers are line-number residue; code kept byte-identical.
57 SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
58 ProfileGenerator* generator,
60 : ProfilerEventsProcessor(isolate, generator),
61 sampler_(new CpuSampler(isolate, this)),
63 sampler_->IncreaseProfilingDepth();
66 SamplingEventsProcessor::~SamplingEventsProcessor() {
67 sampler_->DecreaseProfilingDepth();
68 sampler_->UnregisterIfRegistered();
71 ProfilerEventsProcessor::~ProfilerEventsProcessor() =
default;
73 void ProfilerEventsProcessor::Enqueue(
const CodeEventsContainer& event) {
74 event.generic.order = ++last_code_event_id_;
75 events_buffer_.Enqueue(event);
// Records a synthetic stack sample at the point of a deoptimization.
// Reconstructs a register state from the current C entry frame pointer:
// sp is derived by backing off the deopt's fp-to-sp delta, and pc is the
// address we deoptimized from. The sample skips the C entry frame since the
// registers are synthesized, not captured.
// NOTE(review): extraction dropped the `regs` declaration line, the tail of
// the Init() argument list, and closing braces — confirm against the
// original file. Leading integers are line-number residue; code kept
// byte-identical.
78 void ProfilerEventsProcessor::AddDeoptStack(Address from,
int fp_to_sp_delta) {
79 TickSampleEventRecord record(last_code_event_id_);
81 Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
82 regs.sp =
reinterpret_cast<void*
>(fp - fp_to_sp_delta);
83 regs.fp =
reinterpret_cast<void*
>(fp);
84 regs.pc =
reinterpret_cast<void*
>(from);
85 record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame,
false,
// The record carries last_code_event_id_ so it is only consumed once all
// preceding code events have been applied to the code map.
87 ticks_from_vm_buffer_.Enqueue(record);
// Records a sample of the current VM stack (used e.g. when profiling starts
// or when an explicit CollectSample is requested). Builds a register state
// from the topmost stack frame via StackFrameIterator, then enqueues the
// sample on the VM-ticks queue.
// NOTE(review): extraction dropped the `regs` declaration, an apparent
// iterator-done guard, the Init() argument tail, and closing braces —
// confirm against the original file. Leading integers are line-number
// residue; code kept byte-identical.
90 void ProfilerEventsProcessor::AddCurrentStack(
bool update_stats) {
91 TickSampleEventRecord record(last_code_event_id_);
93 StackFrameIterator it(isolate_);
95 StackFrame* frame = it.frame();
96 regs.sp =
reinterpret_cast<void*
>(frame->sp());
97 regs.fp =
reinterpret_cast<void*
>(frame->fp());
98 regs.pc =
reinterpret_cast<void*
>(frame->pc());
100 record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
102 ticks_from_vm_buffer_.Enqueue(record);
105 void ProfilerEventsProcessor::AddSample(TickSample sample) {
106 TickSampleEventRecord record(last_code_event_id_);
107 record.sample = sample;
108 ticks_from_vm_buffer_.Enqueue(record);
// Requests the processor thread to stop and (per the name) waits for it.
// The atomic exchange both clears the running flag and detects a second
// call: if the flag was already 0 the thread is stopping/stopped, so return.
// NOTE(review): the join/wake-up tail of this function was dropped by
// extraction — confirm against the original file. Leading integers are
// line-number residue; code kept byte-identical.
111 void ProfilerEventsProcessor::StopSynchronously() {
112 if (!base::Relaxed_AtomicExchange(&running_, 0))
return;
// Dequeues and applies one code event to the generator's code map.
// Returns true when an event was processed (the macro-generated cases fall
// out of the switch and record the event's order as last processed);
// presumably returns false when the queue is empty — that tail was dropped
// by extraction, confirm against the original file.
// NOTE(review): the X-macro block below was fused onto single lines by
// extraction (the `\ 122` etc. are line-number residue inside a multi-line
// #define). Code kept byte-identical.
117 bool ProfilerEventsProcessor::ProcessCodeEvent() {
118 CodeEventsContainer record;
119 if (events_buffer_.Dequeue(&record)) {
// Dispatch on the record type; each case calls UpdateCodeMap on the
// corresponding record member, generated from CODE_EVENTS_TYPE_LIST.
120 switch (record.generic.type) {
121 #define PROFILER_TYPE_CASE(type, clss) \ 122 case CodeEventRecord::type: \ 123 record.clss##_.UpdateCodeMap(generator_->code_map()); \ 126 CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
128 #undef PROFILER_TYPE_CASE 129 default:
return true;
// Remember how far we have processed so queued samples can be released.
131 last_processed_code_event_id_ = record.generic.order;
// Entry point for code events coming from the logger/listener side.
// Creation/move/disable-opt events are presumably just enqueued (that line
// was dropped by extraction — confirm); a deopt event additionally records
// a synthetic stack at the deopt site *after* the event is queued, so the
// sample carries the deopt event's id. NONE/REPORT_BUILTIN are not expected
// here (the dropped tail likely asserts — confirm against the original
// file). Leading integers are line-number residue; code kept byte-identical.
137 void ProfilerEventsProcessor::CodeEventHandler(
138 const CodeEventsContainer& evt_rec) {
139 switch (evt_rec.generic.type) {
140 case CodeEventRecord::CODE_CREATION:
141 case CodeEventRecord::CODE_MOVE:
142 case CodeEventRecord::CODE_DISABLE_OPT:
145 case CodeEventRecord::CODE_DEOPT: {
146 const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
147 Address pc = rec->pc;
148 int fp_to_sp_delta = rec->fp_to_sp_delta;
// Capture the stack that led to this deoptimization.
150 AddDeoptStack(pc, fp_to_sp_delta);
153 case CodeEventRecord::NONE:
154 case CodeEventRecord::REPORT_BUILTIN:
159 ProfilerEventsProcessor::SampleProcessingResult
160 SamplingEventsProcessor::ProcessOneSample() {
161 TickSampleEventRecord record1;
162 if (ticks_from_vm_buffer_.Peek(&record1) &&
163 (record1.order == last_processed_code_event_id_)) {
164 TickSampleEventRecord record;
165 ticks_from_vm_buffer_.Dequeue(&record);
166 generator_->RecordTickSample(record.sample);
167 return OneSampleProcessed;
170 const TickSampleEventRecord* record = ticks_buffer_.Peek();
171 if (record ==
nullptr) {
172 if (ticks_from_vm_buffer_.IsEmpty())
return NoSamplesInQueue;
173 return FoundSampleForNextCodeEvent;
175 if (record->order != last_processed_code_event_id_) {
176 return FoundSampleForNextCodeEvent;
178 generator_->RecordTickSample(record->sample);
179 ticks_buffer_.Remove();
180 return OneSampleProcessed;
// Main loop of the sampling thread. Each iteration: drain code events and
// samples until the next sample is due, then wait out the remainder of the
// period (busy-spinning when the remainder is short, sleeping otherwise —
// NOTE(review): the comparison against 100ms reads inverted for that intent;
// several surrounding lines were dropped by extraction, so confirm against
// the original file before judging). After the running flag clears, the
// tails below drain remaining samples and code events. Leading integers are
// line-number residue; code kept byte-identical.
183 void SamplingEventsProcessor::Run() {
184 while (!!base::Relaxed_Load(&running_)) {
// Deadline for taking the next sample.
185 base::TimeTicks nextSampleTime =
186 base::TimeTicks::HighResolutionNow() + period_;
188 SampleProcessingResult result;
// Process queued samples until the queue empties or the deadline hits.
192 result = ProcessOneSample();
193 if (result == FoundSampleForNextCodeEvent) {
198 now = base::TimeTicks::HighResolutionNow();
199 }
while (result != NoSamplesInQueue && now < nextSampleTime);
// Wait for the rest of the sampling period, if any remains.
201 if (nextSampleTime > now) {
203 if (nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
// Spin-wait branch for fine-grained timing.
206 while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
211 base::OS::Sleep(nextSampleTime - now);
// Take the next sample.
216 sampler_->DoSample();
// Shutdown: drain all remaining samples, then all remaining code events.
221 SampleProcessingResult result;
223 result = ProcessOneSample();
224 }
while (result == OneSampleProcessed);
225 }
while (ProcessCodeEvent());
228 void* SamplingEventsProcessor::operator
new(
size_t size) {
229 return AlignedAlloc(size, V8_ALIGNOF(SamplingEventsProcessor));
232 void SamplingEventsProcessor::operator
delete(
void* ptr) { AlignedFree(ptr); }
// Number of profiles currently held by the collection.
// NOTE(review): one original line (235) between these two was dropped by
// extraction — confirm against the original file. Leading integers are
// line-number residue; code kept byte-identical.
234 int CpuProfiler::GetProfilesCount() {
236 return static_cast<int>(profiles_->profiles()->size());
240 CpuProfile* CpuProfiler::GetProfile(
int index) {
241 return profiles_->profiles()->at(index).get();
// Drops every collected profile; the processor is stopped first if a
// profiling session is active.
// NOTE(review): the tail of this function (presumably resetting the
// collection) was dropped by extraction — confirm against the original
// file. Leading integers are line-number residue; code kept byte-identical.
245 void CpuProfiler::DeleteAllProfiles() {
246 if (is_profiling_) StopProcessor();
// Removes a single profile from the collection; when that leaves the
// collection empty and no session is running, the body of the `if`
// (dropped by extraction — presumably tears down per-session state,
// confirm against the original file) runs. Leading integers are line-number
// residue; code kept byte-identical.
251 void CpuProfiler::DeleteProfile(CpuProfile* profile) {
252 profiles_->RemoveProfile(profile);
253 if (profiles_->profiles()->empty() && !is_profiling_) {
// Process-wide registry mapping isolates to their active CpuProfilers, so a
// sample request addressed to an isolate can be fanned out to every profiler
// attached to it. All operations take the same mutex.
// NOTE(review): access specifiers, the mutex member declaration, loop
// braces, and the class's closing brace were dropped by extraction —
// confirm against the original file. Leading integers are line-number
// residue; code kept byte-identical.
261 class CpuProfilersManager {
263 void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
264 base::MutexGuard lock(&mutex_);
265 profilers_.emplace(isolate, profiler);
// Removes one (isolate, profiler) pair; erasing inside the loop is safe
// because the function returns right after (dropped tail — presumably a
// return/break follows the erase, confirm against the original file).
268 void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
269 base::MutexGuard lock(&mutex_);
270 auto range = profilers_.equal_range(isolate);
271 for (
auto it = range.first; it != range.second; ++it) {
272 if (it->second != profiler)
continue;
273 profilers_.erase(it);
// Asks every profiler registered for |isolate| to take a sample now.
279 void CallCollectSample(Isolate* isolate) {
280 base::MutexGuard lock(&mutex_);
281 auto range = profilers_.equal_range(isolate);
282 for (
auto it = range.first; it != range.second; ++it) {
283 it->second->CollectSample();
// Multimap: one isolate may host several concurrent profilers.
288 std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;
// Lazily constructed process-wide singleton holding the profiler registry.
292 base::LazyInstance<CpuProfilersManager>::type g_profilers_manager =
293 LAZY_INSTANCE_INITIALIZER;
// Public constructor: delegates to the testing constructor with a fresh
// profiles collection and null generator/processor (the trailing nullptr
// argument and closing of the delegation were dropped by extraction —
// confirm against the original file). Leading integers are line-number
// residue; code kept byte-identical.
297 CpuProfiler::CpuProfiler(Isolate* isolate)
298 : CpuProfiler(isolate, new CpuProfilesCollection(isolate), nullptr,
// Designated constructor, also used by tests to inject a profiles
// collection, generator, and processor. Takes ownership of |test_profiles|
// (stored in a smart member, judging by profiles_.reset elsewhere in this
// file). The sampling interval defaults from the FLAG_cpu_profiler_sampling_interval
// flag; the profiler registers itself with the global manager last.
// NOTE(review): the first initializer line (presumably isolate_) and the
// constructor's closing brace were dropped by extraction — confirm against
// the original file. Leading integers are line-number residue; code kept
// byte-identical.
301 CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
302 ProfileGenerator* test_generator,
303 ProfilerEventsProcessor* test_processor)
305 sampling_interval_(base::TimeDelta::FromMicroseconds(
306 FLAG_cpu_profiler_sampling_interval)),
307 profiles_(test_profiles),
308 generator_(test_generator),
309 processor_(test_processor),
310 is_profiling_(false) {
// Back-pointer so the collection can reach its owning profiler.
311 profiles_->set_cpu_profiler(
this);
312 g_profilers_manager.Pointer()->AddProfiler(isolate,
this);
315 CpuProfiler::~CpuProfiler() {
316 DCHECK(!is_profiling_);
317 g_profilers_manager.Pointer()->RemoveProfiler(isolate_,
this);
320 void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
321 DCHECK(!is_profiling_);
322 sampling_interval_ = value;
// Replaces the profiles collection with a fresh one and drops the listener;
// further teardown lines (dropped by extraction — confirm against the
// original file) presumably reset the generator as well. Leading integers
// are line-number residue; code kept byte-identical.
325 void CpuProfiler::ResetProfiles() {
326 profiles_.reset(
new CpuProfilesCollection(isolate_));
// Re-establish the back-pointer on the new collection.
327 profiles_->set_cpu_profiler(
this);
328 profiler_listener_.reset();
// Seeds the code map with one synthetic CodeEntry per runtime-call-stats
// counter, keyed by the counter object's address (size 1), so RCS counters
// can show up as functions in profiles.
// NOTE(review): the tail of the CodeEntry constructor argument list and the
// loop's closing braces were dropped by extraction — confirm against the
// original file. Leading integers are line-number residue; code kept
// byte-identical.
332 void CpuProfiler::CreateEntriesForRuntimeCallStats() {
333 RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
334 CodeMap* code_map = generator_->code_map();
335 for (
int i = 0;
i < RuntimeCallStats::kNumberOfCounters; ++
i) {
336 RuntimeCallCounter* counter = rcs->GetCounter(
i);
337 DCHECK(counter->name());
338 auto entry =
new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
// The counter's address doubles as the synthetic code "address".
340 code_map->AddCode(reinterpret_cast<Address>(counter), entry, 1);
345 void CpuProfiler::CollectSample(Isolate* isolate) {
346 g_profilers_manager.Pointer()->CallCollectSample(isolate);
// Takes an immediate sample of the current stack via the processor.
// NOTE(review): a guard line (presumably `if (processor_)`) and the closing
// brace were dropped by extraction — confirm against the original file.
// Leading integers are line-number residue; code kept byte-identical.
349 void CpuProfiler::CollectSample() {
351 processor_->AddCurrentStack();
355 void CpuProfiler::StartProfiling(
const char* title,
bool record_samples,
356 ProfilingMode mode) {
357 if (profiles_->StartProfiling(title, record_samples, mode)) {
358 TRACE_EVENT0(
"v8",
"CpuProfiler::StartProfiling");
359 StartProcessorIfNotStarted();
363 void CpuProfiler::StartProfiling(String title,
bool record_samples,
364 ProfilingMode mode) {
365 StartProfiling(profiles_->GetName(title), record_samples, mode);
366 isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
// Lazily builds and starts the profiling pipeline: suspends regular
// logging, creates the generator (seeding the code map on first use),
// creates the sampling processor with the configured interval, hooks the
// profiler listener into the logger, replays existing code objects into the
// code map (unless prof_browser_mode), records the current stack so the
// profile has a starting point, and finally starts the processor thread.
// NOTE(review): many lines were dropped by extraction — the early-return
// for an already-running processor, several closing braces, and comments —
// confirm against the original file. Leading integers are line-number
// residue; code kept byte-identical.
369 void CpuProfiler::StartProcessorIfNotStarted() {
// Processor already exists: just capture the current stack (dropped guard
// presumably returns here — confirm).
371 processor_->AddCurrentStack();
374 Logger* logger = isolate_->logger();
// Disable logging while profiling; restored in StopProcessor().
376 saved_is_logging_ = logger->is_logging_;
377 logger->is_logging_ =
false;
379 bool codemap_needs_initialization =
false;
381 generator_.reset(
new ProfileGenerator(profiles_.get()));
382 codemap_needs_initialization =
true;
// Pre-populate the code map with runtime-call-stats entries.
383 CreateEntriesForRuntimeCallStats();
385 processor_.reset(
new SamplingEventsProcessor(isolate_, generator_.get(),
386 sampling_interval_));
387 if (!profiler_listener_) {
388 profiler_listener_.reset(
new ProfilerListener(isolate_, processor_.get()));
390 logger->AddCodeEventListener(profiler_listener_.get());
391 is_profiling_ =
true;
392 isolate_->set_is_profiling(
true);
// Enumerate already-compiled code so the profile can symbolize it.
394 DCHECK(isolate_->heap()->HasBeenSetUp());
395 if (codemap_needs_initialization) {
396 if (!FLAG_prof_browser_mode) {
397 logger->LogCodeObjects();
399 logger->LogCompiledFunctions();
400 logger->LogAccessorCallbacks();
// Seed the profile with the stack at start and launch the thread.
404 processor_->AddCurrentStack();
405 processor_->StartSynchronously();
408 CpuProfile* CpuProfiler::StopProfiling(
const char* title) {
409 if (!is_profiling_)
return nullptr;
410 StopProcessorIfLastProfile(title);
411 return profiles_->StopProfiling(title);
414 CpuProfile* CpuProfiler::StopProfiling(String title) {
415 return StopProfiling(profiles_->GetName(title));
// Stops the processor only when |title| names the last active profile; the
// actual StopProcessor()/reset tail was dropped by extraction — confirm
// against the original file. Leading integers are line-number residue; code
// kept byte-identical.
418 void CpuProfiler::StopProcessorIfLastProfile(
const char* title) {
419 if (!profiles_->IsLastProfile(title))
return;
// Shuts the profiling pipeline down: clears the profiling flags, detaches
// the listener from the logger, stops the processor thread synchronously,
// and restores the logging state saved in StartProcessorIfNotStarted().
// NOTE(review): one line between StopSynchronously() and the is_logging_
// restore (presumably resetting processor_) was dropped by extraction —
// confirm against the original file. Leading integers are line-number
// residue; code kept byte-identical.
423 void CpuProfiler::StopProcessor() {
424 Logger* logger = isolate_->logger();
425 is_profiling_ =
false;
426 isolate_->set_is_profiling(
false);
427 logger->RemoveCodeEventListener(profiler_listener_.get());
// Blocks until the processor thread has drained and exited.
428 processor_->StopSynchronously();
430 logger->is_logging_ = saved_is_logging_;
// Enqueues one REPORT_BUILTIN code event per builtin so the processor can
// map builtin instruction ranges to their ids in profiles.
// NOTE(review): this definition runs past the end of the visible chunk (its
// closing braces are not shown); loop body left byte-identical, comments
// only. Leading integers are line-number residue.
434 void CpuProfiler::LogBuiltins() {
435 Builtins* builtins = isolate_->builtins();
436 DCHECK(builtins->is_initialized());
437 for (
int i = 0;
i < Builtins::builtin_count;
i++) {
438 CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
439 ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
440 Builtins::Name
id =
static_cast<Builtins::Name
>(
i);
// Record where this builtin's code begins along with its id.
441 rec->instruction_start = builtins->builtin(
id)->InstructionStart();
442 rec->builtin_id = id;
443 processor_->Enqueue(evt_rec);