V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
profile-generator.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/profile-generator.h"

#include "src/base/adapters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/unicode.h"

namespace v8 {
namespace internal {

void SourcePositionTable::SetPosition(int pc_offset, int line) {
  DCHECK_GE(pc_offset, 0);
  DCHECK_GT(line, 0);  // The 1-based number of the source line.
  // Check that we are inserting in ascending order, so that the vector remains
  // sorted.
  DCHECK(pc_offsets_to_lines_.empty() ||
         pc_offsets_to_lines_.back().pc_offset < pc_offset);
  if (pc_offsets_to_lines_.empty() ||
      pc_offsets_to_lines_.back().line_number != line) {
    pc_offsets_to_lines_.push_back({pc_offset, line});
  }
}
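// Illustrative sketch, not part of the original source: runs of pc offsets
// that map to the same line collapse into a single entry, so a table built
// with
//   SetPosition(0, 1); SetPosition(4, 1); SetPosition(10, 2);
// stores only {0, 1} and {10, 2}; every offset in [0, 10) resolves to line 1.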

int SourcePositionTable::GetSourceLineNumber(int pc_offset) const {
  if (pc_offsets_to_lines_.empty()) {
    return v8::CpuProfileNode::kNoLineNumberInfo;
  }
  auto it =
      std::upper_bound(pc_offsets_to_lines_.begin(), pc_offsets_to_lines_.end(),
                       PCOffsetAndLineNumber{pc_offset, 0});
  if (it != pc_offsets_to_lines_.begin()) --it;
  return it->line_number;
}
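// Illustrative sketch, not part of the original source: with entries {0, 1}
// and {10, 2}, upper_bound returns the first entry whose pc_offset exceeds
// the query, and the step back lands on the covering range:
//   GetSourceLineNumber(5)  == 1  (upper_bound -> {10, 2}, then --it)
//   GetSourceLineNumber(10) == 2
//   GetSourceLineNumber(99) == 2  (past the last entry, clamps to it)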

const char* const CodeEntry::kWasmResourceNamePrefix = "wasm ";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";

const char* const CodeEntry::kProgramEntryName = "(program)";
const char* const CodeEntry::kIdleEntryName = "(idle)";
const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";

base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
    CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
    CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
    CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

base::LazyDynamicInstance<CodeEntry,
                          CodeEntry::UnresolvedEntryCreateTrait>::type
    CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;

CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
}

CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
}

CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
  return new CodeEntry(Logger::BUILTIN_TAG,
                       CodeEntry::kGarbageCollectorEntryName);
}

CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
  return new CodeEntry(Logger::FUNCTION_TAG,
                       CodeEntry::kUnresolvedFunctionName);
}

uint32_t CodeEntry::GetHash() const {
  uint32_t hash = ComputeUnseededHash(tag());
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    hash ^= ComputeUnseededHash(static_cast<uint32_t>(script_id_));
    hash ^= ComputeUnseededHash(static_cast<uint32_t>(position_));
  } else {
    hash ^= ComputeUnseededHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
    hash ^= ComputeUnseededHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
    hash ^= ComputeUnseededHash(line_number_);
  }
  return hash;
}

bool CodeEntry::IsSameFunctionAs(const CodeEntry* entry) const {
  if (this == entry) return true;
  if (script_id_ != v8::UnboundScript::kNoScriptId) {
    return script_id_ == entry->script_id_ && position_ == entry->position_;
  }
  return name_ == entry->name_ && resource_name_ == entry->resource_name_ &&
         line_number_ == entry->line_number_;
}
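// Note (not part of the original source): identity is (script_id, position)
// whenever a script id is known, so CodeEntry objects for different
// optimization tiers of the same JS function compare equal. The raw pointer
// comparisons in the fallback work on the assumption that name and resource
// name strings are uniqued (interned) by the profiler's string storage, so
// equal strings share one pointer.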


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
  bit_field_ = BuiltinIdField::update(bit_field_, id);
}


int CodeEntry::GetSourceLine(int pc_offset) const {
  if (line_info_) return line_info_->GetSourceLineNumber(pc_offset);
  return v8::CpuProfileNode::kNoLineNumberInfo;
}

void CodeEntry::AddInlineStack(
    int pc_offset, std::vector<std::unique_ptr<CodeEntry>> inline_stack) {
  EnsureRareData()->inline_locations_.insert(
      std::make_pair(pc_offset, std::move(inline_stack)));
}

const std::vector<std::unique_ptr<CodeEntry>>* CodeEntry::GetInlineStack(
    int pc_offset) const {
  if (!rare_data_) return nullptr;
  auto it = rare_data_->inline_locations_.find(pc_offset);
  return it != rare_data_->inline_locations_.end() ? &it->second : nullptr;
}
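// Illustrative sketch, not part of the original source, with hypothetical
// names: when the optimizer inlines callee() into caller(), the compiler
// registers the inlined frames under the pc offsets of the inlined code,
// roughly
//   caller_entry->AddInlineStack(pc_offset, std::move(callee_frames));
// so that GetInlineStack(pc_offset) later lets RecordTickSample expand one
// physical frame into the logical frames it represents. Lookup is by exact
// pc offset; there is no range search here.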

void CodeEntry::set_deopt_info(
    const char* deopt_reason, int deopt_id,
    std::vector<CpuProfileDeoptFrame> inlined_frames) {
  DCHECK(!has_deopt_info());
  RareData* rare_data = EnsureRareData();
  rare_data->deopt_reason_ = deopt_reason;
  rare_data->deopt_id_ = deopt_id;
  rare_data->deopt_inlined_frames_ = std::move(inlined_frames);
}

void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
  if (!shared->script()->IsScript()) return;
  Script* script = Script::cast(shared->script());
  set_script_id(script->id());
  set_position(shared->StartPosition());
  if (shared->optimization_disabled()) {
    set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
  }
}

CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
  DCHECK(has_deopt_info());

  CpuProfileDeoptInfo info;
  info.deopt_reason = rare_data_->deopt_reason_;
  DCHECK_NE(kNoDeoptimizationId, rare_data_->deopt_id_);
  if (rare_data_->deopt_inlined_frames_.empty()) {
    info.stack.push_back(CpuProfileDeoptFrame(
        {script_id_, static_cast<size_t>(std::max(0, position()))}));
  } else {
    info.stack = rare_data_->deopt_inlined_frames_;
  }
  return info;
}

CodeEntry::RareData* CodeEntry::EnsureRareData() {
  if (!rare_data_) {
    rare_data_.reset(new RareData());
  }
  return rare_data_.get();
}

void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
  deopt_infos_.push_back(entry->GetDeoptInfo());
  entry->clear_deopt_info();
}

ProfileNode* ProfileNode::FindChild(CodeEntry* entry, int line_number) {
  auto map_entry = children_.find({entry, line_number});
  return map_entry != children_.end() ? map_entry->second : nullptr;
}

ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry, int line_number) {
  auto map_entry = children_.find({entry, line_number});
  if (map_entry == children_.end()) {
    ProfileNode* node = new ProfileNode(tree_, entry, this, line_number);
    children_[{entry, line_number}] = node;
    children_list_.push_back(node);
    return node;
  } else {
    return map_entry->second;
  }
}
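// Illustrative sketch, not part of the original source, with hypothetical
// functions: children are keyed by the (CodeEntry*, line_number) pair, so
// in kCallerLineNumbers mode foo() called from lines 3 and 7 of bar()
// yields two sibling nodes, bar -> {foo, 3} and bar -> {foo, 7}. In the
// default mode every lookup passes kNoLineNumberInfo and the call sites
// merge into a single foo child.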


void ProfileNode::IncrementLineTicks(int src_line) {
  if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
  // Increment a hit counter of a certain source line.
  // Add a new source line if not found.
  auto map_entry = line_ticks_.find(src_line);
  if (map_entry == line_ticks_.end()) {
    line_ticks_[src_line] = 1;
  } else {
    line_ticks_[src_line]++;
  }
}


bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                               unsigned int length) const {
  if (entries == nullptr || length == 0) return false;

  unsigned line_count = static_cast<unsigned>(line_ticks_.size());

  if (line_count == 0) return true;
  if (length < line_count) return false;

  v8::CpuProfileNode::LineTick* entry = entries;

  for (auto p = line_ticks_.begin(); p != line_ticks_.end(); p++, entry++) {
    entry->line = p->first;
    entry->hit_count = p->second;
  }

  return true;
}
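// Illustrative sketch, not part of the original source: the public API pairs
// this with CpuProfileNode::GetHitLineCount(), so a typical caller sizes the
// buffer first and then fills it:
//   unsigned count = node->GetHitLineCount();
//   std::vector<v8::CpuProfileNode::LineTick> ticks(count);
//   if (count > 0 && node->GetLineTicks(ticks.data(), count)) { /* use */ }
// Returning true when line_count == 0 lets callers treat "no line info" as
// success rather than an error.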


void ProfileNode::Print(int indent) {
  int line_number = line_number_ != 0 ? line_number_ : entry_->line_number();
  base::OS::Print("%5u %*s %s:%d %d #%d", self_ticks_, indent, "",
                  entry_->name(), line_number, entry_->script_id(), id());
  if (entry_->resource_name()[0] != '\0')
    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  base::OS::Print("\n");
  for (size_t i = 0; i < deopt_infos_.size(); ++i) {
    CpuProfileDeoptInfo& info = deopt_infos_[i];
    base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
                    " with reason '%s'.\n",
                    indent + 10, "", info.stack[0].script_id,
                    info.stack[0].position, info.deopt_reason);
    for (size_t index = 1; index < info.stack.size(); ++index) {
      base::OS::Print("%*s;;; Inline point: script_id %d position: %" PRIuS
                      ".\n",
                      indent + 10, "", info.stack[index].script_id,
                      info.stack[index].position);
    }
  }
  const char* bailout_reason = entry_->bailout_reason();
  if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
      bailout_reason != CodeEntry::kEmptyBailoutReason) {
    base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
                    bailout_reason);
  }
  for (auto child : children_) {
    child.second->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};

ProfileTree::ProfileTree(Isolate* isolate)
    : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_, nullptr)),
      isolate_(isolate),
      next_function_id_(1) {}

ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
  CodeEntry* code_entry = node->entry();
  auto map_entry = function_ids_.find(code_entry);
  if (map_entry == function_ids_.end()) {
    return function_ids_[code_entry] = next_function_id_++;
  }
  return function_ids_[code_entry];
}

ProfileNode* ProfileTree::AddPathFromEnd(const std::vector<CodeEntry*>& path,
                                         int src_line, bool update_stats) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = nullptr;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if (*it == nullptr) continue;
    last_entry = *it;
    node = node->FindOrAddChild(*it, v8::CpuProfileNode::kNoLineNumberInfo);
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}

ProfileNode* ProfileTree::AddPathFromEnd(const ProfileStackTrace& path,
                                         int src_line, bool update_stats,
                                         ProfilingMode mode) {
  ProfileNode* node = root_;
  CodeEntry* last_entry = nullptr;
  int parent_line_number = v8::CpuProfileNode::kNoLineNumberInfo;
  for (auto it = path.rbegin(); it != path.rend(); ++it) {
    if ((*it).code_entry == nullptr) continue;
    last_entry = (*it).code_entry;
    node = node->FindOrAddChild((*it).code_entry, parent_line_number);
    parent_line_number = mode == ProfilingMode::kCallerLineNumbers
                             ? (*it).line_number
                             : v8::CpuProfileNode::kNoLineNumberInfo;
  }
  if (last_entry && last_entry->has_deopt_info()) {
    node->CollectDeoptInfo(last_entry);
  }
  if (update_stats) {
    node->IncrementSelfTicks();
    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
      node->IncrementLineTicks(src_line);
    }
  }
  return node;
}
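// Illustrative sketch, not part of the original source, with hypothetical
// functions: a sampled stack arrives top-of-stack first, e.g. [baz, bar,
// foo], and the reverse walk inserts it bottom-up, producing
//   (root) -> foo -> bar -> baz
// and incrementing the self-tick count of the baz node only. A later sample
// [bar, foo] reuses the existing foo and bar nodes and ticks bar instead.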


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  V8_INLINE ProfileNode* current_child() {
    return node->children()->at(child_idx_);
  }
  V8_INLINE bool has_current_child() {
    return child_idx_ < static_cast<int>(node->children()->size());
  }
  V8_INLINE void next_child() { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  std::vector<Position> stack;
  stack.emplace_back(root_);
  while (stack.size() > 0) {
    Position& current = stack.back();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.emplace_back(current.current_child());
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.size() > 1) {
        Position& parent = stack[stack.size() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.pop_back();
    }
  }
}
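// Illustrative sketch, not part of the original source: for a tree
// root -> {A -> {C}, B}, the explicit stack visits nodes in post-order
// C, A, B, root, invoking AfterAllChildrenTraversed on each in that order.
// This is what lets DeleteNodesCallback free children before their parents
// without native recursion, so arbitrarily deep profile trees cannot
// overflow the C++ stack during teardown.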

using v8::tracing::TracedValue;

std::atomic<uint32_t> CpuProfile::last_id_;

CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
                       bool record_samples, ProfilingMode mode)
    : title_(title),
      record_samples_(record_samples),
      mode_(mode),
      start_time_(base::TimeTicks::HighResolutionNow()),
      top_down_(profiler->isolate()),
      profiler_(profiler),
      streaming_next_sample_(0),
      id_(++last_id_) {
  auto value = TracedValue::Create();
  value->SetDouble("startTime",
                   (start_time_ - base::TimeTicks()).InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "Profile", id_, "data", std::move(value));
}

void CpuProfile::AddPath(base::TimeTicks timestamp,
                         const ProfileStackTrace& path, int src_line,
                         bool update_stats) {
  ProfileNode* top_frame_node =
      top_down_.AddPathFromEnd(path, src_line, update_stats, mode_);

  if (record_samples_ && !timestamp.IsNull()) {
    timestamps_.push_back(timestamp);
    samples_.push_back(top_frame_node);
  }

  const int kSamplesFlushCount = 100;
  const int kNodesFlushCount = 10;
  if (samples_.size() - streaming_next_sample_ >= kSamplesFlushCount ||
      top_down_.pending_nodes_count() >= kNodesFlushCount) {
    StreamPendingTraceEvents();
  }
}

namespace {

void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
  const CodeEntry* entry = node->entry();
  value->BeginDictionary("callFrame");
  value->SetString("functionName", entry->name());
  if (*entry->resource_name()) {
    value->SetString("url", entry->resource_name());
  }
  value->SetInteger("scriptId", entry->script_id());
  if (entry->line_number()) {
    value->SetInteger("lineNumber", entry->line_number() - 1);
  }
  if (entry->column_number()) {
    value->SetInteger("columnNumber", entry->column_number() - 1);
  }
  value->EndDictionary();
  value->SetInteger("id", node->id());
  if (node->parent()) {
    value->SetInteger("parent", node->parent()->id());
  }
  const char* deopt_reason = entry->bailout_reason();
  if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
    value->SetString("deoptReason", deopt_reason);
  }
}

}  // namespace
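// Illustrative sketch, not part of the original source, with hypothetical
// values: the emitted dictionary follows the trace-event profile format,
// converting V8's 1-based line/column numbers to 0-based ones, e.g.
//   {"callFrame": {"functionName": "foo", "url": "a.js", "scriptId": 5,
//                  "lineNumber": 9, "columnNumber": 0},
//    "id": 3, "parent": 2}
// for a function V8 reports at line 10, column 1.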

void CpuProfile::StreamPendingTraceEvents() {
  std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
  if (pending_nodes.empty() && samples_.empty()) return;
  auto value = TracedValue::Create();

  if (!pending_nodes.empty() || streaming_next_sample_ != samples_.size()) {
    value->BeginDictionary("cpuProfile");
    if (!pending_nodes.empty()) {
      value->BeginArray("nodes");
      for (auto node : pending_nodes) {
        value->BeginDictionary();
        BuildNodeValue(node, value.get());
        value->EndDictionary();
      }
      value->EndArray();
    }
    if (streaming_next_sample_ != samples_.size()) {
      value->BeginArray("samples");
      for (size_t i = streaming_next_sample_; i < samples_.size(); ++i) {
        value->AppendInteger(samples_[i]->id());
      }
      value->EndArray();
    }
    value->EndDictionary();
  }
  if (streaming_next_sample_ != samples_.size()) {
    value->BeginArray("timeDeltas");
    base::TimeTicks lastTimestamp =
        streaming_next_sample_ ? timestamps_[streaming_next_sample_ - 1]
                               : start_time();
    for (size_t i = streaming_next_sample_; i < timestamps_.size(); ++i) {
      value->AppendInteger(
          static_cast<int>((timestamps_[i] - lastTimestamp).InMicroseconds()));
      lastTimestamp = timestamps_[i];
    }
    value->EndArray();
    DCHECK_EQ(samples_.size(), timestamps_.size());
    streaming_next_sample_ = samples_.size();
  }

  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", id_, "data", std::move(value));
}
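// Illustrative sketch, not part of the original source: samples stream as
// node ids plus time deltas rather than absolute timestamps. For samples
// taken 100us, 350us, and 900us after the profile started, the chunk
// carries "timeDeltas": [100, 250, 550], and a consumer reconstructs
// absolute times as a running sum from "startTime".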

void CpuProfile::FinishProfile() {
  end_time_ = base::TimeTicks::HighResolutionNow();
  StreamPendingTraceEvents();
  auto value = TracedValue::Create();
  value->SetDouble("endTime", (end_time_ - base::TimeTicks()).InMicroseconds());
  TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
                              "ProfileChunk", id_, "data", std::move(value));
}

void CpuProfile::Print() {
  base::OS::Print("[Top down]:\n");
  top_down_.Print();
}

CodeMap::CodeMap() = default;

CodeMap::~CodeMap() {
  // First clean the free list as it's otherwise impossible to tell
  // the slot type.
  unsigned free_slot = free_list_head_;
  while (free_slot != kNoFreeSlot) {
    unsigned next_slot = code_entries_[free_slot].next_free_slot;
    code_entries_[free_slot].entry = nullptr;
    free_slot = next_slot;
  }
  for (auto slot : code_entries_) delete slot.entry;
}

void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  ClearCodesInRange(addr, addr + size);
  unsigned index = AddCodeEntry(addr, entry);
  code_map_.emplace(addr, CodeEntryMapInfo{index, size});
  DCHECK(entry->instruction_start() == kNullAddress ||
         addr == entry->instruction_start());
}

void CodeMap::ClearCodesInRange(Address start, Address end) {
  auto left = code_map_.upper_bound(start);
  if (left != code_map_.begin()) {
    --left;
    if (left->first + left->second.size <= start) ++left;
  }
  auto right = left;
  for (; right != code_map_.end() && right->first < end; ++right) {
    if (!entry(right->second.index)->used()) {
      DeleteCodeEntry(right->second.index);
    }
  }
  code_map_.erase(left, right);
}

CodeEntry* CodeMap::FindEntry(Address addr) {
  auto it = code_map_.upper_bound(addr);
  if (it == code_map_.begin()) return nullptr;
  --it;
  Address start_address = it->first;
  Address end_address = start_address + it->second.size;
  CodeEntry* ret = addr < end_address ? entry(it->second.index) : nullptr;
  if (ret && ret->instruction_start() != kNullAddress) {
    DCHECK_EQ(start_address, ret->instruction_start());
    DCHECK(addr >= start_address && addr < end_address);
  }
  return ret;
}
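// Illustrative sketch, not part of the original source, with hypothetical
// addresses: code_map_ is keyed by code-object start address, so the lookup
// mirrors the pc-to-line search above. With objects at [0x1000, 0x1100) and
// [0x2000, 0x2040):
//   FindEntry(0x1080) -> the 0x1000 entry (address inside its size range)
//   FindEntry(0x1200) -> nullptr (past the end of the preceding object)
//   FindEntry(0x0500) -> nullptr (before the first object)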

void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  auto it = code_map_.find(from);
  if (it == code_map_.end()) return;
  CodeEntryMapInfo info = it->second;
  code_map_.erase(it);
  DCHECK(from + info.size <= to || to + info.size <= from);
  ClearCodesInRange(to, to + info.size);
  code_map_.emplace(to, info);

  CodeEntry* entry = code_entries_[info.index].entry;
  entry->set_instruction_start(to);
}

unsigned CodeMap::AddCodeEntry(Address start, CodeEntry* entry) {
  if (free_list_head_ == kNoFreeSlot) {
    code_entries_.push_back(CodeEntrySlotInfo{entry});
    return static_cast<unsigned>(code_entries_.size()) - 1;
  }
  unsigned index = free_list_head_;
  free_list_head_ = code_entries_[index].next_free_slot;
  code_entries_[index].entry = entry;
  return index;
}

void CodeMap::DeleteCodeEntry(unsigned index) {
  delete code_entries_[index].entry;
  code_entries_[index].next_free_slot = free_list_head_;
  free_list_head_ = index;
}
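// Note (not part of the original source): code_entries_ slots double as an
// intrusive free list. A slot holds either a live CodeEntry* or the index
// of the next free slot; freeing slot 7 chains it to the previous head and
// makes it the new free_list_head_. Indices are recycled without shifting
// the vector, which matters because CodeEntryMapInfo records reference
// entries by index rather than by pointer.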

void CodeMap::Print() {
  for (const auto& pair : code_map_) {
    base::OS::Print("%p %5d %s\n", reinterpret_cast<void*>(pair.first),
                    pair.second.size, entry(pair.second.index)->name());
  }
}

CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
    : profiler_(nullptr), current_profiles_semaphore_(1) {}

bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples,
                                           ProfilingMode mode) {
  current_profiles_semaphore_.Wait();
  if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    if (strcmp(profile->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title...
      current_profiles_semaphore_.Signal();
      // ... though return true to force it to collect a sample.
      return true;
    }
  }
  current_profiles_.emplace_back(
      new CpuProfile(profiler_, title, record_samples, mode));
  current_profiles_semaphore_.Signal();
  return true;
}


CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = nullptr;
  current_profiles_semaphore_.Wait();

  auto it =
      std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
                   [&](const std::unique_ptr<CpuProfile>& p) {
                     return title_len == 0 || strcmp(p->title(), title) == 0;
                   });

  if (it != current_profiles_.rend()) {
    (*it)->FinishProfile();
    profile = it->get();
    finished_profiles_.push_back(std::move(*it));
    // Convert reverse iterator to matching forward iterator.
    current_profiles_.erase(--(it.base()));
  }

  current_profiles_semaphore_.Signal();
  return profile;
}
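// Note (not part of the original source): for a reverse iterator rit,
// rit.base() points one element past the element rit refers to, so
// --(it.base()) recovers the forward iterator to the same element. E.g. in
// {p0, p1, p2}, a reverse iterator on p1 has base() pointing at p2, and the
// decrement yields the iterator erase() needs for p1. Searching in reverse
// means the most recently started profile with a matching title is stopped.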


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.size() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  auto pos =
      std::find_if(finished_profiles_.begin(), finished_profiles_.end(),
                   [&](const std::unique_ptr<CpuProfile>& finished_profile) {
                     return finished_profile.get() == profile;
                   });
  DCHECK(pos != finished_profiles_.end());
  finished_profiles_.erase(pos);
}

void CpuProfilesCollection::AddPathToCurrentProfiles(
    base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
    bool update_stats) {
  // As starting / stopping profiles is rare relative to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
    profile->AddPath(timestamp, path, src_line, update_stats);
  }
  current_profiles_semaphore_.Signal();
}

ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles) {}

void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  ProfileStackTrace stack_trace;
  // Conservatively reserve space for stack frames + pc + function + vm-state.
  // There could in fact be more of them because of inlined entries.
  stack_trace.reserve(sample.frames_count + 3);

  // The ProfileNode knows nothing about all versions of generated code for
  // the same JS function. The line number information associated with
  // the latest version of generated code is used to find a source line number
  // for a JS function. Then, the detected source line is passed to
  // ProfileNode to increase the tick count for this source line.
  const int no_line_info = v8::CpuProfileNode::kNoLineNumberInfo;
  int src_line = no_line_info;
  bool src_line_not_found = true;

  if (sample.pc != nullptr) {
    if (sample.has_external_callback && sample.state == EXTERNAL) {
      // Don't use PC when in external callback code, as it can point
      // inside a callback's code, and we will erroneously report
      // that a callback calls itself.
      stack_trace.push_back(
          {FindEntry(reinterpret_cast<Address>(sample.external_callback_entry)),
           no_line_info});
    } else {
      Address attributed_pc = reinterpret_cast<Address>(sample.pc);
      CodeEntry* pc_entry = FindEntry(attributed_pc);
      // If there is no pc_entry, we're likely in native code. Find out if the
      // top of the stack (the return address) was pointing inside a JS
      // function, meaning that we have encountered a frameless invocation.
      if (!pc_entry && !sample.has_external_callback) {
        attributed_pc = reinterpret_cast<Address>(sample.tos);
        pc_entry = FindEntry(attributed_pc);
      }
      // If the pc is in the function code before it has set up the stack
      // frame or after the frame has been destroyed, SafeStackFrameIterator
      // incorrectly thinks that ebp contains the return address of the
      // current function and skips the caller's frame. Check for this case
      // and just skip such samples.
      if (pc_entry) {
        int pc_offset =
            static_cast<int>(attributed_pc - pc_entry->instruction_start());
        // TODO(petermarshall): pc_offset can still be negative in some cases.
        src_line = pc_entry->GetSourceLine(pc_offset);
        if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
          src_line = pc_entry->line_number();
        }
        src_line_not_found = false;
        stack_trace.push_back({pc_entry, src_line});

        if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
            pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
          // When the current function is either the Function.prototype.apply
          // or Function.prototype.call builtin, the top frame is either the
          // frame of the calling JS function or an internal frame.
          // In the latter case we know the caller for sure, but in the
          // former case we don't, so we simply replace the frame with an
          // 'unresolved' entry.
          if (!sample.has_external_callback) {
            stack_trace.push_back(
                {CodeEntry::unresolved_entry(), no_line_info});
          }
        }
      }
    }

    for (unsigned i = 0; i < sample.frames_count; ++i) {
      Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
      CodeEntry* entry = FindEntry(stack_pos);
      int line_number = no_line_info;
      if (entry) {
        // Find out if the entry has an associated inlining stack.
        int pc_offset =
            static_cast<int>(stack_pos - entry->instruction_start());
        // TODO(petermarshall): pc_offset can still be negative in some cases.
        const std::vector<std::unique_ptr<CodeEntry>>* inline_stack =
            entry->GetInlineStack(pc_offset);
        if (inline_stack) {
          std::transform(
              inline_stack->rbegin(), inline_stack->rend(),
              std::back_inserter(stack_trace),
              [=](const std::unique_ptr<CodeEntry>& ptr) {
                return CodeEntryAndLineNumber{ptr.get(), no_line_info};
              });
        }
        // Skip unresolved frames (e.g. internal frame) and get the source
        // line of the first JS caller.
        if (src_line_not_found) {
          src_line = entry->GetSourceLine(pc_offset);
          if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
            src_line = entry->line_number();
          }
          src_line_not_found = false;
        }
        line_number = entry->GetSourceLine(pc_offset);
      }
      stack_trace.push_back({entry, line_number});
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (auto e : stack_trace) {
      if (e.code_entry != nullptr) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      stack_trace.push_back({EntryForVMState(sample.state), no_line_info});
    }
  }

  profiles_->AddPathToCurrentProfiles(sample.timestamp, stack_trace, src_line,
                                      sample.update_stats);
}
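// Illustrative sketch, not part of the original source, with hypothetical
// functions: a tick whose pc falls in jitted code for baz(), with walked
// return addresses inside bar() and foo(), symbolizes (top of stack first)
// to
//   [ {baz, 7}, {bar, 12}, {foo, 3} ]
// which AddPathToCurrentProfiles hands to every running profile, where
// AddPathFromEnd inserts it bottom-up as (root) -> foo -> bar -> baz.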

CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return CodeEntry::gc_entry();
    case JS:
    case PARSER:
    case COMPILER:
    case BYTECODE_COMPILER:
    // DOM event handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return CodeEntry::program_entry();
    case IDLE:
      return CodeEntry::idle_entry();
  }
  UNREACHABLE();
}

}  // namespace internal
}  // namespace v8