V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
cpu-profiler.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_

#include <memory>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/isolate.h"
#include "src/libsampler/sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/profiler-listener.h"
#include "src/profiler/tick-sample.h"
namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;

#define CODE_EVENTS_TYPE_LIST(V)                 \
  V(CODE_CREATION, CodeCreateEventRecord)        \
  V(CODE_MOVE, CodeMoveEventRecord)              \
  V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
  V(CODE_DEOPT, CodeDeoptEventRecord)            \
  V(REPORT_BUILTIN, ReportBuiltinEventRecord)

class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
  };
#undef DECLARE_TYPE

  Type type;
  mutable unsigned order;
};
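// For reference, with CODE_EVENTS_TYPE_LIST expanded the enum above is
// equivalent to:
//   enum Type {
//     NONE = 0,
//     CODE_CREATION,
//     CODE_MOVE,
//     CODE_DISABLE_OPT,
//     CODE_DEOPT,
//     REPORT_BUILTIN,
//   };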

class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address instruction_start;
  CodeEntry* entry;
  unsigned instruction_size;

  V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};

class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from_instruction_start;
  Address to_instruction_start;

  V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};

class CodeDisableOptEventRecord : public CodeEventRecord {
 public:
  Address instruction_start;
  const char* bailout_reason;

  V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};

class CodeDeoptEventRecord : public CodeEventRecord {
 public:
  Address instruction_start;
  const char* deopt_reason;
  int deopt_id;
  Address pc;
  int fp_to_sp_delta;
  CpuProfileDeoptFrame* deopt_frames;
  int deopt_frame_count;

  V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};

class ReportBuiltinEventRecord : public CodeEventRecord {
 public:
  Address instruction_start;
  Builtins::Name builtin_id;

  V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};

class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() = default;
  explicit TickSampleEventRecord(unsigned order) : order(order) { }

  unsigned order;
  TickSample sample;
};

class CodeEventsContainer {
 public:
  explicit CodeEventsContainer(
      CodeEventRecord::Type type = CodeEventRecord::NONE) {
    generic.type = type;
  }
  union {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
};
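// Illustrative sketch (not part of the original header): a producer typically
// fills one union member and hands the container to the processor thread:
//   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
//   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
//   rec->instruction_start = ...;  // plus entry and instruction_size
//   processor->Enqueue(evt_rec);
// The union member names (e.g. CodeCreateEventRecord_) come from expanding
// CODE_EVENTS_TYPE_LIST with DECLARE_CLASS above.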

// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public base::Thread, public CodeEventObserver {
 public:
  virtual ~ProfilerEventsProcessor();

  void CodeEventHandler(const CodeEventsContainer& evt_rec) override;

  // Thread control.
  void Run() override = 0;
  void StopSynchronously();
  V8_INLINE bool running() { return !!base::Relaxed_Load(&running_); }
  void Enqueue(const CodeEventsContainer& event);

  // Puts current stack into the tick sample events buffer.
  void AddCurrentStack(bool update_stats = false);
  void AddDeoptStack(Address from, int fp_to_sp_delta);
  // Add a sample into the tick sample events buffer. Used for testing.
  void AddSample(TickSample sample);

 protected:
  ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator);

  // Called from events processing thread (Run() method.)
  bool ProcessCodeEvent();

  enum SampleProcessingResult {
    OneSampleProcessed,
    FoundSampleForNextCodeEvent,
    NoSamplesInQueue
  };
  virtual SampleProcessingResult ProcessOneSample() = 0;

  ProfileGenerator* generator_;
  base::Atomic32 running_;
  LockedQueue<CodeEventsContainer> events_buffer_;
  LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  std::atomic<unsigned> last_code_event_id_;
  unsigned last_processed_code_event_id_;
  Isolate* isolate_;
};

class SamplingEventsProcessor : public ProfilerEventsProcessor {
 public:
  SamplingEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
                          base::TimeDelta period);
  ~SamplingEventsProcessor() override;

  // SamplingCircularQueue has stricter alignment requirements than a normal new
  // can fulfil, so we need to provide our own new/delete here.
  void* operator new(size_t size);
  void operator delete(void* ptr);

  void Run() override;

  // Tick sample events are filled directly in the buffer of the circular
  // queue (because the structure is of fixed width, but usually not all
  // stack frame entries are filled.) This method returns a pointer to the
  // next record of the buffer.
  // These methods are not thread-safe and should only ever be called by one
  // producer (from CpuSampler::SampleStack()). For testing, use AddSample.
  inline TickSample* StartTickSample();
  inline void FinishTickSample();

  sampler::Sampler* sampler() { return sampler_.get(); }

 private:
  SampleProcessingResult ProcessOneSample() override;

  static const size_t kTickSampleBufferSize = 1 * MB;
  static const size_t kTickSampleQueueLength =
      kTickSampleBufferSize / sizeof(TickSampleEventRecord);
  SamplingCircularQueue<TickSampleEventRecord,
                        kTickSampleQueueLength> ticks_buffer_;
  std::unique_ptr<sampler::Sampler> sampler_;
  const base::TimeDelta period_;  // Samples & code events processing period.
};
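// Illustrative sketch (not part of the original header): the single producer
// described above reserves a slot in the circular queue, fills it in place,
// and then publishes it:
//   TickSample* sample = processor->StartTickSample();
//   if (sample != nullptr) {
//     // ... fill |*sample| from the interrupted thread's state ...
//     processor->FinishTickSample();
//   }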

class CpuProfiler {
 public:
  explicit CpuProfiler(Isolate* isolate);

  CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
              ProfileGenerator* test_generator,
              ProfilerEventsProcessor* test_processor);

  ~CpuProfiler();

  static void CollectSample(Isolate* isolate);

  typedef v8::CpuProfilingMode ProfilingMode;

  void set_sampling_interval(base::TimeDelta value);
  void CollectSample();
  void StartProfiling(const char* title, bool record_samples = false,
                      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
  void StartProfiling(String title, bool record_samples, ProfilingMode mode);
  CpuProfile* StopProfiling(const char* title);
  CpuProfile* StopProfiling(String title);
  int GetProfilesCount();
  CpuProfile* GetProfile(int index);
  void DeleteAllProfiles();
  void DeleteProfile(CpuProfile* profile);

  bool is_profiling() const { return is_profiling_; }

  ProfileGenerator* generator() const { return generator_.get(); }
  ProfilerEventsProcessor* processor() const { return processor_.get(); }
  Isolate* isolate() const { return isolate_; }

  ProfilerListener* profiler_listener_for_test() {
    return profiler_listener_.get();
  }

 private:
  void StartProcessorIfNotStarted();
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();
  void LogBuiltins();
  void CreateEntriesForRuntimeCallStats();

  Isolate* const isolate_;
  base::TimeDelta sampling_interval_;
  std::unique_ptr<CpuProfilesCollection> profiles_;
  std::unique_ptr<ProfileGenerator> generator_;
  std::unique_ptr<ProfilerEventsProcessor> processor_;
  std::unique_ptr<ProfilerListener> profiler_listener_;
  bool saved_is_logging_;
  bool is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_PROFILER_CPU_PROFILER_H_
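For orientation, here is a minimal usage sketch of the internal CpuProfiler class declared above, using only the methods shown in this header. It is illustrative rather than canonical: the isolate pointer and the profile title are placeholders, and embedders would normally go through the public v8::CpuProfiler wrapper in include/v8-profiler.h instead.

  // Sketch only: assumes a valid v8::internal::Isolate* named |isolate|.
  CpuProfiler profiler(isolate);
  profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(100));
  profiler.StartProfiling("example", /*record_samples=*/true);
  // ... run the JavaScript code to be profiled ...
  CpuProfile* profile = profiler.StopProfiling("example");
  // Inspect |profile| via the CpuProfile API, then release it.
  profiler.DeleteProfile(profile);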