// V8 TurboFan compiler pipeline (src/compiler/pipeline.cc),
// from V8 7.2.502.16 as bundled with Deno 0.2.4.
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4 
#include "src/compiler/pipeline.h"

#include <fstream>  // NOLINT(readability/streams)
#include <iostream>
#include <memory>
#include <sstream>

#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/bootstrapper.h"
#include "src/code-tracer.h"
#include "src/compiler.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
#include "src/compiler/backend/live-range-separator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-graph-verifier.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
#include "src/disassembler.h"
#include "src/isolate-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/optimized-compilation-info.h"
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/utils.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-engine.h"
88 
namespace v8 {
namespace internal {
namespace compiler {
92 
// Turbofan can only handle 2^16 control inputs. Since each control flow split
// requires at least two bytes (jump and offset), we limit the bytecode size
// to 128K bytes.
const int kMaxBytecodeSizeForTurbofan = 128 * 1024;
97 
98 class PipelineData {
99  public:
100  // For main entry point.
101  PipelineData(ZoneStats* zone_stats, Isolate* isolate,
103  PipelineStatistics* pipeline_statistics)
104  : isolate_(isolate),
105  allocator_(isolate->allocator()),
106  info_(info),
107  debug_name_(info_->GetDebugName()),
108  may_have_unverifiable_graph_(false),
109  zone_stats_(zone_stats),
110  pipeline_statistics_(pipeline_statistics),
111  graph_zone_scope_(zone_stats_, ZONE_NAME),
112  graph_zone_(graph_zone_scope_.zone()),
113  instruction_zone_scope_(zone_stats_, ZONE_NAME),
114  instruction_zone_(instruction_zone_scope_.zone()),
115  codegen_zone_scope_(zone_stats_, ZONE_NAME),
116  codegen_zone_(codegen_zone_scope_.zone()),
117  register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
118  register_allocation_zone_(register_allocation_zone_scope_.zone()),
119  assembler_options_(AssemblerOptions::Default(isolate)) {
120  PhaseScope scope(pipeline_statistics, "init pipeline data");
121  graph_ = new (graph_zone_) Graph(graph_zone_);
122  source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
123  node_origins_ = info->trace_turbo_json_enabled()
124  ? new (graph_zone_) NodeOriginTable(graph_)
125  : nullptr;
126  simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
127  machine_ = new (graph_zone_) MachineOperatorBuilder(
128  graph_zone_, MachineType::PointerRepresentation(),
129  InstructionSelector::SupportedMachineOperatorFlags(),
130  InstructionSelector::AlignmentRequirements());
131  common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
132  javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
133  jsgraph_ = new (graph_zone_)
134  JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
135  broker_ = new (info_->zone()) JSHeapBroker(isolate_, info_->zone());
136  dependencies_ =
137  new (info_->zone()) CompilationDependencies(isolate_, info_->zone());
138  }
139 
140  // For WebAssembly compile entry point.
141  PipelineData(ZoneStats* zone_stats, wasm::WasmEngine* wasm_engine,
142  OptimizedCompilationInfo* info, MachineGraph* mcgraph,
143  PipelineStatistics* pipeline_statistics,
144  SourcePositionTable* source_positions,
145  NodeOriginTable* node_origins,
146  const AssemblerOptions& assembler_options)
147  : isolate_(nullptr),
148  wasm_engine_(wasm_engine),
149  allocator_(wasm_engine->allocator()),
150  info_(info),
151  debug_name_(info_->GetDebugName()),
152  may_have_unverifiable_graph_(false),
153  zone_stats_(zone_stats),
154  pipeline_statistics_(pipeline_statistics),
155  graph_zone_scope_(zone_stats_, ZONE_NAME),
156  graph_zone_(graph_zone_scope_.zone()),
157  graph_(mcgraph->graph()),
158  source_positions_(source_positions),
159  node_origins_(node_origins),
160  machine_(mcgraph->machine()),
161  common_(mcgraph->common()),
162  mcgraph_(mcgraph),
163  instruction_zone_scope_(zone_stats_, ZONE_NAME),
164  instruction_zone_(instruction_zone_scope_.zone()),
165  codegen_zone_scope_(zone_stats_, ZONE_NAME),
166  codegen_zone_(codegen_zone_scope_.zone()),
167  register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
168  register_allocation_zone_(register_allocation_zone_scope_.zone()),
169  assembler_options_(assembler_options) {}
170 
171  // For machine graph testing entry point.
173  Isolate* isolate, Graph* graph, Schedule* schedule,
174  SourcePositionTable* source_positions,
175  NodeOriginTable* node_origins, JumpOptimizationInfo* jump_opt,
176  const AssemblerOptions& assembler_options)
177  : isolate_(isolate),
178  allocator_(isolate->allocator()),
179  info_(info),
180  debug_name_(info_->GetDebugName()),
181  zone_stats_(zone_stats),
182  graph_zone_scope_(zone_stats_, ZONE_NAME),
183  graph_(graph),
184  source_positions_(source_positions),
185  node_origins_(node_origins),
186  schedule_(schedule),
187  instruction_zone_scope_(zone_stats_, ZONE_NAME),
188  instruction_zone_(instruction_zone_scope_.zone()),
189  codegen_zone_scope_(zone_stats_, ZONE_NAME),
190  codegen_zone_(codegen_zone_scope_.zone()),
191  register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
192  register_allocation_zone_(register_allocation_zone_scope_.zone()),
193  jump_optimization_info_(jump_opt),
194  assembler_options_(assembler_options) {}
195 
196  // For register allocation testing entry point.
198  Isolate* isolate, InstructionSequence* sequence)
199  : isolate_(isolate),
200  allocator_(isolate->allocator()),
201  info_(info),
202  debug_name_(info_->GetDebugName()),
203  zone_stats_(zone_stats),
204  graph_zone_scope_(zone_stats_, ZONE_NAME),
205  instruction_zone_scope_(zone_stats_, ZONE_NAME),
206  instruction_zone_(sequence->zone()),
207  sequence_(sequence),
208  codegen_zone_scope_(zone_stats_, ZONE_NAME),
209  codegen_zone_(codegen_zone_scope_.zone()),
210  register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
211  register_allocation_zone_(register_allocation_zone_scope_.zone()),
212  assembler_options_(AssemblerOptions::Default(isolate)) {}
213 
214  ~PipelineData() {
215  // Must happen before zones are destroyed.
216  delete code_generator_;
217  code_generator_ = nullptr;
218  DeleteTyper();
219 
220  DeleteRegisterAllocationZone();
221  DeleteInstructionZone();
222  DeleteCodegenZone();
223  DeleteGraphZone();
224  }
225 
226  Isolate* isolate() const { return isolate_; }
227  AccountingAllocator* allocator() const { return allocator_; }
228  OptimizedCompilationInfo* info() const { return info_; }
229  ZoneStats* zone_stats() const { return zone_stats_; }
230  CompilationDependencies* dependencies() const { return dependencies_; }
231  PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
232  OsrHelper* osr_helper() { return &(*osr_helper_); }
233  bool compilation_failed() const { return compilation_failed_; }
234  void set_compilation_failed() { compilation_failed_ = true; }
235 
236  bool verify_graph() const { return verify_graph_; }
237  void set_verify_graph(bool value) { verify_graph_ = value; }
238 
239  MaybeHandle<Code> code() { return code_; }
240  void set_code(MaybeHandle<Code> code) {
241  DCHECK(code_.is_null());
242  code_ = code;
243  }
244 
245  CodeGenerator* code_generator() const { return code_generator_; }
246 
247  // RawMachineAssembler generally produces graphs which cannot be verified.
248  bool MayHaveUnverifiableGraph() const { return may_have_unverifiable_graph_; }
249 
250  Zone* graph_zone() const { return graph_zone_; }
251  Graph* graph() const { return graph_; }
252  SourcePositionTable* source_positions() const { return source_positions_; }
253  NodeOriginTable* node_origins() const { return node_origins_; }
254  MachineOperatorBuilder* machine() const { return machine_; }
255  CommonOperatorBuilder* common() const { return common_; }
256  JSOperatorBuilder* javascript() const { return javascript_; }
257  JSGraph* jsgraph() const { return jsgraph_; }
258  MachineGraph* mcgraph() const { return mcgraph_; }
259  Handle<Context> native_context() const {
260  return handle(info()->native_context(), isolate());
261  }
262  Handle<JSGlobalObject> global_object() const {
263  return handle(info()->global_object(), isolate());
264  }
265 
266  JSHeapBroker* broker() const { return broker_; }
267 
268  Schedule* schedule() const { return schedule_; }
269  void set_schedule(Schedule* schedule) {
270  DCHECK(!schedule_);
271  schedule_ = schedule;
272  }
273  void reset_schedule() { schedule_ = nullptr; }
274 
275  Zone* instruction_zone() const { return instruction_zone_; }
276  Zone* codegen_zone() const { return codegen_zone_; }
277  InstructionSequence* sequence() const { return sequence_; }
278  Frame* frame() const { return frame_; }
279 
280  Zone* register_allocation_zone() const { return register_allocation_zone_; }
281  RegisterAllocationData* register_allocation_data() const {
282  return register_allocation_data_;
283  }
284 
285  BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
286  void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
287  profiler_data_ = profiler_data;
288  }
289 
290  std::string const& source_position_output() const {
291  return source_position_output_;
292  }
293  void set_source_position_output(std::string const& source_position_output) {
294  source_position_output_ = source_position_output;
295  }
296 
297  JumpOptimizationInfo* jump_optimization_info() const {
298  return jump_optimization_info_;
299  }
300 
301  const AssemblerOptions& assembler_options() const {
302  return assembler_options_;
303  }
304 
305  CodeTracer* GetCodeTracer() const {
306  return wasm_engine_ == nullptr ? isolate_->GetCodeTracer()
307  : wasm_engine_->GetCodeTracer();
308  }
309 
310  Typer* CreateTyper() {
311  DCHECK_NULL(typer_);
312  typer_ = new Typer(broker(), typer_flags_, graph());
313  return typer_;
314  }
315 
316  void AddTyperFlag(Typer::Flag flag) {
317  DCHECK_NULL(typer_);
318  typer_flags_ |= flag;
319  }
320 
321  void DeleteTyper() {
322  delete typer_;
323  typer_ = nullptr;
324  }
325 
326  void DeleteGraphZone() {
327  if (graph_zone_ == nullptr) return;
328  graph_zone_scope_.Destroy();
329  graph_zone_ = nullptr;
330  graph_ = nullptr;
331  source_positions_ = nullptr;
332  node_origins_ = nullptr;
333  simplified_ = nullptr;
334  machine_ = nullptr;
335  common_ = nullptr;
336  javascript_ = nullptr;
337  jsgraph_ = nullptr;
338  mcgraph_ = nullptr;
339  schedule_ = nullptr;
340  }
341 
342  void DeleteInstructionZone() {
343  if (instruction_zone_ == nullptr) return;
344  instruction_zone_scope_.Destroy();
345  instruction_zone_ = nullptr;
346  sequence_ = nullptr;
347  }
348 
349  void DeleteCodegenZone() {
350  if (codegen_zone_ == nullptr) return;
351  codegen_zone_scope_.Destroy();
352  codegen_zone_ = nullptr;
353  dependencies_ = nullptr;
354  broker_ = nullptr;
355  frame_ = nullptr;
356  }
357 
358  void DeleteRegisterAllocationZone() {
359  if (register_allocation_zone_ == nullptr) return;
360  register_allocation_zone_scope_.Destroy();
361  register_allocation_zone_ = nullptr;
362  register_allocation_data_ = nullptr;
363  }
364 
365  void InitializeInstructionSequence(const CallDescriptor* call_descriptor) {
366  DCHECK_NULL(sequence_);
367  InstructionBlocks* instruction_blocks =
368  InstructionSequence::InstructionBlocksFor(instruction_zone(),
369  schedule());
370  sequence_ = new (instruction_zone())
371  InstructionSequence(isolate(), instruction_zone(), instruction_blocks);
372  if (call_descriptor && call_descriptor->RequiresFrameAsIncoming()) {
373  sequence_->instruction_blocks()[0]->mark_needs_frame();
374  } else {
375  DCHECK_EQ(0u, call_descriptor->CalleeSavedFPRegisters());
376  DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters());
377  }
378  }
379 
380  void InitializeFrameData(CallDescriptor* call_descriptor) {
381  DCHECK_NULL(frame_);
382  int fixed_frame_size = 0;
383  if (call_descriptor != nullptr) {
384  fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
385  }
386  frame_ = new (codegen_zone()) Frame(fixed_frame_size);
387  }
388 
389  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
390  CallDescriptor* call_descriptor) {
391  DCHECK_NULL(register_allocation_data_);
392  register_allocation_data_ = new (register_allocation_zone())
393  RegisterAllocationData(config, register_allocation_zone(), frame(),
394  sequence(), debug_name());
395  }
396 
397  void InitializeOsrHelper() {
398  DCHECK(!osr_helper_.has_value());
399  osr_helper_.emplace(info());
400  }
401 
402  void set_start_source_position(int position) {
403  DCHECK_EQ(start_source_position_, kNoSourcePosition);
404  start_source_position_ = position;
405  }
406 
407  void InitializeCodeGenerator(Linkage* linkage) {
408  DCHECK_NULL(code_generator_);
409 
410  code_generator_ = new CodeGenerator(
411  codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
412  osr_helper_, start_source_position_, jump_optimization_info_,
413  info()->GetPoisoningMitigationLevel(), assembler_options_,
414  info_->builtin_index());
415  }
416 
417  void BeginPhaseKind(const char* phase_kind_name) {
418  if (pipeline_statistics() != nullptr) {
419  pipeline_statistics()->BeginPhaseKind(phase_kind_name);
420  }
421  }
422 
423  void EndPhaseKind() {
424  if (pipeline_statistics() != nullptr) {
425  pipeline_statistics()->EndPhaseKind();
426  }
427  }
428 
429  const char* debug_name() const { return debug_name_.get(); }
430 
431  private:
432  Isolate* const isolate_;
433  wasm::WasmEngine* const wasm_engine_ = nullptr;
434  AccountingAllocator* const allocator_;
435  OptimizedCompilationInfo* const info_;
436  std::unique_ptr<char[]> debug_name_;
437  bool may_have_unverifiable_graph_ = true;
438  ZoneStats* const zone_stats_;
439  PipelineStatistics* pipeline_statistics_ = nullptr;
440  bool compilation_failed_ = false;
441  bool verify_graph_ = false;
442  int start_source_position_ = kNoSourcePosition;
443  base::Optional<OsrHelper> osr_helper_;
444  MaybeHandle<Code> code_;
445  CodeGenerator* code_generator_ = nullptr;
446  Typer* typer_ = nullptr;
447  Typer::Flags typer_flags_ = Typer::kNoFlags;
448 
449  // All objects in the following group of fields are allocated in graph_zone_.
450  // They are all set to nullptr when the graph_zone_ is destroyed.
451  ZoneStats::Scope graph_zone_scope_;
452  Zone* graph_zone_ = nullptr;
453  Graph* graph_ = nullptr;
454  SourcePositionTable* source_positions_ = nullptr;
455  NodeOriginTable* node_origins_ = nullptr;
456  SimplifiedOperatorBuilder* simplified_ = nullptr;
457  MachineOperatorBuilder* machine_ = nullptr;
458  CommonOperatorBuilder* common_ = nullptr;
459  JSOperatorBuilder* javascript_ = nullptr;
460  JSGraph* jsgraph_ = nullptr;
461  MachineGraph* mcgraph_ = nullptr;
462  Schedule* schedule_ = nullptr;
463 
464  // All objects in the following group of fields are allocated in
465  // instruction_zone_. They are all set to nullptr when the instruction_zone_
466  // is destroyed.
467  ZoneStats::Scope instruction_zone_scope_;
468  Zone* instruction_zone_;
469  InstructionSequence* sequence_ = nullptr;
470 
471  // All objects in the following group of fields are allocated in
472  // codegen_zone_. They are all set to nullptr when the codegen_zone_
473  // is destroyed.
474  ZoneStats::Scope codegen_zone_scope_;
475  Zone* codegen_zone_;
476  CompilationDependencies* dependencies_ = nullptr;
477  JSHeapBroker* broker_ = nullptr;
478  Frame* frame_ = nullptr;
479 
480  // All objects in the following group of fields are allocated in
481  // register_allocation_zone_. They are all set to nullptr when the zone is
482  // destroyed.
483  ZoneStats::Scope register_allocation_zone_scope_;
484  Zone* register_allocation_zone_;
485  RegisterAllocationData* register_allocation_data_ = nullptr;
486 
487  // Basic block profiling support.
488  BasicBlockProfiler::Data* profiler_data_ = nullptr;
489 
490  // Source position output for --trace-turbo.
491  std::string source_position_output_;
492 
493  JumpOptimizationInfo* jump_optimization_info_ = nullptr;
494  AssemblerOptions assembler_options_;
495 
496  DISALLOW_COPY_AND_ASSIGN(PipelineData);
497 };
498 
499 class PipelineImpl final {
500  public:
501  explicit PipelineImpl(PipelineData* data) : data_(data) {}
502 
503  // Helpers for executing pipeline phases.
504  template <typename Phase>
505  void Run();
506  template <typename Phase, typename Arg0>
507  void Run(Arg0 arg_0);
508  template <typename Phase, typename Arg0, typename Arg1>
509  void Run(Arg0 arg_0, Arg1 arg_1);
510 
511  // Step A. Run the graph creation and initial optimization passes.
512  bool CreateGraph();
513 
514  // B. Run the concurrent optimization passes.
515  bool OptimizeGraph(Linkage* linkage);
516 
517  // Substep B.1. Produce a scheduled graph.
518  void ComputeScheduledGraph();
519 
520  // Substep B.2. Select instructions from a scheduled graph.
521  bool SelectInstructions(Linkage* linkage);
522 
523  // Step C. Run the code assembly pass.
524  void AssembleCode(Linkage* linkage);
525 
526  // Step D. Run the code finalization pass.
527  MaybeHandle<Code> FinalizeCode();
528 
529  // Step E. Install any code dependencies.
530  bool CommitDependencies(Handle<Code> code);
531 
532  void VerifyGeneratedCodeIsIdempotent();
533  void RunPrintAndVerify(const char* phase, bool untyped = false);
534  MaybeHandle<Code> GenerateCode(CallDescriptor* call_descriptor);
535  void AllocateRegisters(const RegisterConfiguration* config,
536  CallDescriptor* call_descriptor, bool run_verifier);
537 
538  OptimizedCompilationInfo* info() const;
539  Isolate* isolate() const;
540  CodeGenerator* code_generator() const;
541 
542  private:
543  PipelineData* const data_;
544 };
545 
namespace {
547 
548 void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
549  int source_id, Handle<SharedFunctionInfo> shared) {
550  if (!shared->script()->IsUndefined(isolate)) {
551  Handle<Script> script(Script::cast(shared->script()), isolate);
552 
553  if (!script->source()->IsUndefined(isolate)) {
554  CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
555  Object* source_name = script->name();
556  OFStream os(tracing_scope.file());
557  os << "--- FUNCTION SOURCE (";
558  if (source_name->IsString()) {
559  os << String::cast(source_name)->ToCString().get() << ":";
560  }
561  os << shared->DebugName()->ToCString().get() << ") id{";
562  os << info->optimization_id() << "," << source_id << "} start{";
563  os << shared->StartPosition() << "} ---\n";
564  {
565  DisallowHeapAllocation no_allocation;
566  int start = shared->StartPosition();
567  int len = shared->EndPosition() - start;
568  SubStringRange source(String::cast(script->source()), start, len);
569  for (const auto& c : source) {
570  os << AsReversiblyEscapedUC16(c);
571  }
572  }
573 
574  os << "\n--- END ---\n";
575  }
576  }
577 }
578 
579 // Print information for the given inlining: which function was inlined and
580 // where the inlining occurred.
581 void PrintInlinedFunctionInfo(
582  OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
583  int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
584  CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
585  OFStream os(tracing_scope.file());
586  os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
587  << info->optimization_id() << "," << source_id << "} AS " << inlining_id
588  << " AT ";
589  const SourcePosition position = h.position.position;
590  if (position.IsKnown()) {
591  os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
592  } else {
593  os << "<?>";
594  }
595  os << std::endl;
596 }
597 
598 // Print the source of all functions that participated in this optimizing
599 // compilation. For inlined functions print source position of their inlining.
600 void PrintParticipatingSource(OptimizedCompilationInfo* info,
601  Isolate* isolate) {
602  AllowDeferredHandleDereference allow_deference_for_print_code;
603 
604  SourceIdAssigner id_assigner(info->inlined_functions().size());
605  PrintFunctionSource(info, isolate, -1, info->shared_info());
606  const auto& inlined = info->inlined_functions();
607  for (unsigned id = 0; id < inlined.size(); id++) {
608  const int source_id = id_assigner.GetIdFor(inlined[id].shared_info);
609  PrintFunctionSource(info, isolate, source_id, inlined[id].shared_info);
610  PrintInlinedFunctionInfo(info, isolate, source_id, id, inlined[id]);
611  }
612 }
613 
614 // Print the code after compiling it.
615 void PrintCode(Isolate* isolate, Handle<Code> code,
616  OptimizedCompilationInfo* info) {
617  if (FLAG_print_opt_source && info->IsOptimizing()) {
618  PrintParticipatingSource(info, isolate);
619  }
620 
621 #ifdef ENABLE_DISASSEMBLER
622  AllowDeferredHandleDereference allow_deference_for_print_code;
623  bool print_code =
624  FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
625  (info->IsOptimizing() && FLAG_print_opt_code &&
626  info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
627  if (print_code) {
628  std::unique_ptr<char[]> debug_name = info->GetDebugName();
629  CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
630  OFStream os(tracing_scope.file());
631 
632  // Print the source code if available.
633  bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
634  if (print_source) {
635  Handle<SharedFunctionInfo> shared = info->shared_info();
636  if (shared->script()->IsScript() &&
637  !Script::cast(shared->script())->source()->IsUndefined(isolate)) {
638  os << "--- Raw source ---\n";
639  StringCharacterStream stream(
640  String::cast(Script::cast(shared->script())->source()),
641  shared->StartPosition());
642  // fun->end_position() points to the last character in the stream. We
643  // need to compensate by adding one to calculate the length.
644  int source_len = shared->EndPosition() - shared->StartPosition() + 1;
645  for (int i = 0; i < source_len; i++) {
646  if (stream.HasMore()) {
647  os << AsReversiblyEscapedUC16(stream.GetNext());
648  }
649  }
650  os << "\n\n";
651  }
652  }
653  if (info->IsOptimizing()) {
654  os << "--- Optimized code ---\n"
655  << "optimization_id = " << info->optimization_id() << "\n";
656  } else {
657  os << "--- Code ---\n";
658  }
659  if (print_source) {
660  Handle<SharedFunctionInfo> shared = info->shared_info();
661  os << "source_position = " << shared->StartPosition() << "\n";
662  }
663  code->Disassemble(debug_name.get(), os);
664  os << "--- End code ---\n";
665  }
666 #endif // ENABLE_DISASSEMBLER
667 }
668 
669 void TraceSchedule(OptimizedCompilationInfo* info, PipelineData* data,
670  Schedule* schedule, const char* phase_name) {
671  if (info->trace_turbo_json_enabled()) {
672  AllowHandleDereference allow_deref;
673  TurboJsonFile json_of(info, std::ios_base::app);
674  json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"schedule\""
675  << ",\"data\":\"";
676  std::stringstream schedule_stream;
677  schedule_stream << *schedule;
678  std::string schedule_string(schedule_stream.str());
679  for (const auto& c : schedule_string) {
680  json_of << AsEscapedUC16ForJSON(c);
681  }
682  json_of << "\"},\n";
683  }
684  if (info->trace_turbo_graph_enabled() || FLAG_trace_turbo_scheduler) {
685  AllowHandleDereference allow_deref;
686  CodeTracer::Scope tracing_scope(data->GetCodeTracer());
687  OFStream os(tracing_scope.file());
688  os << "-- Schedule --------------------------------------\n" << *schedule;
689  }
690 }
691 
692 
693 class SourcePositionWrapper final : public Reducer {
694  public:
695  SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
696  : reducer_(reducer), table_(table) {}
697  ~SourcePositionWrapper() final = default;
698 
699  const char* reducer_name() const override { return reducer_->reducer_name(); }
700 
701  Reduction Reduce(Node* node) final {
702  SourcePosition const pos = table_->GetSourcePosition(node);
703  SourcePositionTable::Scope position(table_, pos);
704  return reducer_->Reduce(node);
705  }
706 
707  void Finalize() final { reducer_->Finalize(); }
708 
709  private:
710  Reducer* const reducer_;
711  SourcePositionTable* const table_;
712 
713  DISALLOW_COPY_AND_ASSIGN(SourcePositionWrapper);
714 };
715 
716 class NodeOriginsWrapper final : public Reducer {
717  public:
718  NodeOriginsWrapper(Reducer* reducer, NodeOriginTable* table)
719  : reducer_(reducer), table_(table) {}
720  ~NodeOriginsWrapper() final = default;
721 
722  const char* reducer_name() const override { return reducer_->reducer_name(); }
723 
724  Reduction Reduce(Node* node) final {
725  NodeOriginTable::Scope position(table_, reducer_name(), node);
726  return reducer_->Reduce(node);
727  }
728 
729  void Finalize() final { reducer_->Finalize(); }
730 
731  private:
732  Reducer* const reducer_;
733  NodeOriginTable* const table_;
734 
735  DISALLOW_COPY_AND_ASSIGN(NodeOriginsWrapper);
736 };
737 
738 void AddReducer(PipelineData* data, GraphReducer* graph_reducer,
739  Reducer* reducer) {
740  if (data->info()->is_source_positions_enabled()) {
741  void* const buffer = data->graph_zone()->New(sizeof(SourcePositionWrapper));
742  SourcePositionWrapper* const wrapper =
743  new (buffer) SourcePositionWrapper(reducer, data->source_positions());
744  reducer = wrapper;
745  }
746  if (data->info()->trace_turbo_json_enabled()) {
747  void* const buffer = data->graph_zone()->New(sizeof(NodeOriginsWrapper));
748  NodeOriginsWrapper* const wrapper =
749  new (buffer) NodeOriginsWrapper(reducer, data->node_origins());
750  reducer = wrapper;
751  }
752 
753  graph_reducer->AddReducer(reducer);
754 }
755 
756 class PipelineRunScope {
757  public:
758  PipelineRunScope(PipelineData* data, const char* phase_name)
759  : phase_scope_(
760  phase_name == nullptr ? nullptr : data->pipeline_statistics(),
761  phase_name),
762  zone_scope_(data->zone_stats(), ZONE_NAME),
763  origin_scope_(data->node_origins(), phase_name) {}
764 
765  Zone* zone() { return zone_scope_.zone(); }
766 
767  private:
768  PhaseScope phase_scope_;
769  ZoneStats::Scope zone_scope_;
770  NodeOriginTable::PhaseScope origin_scope_;
771 };
772 
773 PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
774  OptimizedCompilationInfo* info,
775  Isolate* isolate,
776  ZoneStats* zone_stats) {
777  PipelineStatistics* pipeline_statistics = nullptr;
778 
779  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
780  pipeline_statistics =
781  new PipelineStatistics(info, isolate->GetTurboStatistics(), zone_stats);
782  pipeline_statistics->BeginPhaseKind("initializing");
783  }
784 
785  if (info->trace_turbo_json_enabled()) {
786  TurboJsonFile json_of(info, std::ios_base::trunc);
787  json_of << "{\"function\" : ";
788  JsonPrintFunctionSource(json_of, -1, info->GetDebugName(), script, isolate,
789  info->shared_info());
790  json_of << ",\n\"phases\":[";
791  }
792 
793  return pipeline_statistics;
794 }
795 
796 PipelineStatistics* CreatePipelineStatistics(
797  wasm::WasmEngine* wasm_engine, wasm::FunctionBody function_body,
798  const wasm::WasmModule* wasm_module, OptimizedCompilationInfo* info,
799  ZoneStats* zone_stats) {
800  PipelineStatistics* pipeline_statistics = nullptr;
801 
802  if (FLAG_turbo_stats_wasm) {
803  pipeline_statistics = new PipelineStatistics(
804  info, wasm_engine->GetOrCreateTurboStatistics(), zone_stats);
805  pipeline_statistics->BeginPhaseKind("initializing");
806  }
807 
808  if (info->trace_turbo_json_enabled()) {
809  TurboJsonFile json_of(info, std::ios_base::trunc);
810  std::unique_ptr<char[]> function_name = info->GetDebugName();
811  json_of << "{\"function\":\"" << function_name.get() << "\", \"source\":\"";
812  AccountingAllocator allocator;
813  std::ostringstream disassembly;
814  std::vector<int> source_positions;
815  wasm::PrintRawWasmCode(&allocator, function_body, wasm_module,
816  wasm::kPrintLocals, disassembly, &source_positions);
817  for (const auto& c : disassembly.str()) {
818  json_of << AsEscapedUC16ForJSON(c);
819  }
820  json_of << "\",\n\"sourceLineToBytecodePosition\" : [";
821  bool insert_comma = false;
822  for (auto val : source_positions) {
823  if (insert_comma) {
824  json_of << ", ";
825  }
826  json_of << val;
827  insert_comma = true;
828  }
829  json_of << "],\n\"phases\":[";
830  }
831 
832  return pipeline_statistics;
833 }
834 
}  // namespace
836 
838  public:
840  Handle<SharedFunctionInfo> shared_info,
841  Handle<JSFunction> function)
842  // Note that the OptimizedCompilationInfo is not initialized at the time
843  // we pass it to the CompilationJob constructor, but it is not
844  // dereferenced there.
846  function->GetIsolate()->stack_guard()->real_climit(),
847  &compilation_info_, "TurboFan"),
848  zone_(function->GetIsolate()->allocator(), ZONE_NAME),
849  zone_stats_(function->GetIsolate()->allocator()),
850  compilation_info_(&zone_, function->GetIsolate(), shared_info,
851  function),
852  pipeline_statistics_(CreatePipelineStatistics(
853  handle(Script::cast(shared_info->script()), isolate),
854  compilation_info(), function->GetIsolate(), &zone_stats_)),
855  data_(&zone_stats_, function->GetIsolate(), compilation_info(),
856  pipeline_statistics_.get()),
857  pipeline_(&data_),
858  linkage_(nullptr) {}
859 
860  protected:
861  Status PrepareJobImpl(Isolate* isolate) final;
862  Status ExecuteJobImpl() final;
863  Status FinalizeJobImpl(Isolate* isolate) final;
864 
865  // Registers weak object to optimized code dependencies.
866  void RegisterWeakObjectsInOptimizedCode(Handle<Code> code, Isolate* isolate);
867 
868  private:
869  Zone zone_;
870  ZoneStats zone_stats_;
871  OptimizedCompilationInfo compilation_info_;
872  std::unique_ptr<PipelineStatistics> pipeline_statistics_;
873  PipelineData data_;
874  PipelineImpl pipeline_;
875  Linkage* linkage_;
876 
877  DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
878 };
879 
// Main-thread preparation step: validates the candidate, configures the
// compilation flags, and builds the graph. Runs before ExecuteJobImpl (which
// may run on a background thread).
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
    Isolate* isolate) {
  // Refuse to optimize overly large functions.
  if (compilation_info()->bytecode_array()->length() >
      kMaxBytecodeSizeForTurbofan) {
    return AbortOptimization(BailoutReason::kFunctionTooBig);
  }

  // NOTE(review): with --always-opt the bailout-on-uninitialized-feedback
  // behavior is deliberately suppressed — presumably because feedback may not
  // exist yet; confirm against the flag's documentation.
  if (!FLAG_always_opt) {
    compilation_info()->MarkAsBailoutOnUninitialized();
  }
  if (FLAG_turbo_loop_peeling) {
    compilation_info()->MarkAsLoopPeelingEnabled();
  }
  if (FLAG_turbo_inlining) {
    compilation_info()->MarkAsInliningEnabled();
  }
  if (FLAG_inline_accessors) {
    compilation_info()->MarkAsAccessorInliningEnabled();
  }

  // This is the bottleneck for computing and setting poisoning level in the
  // optimizing compiler.
  PoisoningMitigationLevel load_poisoning =
      PoisoningMitigationLevel::kDontPoison;
  if (FLAG_untrusted_code_mitigations) {
    // For full mitigations, this can be changed to
    // PoisoningMitigationLevel::kPoisonAll.
    load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
  }
  compilation_info()->SetPoisoningMitigationLevel(load_poisoning);

  if (FLAG_turbo_allocation_folding) {
    compilation_info()->MarkAsAllocationFoldingEnabled();
  }

  // Context specialization is enabled when the feedback cell carries the
  // one-closure-cell map (i.e. the read-only root for single-closure cells).
  if (compilation_info()->closure()->feedback_cell()->map() ==
      ReadOnlyRoots(isolate).one_closure_cell_map()) {
    compilation_info()->MarkAsFunctionContextSpecializing();
  }

  data_.set_start_source_position(
      compilation_info()->shared_info()->StartPosition());

  // Placement-new into the compilation zone: the Linkage's lifetime is tied
  // to the zone, so there is intentionally no matching delete.
  linkage_ = new (compilation_info()->zone()) Linkage(
      Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));

  if (!pipeline_.CreateGraph()) {
    if (isolate->has_pending_exception()) return FAILED;  // Stack overflowed.
    return AbortOptimization(BailoutReason::kGraphBuildingFailed);
  }

  if (compilation_info()->is_osr()) data_.InitializeOsrHelper();

  // Make sure that we have generated the maximal number of deopt entries.
  // This is in order to avoid triggering the generation of deopt entries later
  // during code assembly.
  Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);

  return SUCCEEDED;
}
940 
941 PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
942  if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
943  pipeline_.AssembleCode(linkage_);
944  return SUCCEEDED;
945 }
946 
947 PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
948  Isolate* isolate) {
949  MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
950  Handle<Code> code;
951  if (!maybe_code.ToHandle(&code)) {
952  if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
953  return AbortOptimization(BailoutReason::kCodeGenerationFailed);
954  }
955  return FAILED;
956  }
957  if (!pipeline_.CommitDependencies(code)) {
958  return RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
959  }
960 
961  compilation_info()->SetCode(code);
962  compilation_info()->native_context()->AddOptimizedCode(*code);
963  RegisterWeakObjectsInOptimizedCode(code, isolate);
964  return SUCCEEDED;
965 }
966 
967 void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
968  Handle<Code> code, Isolate* isolate) {
969  DCHECK(code->is_optimized_code());
970  std::vector<Handle<Map>> maps;
971  {
972  DisallowHeapAllocation no_gc;
973  int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
974  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
975  RelocInfo::Mode mode = it.rinfo()->rmode();
976  if (mode == RelocInfo::EMBEDDED_OBJECT &&
977  code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
978  Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
979  isolate);
980  if (object->IsMap()) {
981  maps.push_back(Handle<Map>::cast(object));
982  }
983  }
984  }
985  }
986  for (Handle<Map> map : maps) {
987  isolate->heap()->AddRetainedMap(map);
988  }
989  code->set_can_have_weak_objects(true);
990 }
991 
992 template <typename Phase>
993 void PipelineImpl::Run() {
994  PipelineRunScope scope(this->data_, Phase::phase_name());
995  Phase phase;
996  phase.Run(this->data_, scope.zone());
997 }
998 
999 template <typename Phase, typename Arg0>
1000 void PipelineImpl::Run(Arg0 arg_0) {
1001  PipelineRunScope scope(this->data_, Phase::phase_name());
1002  Phase phase;
1003  phase.Run(this->data_, scope.zone(), arg_0);
1004 }
1005 
1006 template <typename Phase, typename Arg0, typename Arg1>
1007 void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
1008  PipelineRunScope scope(this->data_, Phase::phase_name());
1009  Phase phase;
1010  phase.Run(this->data_, scope.zone(), arg_0, arg_1);
1011 }
1012 
1014  static const char* phase_name() { return "bytecode graph builder"; }
1015 
1016  void Run(PipelineData* data, Zone* temp_zone) {
1017  JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
1018  if (data->info()->is_bailout_on_uninitialized()) {
1019  flags |= JSTypeHintLowering::kBailoutOnUninitialized;
1020  }
1021  CallFrequency frequency = CallFrequency(1.0f);
1022  BytecodeGraphBuilder graph_builder(
1023  temp_zone, data->info()->bytecode_array(), data->info()->shared_info(),
1024  handle(data->info()->closure()->feedback_vector(), data->isolate()),
1025  data->info()->osr_offset(), data->jsgraph(), frequency,
1026  data->source_positions(), data->native_context(),
1027  SourcePosition::kNotInlined, flags, true,
1028  data->info()->is_analyze_environment_liveness());
1029  graph_builder.CreateGraph();
1030  }
1031 };
1032 
1033 namespace {
1034 
1035 Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
1036  Context current = closure->context();
1037  size_t distance = 0;
1038  while (!current->IsNativeContext()) {
1039  if (current->IsModuleContext()) {
1040  return Just(
1041  OuterContext(handle(current, current->GetIsolate()), distance));
1042  }
1043  current = current->previous();
1044  distance++;
1045  }
1046  return Nothing<OuterContext>();
1047 }
1048 
1049 Maybe<OuterContext> ChooseSpecializationContext(
1050  Isolate* isolate, OptimizedCompilationInfo* info) {
1051  if (info->is_function_context_specializing()) {
1052  DCHECK(info->has_context());
1053  return Just(OuterContext(handle(info->context(), isolate), 0));
1054  }
1055  return GetModuleContext(info->closure());
1056 }
1057 
1058 } // anonymous namespace
1059 
1061  static const char* phase_name() { return "inlining"; }
1062 
1063  void Run(PipelineData* data, Zone* temp_zone) {
1064  Isolate* isolate = data->isolate();
1065  OptimizedCompilationInfo* info = data->info();
1066  GraphReducer graph_reducer(temp_zone, data->graph(),
1067  data->jsgraph()->Dead());
1068  DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
1069  data->common(), temp_zone);
1070  CheckpointElimination checkpoint_elimination(&graph_reducer);
1071  CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
1072  data->broker(), data->common(),
1073  data->machine(), temp_zone);
1074  JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
1075  data->info()->is_bailout_on_uninitialized()
1076  ? JSCallReducer::kBailoutOnUninitialized
1077  : JSCallReducer::kNoFlags,
1078  data->dependencies());
1079  JSContextSpecialization context_specialization(
1080  &graph_reducer, data->jsgraph(), data->broker(),
1081  ChooseSpecializationContext(isolate, data->info()),
1082  data->info()->is_function_context_specializing()
1083  ? data->info()->closure()
1086  JSNativeContextSpecialization::kNoFlags;
1087  if (data->info()->is_accessor_inlining_enabled()) {
1088  flags |= JSNativeContextSpecialization::kAccessorInliningEnabled;
1089  }
1090  if (data->info()->is_bailout_on_uninitialized()) {
1091  flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
1092  }
1093  // Passing the OptimizedCompilationInfo's shared zone here as
1094  // JSNativeContextSpecialization allocates out-of-heap objects
1095  // that need to live until code generation.
1096  JSNativeContextSpecialization native_context_specialization(
1097  &graph_reducer, data->jsgraph(), data->broker(), flags,
1098  data->native_context(), data->dependencies(), temp_zone, info->zone());
1099  JSInliningHeuristic inlining(&graph_reducer,
1100  data->info()->is_inlining_enabled()
1101  ? JSInliningHeuristic::kGeneralInlining
1102  : JSInliningHeuristic::kRestrictedInlining,
1103  temp_zone, data->info(), data->jsgraph(),
1104  data->broker(), data->source_positions());
1105  JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph());
1106  AddReducer(data, &graph_reducer, &dead_code_elimination);
1107  AddReducer(data, &graph_reducer, &checkpoint_elimination);
1108  AddReducer(data, &graph_reducer, &common_reducer);
1109  AddReducer(data, &graph_reducer, &native_context_specialization);
1110  AddReducer(data, &graph_reducer, &context_specialization);
1111  AddReducer(data, &graph_reducer, &intrinsic_lowering);
1112  AddReducer(data, &graph_reducer, &call_reducer);
1113  AddReducer(data, &graph_reducer, &inlining);
1114  graph_reducer.ReduceGraph();
1115  }
1116 };
1117 
1118 
1119 struct TyperPhase {
1120  static const char* phase_name() { return "typer"; }
1121 
1122  void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
1123  NodeVector roots(temp_zone);
1124  data->jsgraph()->GetCachedNodes(&roots);
1125 
1126  // Make sure we always type True and False. Needed for escape analysis.
1127  roots.push_back(data->jsgraph()->TrueConstant());
1128  roots.push_back(data->jsgraph()->FalseConstant());
1129 
1130  LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
1131  data->common(), temp_zone);
1132  if (FLAG_turbo_loop_variable) induction_vars.Run();
1133  typer->Run(roots, &induction_vars);
1134  }
1135 };
1136 
1138  static const char* phase_name() { return "untyper"; }
1139 
  // Strips all type information from the graph.
  void Run(PipelineData* data, Zone* temp_zone) {
    // Local reducer that removes the type from any typed node it visits.
    class RemoveTypeReducer final : public Reducer {
     public:
      const char* reducer_name() const override { return "RemoveTypeReducer"; }
      Reduction Reduce(Node* node) final {
        if (NodeProperties::IsTyped(node)) {
          NodeProperties::RemoveType(node);
          return Changed(node);
        }
        return NoChange();
      }
    };

    // Cached JSGraph nodes are not necessarily reachable from end, so untype
    // them explicitly before reducing the rest of the graph.
    NodeVector roots(temp_zone);
    data->jsgraph()->GetCachedNodes(&roots);
    for (Node* node : roots) {
      NodeProperties::RemoveType(node);
    }

    GraphReducer graph_reducer(temp_zone, data->graph(),
                               data->jsgraph()->Dead());
    RemoveTypeReducer remove_type_reducer;
    AddReducer(data, &graph_reducer, &remove_type_reducer);
    graph_reducer.ReduceGraph();
  }
1165 };
1166 
1168  static const char* phase_name() { return "serialize standard objects"; }
1169 
  // Single-step phase: the broker snapshots the standard objects.
  // (temp_zone is unused but required by the phase interface.)
  void Run(PipelineData* data, Zone* temp_zone) {
    data->broker()->SerializeStandardObjects();
  }
1173 };
1174 
1176  static const char* phase_name() { return "serialize metadata"; }
1177 
1178  void Run(PipelineData* data, Zone* temp_zone) {
1179  GraphReducer graph_reducer(temp_zone, data->graph(),
1180  data->jsgraph()->Dead());
1181  JSHeapCopyReducer heap_copy_reducer(data->broker());
1182  AddReducer(data, &graph_reducer, &heap_copy_reducer);
1183  graph_reducer.ReduceGraph();
1184 
1185  // Some nodes that are no longer in the graph might still be in the cache.
1186  NodeVector cached_nodes(temp_zone);
1187  data->jsgraph()->GetCachedNodes(&cached_nodes);
1188  for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
1189  }
1190 };
1191 
1193  static const char* phase_name() { return "typed lowering"; }
1194 
  // Lowers JS-level operators using previously computed type information.
  // All reducers below run to a joint fixpoint; the AddReducer order fixes
  // the order in which each node sees the reducers, so keep it stable.
  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               data->jsgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
                                     data->jsgraph(), data->broker(),
                                     temp_zone);
    JSTypedLowering typed_lowering(&graph_reducer, data->jsgraph(),
                                   data->broker(), temp_zone);
    ConstantFoldingReducer constant_folding_reducer(
        &graph_reducer, data->jsgraph(), data->broker());
    TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
                                         data->jsgraph(), data->broker());
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                             data->broker());
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &create_lowering);
    AddReducer(data, &graph_reducer, &constant_folding_reducer);
    AddReducer(data, &graph_reducer, &typed_lowering);
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    graph_reducer.ReduceGraph();
  }
1225 };
1226 
1227 
1229  static const char* phase_name() { return "escape analysis"; }
1230 
1231  void Run(PipelineData* data, Zone* temp_zone) {
1232  EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
1233  escape_analysis.ReduceGraph();
1234  GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead());
1235  EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
1236  escape_analysis.analysis_result(),
1237  temp_zone);
1238  AddReducer(data, &reducer, &escape_reducer);
1239  reducer.ReduceGraph();
1240  // TODO(tebbi): Turn this into a debug mode check once we have confidence.
1241  escape_reducer.VerifyReplacement();
1242  }
1243 };
1244 
1246  static const char* phase_name() { return "simplified lowering"; }
1247 
1248  void Run(PipelineData* data, Zone* temp_zone) {
1249  SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
1250  data->source_positions(), data->node_origins(),
1251  data->info()->GetPoisoningMitigationLevel());
1252  lowering.LowerAllNodes();
1253  }
1254 };
1255 
1257  static const char* phase_name() { return "loop peeling"; }
1258 
1259  void Run(PipelineData* data, Zone* temp_zone) {
1260  GraphTrimmer trimmer(temp_zone, data->graph());
1261  NodeVector roots(temp_zone);
1262  data->jsgraph()->GetCachedNodes(&roots);
1263  trimmer.TrimGraph(roots.begin(), roots.end());
1264 
1265  LoopTree* loop_tree =
1266  LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
1267  LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
1268  data->source_positions(), data->node_origins())
1269  .PeelInnerLoopsOfTree();
1270  }
1271 };
1272 
1274  static const char* phase_name() { return "loop exit elimination"; }
1275 
  // Removes LoopExit nodes from the graph via the LoopPeeler helper.
  void Run(PipelineData* data, Zone* temp_zone) {
    LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
  }
1279 };
1280 
1282  static const char* phase_name() { return "generic lowering"; }
1283 
1284  void Run(PipelineData* data, Zone* temp_zone) {
1285  GraphReducer graph_reducer(temp_zone, data->graph(),
1286  data->jsgraph()->Dead());
1287  JSGenericLowering generic_lowering(data->jsgraph());
1288  AddReducer(data, &graph_reducer, &generic_lowering);
1289  graph_reducer.ReduceGraph();
1290  }
1291 };
1292 
1294  static const char* phase_name() { return "early optimization"; }
1295 
  // General cleanup fixpoint run early in the pipeline. The AddReducer order
  // below determines in which order each node sees the reducers (value
  // numbering is deliberately last); keep it stable.
  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               data->jsgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                             data->broker());
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    // Value numbering allocates in the graph zone so that its table survives
    // this phase's temp zone.
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &simple_reducer);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
1317 };
1318 
1320  static const char* phase_name() { return "control flow optimization"; }
1321 
1322  void Run(PipelineData* data, Zone* temp_zone) {
1323  ControlFlowOptimizer optimizer(data->graph(), data->common(),
1324  data->machine(), temp_zone);
1325  optimizer.Optimize();
1326  }
1327 };
1328 
1330  static const char* phase_name() { return "effect linearization"; }
1331 
  // Two-step phase: (1) compute a temporary schedule and linearize effect and
  // control chains against it; (2) clean up the nodes this leaves behind.
  void Run(PipelineData* data, Zone* temp_zone) {
    {
      // The scheduler requires the graphs to be trimmed, so trim now.
      // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
      // graphs.
      GraphTrimmer trimmer(temp_zone, data->graph());
      NodeVector roots(temp_zone);
      data->jsgraph()->GetCachedNodes(&roots);
      trimmer.TrimGraph(roots.begin(), roots.end());

      // Schedule the graph without node splitting so that we can
      // fix the effect and control flow for nodes with low-level side
      // effects (such as changing representation to tagged or
      // 'floating' allocation regions.)
      Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
                                                      Scheduler::kTempSchedule);
      if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
      TraceSchedule(data->info(), data, schedule,
                    "effect linearization schedule");

      // Array-index masking is only requested when some poisoning level is
      // active for this compilation.
      EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
          (data->info()->GetPoisoningMitigationLevel() !=
           PoisoningMitigationLevel::kDontPoison)
              ? EffectControlLinearizer::kMaskArrayIndex
              : EffectControlLinearizer::kDoNotMaskArrayIndex;
      // Post-pass for wiring the control/effects
      // - connect allocating representation changes into the control&effect
      //   chains and lower them,
      // - get rid of the region markers,
      // - introduce effect phis and rewire effects to get SSA again.
      EffectControlLinearizer linearizer(
          data->jsgraph(), schedule, temp_zone, data->source_positions(),
          data->node_origins(), mask_array_index);
      linearizer.Run();
    }
    {
      // The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
      // run {DeadCodeElimination} to prune these parts of the graph.
      // Also, the following store-store elimination phase greatly benefits from
      // doing a common operator reducer and dead code elimination just before
      // it, to eliminate conditional deopts with a constant condition.
      GraphReducer graph_reducer(temp_zone, data->graph(),
                                 data->jsgraph()->Dead());
      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                                data->common(), temp_zone);
      CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                           data->broker(), data->common(),
                                           data->machine(), temp_zone);
      AddReducer(data, &graph_reducer, &dead_code_elimination);
      AddReducer(data, &graph_reducer, &common_reducer);
      graph_reducer.ReduceGraph();
    }
  }
1385 };
1386 
1388  static const char* phase_name() { return "store-store elimination"; }
1389 
1390  void Run(PipelineData* data, Zone* temp_zone) {
1391  GraphTrimmer trimmer(temp_zone, data->graph());
1392  NodeVector roots(temp_zone);
1393  data->jsgraph()->GetCachedNodes(&roots);
1394  trimmer.TrimGraph(roots.begin(), roots.end());
1395 
1396  StoreStoreElimination::Run(data->jsgraph(), temp_zone);
1397  }
1398 };
1399 
1401  static const char* phase_name() { return "load elimination"; }
1402 
  // Load/check elimination fixpoint. The AddReducer order below determines
  // the per-node reducer order (value numbering last); keep it stable.
  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               data->jsgraph()->Dead());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
    LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                     temp_zone);
    CheckpointElimination checkpoint_elimination(&graph_reducer);
    // Value numbering allocates in the graph zone so that its table survives
    // this phase's temp zone.
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    TypedOptimization typed_optimization(&graph_reducer, data->dependencies(),
                                         data->jsgraph(), data->broker());
    ConstantFoldingReducer constant_folding_reducer(
        &graph_reducer, data->jsgraph(), data->broker());
    TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
                                                data->broker());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &redundancy_elimination);
    AddReducer(data, &graph_reducer, &load_elimination);
    AddReducer(data, &graph_reducer, &type_narrowing_reducer);
    AddReducer(data, &graph_reducer, &constant_folding_reducer);
    AddReducer(data, &graph_reducer, &typed_optimization);
    AddReducer(data, &graph_reducer, &checkpoint_elimination);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
1436 };
1437 
1439  static const char* phase_name() { return "memory optimization"; }
1440 
1441  void Run(PipelineData* data, Zone* temp_zone) {
1442  // The memory optimizer requires the graphs to be trimmed, so trim now.
1443  GraphTrimmer trimmer(temp_zone, data->graph());
1444  NodeVector roots(temp_zone);
1445  data->jsgraph()->GetCachedNodes(&roots);
1446  trimmer.TrimGraph(roots.begin(), roots.end());
1447 
1448  // Optimize allocations and load/store operations.
1449  MemoryOptimizer optimizer(
1450  data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
1451  data->info()->is_allocation_folding_enabled()
1452  ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
1453  : MemoryOptimizer::AllocationFolding::kDontAllocationFolding);
1454  optimizer.Optimize();
1455  }
1456 };
1457 
1459  static const char* phase_name() { return "late optimization"; }
1460 
  // Final machine-level cleanup fixpoint. The AddReducer order below
  // determines the per-node reducer order (value numbering last); keep it
  // stable.
  void Run(PipelineData* data, Zone* temp_zone) {
    GraphReducer graph_reducer(temp_zone, data->graph(),
                               data->jsgraph()->Dead());
    BranchElimination branch_condition_elimination(&graph_reducer,
                                                   data->jsgraph(), temp_zone);
    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                              data->common(), temp_zone);
    // Value numbering allocates in the graph zone so that its table survives
    // this phase's temp zone.
    ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
    MachineOperatorReducer machine_reducer(data->jsgraph());
    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                         data->broker(), data->common(),
                                         data->machine(), temp_zone);
    SelectLowering select_lowering(data->jsgraph()->graph(),
                                   data->jsgraph()->common());
    AddReducer(data, &graph_reducer, &branch_condition_elimination);
    AddReducer(data, &graph_reducer, &dead_code_elimination);
    AddReducer(data, &graph_reducer, &machine_reducer);
    AddReducer(data, &graph_reducer, &common_reducer);
    AddReducer(data, &graph_reducer, &select_lowering);
    AddReducer(data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
1483 };
1484 
1486  static const char* phase_name() { return "early trimming"; }
1487  void Run(PipelineData* data, Zone* temp_zone) {
1488  GraphTrimmer trimmer(temp_zone, data->graph());
1489  NodeVector roots(temp_zone);
1490  data->jsgraph()->GetCachedNodes(&roots);
1491  trimmer.TrimGraph(roots.begin(), roots.end());
1492  }
1493 };
1494 
1495 
1497  static const char* phase_name() { return "late graph trimming"; }
1498  void Run(PipelineData* data, Zone* temp_zone) {
1499  GraphTrimmer trimmer(temp_zone, data->graph());
1500  NodeVector roots(temp_zone);
1501  if (data->jsgraph()) {
1502  data->jsgraph()->GetCachedNodes(&roots);
1503  }
1504  trimmer.TrimGraph(roots.begin(), roots.end());
1505  }
1506 };
1507 
1508 
1510  static const char* phase_name() { return "scheduling"; }
1511 
1512  void Run(PipelineData* data, Zone* temp_zone) {
1513  Schedule* schedule = Scheduler::ComputeSchedule(
1514  temp_zone, data->graph(), data->info()->is_splitting_enabled()
1515  ? Scheduler::kSplitNodes
1516  : Scheduler::kNoFlags);
1517  if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
1518  data->set_schedule(schedule);
1519  }
1520 };
1521 
  // Borrowed pointers; this struct only formats the data (see operator<<
  // below), it does not own either of them.
  const InstructionSequence* sequence;
  const ZoneVector<std::pair<int, int>>* instr_origins;
};
1526 
// Emits the JSON fragments mapping node ids and block (RPO) ids to the
// instruction index ranges they produced, for the turbo trace file.
std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
  const int max = static_cast<int>(s.sequence->LastInstructionIndex());

  out << ", \"nodeIdToInstructionRange\": {";
  bool need_comma = false;
  for (size_t i = 0; i < s.instr_origins->size(); ++i) {
    std::pair<int, int> offset = (*s.instr_origins)[i];
    // Nodes without a recorded origin are skipped.
    if (offset.first == -1) continue;
    // Origins appear to be recorded relative to the last instruction index;
    // convert to absolute indices here.
    const int first = max - offset.first + 1;
    const int second = max - offset.second + 1;
    if (need_comma) out << ", ";
    out << "\"" << i << "\": [" << first << ", " << second << "]";
    need_comma = true;
  }
  out << "}";
  out << ", \"blockIdtoInstructionRange\": {";
  need_comma = false;
  for (auto block : s.sequence->instruction_blocks()) {
    if (need_comma) out << ", ";
    out << "\"" << block->rpo_number() << "\": [" << block->code_start() << ", "
        << block->code_end() << "]";
    need_comma = true;
  }
  out << "}";
  return out;
}
1553 
1555  static const char* phase_name() { return "select instructions"; }
1556 
  // Runs the instruction selector over the scheduled graph. The long
  // constructor argument list below is positional and order-critical; each
  // flag is derived from the compilation info or a command-line flag.
  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
    InstructionSelector selector(
        temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
        data->schedule(), data->source_positions(), data->frame(),
        data->info()->switch_jump_table_enabled()
            ? InstructionSelector::kEnableSwitchJumpTable
            : InstructionSelector::kDisableSwitchJumpTable,
        data->info()->is_source_positions_enabled()
            ? InstructionSelector::kAllSourcePositions
            : InstructionSelector::kCallSourcePositions,
        InstructionSelector::SupportedFeatures(),
        FLAG_turbo_instruction_scheduling
            ? InstructionSelector::kEnableScheduling
            : InstructionSelector::kDisableScheduling,
        // Roots-relative addressing is off for isolate-independent code
        // (no isolate, serializer active, or constants loaded from the
        // root list).
        !data->isolate() || data->isolate()->serializer_enabled() ||
                data->isolate()->ShouldLoadConstantsFromRootList()
            ? InstructionSelector::kDisableRootsRelativeAddressing
            : InstructionSelector::kEnableRootsRelativeAddressing,
        data->info()->GetPoisoningMitigationLevel(),
        data->info()->trace_turbo_json_enabled()
            ? InstructionSelector::kEnableTraceTurboJson
            : InstructionSelector::kDisableTraceTurboJson);
    if (!selector.SelectInstructions()) {
      data->set_compilation_failed();
    }
    // Append this phase's instruction ranges to the JSON trace when enabled.
    if (data->info()->trace_turbo_json_enabled()) {
      TurboJsonFile json_of(data->info(), std::ios_base::app);
      json_of << "{\"name\":\"" << phase_name()
              << "\",\"type\":\"instructions\""
              << InstructionRangesAsJSON{data->sequence(),
                                         &selector.instr_origins()}
              << "},\n";
    }
  }
1591 };
1592 
1593 
1595  static const char* phase_name() { return "meet register constraints"; }
1596 
1597  void Run(PipelineData* data, Zone* temp_zone) {
1598  ConstraintBuilder builder(data->register_allocation_data());
1599  builder.MeetRegisterConstraints();
1600  }
1601 };
1602 
1603 
1605  static const char* phase_name() { return "resolve phis"; }
1606 
1607  void Run(PipelineData* data, Zone* temp_zone) {
1608  ConstraintBuilder builder(data->register_allocation_data());
1609  builder.ResolvePhis();
1610  }
1611 };
1612 
1613 
1615  static const char* phase_name() { return "build live ranges"; }
1616 
1617  void Run(PipelineData* data, Zone* temp_zone) {
1618  LiveRangeBuilder builder(data->register_allocation_data(), temp_zone);
1619  builder.BuildLiveRanges();
1620  }
1621 };
1622 
1623 
1625  static const char* phase_name() { return "splinter live ranges"; }
1626 
1627  void Run(PipelineData* data, Zone* temp_zone) {
1628  LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
1629  temp_zone);
1630  live_range_splinterer.Splinter();
1631  }
1632 };
1633 
1634 
1635 template <typename RegAllocator>
1637  static const char* phase_name() { return "allocate general registers"; }
1638 
  // Runs the template-selected allocator over the general-purpose register
  // file only; FP registers get a separate phase.
  void Run(PipelineData* data, Zone* temp_zone) {
    RegAllocator allocator(data->register_allocation_data(), GENERAL_REGISTERS,
                           temp_zone);
    allocator.AllocateRegisters();
  }
1644 };
1645 
1646 template <typename RegAllocator>
1648  static const char* phase_name() { return "allocate f.p. registers"; }
1649 
  // Runs the template-selected allocator over the floating-point register
  // file only.
  void Run(PipelineData* data, Zone* temp_zone) {
    RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
                           temp_zone);
    allocator.AllocateRegisters();
  }
1655 };
1656 
1657 
1659  static const char* phase_name() { return "merge splintered ranges"; }
1660  void Run(PipelineData* pipeline_data, Zone* temp_zone) {
1661  RegisterAllocationData* data = pipeline_data->register_allocation_data();
1662  LiveRangeMerger live_range_merger(data, temp_zone);
1663  live_range_merger.Merge();
1664  }
1665 };
1666 
1667 
1669  static const char* phase_name() { return "locate spill slots"; }
1670 
1671  void Run(PipelineData* data, Zone* temp_zone) {
1672  SpillSlotLocator locator(data->register_allocation_data());
1673  locator.LocateSpillSlots();
1674  }
1675 };
1676 
1677 
1679  static const char* phase_name() { return "assign spill slots"; }
1680 
1681  void Run(PipelineData* data, Zone* temp_zone) {
1682  OperandAssigner assigner(data->register_allocation_data());
1683  assigner.AssignSpillSlots();
1684  }
1685 };
1686 
1687 
1689  static const char* phase_name() { return "commit assignment"; }
1690 
1691  void Run(PipelineData* data, Zone* temp_zone) {
1692  OperandAssigner assigner(data->register_allocation_data());
1693  assigner.CommitAssignment();
1694  }
1695 };
1696 
1697 
1699  static const char* phase_name() { return "populate pointer maps"; }
1700 
1701  void Run(PipelineData* data, Zone* temp_zone) {
1702  ReferenceMapPopulator populator(data->register_allocation_data());
1703  populator.PopulateReferenceMaps();
1704  }
1705 };
1706 
1707 
1709  static const char* phase_name() { return "connect ranges"; }
1710 
1711  void Run(PipelineData* data, Zone* temp_zone) {
1712  LiveRangeConnector connector(data->register_allocation_data());
1713  connector.ConnectRanges(temp_zone);
1714  }
1715 };
1716 
1717 
1719  static const char* phase_name() { return "resolve control flow"; }
1720 
1721  void Run(PipelineData* data, Zone* temp_zone) {
1722  LiveRangeConnector connector(data->register_allocation_data());
1723  connector.ResolveControlFlow(temp_zone);
1724  }
1725 };
1726 
1727 
1729  static const char* phase_name() { return "optimize moves"; }
1730 
1731  void Run(PipelineData* data, Zone* temp_zone) {
1732  MoveOptimizer move_optimizer(temp_zone, data->sequence());
1733  move_optimizer.Run();
1734  }
1735 };
1736 
1737 
1739  static const char* phase_name() { return "frame elision"; }
1740 
1741  void Run(PipelineData* data, Zone* temp_zone) {
1742  FrameElider(data->sequence()).Run();
1743  }
1744 };
1745 
1746 
1748  static const char* phase_name() { return "jump threading"; }
1749 
1750  void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
1751  ZoneVector<RpoNumber> result(temp_zone);
1752  if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
1753  frame_at_start)) {
1754  JumpThreading::ApplyForwarding(temp_zone, result, data->sequence());
1755  }
1756  }
1757 };
1758 
1760  static const char* phase_name() { return "assemble code"; }
1761 
1762  void Run(PipelineData* data, Zone* temp_zone) {
1763  data->code_generator()->AssembleCode();
1764  }
1765 };
1766 
1768  static const char* phase_name() { return "finalize code"; }
1769 
1770  void Run(PipelineData* data, Zone* temp_zone) {
1771  data->set_code(data->code_generator()->FinalizeCode());
1772  }
1773 };
1774 
1775 
1777  static const char* phase_name() { return nullptr; }
1778 
1779  void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
1780  OptimizedCompilationInfo* info = data->info();
1781  Graph* graph = data->graph();
1782 
1783  if (info->trace_turbo_json_enabled()) { // Print JSON.
1784  AllowHandleDereference allow_deref;
1785 
1786  TurboJsonFile json_of(info, std::ios_base::app);
1787  json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
1788  << AsJSON(*graph, data->source_positions(), data->node_origins())
1789  << "},\n";
1790  }
1791 
1792  if (info->trace_turbo_scheduled_enabled()) {
1793  AccountingAllocator allocator;
1794  Schedule* schedule = data->schedule();
1795  if (schedule == nullptr) {
1796  schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
1797  Scheduler::kNoFlags);
1798  }
1799 
1800  AllowHandleDereference allow_deref;
1801  CodeTracer::Scope tracing_scope(data->GetCodeTracer());
1802  OFStream os(tracing_scope.file());
1803  os << "-- Graph after " << phase << " -- " << std::endl;
1804  os << AsScheduledGraph(schedule);
1805  } else if (info->trace_turbo_graph_enabled()) { // Simple textual RPO.
1806  AllowHandleDereference allow_deref;
1807  CodeTracer::Scope tracing_scope(data->GetCodeTracer());
1808  OFStream os(tracing_scope.file());
1809  os << "-- Graph after " << phase << " -- " << std::endl;
1810  os << AsRPO(*graph);
1811  }
1812  }
1813 };
1814 
1815 
1817  static const char* phase_name() { return nullptr; }
1818 
1819  void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
1820  bool values_only = false) {
1821  Verifier::CodeType code_type;
1822  switch (data->info()->code_kind()) {
1823  case Code::WASM_FUNCTION:
1824  case Code::WASM_TO_JS_FUNCTION:
1825  case Code::JS_TO_WASM_FUNCTION:
1826  case Code::WASM_INTERPRETER_ENTRY:
1827  case Code::C_WASM_ENTRY:
1828  code_type = Verifier::kWasm;
1829  break;
1830  default:
1831  code_type = Verifier::kDefault;
1832  }
1833  Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
1834  values_only ? Verifier::kValuesOnly : Verifier::kAll,
1835  code_type);
1836  }
1837 };
1838 
1839 void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
1840  if (info()->trace_turbo_json_enabled() ||
1841  info()->trace_turbo_graph_enabled()) {
1842  Run<PrintGraphPhase>(phase);
1843  }
1844  if (FLAG_turbo_verify) {
1845  Run<VerifyGraphPhase>(untyped);
1846  }
1847 }
1848 
// Builds the TurboFan graph for the function being compiled: graph building
// from bytecode, inlining, early trimming, typer-flag determination, and
// heap-broker serialization. The phase order here is significant. Always
// returns true (failures in later pipeline stages are reported elsewhere).
bool PipelineImpl::CreateGraph() {
  PipelineData* data = this->data_;

  data->BeginPhaseKind("graph creation");

  // Emit the "begin compiling" banner when any graph tracing is on.
  if (info()->trace_turbo_json_enabled() ||
      info()->trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Begin compiling method " << info()->GetDebugName().get()
       << " using Turbofan" << std::endl;
  }
  if (info()->trace_turbo_json_enabled()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VCompilation(info());
  }

  // Decorators annotate newly created nodes with source positions / origins
  // for the remainder of graph construction.
  data->source_positions()->AddDecorator();
  if (data->info()->trace_turbo_json_enabled()) {
    data->node_origins()->AddDecorator();
  }

  Run<GraphBuilderPhase>();
  RunPrintAndVerify(GraphBuilderPhase::phase_name(), true);

  // With concurrent inlining the broker must start serializing heap data up
  // front; otherwise only the native context reference is set here.
  if (FLAG_concurrent_inlining) {
    data->broker()->StartSerializing();
    Run<SerializeStandardObjectsPhase>();
    Run<CopyMetadataForConcurrentCompilePhase>();
  } else {
    data->broker()->SetNativeContextRef();
  }

  // Perform function context specialization and inlining (if enabled).
  Run<InliningPhase>();
  RunPrintAndVerify(InliningPhase::phase_name(), true);

  // Remove dead->live edges from the graph.
  Run<EarlyGraphTrimmingPhase>();
  RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);

  // Determine the Typer operation flags.
  {
    if (is_sloppy(info()->shared_info()->language_mode()) &&
        info()->shared_info()->IsUserJavaScript()) {
      // Sloppy mode functions always have an Object for this.
      data->AddTyperFlag(Typer::kThisIsReceiver);
    }
    if (IsClassConstructor(info()->shared_info()->kind())) {
      // Class constructors cannot be [[Call]]ed.
      data->AddTyperFlag(Typer::kNewTargetIsReceiver);
    }
  }

  // Run the type-sensitive lowerings and optimizations on the graph.
  {
    if (FLAG_concurrent_inlining) {
      // TODO(neis): Remove CopyMetadataForConcurrentCompilePhase call once
      // brokerization of JSNativeContextSpecialization is complete.
      Run<CopyMetadataForConcurrentCompilePhase>();
      data->broker()->StopSerializing();
    } else {
      data->broker()->StartSerializing();
      Run<SerializeStandardObjectsPhase>();
      Run<CopyMetadataForConcurrentCompilePhase>();
      data->broker()->StopSerializing();
    }
  }

  data->EndPhaseKind();

  return true;
}
1923 
// Runs the optimization phases over the graph created by CreateGraph():
// typing, typed/simplified/generic lowering, optional loop peeling, load
// elimination, escape analysis, and machine-level optimizations, followed
// by scheduling and instruction selection. The phase order is significant.
// Returns false if a phase failed (with the bailout reason recorded).
bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
  PipelineData* data = this->data_;

  data->BeginPhaseKind("lowering");

  // Type the graph and keep the Typer running such that new nodes get
  // automatically typed when they are created.
  Run<TyperPhase>(data->CreateTyper());
  RunPrintAndVerify(TyperPhase::phase_name());
  Run<TypedLoweringPhase>();
  RunPrintAndVerify(TypedLoweringPhase::phase_name());

  // Loop peeling and loop-exit elimination are alternatives: exactly one of
  // the two runs.
  if (data->info()->is_loop_peeling_enabled()) {
    Run<LoopPeelingPhase>();
    RunPrintAndVerify(LoopPeelingPhase::phase_name(), true);
  } else {
    Run<LoopExitEliminationPhase>();
    RunPrintAndVerify(LoopExitEliminationPhase::phase_name(), true);
  }

  if (FLAG_turbo_load_elimination) {
    Run<LoadEliminationPhase>();
    RunPrintAndVerify(LoadEliminationPhase::phase_name());
  }
  // From here on, new nodes are no longer automatically typed.
  data->DeleteTyper();

  if (FLAG_turbo_escape) {
    Run<EscapeAnalysisPhase>();
    if (data->compilation_failed()) {
      info()->AbortOptimization(
          BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
      data->EndPhaseKind();
      return false;
    }
    RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
  }

  // Perform simplified lowering. This has to run w/o the Typer decorator,
  // because we cannot compute meaningful types anyways, and the computed types
  // might even conflict with the representation/truncation logic.
  Run<SimplifiedLoweringPhase>();
  RunPrintAndVerify(SimplifiedLoweringPhase::phase_name(), true);

  // From now on it is invalid to look at types on the nodes, because the types
  // on the nodes might not make sense after representation selection due to the
  // way we handle truncations; if we'd want to look at types afterwards we'd
  // essentially need to re-type (large portions of) the graph.

  // In order to catch bugs related to type access after this point, we now
  // remove the types from the nodes (currently only in Debug builds).
#ifdef DEBUG
  Run<UntyperPhase>();
  RunPrintAndVerify(UntyperPhase::phase_name(), true);
#endif

  // Run generic lowering pass.
  Run<GenericLoweringPhase>();
  RunPrintAndVerify(GenericLoweringPhase::phase_name(), true);

  data->BeginPhaseKind("block building");

  // Run early optimization pass.
  Run<EarlyOptimizationPhase>();
  RunPrintAndVerify(EarlyOptimizationPhase::phase_name(), true);

  Run<EffectControlLinearizationPhase>();
  RunPrintAndVerify(EffectControlLinearizationPhase::phase_name(), true);

  if (FLAG_turbo_store_elimination) {
    Run<StoreStoreEliminationPhase>();
    RunPrintAndVerify(StoreStoreEliminationPhase::phase_name(), true);
  }

  // Optimize control flow.
  if (FLAG_turbo_cf_optimization) {
    Run<ControlFlowOptimizationPhase>();
    RunPrintAndVerify(ControlFlowOptimizationPhase::phase_name(), true);
  }

  // Optimize memory access and allocation operations.
  Run<MemoryOptimizationPhase>();
  // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
  RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);

  // Lower changes that have been inserted before.
  Run<LateOptimizationPhase>();
  // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
  RunPrintAndVerify(LateOptimizationPhase::phase_name(), true);

  // Decorators added in CreateGraph() are no longer needed.
  data->source_positions()->RemoveDecorator();
  if (data->info()->trace_turbo_json_enabled()) {
    data->node_origins()->RemoveDecorator();
  }

  ComputeScheduledGraph();

  return SelectInstructions(linkage);
}
2022 
// Compiles an already-built (CSA) graph for a code stub into a Code object.
// |stub_key| and |builtin_index| identify the stub; |jump_opt| enables the
// two-pass jump-optimization pipeline; |schedule| may be null when
// --optimize-csa computes the schedule here instead.
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
    Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
    Schedule* schedule, Code::Kind kind, const char* debug_name,
    uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
    PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options) {
  OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
  info.set_builtin_index(builtin_index);
  info.set_stub_key(stub_key);

  if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
    info.SetPoisoningMitigationLevel(poisoning_level);
  }

  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  NodeOriginTable node_origins(graph);
  PipelineData data(&zone_stats, &info, isolate, graph, schedule, nullptr,
                    &node_origins, jump_opt, options);
  // Stub graphs are verified when --verify-csa is on.
  data.set_verify_graph(FLAG_verify_csa);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("stub codegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data.GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
    if (info.trace_turbo_json_enabled()) {
      TurboJsonFile json_of(&info, std::ios_base::trunc);
      json_of << "{\"function\" : ";
      JsonPrintFunctionSource(json_of, -1, info.GetDebugName(),
                              Handle<Script>(), isolate,
                              Handle<SharedFunctionInfo>());
      json_of << ",\n\"phases\":[";
    }
    pipeline.Run<PrintGraphPhase>("Machine");
  }

  if (FLAG_optimize_csa) {
    // No caller-provided schedule is expected in this mode: verify the graph
    // and compute the schedule in the pipeline.
    DCHECK_NULL(data.schedule());
    pipeline.Run<VerifyGraphPhase>(true, !FLAG_optimize_csa);
    pipeline.ComputeScheduledGraph();
  } else {
    TraceSchedule(data.info(), &data, data.schedule(), "schedule");
  }
  DCHECK_NOT_NULL(data.schedule());

  return pipeline.GenerateCode(call_descriptor);
}
2078 
// static
// Compiles a wasm stub graph directly into |native_module|, returning the
// resulting wasm code object, or nullptr if instruction selection fails.
// |wasm_kind| is cast to wasm::WasmCode::Kind when registering the code.
wasm::WasmCode* Pipeline::GenerateCodeForWasmNativeStub(
    wasm::WasmEngine* wasm_engine, CallDescriptor* call_descriptor,
    MachineGraph* mcgraph, Code::Kind kind, int wasm_kind,
    const char* debug_name, const AssemblerOptions& options,
    wasm::NativeModule* native_module, SourcePositionTable* source_positions) {
  Graph* graph = mcgraph->graph();
  OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(wasm_engine->allocator());
  // The origin table is zone-allocated so it lives as long as the graph.
  NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
  PipelineData data(&zone_stats, wasm_engine, &info, mcgraph, nullptr,
                    source_positions, node_positions, options);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, wasm_engine->GetOrCreateTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("wasm stub codegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data.GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Begin compiling method " << info.GetDebugName().get()
       << " using Turbofan" << std::endl;
  }

  if (info.trace_turbo_graph_enabled()) {  // Simple textual RPO.
    StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
                   << std::endl
                   << AsRPO(*graph);
  }

  if (info.trace_turbo_json_enabled()) {
    TurboJsonFile json_of(&info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  pipeline.RunPrintAndVerify("machine", true);
  pipeline.ComputeScheduledGraph();

  Linkage linkage(call_descriptor);
  if (!pipeline.SelectInstructions(&linkage)) return nullptr;
  pipeline.AssembleCode(&linkage);

  CodeGenerator* code_generator = pipeline.code_generator();
  CodeDesc code_desc;
  code_generator->tasm()->GetCode(nullptr, &code_desc);

  // Register the finished code with the native module; stubs carry the
  // anonymous function index.
  wasm::WasmCode* code = native_module->AddCode(
      wasm::WasmCode::kAnonymousFuncIndex, code_desc,
      code_generator->frame()->GetTotalFrameSlotCount(),
      code_generator->GetSafepointTableOffset(),
      code_generator->GetHandlerTableOffset(),
      code_generator->GetProtectedInstructions(),
      code_generator->GetSourcePositionTable(),
      static_cast<wasm::WasmCode::Kind>(wasm_kind), wasm::WasmCode::kOther);

  if (info.trace_turbo_json_enabled()) {
    TurboJsonFile json_of(&info, std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, &disassembler_stream, code->instructions().start(),
        code->instructions().start() + code->safepoint_table_offset(),
        CodeReference(code));
    // Escape each character so the disassembly is valid JSON string data.
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data.GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Finished compiling method " << info.GetDebugName().get()
       << " using Turbofan" << std::endl;
  }

  return code;
}
2168 
// static
// Compiles a wasm-related stub graph to an on-heap Code object (unlike
// GenerateCodeForWasmNativeStub, which emits into a NativeModule). Returns
// an empty MaybeHandle if code generation or committing dependencies fails.
MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
    Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
    Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
    SourcePositionTable* source_positions) {
  OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  // The origin table is zone-allocated so it lives as long as the graph.
  NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
  PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
                    source_positions, node_positions, nullptr, options);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        &info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("wasm stub codegen");
  }

  PipelineImpl pipeline(&data);

  if (info.trace_turbo_json_enabled() ||
      info.trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data.GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Begin compiling method " << info.GetDebugName().get()
       << " using Turbofan" << std::endl;
  }

  if (info.trace_turbo_graph_enabled()) {  // Simple textual RPO.
    StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
                   << std::endl
                   << AsRPO(*graph);
  }

  if (info.trace_turbo_json_enabled()) {
    TurboJsonFile json_of(&info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info.GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }

  pipeline.RunPrintAndVerify("machine", true);
  pipeline.ComputeScheduledGraph();

  Handle<Code> code;
  if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    return code;
  }
  return MaybeHandle<Code>();
}
2220 
// static
// Runs the full JS pipeline (graph creation, optimization, code assembly
// and finalization) for tests. Returns an empty MaybeHandle on any failure.
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate) {
  ZoneStats zone_stats(isolate->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
                               &zone_stats));
  PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
  PipelineImpl pipeline(&data);

  Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
  // Optimized code may deopt; make sure the deopt entry table exists.
  Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate);

  if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
  if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
  pipeline.AssembleCode(&linkage);
  Handle<Code> code;
  if (pipeline.FinalizeCode().ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    return code;
  }
  return MaybeHandle<Code>();
}
2244 
// static
// Compiles a caller-provided (machine-level) graph for tests, optionally
// with a precomputed |schedule|. Returns an empty MaybeHandle on failure.
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
    OptimizedCompilationInfo* info, Isolate* isolate,
    CallDescriptor* call_descriptor, Graph* graph,
    const AssemblerOptions& options, Schedule* schedule) {
  // Construct a pipeline for scheduling and code generation.
  ZoneStats zone_stats(isolate->allocator());
  // The origin table is zone-allocated so it lives as long as the graph.
  NodeOriginTable* node_positions = new (info->zone()) NodeOriginTable(graph);
  PipelineData data(&zone_stats, info, isolate, graph, schedule, nullptr,
                    node_positions, nullptr, options);
  std::unique_ptr<PipelineStatistics> pipeline_statistics;
  if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
    pipeline_statistics.reset(new PipelineStatistics(
        info, isolate->GetTurboStatistics(), &zone_stats));
    pipeline_statistics->BeginPhaseKind("test codegen");
  }

  PipelineImpl pipeline(&data);

  if (info->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(info, std::ios_base::trunc);
    json_of << "{\"function\":\"" << info->GetDebugName().get()
            << "\", \"source\":\"\",\n\"phases\":[";
  }
  // TODO(rossberg): Should this really be untyped?
  pipeline.RunPrintAndVerify("machine", true);

  // Ensure we have a schedule.
  if (data.schedule() == nullptr) {
    pipeline.ComputeScheduledGraph();
  }

  Handle<Code> code;
  if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
      pipeline.CommitDependencies(code)) {
    return code;
  }
  return MaybeHandle<Code>();
}
2284 
2285 // static
2286 OptimizedCompilationJob* Pipeline::NewCompilationJob(
2287  Isolate* isolate, Handle<JSFunction> function, bool has_script) {
2288  Handle<SharedFunctionInfo> shared =
2289  handle(function->shared(), function->GetIsolate());
2290  return new PipelineCompilationJob(isolate, shared, function);
2291 }
2292 
// static
// Compiles a wasm function graph into |native_module|. Runs a machine-level
// optimization pass (full for --wasm-opt or asm.js origin, value numbering
// only otherwise), then schedules, selects instructions, and assembles.
// Returns the registered wasm code, or nullptr if instruction selection
// fails.
wasm::WasmCode* Pipeline::GenerateCodeForWasmFunction(
    OptimizedCompilationInfo* info, wasm::WasmEngine* wasm_engine,
    MachineGraph* mcgraph, CallDescriptor* call_descriptor,
    SourcePositionTable* source_positions, NodeOriginTable* node_origins,
    wasm::FunctionBody function_body, wasm::NativeModule* native_module,
    int function_index) {
  ZoneStats zone_stats(wasm_engine->allocator());
  std::unique_ptr<PipelineStatistics> pipeline_statistics(
      CreatePipelineStatistics(wasm_engine, function_body,
                               native_module->module(), info, &zone_stats));
  PipelineData data(&zone_stats, wasm_engine, info, mcgraph,
                    pipeline_statistics.get(), source_positions, node_origins,
                    WasmAssemblerOptions());

  PipelineImpl pipeline(&data);

  if (data.info()->trace_turbo_json_enabled() ||
      data.info()->trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data.GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Begin compiling method " << data.info()->GetDebugName().get()
       << " using Turbofan" << std::endl;
  }

  pipeline.RunPrintAndVerify("Machine", true);

  data.BeginPhaseKind("wasm optimization");
  const bool is_asm_js = native_module->module()->origin == wasm::kAsmJsOrigin;
  if (FLAG_turbo_splitting && !is_asm_js) {
    data.info()->MarkAsSplittingEnabled();
  }
  if (FLAG_wasm_opt || is_asm_js) {
    // Full machine-level optimization: dead code elimination, machine
    // operator reduction, common operator reduction, value numbering.
    PipelineRunScope scope(&data, "wasm full optimization");
    GraphReducer graph_reducer(scope.zone(), data.graph(),
                               data.mcgraph()->Dead());
    DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
                                              data.common(), scope.zone());
    ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
    // asm.js semantics require NaN bit patterns to be preserved.
    const bool allow_signalling_nan = is_asm_js;
    MachineOperatorReducer machine_reducer(data.mcgraph(),
                                           allow_signalling_nan);
    CommonOperatorReducer common_reducer(&graph_reducer, data.graph(),
                                         data.broker(), data.common(),
                                         data.machine(), scope.zone());
    AddReducer(&data, &graph_reducer, &dead_code_elimination);
    AddReducer(&data, &graph_reducer, &machine_reducer);
    AddReducer(&data, &graph_reducer, &common_reducer);
    AddReducer(&data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  } else {
    // Baseline optimization: value numbering only.
    PipelineRunScope scope(&data, "wasm base optimization");
    GraphReducer graph_reducer(scope.zone(), data.graph(),
                               data.mcgraph()->Dead());
    ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
    AddReducer(&data, &graph_reducer, &value_numbering);
    graph_reducer.ReduceGraph();
  }
  pipeline.RunPrintAndVerify("wasm optimization", true);

  if (data.node_origins()) {
    data.node_origins()->RemoveDecorator();
  }

  pipeline.ComputeScheduledGraph();

  Linkage linkage(call_descriptor);
  if (!pipeline.SelectInstructions(&linkage)) return nullptr;
  pipeline.AssembleCode(&linkage);

  CodeGenerator* code_generator = pipeline.code_generator();
  CodeDesc code_desc;
  code_generator->tasm()->GetCode(nullptr, &code_desc);

  // Register the finished code with the native module under its function
  // index.
  wasm::WasmCode* code = native_module->AddCode(
      function_index, code_desc,
      code_generator->frame()->GetTotalFrameSlotCount(),
      code_generator->GetSafepointTableOffset(),
      code_generator->GetHandlerTableOffset(),
      code_generator->GetProtectedInstructions(),
      code_generator->GetSourcePositionTable(), wasm::WasmCode::kFunction,
      wasm::WasmCode::kTurbofan);

  if (data.info()->trace_turbo_json_enabled()) {
    TurboJsonFile json_of(data.info(), std::ios_base::app);
    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembler_stream;
    Disassembler::Decode(
        nullptr, &disassembler_stream, code->instructions().start(),
        code->instructions().start() + code->safepoint_table_offset(),
        CodeReference(code));
    // Escape each character so the disassembly is valid JSON string data.
    for (auto const c : disassembler_stream.str()) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n]";
    json_of << "\n}";
  }

  if (data.info()->trace_turbo_json_enabled() ||
      data.info()->trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data.GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Finished compiling method " << data.info()->GetDebugName().get()
       << " using Turbofan" << std::endl;
  }

  return code;
}
2405 
2406 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
2407  InstructionSequence* sequence,
2408  bool run_verifier) {
2409  OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
2410  Code::STUB);
2411  ZoneStats zone_stats(sequence->isolate()->allocator());
2412  PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
2413  data.InitializeFrameData(nullptr);
2414  PipelineImpl pipeline(&data);
2415  pipeline.AllocateRegisters(config, nullptr, run_verifier);
2416  return !data.compilation_failed();
2417 }
2418 
2419 void PipelineImpl::ComputeScheduledGraph() {
2420  PipelineData* data = this->data_;
2421 
2422  // We should only schedule the graph if it is not scheduled yet.
2423  DCHECK_NULL(data->schedule());
2424 
2425  Run<LateGraphTrimmingPhase>();
2426  RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);
2427 
2428  Run<ComputeSchedulePhase>();
2429  TraceSchedule(data->info(), data, data->schedule(), "schedule");
2430 }
2431 
// Lowers the scheduled graph to an InstructionSequence, allocates registers,
// and runs late machine-level passes (frame elision, jump threading).
// Returns false (after recording a bailout reason) on failure.
bool PipelineImpl::SelectInstructions(Linkage* linkage) {
  auto call_descriptor = linkage->GetIncomingDescriptor();
  PipelineData* data = this->data_;

  // We should have a scheduled graph.
  DCHECK_NOT_NULL(data->graph());
  DCHECK_NOT_NULL(data->schedule());

  if (FLAG_turbo_profiling) {
    data->set_profiler_data(BasicBlockInstrumentor::Instrument(
        info(), data->graph(), data->schedule(), data->isolate()));
  }

  bool verify_stub_graph = data->verify_graph();
  // Jump optimization runs instruction selection twice, but the instruction
  // selector mutates nodes like swapping the inputs of a load, which can
  // violate the machine graph verification rules. So we skip the second
  // verification on a graph that already verified before.
  auto jump_opt = data->jump_optimization_info();
  if (jump_opt && jump_opt->is_optimizing()) {
    verify_stub_graph = false;
  }
  // Machine-graph verification runs when requested for this stub, or when
  // --turbo-verify-machine-graph matches "*" or this function's debug name.
  if (verify_stub_graph ||
      (FLAG_turbo_verify_machine_graph != nullptr &&
       (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
        !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
    if (FLAG_trace_verify_csa) {
      AllowHandleDereference allow_deref;
      CodeTracer::Scope tracing_scope(data->GetCodeTracer());
      OFStream os(tracing_scope.file());
      os << "--------------------------------------------------\n"
         << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
         << "--------------------------------------------------\n"
         << *data->schedule()
         << "--------------------------------------------------\n"
         << "--- End of " << data->debug_name() << " generated by TurboFan\n"
         << "--------------------------------------------------\n";
    }
    Zone temp_zone(data->allocator(), ZONE_NAME);
    MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
                              data->info()->IsStub(), data->debug_name(),
                              &temp_zone);
  }

  data->InitializeInstructionSequence(call_descriptor);

  data->InitializeFrameData(call_descriptor);
  // Select and schedule instructions covering the scheduled graph.
  Run<InstructionSelectionPhase>(linkage);
  if (data->compilation_failed()) {
    info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
    data->EndPhaseKind();
    return false;
  }

  if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
    AllowHandleDereference allow_deref;
    TurboCfgFile tcf(isolate());
    tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
                 data->sequence());
  }

  if (info()->trace_turbo_json_enabled()) {
    std::ostringstream source_position_output;
    // Output source position information before the graph is deleted.
    if (data_->source_positions() != nullptr) {
      data_->source_positions()->PrintJson(source_position_output);
    } else {
      source_position_output << "{}";
    }
    source_position_output << ",\n\"NodeOrigins\" : ";
    data_->node_origins()->PrintJson(source_position_output);
    data_->set_source_position_output(source_position_output.str());
  }

  // The graph is no longer needed once the instruction sequence exists.
  data->DeleteGraphZone();

  data->BeginPhaseKind("register allocation");

  bool run_verifier = FLAG_turbo_verify_allocation;

  // Allocate registers.
  if (call_descriptor->HasRestrictedAllocatableRegisters()) {
    // The call descriptor restricts allocation to a subset of the general
    // registers.
    RegList registers = call_descriptor->AllocatableRegisters();
    DCHECK_LT(0, NumRegs(registers));
    std::unique_ptr<const RegisterConfiguration> config;
    config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
    AllocateRegisters(config.get(), call_descriptor, run_verifier);
  } else if (data->info()->GetPoisoningMitigationLevel() !=
             PoisoningMitigationLevel::kDontPoison) {
#ifdef V8_TARGET_ARCH_IA32
    FATAL("Poisoning is not supported on ia32.");
#else
    AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
                      run_verifier);
#endif  // V8_TARGET_ARCH_IA32
  } else {
    AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
                      run_verifier);
  }

  // Verify the instruction sequence has the same hash in two stages.
  VerifyGeneratedCodeIsIdempotent();

  Run<FrameElisionPhase>();
  if (data->compilation_failed()) {
    info()->AbortOptimization(
        BailoutReason::kNotEnoughVirtualRegistersRegalloc);
    data->EndPhaseKind();
    return false;
  }

  // TODO(mtrofin): move this off to the register allocator.
  bool generate_frame_at_start =
      data_->sequence()->instruction_blocks().front()->must_construct_frame();
  // Optimize jumps.
  if (FLAG_turbo_jt) {
    Run<JumpThreadingPhase>(generate_frame_at_start);
  }

  data->EndPhaseKind();

  return true;
}
2556 
2557 void PipelineImpl::VerifyGeneratedCodeIsIdempotent() {
2558  PipelineData* data = this->data_;
2559  JumpOptimizationInfo* jump_opt = data->jump_optimization_info();
2560  if (jump_opt == nullptr) return;
2561 
2562  InstructionSequence* code = data->sequence();
2563  int instruction_blocks = code->InstructionBlockCount();
2564  int virtual_registers = code->VirtualRegisterCount();
2565  size_t hash_code = base::hash_combine(instruction_blocks, virtual_registers);
2566  for (auto instr : *code) {
2567  hash_code = base::hash_combine(hash_code, instr->opcode(),
2568  instr->InputCount(), instr->OutputCount());
2569  }
2570  for (int i = 0; i < virtual_registers; i++) {
2571  hash_code = base::hash_combine(hash_code, code->GetRepresentation(i));
2572  }
2573  if (jump_opt->is_collecting()) {
2574  jump_opt->set_hash_code(hash_code);
2575  } else {
2576  CHECK_EQ(hash_code, jump_opt->hash_code());
2577  }
2578 }
2579 
2581  const ZoneVector<int>* instr_starts;
2582 };
2583 
2584 std::ostream& operator<<(std::ostream& out, const InstructionStartsAsJSON& s) {
2585  out << ", \"instructionOffsetToPCOffset\": {";
2586  bool need_comma = false;
2587  for (size_t i = 0; i < s.instr_starts->size(); ++i) {
2588  if (need_comma) out << ", ";
2589  int offset = (*s.instr_starts)[i];
2590  out << "\"" << i << "\":" << offset;
2591  need_comma = true;
2592  }
2593  out << "}";
2594  return out;
2595 }
2596 
2597 void PipelineImpl::AssembleCode(Linkage* linkage) {
2598  PipelineData* data = this->data_;
2599  data->BeginPhaseKind("code generation");
2600  data->InitializeCodeGenerator(linkage);
2601 
2602  Run<AssembleCodePhase>();
2603  if (data->info()->trace_turbo_json_enabled()) {
2604  TurboJsonFile json_of(data->info(), std::ios_base::app);
2605  json_of << "{\"name\":\"code generation\""
2606  << ", \"type\":\"instructions\""
2607  << InstructionStartsAsJSON{&data->code_generator()->instr_starts()};
2608  json_of << "},\n";
2609  }
2610  data->DeleteInstructionZone();
2611 }
2612 
2614  const ZoneVector<int>* block_starts;
2615 };
2616 
2617 std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
2618  out << ", \"blockIdToOffset\": {";
2619  bool need_comma = false;
2620  for (size_t i = 0; i < s.block_starts->size(); ++i) {
2621  if (need_comma) out << ", ";
2622  int offset = (*s.block_starts)[i];
2623  out << "\"" << i << "\":" << offset;
2624  need_comma = true;
2625  }
2626  out << "},";
2627  return out;
2628 }
2629 
// Finalizes compilation: retires the broker, materializes the Code object,
// feeds the profiler and tracing outputs, and returns the finished code
// (or an empty handle if finalization bailed out).
MaybeHandle<Code> PipelineImpl::FinalizeCode() {
  PipelineData* data = this->data_;
  // The heap broker is no longer needed once code is being finalized.
  if (data->broker()) {
    data->broker()->Retire();
  }
  Run<FinalizeCodePhase>();

  MaybeHandle<Code> maybe_code = data->code();
  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    // Finalization failed; propagate the empty handle to the caller.
    return maybe_code;
  }

  if (data->profiler_data()) {
#ifdef ENABLE_DISASSEMBLER
    // Hand the disassembly text over to the basic-block profiler data.
    std::ostringstream os;
    code->Disassemble(nullptr, os);
    data->profiler_data()->SetCode(&os);
#endif  // ENABLE_DISASSEMBLER
  }

  info()->SetCode(code);
  PrintCode(isolate(), code, info());

  if (info()->trace_turbo_json_enabled()) {
    // Append the disassembly plus node/source position data to close out
    // the JSON trace file for this compilation.
    TurboJsonFile json_of(info(), std::ios_base::app);

    json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\""
            << BlockStartsAsJSON{&data->code_generator()->block_starts()}
            << "\"data\":\"";
#ifdef ENABLE_DISASSEMBLER
    std::stringstream disassembly_stream;
    code->Disassemble(nullptr, disassembly_stream);
    std::string disassembly_string(disassembly_stream.str());
    // Escape each character so the disassembly is a valid JSON string.
    for (const auto& c : disassembly_string) {
      json_of << AsEscapedUC16ForJSON(c);
    }
#endif  // ENABLE_DISASSEMBLER
    json_of << "\"}\n],\n";
    json_of << "\"nodePositions\":";
    // Source position output was captured earlier, before the graph zone
    // was deleted (see SelectInstructions).
    json_of << data->source_position_output() << ",\n";
    JsonPrintAllSourceWithPositions(json_of, data->info(), isolate());
    json_of << "\n}";
  }
  if (info()->trace_turbo_json_enabled() ||
      info()->trace_turbo_graph_enabled()) {
    CodeTracer::Scope tracing_scope(data->GetCodeTracer());
    OFStream os(tracing_scope.file());
    os << "---------------------------------------------------\n"
       << "Finished compiling method " << info()->GetDebugName().get()
       << " using Turbofan" << std::endl;
  }
  return code;
}
2684 
2685 MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
2686  Linkage linkage(call_descriptor);
2687 
2688  // Perform instruction selection and register allocation.
2689  if (!SelectInstructions(&linkage)) return MaybeHandle<Code>();
2690 
2691  // Generate the final machine code.
2692  AssembleCode(&linkage);
2693  return FinalizeCode();
2694 }
2695 
2696 bool PipelineImpl::CommitDependencies(Handle<Code> code) {
2697  return data_->dependencies() == nullptr ||
2698  data_->dependencies()->Commit(code);
2699 }
2700 
2701 namespace {
2702 
2703 void TraceSequence(OptimizedCompilationInfo* info, PipelineData* data,
2704  const char* phase_name) {
2705  if (info->trace_turbo_json_enabled()) {
2706  AllowHandleDereference allow_deref;
2707  TurboJsonFile json_of(info, std::ios_base::app);
2708  json_of << "{\"name\":\"" << phase_name << "\",\"type\":\"sequence\",";
2709  json_of << InstructionSequenceAsJSON{data->sequence()};
2710  json_of << "},\n";
2711  }
2712  if (info->trace_turbo_graph_enabled()) {
2713  AllowHandleDereference allow_deref;
2714  CodeTracer::Scope tracing_scope(data->GetCodeTracer());
2715  OFStream os(tracing_scope.file());
2716  os << "----- Instruction sequence " << phase_name << " -----\n"
2717  << *data->sequence();
2718  }
2719 }
2720 
2721 } // namespace
2722 
// Runs the complete register-allocation pipeline (constraints, liveness,
// linear-scan allocation, spilling, control-flow resolution, and move
// optimization) over the instruction sequence. When run_verifier is set,
// the assignment is cross-checked by a RegisterAllocatorVerifier.
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
                                     CallDescriptor* call_descriptor,
                                     bool run_verifier) {
  PipelineData* data = this->data_;
  // Don't track usage for this zone in compiler stats.
  std::unique_ptr<Zone> verifier_zone;
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    // The verifier is placement-allocated in its own zone; the zone's
    // lifetime keeps it alive across the whole allocation pass.
    verifier_zone.reset(new Zone(data->allocator(), ZONE_NAME));
    verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
        verifier_zone.get(), config, data->sequence());
  }

#ifdef DEBUG
  // Structural invariants the allocator relies on (debug builds only).
  data_->sequence()->ValidateEdgeSplitForm();
  data_->sequence()->ValidateDeferredBlockEntryPaths();
  data_->sequence()->ValidateDeferredBlockExitPaths();
#endif

  data->InitializeRegisterAllocationData(config, call_descriptor);
  // On-stack-replacement entry needs its frame set up before allocation.
  if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());

  Run<MeetRegisterConstraintsPhase>();
  Run<ResolvePhisPhase>();
  Run<BuildLiveRangesPhase>();
  TraceSequence(info(), data, "before register allocation");
  if (verifier != nullptr) {
    // Sanity-check the live ranges before any assignment happens.
    CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
    CHECK(data->register_allocation_data()
              ->RangesDefinedInDeferredStayInDeferred());
  }

  if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData("PreAllocation",
                                       data->register_allocation_data());
  }

  if (FLAG_turbo_preprocess_ranges) {
    // Splinter live ranges around deferred-code regions so the hot path
    // is allocated independently; re-merged below.
    Run<SplinterLiveRangesPhase>();
    if (info()->trace_turbo_json_enabled() &&
        !data->MayHaveUnverifiableGraph()) {
      TurboCfgFile tcf(isolate());
      tcf << AsC1VRegisterAllocationData("PostSplinter",
                                         data->register_allocation_data());
    }
  }

  Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();

  // Floating-point registers are allocated in a separate pass, only when
  // the sequence actually uses FP virtual registers.
  if (data->sequence()->HasFPVirtualRegisters()) {
    Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
  }

  if (FLAG_turbo_preprocess_ranges) {
    // Undo the splintering performed above.
    Run<MergeSplintersPhase>();
  }

  Run<AssignSpillSlotsPhase>();

  Run<CommitAssignmentPhase>();

  // TODO(chromium:725559): remove this check once
  // we understand the cause of the bug. We keep just the
  // check at the end of the allocation.
  if (verifier != nullptr) {
    verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
  }

  Run<PopulateReferenceMapsPhase>();
  Run<ConnectRangesPhase>();
  Run<ResolveControlFlowPhase>();
  if (FLAG_turbo_move_optimization) {
    Run<OptimizeMovesPhase>();
  }

  Run<LocateSpillSlotsPhase>();

  TraceSequence(info(), data, "after register allocation");

  if (verifier != nullptr) {
    verifier->VerifyAssignment("End of regalloc pipeline.");
    verifier->VerifyGapMoves();
  }

  if (info()->trace_turbo_json_enabled() && !data->MayHaveUnverifiableGraph()) {
    TurboCfgFile tcf(isolate());
    tcf << AsC1VRegisterAllocationData("CodeGen",
                                       data->register_allocation_data());
  }

  data->DeleteRegisterAllocationZone();
}
2816 
2817 OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
2818 
2819 Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
2820 
2821 CodeGenerator* PipelineImpl::code_generator() const {
2822  return data_->code_generator();
2823 }
2824 
2825 } // namespace compiler
2826 } // namespace internal
2827 } // namespace v8
// Definition: v8.h:56
// Definition: libplatform.h:13