V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
code-generator.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/backend/code-generator.h"
6 
7 #include "src/address-map.h"
8 #include "src/assembler-inl.h"
9 #include "src/base/adapters.h"
10 #include "src/compiler/backend/code-generator-impl.h"
11 #include "src/compiler/linkage.h"
12 #include "src/compiler/pipeline.h"
13 #include "src/compiler/wasm-compiler.h"
14 #include "src/counters.h"
15 #include "src/eh-frame.h"
16 #include "src/frames.h"
17 #include "src/lsan.h"
18 #include "src/macro-assembler-inl.h"
19 #include "src/objects/smi.h"
20 #include "src/optimized-compilation-info.h"
21 #include "src/string-constants.h"
22 
23 namespace v8 {
24 namespace internal {
25 namespace compiler {
26 
27 class CodeGenerator::JumpTable final : public ZoneObject {
28  public:
29  JumpTable(JumpTable* next, Label** targets, size_t target_count)
30  : next_(next), targets_(targets), target_count_(target_count) {}
31 
32  Label* label() { return &label_; }
33  JumpTable* next() const { return next_; }
34  Label** targets() const { return targets_; }
35  size_t target_count() const { return target_count_; }
36 
37  private:
38  Label label_;
39  JumpTable* const next_;
40  Label** const targets_;
41  size_t const target_count_;
42 };
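// Note on JumpTable (above): instances form a singly linked list threaded
// through |next_|. AddJumpTable() (later in this file) prepends a node and
// returns its label; AssembleCode() then binds each label and emits the
// table data via AssembleJumpTable(). Illustrative use from an
// architecture-specific table-switch lowering (a sketch, not code from this
// file):
//
//   Label* const table = AddJumpTable(cases, case_count);
//   // ... emit a bounds check, then an indirect jump through |table|
//   // indexed by the switch input register.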
43 
44 CodeGenerator::CodeGenerator(
45  Zone* codegen_zone, Frame* frame, Linkage* linkage,
46  InstructionSequence* code, OptimizedCompilationInfo* info, Isolate* isolate,
47  base::Optional<OsrHelper> osr_helper, int start_source_position,
48  JumpOptimizationInfo* jump_opt, PoisoningMitigationLevel poisoning_level,
49  const AssemblerOptions& options, int32_t builtin_index)
50  : zone_(codegen_zone),
51  isolate_(isolate),
52  frame_access_state_(nullptr),
53  linkage_(linkage),
54  code_(code),
55  unwinding_info_writer_(zone()),
56  info_(info),
57  labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
58  current_block_(RpoNumber::Invalid()),
59  start_source_position_(start_source_position),
60  current_source_position_(SourcePosition::Unknown()),
61  tasm_(isolate, options, nullptr, 0, CodeObjectRequired::kNo),
62  resolver_(this),
63  safepoints_(zone()),
64  handlers_(zone()),
65  deoptimization_exits_(zone()),
66  deoptimization_states_(zone()),
67  deoptimization_literals_(zone()),
68  inlined_function_count_(0),
69  translations_(zone()),
70  handler_table_offset_(0),
71  last_lazy_deopt_pc_(0),
72  caller_registers_saved_(false),
73  jump_tables_(nullptr),
74  ools_(nullptr),
75  osr_helper_(std::move(osr_helper)),
76  osr_pc_offset_(-1),
77  optimized_out_literal_id_(-1),
78  source_position_table_builder_(
79  SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
80  protected_instructions_(zone()),
81  result_(kSuccess),
82  poisoning_level_(poisoning_level),
83  block_starts_(zone()),
84  instr_starts_(zone()) {
85  for (int i = 0; i < code->InstructionBlockCount(); ++i) {
86  new (&labels_[i]) Label;
87  }
88  CreateFrameAccessState(frame);
89  CHECK_EQ(info->is_osr(), osr_helper_.has_value());
90  tasm_.set_jump_optimization_info(jump_opt);
91  Code::Kind code_kind = info->code_kind();
92  if (code_kind == Code::WASM_FUNCTION ||
93  code_kind == Code::WASM_TO_JS_FUNCTION ||
94  code_kind == Code::WASM_INTERPRETER_ENTRY ||
95  (Builtins::IsBuiltinId(builtin_index) &&
96  Builtins::IsWasmRuntimeStub(builtin_index))) {
97  tasm_.set_abort_hard(true);
98  }
99  tasm_.set_builtin_index(builtin_index);
100 }
101 
102 bool CodeGenerator::wasm_runtime_exception_support() const {
103  DCHECK_NOT_NULL(info_);
104  return info_->wasm_runtime_exception_support();
105 }
106 
107 void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
108  uint32_t landing_offset) {
109  protected_instructions_.push_back({instr_offset, landing_offset});
110 }
111 
112 void CodeGenerator::CreateFrameAccessState(Frame* frame) {
113  FinishFrame(frame);
114  frame_access_state_ = new (zone()) FrameAccessState(frame);
115 }
116 
117 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
118  int deoptimization_id, SourcePosition pos) {
119  DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id);
120  DeoptimizeReason deoptimization_reason =
121  GetDeoptimizationReason(deoptimization_id);
122  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
123  tasm()->isolate(), deoptimization_id, deopt_kind);
124  if (deopt_entry == kNullAddress) return kTooManyDeoptimizationBailouts;
125  if (info()->is_source_positions_enabled()) {
126  tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
127  }
128  tasm()->CallForDeoptimization(deopt_entry, deoptimization_id,
129  RelocInfo::RUNTIME_ENTRY);
130  return kSuccess;
131 }
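// Note on AssembleDeoptimizerCall (above): it is invoked once per
// DeoptimizationExit near the end of AssembleCode(). Its only failure mode,
// kTooManyDeoptimizationBailouts, is stored in result_ and causes
// FinalizeCode() to abort code generation.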
132 
133 void CodeGenerator::AssembleCode() {
134  OptimizedCompilationInfo* info = this->info();
135 
136  // Open a frame scope to indicate that there is a frame on the stack. The
137  // MANUAL indicates that the scope shouldn't actually generate code to set up
138  // the frame (that is done in AssemblePrologue).
139  FrameScope frame_scope(tasm(), StackFrame::MANUAL);
140 
141  if (info->is_source_positions_enabled()) {
142  AssembleSourcePosition(start_source_position());
143  }
144 
145  // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
146  if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
147  info->code_kind() == Code::BYTECODE_HANDLER)) {
148  tasm()->RecordComment("-- Prologue: check code start register --");
149  AssembleCodeStartRegisterCheck();
150  }
151 
152  // We want to bailout only from JS functions, which are the only ones
153  // that are optimized.
154  if (info->IsOptimizing()) {
155  DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
156  tasm()->RecordComment("-- Prologue: check for deoptimization --");
157  BailoutIfDeoptimized();
158  }
159 
160  InitializeSpeculationPoison();
161 
162  // Define deoptimization literals for all inlined functions.
163  DCHECK_EQ(0u, deoptimization_literals_.size());
164  for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
165  info->inlined_functions()) {
166  if (!inlined.shared_info.equals(info->shared_info())) {
167  int index = DefineDeoptimizationLiteral(
168  DeoptimizationLiteral(inlined.shared_info));
169  inlined.RegisterInlinedFunctionId(index);
170  }
171  }
172  inlined_function_count_ = deoptimization_literals_.size();
173 
174  // Define deoptimization literals for all BytecodeArrays to which we might
175  // deopt to ensure they are strongly held by the optimized code.
176  if (info->has_bytecode_array()) {
177  DefineDeoptimizationLiteral(DeoptimizationLiteral(info->bytecode_array()));
178  }
179  for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
180  info->inlined_functions()) {
181  DefineDeoptimizationLiteral(DeoptimizationLiteral(inlined.bytecode_array));
182  }
183 
184  unwinding_info_writer_.SetNumberOfInstructionBlocks(
185  code()->InstructionBlockCount());
186 
187  if (info->trace_turbo_json_enabled()) {
188  block_starts_.assign(code()->instruction_blocks().size(), -1);
189  instr_starts_.assign(code()->instructions().size(), -1);
190  }
191 
192  // Assemble instructions in assembly order.
193  for (const InstructionBlock* block : code()->ao_blocks()) {
194  // Align loop headers on 16-byte boundaries.
195  if (block->ShouldAlign() && !tasm()->jump_optimization_info()) {
196  tasm()->Align(16);
197  }
198  if (info->trace_turbo_json_enabled()) {
199  block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
200  }
201  // Bind a label for a block.
202  current_block_ = block->rpo_number();
203  unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
204  if (FLAG_code_comments) {
205  Vector<char> buffer = Vector<char>::New(200);
206  char* buffer_start = buffer.start();
207  LSAN_IGNORE_OBJECT(buffer_start);
208 
209  int next = SNPrintF(
210  buffer, "-- B%d start%s%s%s%s", block->rpo_number().ToInt(),
211  block->IsDeferred() ? " (deferred)" : "",
212  block->needs_frame() ? "" : " (no frame)",
213  block->must_construct_frame() ? " (construct frame)" : "",
214  block->must_deconstruct_frame() ? " (deconstruct frame)" : "");
215 
216  buffer = buffer.SubVector(next, buffer.length());
217 
218  if (block->IsLoopHeader()) {
219  next = SNPrintF(buffer, " (loop up to %d)", block->loop_end().ToInt());
220  buffer = buffer.SubVector(next, buffer.length());
221  }
222  if (block->loop_header().IsValid()) {
223  next = SNPrintF(buffer, " (in loop %d)", block->loop_header().ToInt());
224  buffer = buffer.SubVector(next, buffer.length());
225  }
226  SNPrintF(buffer, " --");
227  tasm()->RecordComment(buffer_start);
228  }
229 
230  frame_access_state()->MarkHasFrame(block->needs_frame());
231 
232  tasm()->bind(GetLabel(current_block_));
233 
234  TryInsertBranchPoisoning(block);
235 
236  if (block->must_construct_frame()) {
237  AssembleConstructFrame();
238  // We need to setup the root register after we assemble the prologue, to
239  // avoid clobbering callee saved registers in case of C linkage and
240  // using the roots.
241  // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
242  if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
243  tasm()->InitializeRootRegister();
244  }
245  }
246 
247  if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
248  ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
249  result_ = AssembleBlock(block);
250  } else {
251  result_ = AssembleBlock(block);
252  }
253  if (result_ != kSuccess) return;
254  unwinding_info_writer_.EndInstructionBlock(block);
255  }
256 
257  // Assemble all out-of-line code.
258  if (ools_) {
259  tasm()->RecordComment("-- Out of line code --");
260  for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
261  tasm()->bind(ool->entry());
262  ool->Generate();
263  if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
264  }
265  }
266 
267  // This nop operation is needed to ensure that the trampoline is not
268  // confused with the pc of the call before deoptimization.
269  // The test regress/regress-259 is an example of where we need it.
270  tasm()->nop();
271 
272  // Assemble deoptimization exits.
273  int last_updated = 0;
274  for (DeoptimizationExit* exit : deoptimization_exits_) {
275  tasm()->bind(exit->label());
276  int trampoline_pc = tasm()->pc_offset();
277  int deoptimization_id = exit->deoptimization_id();
278  DeoptimizationState* ds = deoptimization_states_[deoptimization_id];
279 
280  if (ds->kind() == DeoptimizeKind::kLazy) {
281  last_updated = safepoints()->UpdateDeoptimizationInfo(
282  ds->pc_offset(), trampoline_pc, last_updated);
283  }
284  result_ = AssembleDeoptimizerCall(deoptimization_id, exit->pos());
285  if (result_ != kSuccess) return;
286  }
287 
288  FinishCode();
289 
290  // Emit the jump tables.
291  if (jump_tables_) {
292  tasm()->Align(kPointerSize);
293  for (JumpTable* table = jump_tables_; table; table = table->next()) {
294  tasm()->bind(table->label());
295  AssembleJumpTable(table->targets(), table->target_count());
296  }
297  }
298 
299  // The PerfJitLogger logs code up until here, excluding the safepoint
300  // table. Resolve the unwinding info now so it is aware of the same code size
301  // as reported by perf.
302  unwinding_info_writer_.Finish(tasm()->pc_offset());
303 
304  safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
305 
306  // Emit the exception handler table.
307  if (!handlers_.empty()) {
308  handler_table_offset_ = HandlerTable::EmitReturnTableStart(
309  tasm(), static_cast<int>(handlers_.size()));
310  for (size_t i = 0; i < handlers_.size(); ++i) {
311  HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
312  handlers_[i].handler->pos());
313  }
314  }
315 
316  result_ = kSuccess;
317 }
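// Note on AssembleCode (above): emission order is fixed -- prologue checks
// (code start register, deopt bailout), speculation poison setup, the
// instruction blocks in assembly order, out-of-line code, deoptimization
// exits, jump tables, unwinding info, the safepoint table and finally the
// exception handler table. FinalizeCode() below turns the resulting CodeDesc
// into a Code object.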
318 
319 void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
320  // See if our predecessor was a basic block terminated by a branch_and_poison
321  // instruction. If yes, then perform the masking based on the flags.
322  if (block->PredecessorCount() != 1) return;
323  RpoNumber pred_rpo = (block->predecessors())[0];
324  const InstructionBlock* pred = code()->InstructionBlockAt(pred_rpo);
325  if (pred->code_start() == pred->code_end()) return;
326  Instruction* instr = code()->InstructionAt(pred->code_end() - 1);
327  FlagsMode mode = FlagsModeField::decode(instr->opcode());
328  switch (mode) {
329  case kFlags_branch_and_poison: {
330  BranchInfo branch;
331  RpoNumber target = ComputeBranchInfo(&branch, instr);
332  if (!target.IsValid()) {
333  // Non-trivial branch, add the masking code.
334  FlagsCondition condition = branch.condition;
335  if (branch.false_label == GetLabel(block->rpo_number())) {
336  condition = NegateFlagsCondition(condition);
337  }
338  AssembleBranchPoisoning(condition, instr);
339  }
340  break;
341  }
342  case kFlags_deoptimize_and_poison: {
343  UNREACHABLE();
344  break;
345  }
346  default:
347  break;
348  }
349 }
350 
351 void CodeGenerator::AssembleArchBinarySearchSwitchRange(
352  Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
353  std::pair<int32_t, Label*>* end) {
354  if (end - begin < kBinarySearchSwitchMinimalCases) {
355  while (begin != end) {
356  tasm()->JumpIfEqual(input, begin->first, begin->second);
357  ++begin;
358  }
359  AssembleArchJump(def_block);
360  return;
361  }
362  auto middle = begin + (end - begin) / 2;
363  Label less_label;
364  tasm()->JumpIfLessThan(input, middle->first, &less_label);
365  AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
366  tasm()->bind(&less_label);
367  AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
368 }
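// Worked example for AssembleArchBinarySearchSwitchRange (above), assuming
// the range size exceeds kBinarySearchSwitchMinimalCases: for sorted cases
// {10, 20, 30, 40}, |middle| points at 30, so JumpIfLessThan(input, 30)
// guards the lower half; the upper half {30, 40} is assembled first as the
// fall-through path, then the lower half {10, 20} after binding |less_label|.
// Ranges below the threshold degenerate into a chain of JumpIfEqual checks
// followed by AssembleArchJump(def_block).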
369 
370 OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
371  return source_position_table_builder_.ToSourcePositionTableVector();
372 }
373 
374 OwnedVector<trap_handler::ProtectedInstructionData>
375 CodeGenerator::GetProtectedInstructions() {
376  return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
377  protected_instructions_);
378 }
379 
380 MaybeHandle<Code> CodeGenerator::FinalizeCode() {
381  if (result_ != kSuccess) {
382  tasm()->AbortedCodeGeneration();
383  return MaybeHandle<Code>();
384  }
385 
386  // Allocate the source position table.
387  Handle<ByteArray> source_positions =
388  source_position_table_builder_.ToSourcePositionTable(isolate());
389 
390  // Allocate deoptimization data.
391  Handle<DeoptimizationData> deopt_data = GenerateDeoptimizationData();
392 
393  // Allocate and install the code.
394  CodeDesc desc;
395  tasm()->GetCode(isolate(), &desc);
396  if (unwinding_info_writer_.eh_frame_writer()) {
397  unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
398  }
399 
400  MaybeHandle<Code> maybe_code = isolate()->factory()->TryNewCode(
401  desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
402  source_positions, deopt_data, kMovable, info()->stub_key(), true,
403  frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
404  handler_table_offset_);
405 
406  Handle<Code> code;
407  if (!maybe_code.ToHandle(&code)) {
408  tasm()->AbortedCodeGeneration();
409  return MaybeHandle<Code>();
410  }
411  isolate()->counters()->total_compiled_code_size()->Increment(
412  code->raw_instruction_size());
413 
414  LOG_CODE_EVENT(isolate(),
415  CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
416  *source_positions));
417 
418  return code;
419 }
420 
421 bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
422  return code()
423  ->InstructionBlockAt(current_block_)
424  ->ao_number()
425  .IsNext(code()->InstructionBlockAt(block)->ao_number());
426 }
427 
428 void CodeGenerator::RecordSafepoint(ReferenceMap* references,
429  Safepoint::Kind kind, int arguments,
430  Safepoint::DeoptMode deopt_mode) {
431  Safepoint safepoint =
432  safepoints()->DefineSafepoint(tasm(), kind, arguments, deopt_mode);
433  int stackSlotToSpillSlotDelta =
434  frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
435  for (const InstructionOperand& operand : references->reference_operands()) {
436  if (operand.IsStackSlot()) {
437  int index = LocationOperand::cast(operand).index();
438  DCHECK_LE(0, index);
439  // We might index values in the fixed part of the frame (i.e. the
440  // closure pointer or the context pointer); these are not spill slots
441  // and therefore don't work with the SafepointTable currently, but
442  // we also don't need to worry about them, since the GC has special
443  // knowledge about those fields anyway.
444  if (index < stackSlotToSpillSlotDelta) continue;
445  safepoint.DefinePointerSlot(index);
446  } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
447  Register reg = LocationOperand::cast(operand).GetRegister();
448  safepoint.DefinePointerRegister(reg);
449  }
450  }
451 }
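// Illustrative numbers for RecordSafepoint (above): with a frame of 7 total
// slots of which 3 are spill slots, stackSlotToSpillSlotDelta is 4, so stack
// slot indices 0..3 (the fixed frame part) are skipped and only indices >= 4
// are recorded as pointer slots in the safepoint table.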
452 
453 bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
454  RootIndex* index_return) {
455  const CallDescriptor* incoming_descriptor =
456  linkage()->GetIncomingDescriptor();
457  if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
458  return isolate()->roots_table().IsRootHandle(object, index_return) &&
459  RootsTable::IsImmortalImmovable(*index_return);
460  }
461  return false;
462 }
463 
464 CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
465  const InstructionBlock* block) {
466  for (int i = block->code_start(); i < block->code_end(); ++i) {
467  if (info()->trace_turbo_json_enabled()) {
468  instr_starts_[i] = tasm()->pc_offset();
469  }
470  Instruction* instr = code()->InstructionAt(i);
471  CodeGenResult result = AssembleInstruction(instr, block);
472  if (result != kSuccess) return result;
473  }
474  return kSuccess;
475 }
476 
477 bool CodeGenerator::IsValidPush(InstructionOperand source,
478  CodeGenerator::PushTypeFlags push_type) {
479  if (source.IsImmediate() &&
480  ((push_type & CodeGenerator::kImmediatePush) != 0)) {
481  return true;
482  }
483  if (source.IsRegister() &&
484  ((push_type & CodeGenerator::kRegisterPush) != 0)) {
485  return true;
486  }
487  if (source.IsStackSlot() &&
488  ((push_type & CodeGenerator::kStackSlotPush) != 0)) {
489  return true;
490  }
491  return false;
492 }
493 
494 void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
495  PushTypeFlags push_type,
496  ZoneVector<MoveOperands*>* pushes) {
497  pushes->clear();
498  for (int i = Instruction::FIRST_GAP_POSITION;
499  i <= Instruction::LAST_GAP_POSITION; ++i) {
500  Instruction::GapPosition inner_pos =
501  static_cast<Instruction::GapPosition>(i);
502  ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
503  if (parallel_move != nullptr) {
504  for (auto move : *parallel_move) {
505  InstructionOperand source = move->source();
506  InstructionOperand destination = move->destination();
507  int first_push_compatible_index =
508  V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
509  // If there are any moves from slots that will be overridden by pushes,
510  // then the full gap resolver must be used since optimization with
511  // pushes don't participate in the parallel move and might clobber
512  // values needed for the gap resolve.
513  if (source.IsStackSlot() && LocationOperand::cast(source).index() >=
514  first_push_compatible_index) {
515  pushes->clear();
516  return;
517  }
518  // TODO(danno): Right now, only consider moves from the FIRST gap for
519  // pushes. Theoretically, we could extract pushes for both gaps (there
520  // are cases where this happens), but the logic for that would also have
521  // to check to make sure that non-memory inputs to the pushes from the
522  // LAST gap don't get clobbered in the FIRST gap.
523  if (i == Instruction::FIRST_GAP_POSITION) {
524  if (destination.IsStackSlot() &&
525  LocationOperand::cast(destination).index() >=
526  first_push_compatible_index) {
527  int index = LocationOperand::cast(destination).index();
528  if (IsValidPush(source, push_type)) {
529  if (index >= static_cast<int>(pushes->size())) {
530  pushes->resize(index + 1);
531  }
532  (*pushes)[index] = move;
533  }
534  }
535  }
536  }
537  }
538  }
539 
540  // For now, only support a set of continuous pushes at the end of the list.
541  size_t push_count_upper_bound = pushes->size();
542  size_t push_begin = push_count_upper_bound;
543  for (auto move : base::Reversed(*pushes)) {
544  if (move == nullptr) break;
545  push_begin--;
546  }
547  size_t push_count = pushes->size() - push_begin;
548  std::copy(pushes->begin() + push_begin,
549  pushes->begin() + push_begin + push_count, pushes->begin());
550  pushes->resize(push_count);
551 }
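// Note on GetPushCompatibleMoves (above): on return, |pushes| holds only the
// trailing contiguous run of push-compatible moves, re-packed to start at
// index 0 (lowest destination slot first), so tail-call lowering can emit
// them as a sequence of pushes. Any gap move whose source is a stack slot at
// or above the first push-compatible index conservatively clears the whole
// list and falls back to the regular gap resolver.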
552 
553 CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
554  InstructionOperand* source, InstructionOperand* destination) {
555  if (source->IsConstant()) {
556  if (destination->IsAnyRegister()) {
557  return MoveType::kConstantToRegister;
558  } else {
559  DCHECK(destination->IsAnyStackSlot());
560  return MoveType::kConstantToStack;
561  }
562  }
563  DCHECK(LocationOperand::cast(source)->IsCompatible(
564  LocationOperand::cast(destination)));
565  if (source->IsAnyRegister()) {
566  if (destination->IsAnyRegister()) {
567  return MoveType::kRegisterToRegister;
568  } else {
569  DCHECK(destination->IsAnyStackSlot());
570  return MoveType::kRegisterToStack;
571  }
572  } else {
573  DCHECK(source->IsAnyStackSlot());
574  if (destination->IsAnyRegister()) {
575  return MoveType::kStackToRegister;
576  } else {
577  DCHECK(destination->IsAnyStackSlot());
578  return MoveType::kStackToStack;
579  }
580  }
581 }
582 
583 CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
584  InstructionOperand* source, InstructionOperand* destination) {
585  DCHECK(LocationOperand::cast(source)->IsCompatible(
586  LocationOperand::cast(destination)));
587  if (source->IsAnyRegister()) {
588  if (destination->IsAnyRegister()) {
589  return MoveType::kRegisterToRegister;
590  } else {
591  DCHECK(destination->IsAnyStackSlot());
592  return MoveType::kRegisterToStack;
593  }
594  } else {
595  DCHECK(source->IsAnyStackSlot());
596  DCHECK(destination->IsAnyStackSlot());
597  return MoveType::kStackToStack;
598  }
599 }
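// Note on InferMove/InferSwap (above): swaps never involve constants, so
// InferSwap only distinguishes register<->register, register<->stack and
// stack<->stack, whereas InferMove additionally classifies
// constant-to-register and constant-to-stack moves.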
600 
601 RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
602  Instruction* instr) {
603  // Assemble a branch after this instruction.
604  InstructionOperandConverter i(this, instr);
605  RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
606  RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
607 
608  if (true_rpo == false_rpo) {
609  return true_rpo;
610  }
611  FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
612  if (IsNextInAssemblyOrder(true_rpo)) {
613  // true block is next, can fall through if condition negated.
614  std::swap(true_rpo, false_rpo);
615  condition = NegateFlagsCondition(condition);
616  }
617  branch->condition = condition;
618  branch->true_label = GetLabel(true_rpo);
619  branch->false_label = GetLabel(false_rpo);
620  branch->fallthru = IsNextInAssemblyOrder(false_rpo);
621  return RpoNumber::Invalid();
622 }
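// Note on ComputeBranchInfo (above): a valid RpoNumber is returned only when
// both branch targets are the same block, i.e. the branch is trivial and can
// be replaced by a plain jump (or nothing, if that block is next in assembly
// order). Otherwise the BranchInfo is filled in so that the false label can
// fall through where possible, negating the condition when the true block is
// the next block in assembly order.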
623 
624 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
625  Instruction* instr, const InstructionBlock* block) {
626  int first_unused_stack_slot;
627  FlagsMode mode = FlagsModeField::decode(instr->opcode());
628  if (mode != kFlags_trap) {
629  AssembleSourcePosition(instr);
630  }
631  bool adjust_stack =
632  GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
633  if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
634  AssembleGaps(instr);
635  if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
636  DCHECK_IMPLIES(
637  block->must_deconstruct_frame(),
638  instr != code()->InstructionAt(block->last_instruction_index()) ||
639  instr->IsRet() || instr->IsJump());
640  if (instr->IsJump() && block->must_deconstruct_frame()) {
641  AssembleDeconstructFrame();
642  }
643  // Assemble architecture-specific code for the instruction.
644  CodeGenResult result = AssembleArchInstruction(instr);
645  if (result != kSuccess) return result;
646 
647  FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
648  switch (mode) {
649  case kFlags_branch:
650  case kFlags_branch_and_poison: {
651  BranchInfo branch;
652  RpoNumber target = ComputeBranchInfo(&branch, instr);
653  if (target.IsValid()) {
654  // redundant branch.
655  if (!IsNextInAssemblyOrder(target)) {
656  AssembleArchJump(target);
657  }
658  return kSuccess;
659  }
660  // Assemble architecture-specific branch.
661  AssembleArchBranch(instr, &branch);
662  break;
663  }
664  case kFlags_deoptimize:
665  case kFlags_deoptimize_and_poison: {
666  // Assemble a conditional eager deoptimization after this instruction.
667  InstructionOperandConverter i(this, instr);
668  size_t frame_state_offset = MiscField::decode(instr->opcode());
669  DeoptimizationExit* const exit =
670  AddDeoptimizationExit(instr, frame_state_offset);
671  Label continue_label;
672  BranchInfo branch;
673  branch.condition = condition;
674  branch.true_label = exit->label();
675  branch.false_label = &continue_label;
676  branch.fallthru = true;
677  // Assemble architecture-specific branch.
678  AssembleArchDeoptBranch(instr, &branch);
679  tasm()->bind(&continue_label);
680  if (mode == kFlags_deoptimize_and_poison) {
681  AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
682  }
683  break;
684  }
685  case kFlags_set: {
686  // Assemble a boolean materialization after this instruction.
687  AssembleArchBoolean(instr, condition);
688  break;
689  }
690  case kFlags_trap: {
691  AssembleArchTrap(instr, condition);
692  break;
693  }
694  case kFlags_none: {
695  break;
696  }
697  }
698 
699  // TODO(jarin) We should thread the flag through rather than set it.
700  if (instr->IsCall()) {
701  ResetSpeculationPoison();
702  }
703 
704  return kSuccess;
705 }
706 
707 void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
708  SourcePosition source_position = SourcePosition::Unknown();
709  if (instr->IsNop() && instr->AreMovesRedundant()) return;
710  if (!code()->GetSourcePosition(instr, &source_position)) return;
711  AssembleSourcePosition(source_position);
712 }
713 
714 void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
715  if (source_position == current_source_position_) return;
716  current_source_position_ = source_position;
717  if (!source_position.IsKnown()) return;
718  source_position_table_builder_.AddPosition(tasm()->pc_offset(),
719  source_position, false);
720  if (FLAG_code_comments) {
721  OptimizedCompilationInfo* info = this->info();
722  if (info->IsStub()) return;
723  std::ostringstream buffer;
724  buffer << "-- ";
725  // Turbolizer only needs the source position, as it can reconstruct
726  // the inlining stack from other information.
727  if (info->trace_turbo_json_enabled() || !tasm()->isolate() ||
728  tasm()->isolate()->concurrent_recompilation_enabled()) {
729  buffer << source_position;
730  } else {
731  AllowHeapAllocation allocation;
732  AllowHandleAllocation handles;
733  AllowHandleDereference deref;
734  buffer << source_position.InliningStack(info);
735  }
736  buffer << " --";
737  char* str = StrDup(buffer.str().c_str());
738  LSAN_IGNORE_OBJECT(str);
739  tasm()->RecordComment(str);
740  }
741 }
742 
743 bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
744  int* slot) {
745  if (instr->IsTailCall()) {
746  InstructionOperandConverter g(this, instr);
747  *slot = g.InputInt32(instr->InputCount() - 1);
748  return true;
749  } else {
750  return false;
751  }
752 }
753 
754 StubCallMode CodeGenerator::DetermineStubCallMode() const {
755  Code::Kind code_kind = info()->code_kind();
756  return (code_kind == Code::WASM_FUNCTION ||
757  code_kind == Code::WASM_TO_JS_FUNCTION)
758  ? StubCallMode::kCallWasmRuntimeStub
759  : StubCallMode::kCallOnHeapBuiltin;
760 }
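// Note on DetermineStubCallMode (above): the selection itself is all this
// function does; presumably the distinction exists because WASM code is
// compiled isolate-independently, so stubs reached from WASM_FUNCTION and
// WASM_TO_JS_FUNCTION code go through relocatable WASM runtime stubs rather
// than on-heap builtin Code objects.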
761 
762 void CodeGenerator::AssembleGaps(Instruction* instr) {
763  for (int i = Instruction::FIRST_GAP_POSITION;
764  i <= Instruction::LAST_GAP_POSITION; i++) {
765  Instruction::GapPosition inner_pos =
766  static_cast<Instruction::GapPosition>(i);
767  ParallelMove* move = instr->GetParallelMove(inner_pos);
768  if (move != nullptr) resolver()->Resolve(move);
769  }
770 }
771 
772 namespace {
773 
774 Handle<PodArray<InliningPosition>> CreateInliningPositions(
775  OptimizedCompilationInfo* info, Isolate* isolate) {
776  const OptimizedCompilationInfo::InlinedFunctionList& inlined_functions =
777  info->inlined_functions();
778  if (inlined_functions.size() == 0) {
779  return Handle<PodArray<InliningPosition>>::cast(
780  isolate->factory()->empty_byte_array());
781  }
782  Handle<PodArray<InliningPosition>> inl_positions =
783  PodArray<InliningPosition>::New(
784  isolate, static_cast<int>(inlined_functions.size()), TENURED);
785  for (size_t i = 0; i < inlined_functions.size(); ++i) {
786  inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
787  }
788  return inl_positions;
789 }
790 
791 } // namespace
792 
793 Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
794  OptimizedCompilationInfo* info = this->info();
795  int deopt_count = static_cast<int>(deoptimization_states_.size());
796  if (deopt_count == 0 && !info->is_osr()) {
797  return DeoptimizationData::Empty(isolate());
798  }
799  Handle<DeoptimizationData> data =
800  DeoptimizationData::New(isolate(), deopt_count, TENURED);
801 
802  Handle<ByteArray> translation_array =
803  translations_.CreateByteArray(isolate()->factory());
804 
805  data->SetTranslationByteArray(*translation_array);
806  data->SetInlinedFunctionCount(
807  Smi::FromInt(static_cast<int>(inlined_function_count_)));
808  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
809 
810  if (info->has_shared_info()) {
811  data->SetSharedFunctionInfo(*info->shared_info());
812  } else {
813  data->SetSharedFunctionInfo(Smi::kZero);
814  }
815 
816  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
817  static_cast<int>(deoptimization_literals_.size()), TENURED);
818  for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
819  Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
820  literals->set(i, *object);
821  }
822  data->SetLiteralArray(*literals);
823 
824  Handle<PodArray<InliningPosition>> inl_pos =
825  CreateInliningPositions(info, isolate());
826  data->SetInliningPositions(*inl_pos);
827 
828  if (info->is_osr()) {
829  DCHECK_LE(0, osr_pc_offset_);
830  data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
831  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
832  } else {
833  BailoutId osr_offset = BailoutId::None();
834  data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
835  data->SetOsrPcOffset(Smi::FromInt(-1));
836  }
837 
838  // Populate deoptimization entries.
839  for (int i = 0; i < deopt_count; i++) {
840  DeoptimizationState* deoptimization_state = deoptimization_states_[i];
841  data->SetBytecodeOffset(i, deoptimization_state->bailout_id());
842  CHECK(deoptimization_state);
843  data->SetTranslationIndex(
844  i, Smi::FromInt(deoptimization_state->translation_id()));
845  data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
846  }
847 
848  return data;
849 }
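// Note on GenerateDeoptimizationData (above): the resulting
// DeoptimizationData bundles the translation byte array, the inlined
// function count, the literal array, inlining positions, OSR bytecode/pc
// offsets and, per deopt entry, the bytecode offset, translation index and
// pc offset recorded in deoptimization_states_.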
850 
851 Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
852  jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
853  return jump_tables_->label();
854 }
855 
856 void CodeGenerator::RecordCallPosition(Instruction* instr) {
857  CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
858 
859  bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
860 
861  RecordSafepoint(
862  instr->reference_map(), Safepoint::kSimple, 0,
863  needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
864 
865  if (flags & CallDescriptor::kHasExceptionHandler) {
866  InstructionOperandConverter i(this, instr);
867  RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
868  handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
869  }
870 
871  if (needs_frame_state) {
872  MarkLazyDeoptSite();
873  // If the frame state is present, it starts at argument 2 - after
874  // the code address and the poison-alias index.
875  size_t frame_state_offset = 2;
876  FrameStateDescriptor* descriptor =
877  GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
878  int pc_offset = tasm()->pc_offset();
879  int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
880  descriptor->state_combine());
881 
882  DeoptimizationExit* const exit = new (zone())
883  DeoptimizationExit(deopt_state_id, current_source_position_);
884  deoptimization_exits_.push_back(exit);
885  safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
886  }
887 }
888 
889 int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
890  int result = static_cast<int>(deoptimization_literals_.size());
891  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
892  if (deoptimization_literals_[i] == literal) return i;
893  }
894  deoptimization_literals_.push_back(literal);
895  return result;
896 }
897 
898 DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
899  Instruction* instr, size_t frame_state_offset) {
900  InstructionOperandConverter i(this, instr);
901  int const state_id = i.InputInt32(frame_state_offset);
902  return code()->GetDeoptimizationEntry(state_id);
903 }
904 
905 DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
906  int deoptimization_id) const {
907  size_t const index = static_cast<size_t>(deoptimization_id);
908  DCHECK_LT(index, deoptimization_states_.size());
909  return deoptimization_states_[index]->kind();
910 }
911 
912 DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
913  int deoptimization_id) const {
914  size_t const index = static_cast<size_t>(deoptimization_id);
915  DCHECK_LT(index, deoptimization_states_.size());
916  return deoptimization_states_[index]->reason();
917 }
918 
919 void CodeGenerator::TranslateStateValueDescriptor(
920  StateValueDescriptor* desc, StateValueList* nested,
921  Translation* translation, InstructionOperandIterator* iter) {
922  // Note:
923  // If translation is null, we just skip the relevant instruction operands.
924  if (desc->IsNested()) {
925  if (translation != nullptr) {
926  translation->BeginCapturedObject(static_cast<int>(nested->size()));
927  }
928  for (auto field : *nested) {
929  TranslateStateValueDescriptor(field.desc, field.nested, translation,
930  iter);
931  }
932  } else if (desc->IsArgumentsElements()) {
933  if (translation != nullptr) {
934  translation->ArgumentsElements(desc->arguments_type());
935  }
936  } else if (desc->IsArgumentsLength()) {
937  if (translation != nullptr) {
938  translation->ArgumentsLength(desc->arguments_type());
939  }
940  } else if (desc->IsDuplicate()) {
941  if (translation != nullptr) {
942  translation->DuplicateObject(static_cast<int>(desc->id()));
943  }
944  } else if (desc->IsPlain()) {
945  InstructionOperand* op = iter->Advance();
946  if (translation != nullptr) {
947  AddTranslationForOperand(translation, iter->instruction(), op,
948  desc->type());
949  }
950  } else {
951  DCHECK(desc->IsOptimizedOut());
952  if (translation != nullptr) {
953  if (optimized_out_literal_id_ == -1) {
954  optimized_out_literal_id_ = DefineDeoptimizationLiteral(
955  DeoptimizationLiteral(isolate()->factory()->optimized_out()));
956  }
957  translation->StoreLiteral(optimized_out_literal_id_);
958  }
959  }
960 }
961 
962 void CodeGenerator::TranslateFrameStateDescriptorOperands(
963  FrameStateDescriptor* desc, InstructionOperandIterator* iter,
964  Translation* translation) {
965  size_t index = 0;
966  StateValueList* values = desc->GetStateValueDescriptors();
967  for (StateValueList::iterator it = values->begin(); it != values->end();
968  ++it, ++index) {
969  TranslateStateValueDescriptor((*it).desc, (*it).nested, translation, iter);
970  }
971  DCHECK_EQ(desc->GetSize(), index);
972 }
973 
974 void CodeGenerator::BuildTranslationForFrameStateDescriptor(
975  FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
976  Translation* translation, OutputFrameStateCombine state_combine) {
977  // Outer-most state must be added to translation first.
978  if (descriptor->outer_state() != nullptr) {
979  BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
980  translation, state_combine);
981  }
982 
983  Handle<SharedFunctionInfo> shared_info;
984  if (!descriptor->shared_info().ToHandle(&shared_info)) {
985  if (!info()->has_shared_info()) {
986  return; // Stub with no SharedFunctionInfo.
987  }
988  shared_info = info()->shared_info();
989  }
990  int shared_info_id =
991  DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
992 
993  switch (descriptor->type()) {
994  case FrameStateType::kInterpretedFunction: {
995  int return_offset = 0;
996  int return_count = 0;
997  if (!state_combine.IsOutputIgnored()) {
998  return_offset = static_cast<int>(state_combine.GetOffsetToPokeAt());
999  return_count = static_cast<int>(iter->instruction()->OutputCount());
1000  }
1001  translation->BeginInterpretedFrame(
1002  descriptor->bailout_id(), shared_info_id,
1003  static_cast<unsigned int>(descriptor->locals_count() + 1),
1004  return_offset, return_count);
1005  break;
1006  }
1007  case FrameStateType::kArgumentsAdaptor:
1008  translation->BeginArgumentsAdaptorFrame(
1009  shared_info_id,
1010  static_cast<unsigned int>(descriptor->parameters_count()));
1011  break;
1012  case FrameStateType::kConstructStub:
1013  DCHECK(descriptor->bailout_id().IsValidForConstructStub());
1014  translation->BeginConstructStubFrame(
1015  descriptor->bailout_id(), shared_info_id,
1016  static_cast<unsigned int>(descriptor->parameters_count() + 1));
1017  break;
1018  case FrameStateType::kBuiltinContinuation: {
1019  BailoutId bailout_id = descriptor->bailout_id();
1020  int parameter_count =
1021  static_cast<unsigned int>(descriptor->parameters_count());
1022  translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
1023  parameter_count);
1024  break;
1025  }
1026  case FrameStateType::kJavaScriptBuiltinContinuation: {
1027  BailoutId bailout_id = descriptor->bailout_id();
1028  int parameter_count =
1029  static_cast<unsigned int>(descriptor->parameters_count());
1030  translation->BeginJavaScriptBuiltinContinuationFrame(
1031  bailout_id, shared_info_id, parameter_count);
1032  break;
1033  }
1034  case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
1035  BailoutId bailout_id = descriptor->bailout_id();
1036  int parameter_count =
1037  static_cast<unsigned int>(descriptor->parameters_count());
1038  translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
1039  bailout_id, shared_info_id, parameter_count);
1040  break;
1041  }
1042  }
1043 
1044  TranslateFrameStateDescriptorOperands(descriptor, iter, translation);
1045 }
1046 
1047 int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
1048  size_t frame_state_offset,
1049  OutputFrameStateCombine state_combine) {
1050  DeoptimizationEntry const& entry =
1051  GetDeoptimizationEntry(instr, frame_state_offset);
1052  FrameStateDescriptor* const descriptor = entry.descriptor();
1053  frame_state_offset++;
1054 
1055  int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
1056  Translation translation(&translations_,
1057  static_cast<int>(descriptor->GetFrameCount()),
1058  static_cast<int>(descriptor->GetJSFrameCount()),
1059  update_feedback_count, zone());
1060  if (entry.feedback().IsValid()) {
1061  DeoptimizationLiteral literal =
1062  DeoptimizationLiteral(entry.feedback().vector());
1063  int literal_id = DefineDeoptimizationLiteral(literal);
1064  translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt());
1065  }
1066  InstructionOperandIterator iter(instr, frame_state_offset);
1067  BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
1068  state_combine);
1069 
1070  int deoptimization_id = static_cast<int>(deoptimization_states_.size());
1071 
1072  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
1073  descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(),
1074  entry.reason()));
1075 
1076  return deoptimization_id;
1077 }
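// Note on BuildTranslation (above): the returned id is an index into
// deoptimization_states_; it is later consumed by GetDeoptimizationKind /
// GetDeoptimizationReason, by AssembleDeoptimizerCall, and when
// GenerateDeoptimizationData() populates the per-entry fields.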
1078 
1079 void CodeGenerator::AddTranslationForOperand(Translation* translation,
1080  Instruction* instr,
1081  InstructionOperand* op,
1082  MachineType type) {
1083  if (op->IsStackSlot()) {
1084  if (type.representation() == MachineRepresentation::kBit) {
1085  translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
1086  } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1087  type == MachineType::Int32()) {
1088  translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
1089  } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1090  type == MachineType::Uint32()) {
1091  translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
1092  } else if (type == MachineType::Int64()) {
1093  translation->StoreInt64StackSlot(LocationOperand::cast(op)->index());
1094  } else {
1095  CHECK_EQ(MachineRepresentation::kTagged, type.representation());
1096  translation->StoreStackSlot(LocationOperand::cast(op)->index());
1097  }
1098  } else if (op->IsFPStackSlot()) {
1099  if (type.representation() == MachineRepresentation::kFloat64) {
1100  translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
1101  } else {
1102  CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
1103  translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
1104  }
1105  } else if (op->IsRegister()) {
1106  InstructionOperandConverter converter(this, instr);
1107  if (type.representation() == MachineRepresentation::kBit) {
1108  translation->StoreBoolRegister(converter.ToRegister(op));
1109  } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
1110  type == MachineType::Int32()) {
1111  translation->StoreInt32Register(converter.ToRegister(op));
1112  } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
1113  type == MachineType::Uint32()) {
1114  translation->StoreUint32Register(converter.ToRegister(op));
1115  } else if (type == MachineType::Int64()) {
1116  translation->StoreInt64Register(converter.ToRegister(op));
1117  } else {
1118  CHECK_EQ(MachineRepresentation::kTagged, type.representation());
1119  translation->StoreRegister(converter.ToRegister(op));
1120  }
1121  } else if (op->IsFPRegister()) {
1122  InstructionOperandConverter converter(this, instr);
1123  if (type.representation() == MachineRepresentation::kFloat64) {
1124  translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
1125  } else {
1126  CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
1127  translation->StoreFloatRegister(converter.ToFloatRegister(op));
1128  }
1129  } else {
1130  CHECK(op->IsImmediate());
1131  InstructionOperandConverter converter(this, instr);
1132  Constant constant = converter.ToConstant(op);
1133  DeoptimizationLiteral literal;
1134  switch (constant.type()) {
1135  case Constant::kInt32:
1136  if (type.representation() == MachineRepresentation::kTagged) {
1137  // When pointers are 4 bytes, we can use int32 constants to represent
1138  // Smis.
1139  DCHECK_EQ(4, kPointerSize);
1140  Smi smi(static_cast<Address>(constant.ToInt32()));
1141  DCHECK(smi->IsSmi());
1142  literal = DeoptimizationLiteral(smi->value());
1143  } else if (type.representation() == MachineRepresentation::kBit) {
1144  if (constant.ToInt32() == 0) {
1145  literal =
1146  DeoptimizationLiteral(isolate()->factory()->false_value());
1147  } else {
1148  DCHECK_EQ(1, constant.ToInt32());
1149  literal = DeoptimizationLiteral(isolate()->factory()->true_value());
1150  }
1151  } else {
1152  DCHECK(type == MachineType::Int32() ||
1153  type == MachineType::Uint32() ||
1154  type.representation() == MachineRepresentation::kWord32 ||
1155  type.representation() == MachineRepresentation::kNone);
1156  DCHECK(type.representation() != MachineRepresentation::kNone ||
1157  constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
1158  if (type == MachineType::Uint32()) {
1159  literal = DeoptimizationLiteral(
1160  static_cast<uint32_t>(constant.ToInt32()));
1161  } else {
1162  literal = DeoptimizationLiteral(constant.ToInt32());
1163  }
1164  }
1165  break;
1166  case Constant::kInt64:
1167  DCHECK_EQ(8, kPointerSize);
1168  if (type.representation() == MachineRepresentation::kWord64) {
1169  literal =
1170  DeoptimizationLiteral(static_cast<double>(constant.ToInt64()));
1171  } else {
1172  // When pointers are 8 bytes, we can use int64 constants to represent
1173  // Smis.
1174  DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1175  Smi smi(static_cast<Address>(constant.ToInt64()));
1176  DCHECK(smi->IsSmi());
1177  literal = DeoptimizationLiteral(smi->value());
1178  }
1179  break;
1180  case Constant::kFloat32:
1181  DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
1182  type.representation() == MachineRepresentation::kTagged);
1183  literal = DeoptimizationLiteral(constant.ToFloat32());
1184  break;
1185  case Constant::kFloat64:
1186  DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
1187  type.representation() == MachineRepresentation::kTagged);
1188  literal = DeoptimizationLiteral(constant.ToFloat64().value());
1189  break;
1190  case Constant::kHeapObject:
1191  DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1192  literal = DeoptimizationLiteral(constant.ToHeapObject());
1193  break;
1194  case Constant::kDelayedStringConstant:
1195  DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
1196  literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
1197  break;
1198  default:
1199  UNREACHABLE();
1200  }
1201  if (literal.object().equals(info()->closure())) {
1202  translation->StoreJSFrameFunction();
1203  } else {
1204  int literal_id = DefineDeoptimizationLiteral(literal);
1205  translation->StoreLiteral(literal_id);
1206  }
1207  }
1208 }
1209 
1210 void CodeGenerator::MarkLazyDeoptSite() {
1211  last_lazy_deopt_pc_ = tasm()->pc_offset();
1212 }
1213 
1214 DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
1215  Instruction* instr, size_t frame_state_offset) {
1216  int const deoptimization_id = BuildTranslation(
1217  instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
1218 
1219  DeoptimizationExit* const exit = new (zone())
1220  DeoptimizationExit(deoptimization_id, current_source_position_);
1221  deoptimization_exits_.push_back(exit);
1222  return exit;
1223 }
1224 
1225 void CodeGenerator::InitializeSpeculationPoison() {
1226  if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
1227 
1228  // Initialize {kSpeculationPoisonRegister} either by comparing the expected
1229  // with the actual call target, or by unconditionally using {-1} initially.
1230  // Masking register arguments with it only makes sense in the first case.
1231  if (info()->called_with_code_start_register()) {
1232  tasm()->RecordComment("-- Prologue: generate speculation poison --");
1233  GenerateSpeculationPoisonFromCodeStartRegister();
1234  if (info()->is_poisoning_register_arguments()) {
1235  AssembleRegisterArgumentPoisoning();
1236  }
1237  } else {
1238  ResetSpeculationPoison();
1239  }
1240 }
1241 
1242 void CodeGenerator::ResetSpeculationPoison() {
1243  if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
1244  tasm()->ResetSpeculationPoisonRegister();
1245  }
1246 }
1247 
1248 OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
1249  : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
1250  gen->ools_ = this;
1251 }
1252 
1253 OutOfLineCode::~OutOfLineCode() = default;
1254 
1255 Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
1256  switch (kind_) {
1257  case DeoptimizationLiteralKind::kObject: {
1258  return object_;
1259  }
1260  case DeoptimizationLiteralKind::kNumber: {
1261  return isolate->factory()->NewNumber(number_);
1262  }
1263  case DeoptimizationLiteralKind::kString: {
1264  return string_->AllocateStringConstant(isolate);
1265  }
1266  }
1267  UNREACHABLE();
1268 }
1269 
1270 } // namespace compiler
1271 } // namespace internal
1272 } // namespace v8