// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// assembler.h
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license above has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 #ifndef V8_ASSEMBLER_H_
36 #define V8_ASSEMBLER_H_
37 
38 #include <forward_list>
39 
40 #include "src/deoptimize-reason.h"
41 #include "src/external-reference.h"
42 #include "src/flags.h"
43 #include "src/globals.h"
44 #include "src/handles.h"
45 #include "src/objects.h"
46 #include "src/reglist.h"
47 #include "src/reloc-info.h"
48 
49 namespace v8 {
50 
51 // Forward declarations.
52 class ApiFunction;
53 
54 namespace internal {
55 
56 // Forward declarations.
57 class EmbeddedData;
58 class InstructionStream;
59 class Isolate;
60 class SCTableReference;
61 class SourcePosition;
62 class StatsCounter;
63 class StringConstantBase;
64 
// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter.
//
// The assembler runs twice: a collection stage gathers jump-site information,
// then an optimization stage re-assembles using that information. The hash
// code is used to verify both stages saw the same instruction sequence.
class JumpOptimizationInfo {
 public:
  // Stage queries/transition; starts in the collection stage.
  bool is_collecting() const { return stage_ == kCollection; }
  bool is_optimizing() const { return stage_ == kOptimization; }
  void set_optimizing() { stage_ = kOptimization; }

  // Whether the collection stage found anything worth optimizing.
  bool is_optimizable() const { return optimizable_; }
  void set_optimizable() { optimizable_ = true; }

  // Used to verify the instruction sequence is always the same in two stages.
  size_t hash_code() const { return hash_code_; }
  void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }

  // Raw bitmap shared between the two stages; its interpretation is up to the
  // platform assembler.
  std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }

 private:
  enum { kCollection, kOptimization } stage_ = kCollection;
  bool optimizable_ = false;
  std::vector<uint32_t> farjmp_bitmap_;
  size_t hash_code_ = 0u;
};
89 
91  public:
92  explicit HeapObjectRequest(double heap_number, int offset = -1);
93  explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
94  explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);
95 
96  enum Kind { kHeapNumber, kCodeStub, kStringConstant };
97  Kind kind() const { return kind_; }
98 
99  double heap_number() const {
100  DCHECK_EQ(kind(), kHeapNumber);
101  return value_.heap_number;
102  }
103 
104  CodeStub* code_stub() const {
105  DCHECK_EQ(kind(), kCodeStub);
106  return value_.code_stub;
107  }
108 
109  const StringConstantBase* string() const {
110  DCHECK_EQ(kind(), kStringConstant);
111  return value_.string;
112  }
113 
114  // The code buffer offset at the time of the request.
115  int offset() const {
116  DCHECK_GE(offset_, 0);
117  return offset_;
118  }
119  void set_offset(int offset) {
120  DCHECK_LT(offset_, 0);
121  offset_ = offset;
122  DCHECK_GE(offset_, 0);
123  }
124 
125  private:
126  Kind kind_;
127 
128  union {
129  double heap_number;
130  CodeStub* code_stub;
131  const StringConstantBase* string;
132  } value_;
133 
134  int offset_;
135 };
136 
137 // -----------------------------------------------------------------------------
138 // Platform independent assembler base class.
139 
enum class CodeObjectRequired { kNo, kYes };
141 
142 struct V8_EXPORT_PRIVATE AssemblerOptions {
143  // Prohibits using any V8-specific features of assembler like (isolates,
144  // heap objects, external references, etc.).
145  bool v8_agnostic_code = false;
146  // Recording reloc info for external references and off-heap targets is
147  // needed whenever code is serialized, e.g. into the snapshot or as a WASM
148  // module. This flag allows this reloc info to be disabled for code that
149  // will not survive process destruction.
150  bool record_reloc_info_for_serialization = true;
151  // Recording reloc info can be disabled wholesale. This is needed when the
152  // assembler is used on existing code directly (e.g. JumpTableAssembler)
153  // without any buffer to hold reloc information.
154  bool disable_reloc_info_for_patching = false;
155  // Enables access to exrefs by computing a delta from the root array.
156  // Only valid if code will not survive the process.
157  bool enable_root_array_delta_access = false;
158  // Enables specific assembler sequences only used for the simulator.
159  bool enable_simulator_code = false;
160  // Enables use of isolate-independent constants, indirected through the
161  // root array.
162  // (macro assembler feature).
163  bool isolate_independent_code = false;
164  // Enables the use of isolate-independent builtins through an off-heap
165  // trampoline. (macro assembler feature).
166  bool inline_offheap_trampolines = false;
167  // On some platforms, all code is within a given range in the process,
168  // and the start of this range is configured here.
169  Address code_range_start = 0;
170  // Enable pc-relative calls/jumps on platforms that support it. When setting
171  // this flag, the code range must be small enough to fit all offsets into
172  // the instruction immediates.
173  bool use_pc_relative_calls_and_jumps = false;
174 
175  // Constructs V8-agnostic set of options from current state.
176  AssemblerOptions EnableV8AgnosticCode() const;
177 
178  static AssemblerOptions Default(
179  Isolate* isolate, bool explicitly_support_serialization = false);
180 };
181 
182 class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
183  public:
184  AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
185  virtual ~AssemblerBase();
186 
187  const AssemblerOptions& options() const { return options_; }
188 
189  bool emit_debug_code() const { return emit_debug_code_; }
190  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
191 
192  bool predictable_code_size() const { return predictable_code_size_; }
193  void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
194 
195  uint64_t enabled_cpu_features() const { return enabled_cpu_features_; }
196  void set_enabled_cpu_features(uint64_t features) {
197  enabled_cpu_features_ = features;
198  }
199  // Features are usually enabled by CpuFeatureScope, which also asserts that
200  // the features are supported before they are enabled.
201  bool IsEnabled(CpuFeature f) {
202  return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
203  }
204  void EnableCpuFeature(CpuFeature f) {
205  enabled_cpu_features_ |= (static_cast<uint64_t>(1) << f);
206  }
207 
208  bool is_constant_pool_available() const {
209  if (FLAG_enable_embedded_constant_pool) {
210  return constant_pool_available_;
211  } else {
212  // Embedded constant pool not supported on this architecture.
213  UNREACHABLE();
214  }
215  }
216 
217  JumpOptimizationInfo* jump_optimization_info() {
218  return jump_optimization_info_;
219  }
220  void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
221  jump_optimization_info_ = jump_opt;
222  }
223 
224  // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
225  // cross-snapshotting.
226  static void QuietNaN(HeapObject* nan) { }
227 
228  int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
229 
230  // This function is called when code generation is aborted, so that
231  // the assembler could clean up internal data structures.
232  virtual void AbortedCodeGeneration() { }
233 
234  // Debugging
235  void Print(Isolate* isolate);
236 
237  static const int kMinimalBufferSize = 4*KB;
238 
239  static void FlushICache(void* start, size_t size);
240  static void FlushICache(Address start, size_t size) {
241  return FlushICache(reinterpret_cast<void*>(start), size);
242  }
243 
244  // Used to print the name of some special registers.
245  static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
246 
247  protected:
248  // Add 'target' to the {code_targets_} vector, if necessary, and return the
249  // offset at which it is stored.
250  int AddCodeTarget(Handle<Code> target);
251  Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
252  // Update to the code target at {code_target_index} to {target}.
253  void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
254  // Reserves space in the code target vector.
255  void ReserveCodeTargetSpace(size_t num_of_code_targets);
256 
257  // The buffer into which code and relocation info are generated. It could
258  // either be owned by the assembler or be provided externally.
259  byte* buffer_;
260  int buffer_size_;
261  bool own_buffer_;
262  std::forward_list<HeapObjectRequest> heap_object_requests_;
263  // The program counter, which points into the buffer above and moves forward.
264  // TODO(jkummerow): This should probably have type {Address}.
265  byte* pc_;
266 
267  void set_constant_pool_available(bool available) {
268  if (FLAG_enable_embedded_constant_pool) {
269  constant_pool_available_ = available;
270  } else {
271  // Embedded constant pool not supported on this architecture.
272  UNREACHABLE();
273  }
274  }
275 
276  // {RequestHeapObject} records the need for a future heap number allocation,
277  // code stub generation or string allocation. After code assembly, each
278  // platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
279  // allocate these objects and place them where they are expected (determined
280  // by the pc offset associated with each request).
281  void RequestHeapObject(HeapObjectRequest request);
282 
283  bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
284  DCHECK(!RelocInfo::IsNone(rmode));
285  if (options().disable_reloc_info_for_patching) return false;
286  if (RelocInfo::IsOnlyForSerializer(rmode) &&
287  !options().record_reloc_info_for_serialization && !emit_debug_code()) {
288  return false;
289  }
290  return true;
291  }
292 
293  private:
294  // Before we copy code into the code space, we sometimes cannot encode
295  // call/jump code targets as we normally would, as the difference between the
296  // instruction's location in the temporary buffer and the call target is not
297  // guaranteed to fit in the instruction's offset field. We keep track of the
298  // code handles we encounter in calls in this vector, and encode the index of
299  // the code handle in the vector instead.
300  std::vector<Handle<Code>> code_targets_;
301 
302  const AssemblerOptions options_;
303  uint64_t enabled_cpu_features_;
304  bool emit_debug_code_;
305  bool predictable_code_size_;
306 
307  // Indicates whether the constant pool can be accessed, which is only possible
308  // if the pp register points to the current code object's constant pool.
309  bool constant_pool_available_;
310 
311  JumpOptimizationInfo* jump_optimization_info_;
312 
313  // Constant pool.
314  friend class FrameAndConstantPoolScope;
315  friend class ConstantPoolUnavailableScope;
316 };
317 
318 // Avoids emitting debug code during the lifetime of this scope object.
320  public:
321  explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
322  : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
323  assembler_->set_emit_debug_code(false);
324  }
326  assembler_->set_emit_debug_code(old_value_);
327  }
328  private:
329  AssemblerBase* assembler_;
330  bool old_value_;
331 };
332 
333 
334 // Avoids using instructions that vary in size in unpredictable ways between the
335 // snapshot and the running VM.
337  public:
338  PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
340 
341  private:
342  AssemblerBase* const assembler_;
343  int const expected_size_;
344  int const start_offset_;
345  bool const old_value_;
346 };
347 
348 
349 // Enable a specified feature within a scope.
351  public:
352  enum CheckPolicy {
353  kCheckSupported,
354  kDontCheckSupported,
355  };
356 
357 #ifdef DEBUG
358  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
359  CheckPolicy check = kCheckSupported);
360  ~CpuFeatureScope();
361 
362  private:
363  AssemblerBase* assembler_;
364  uint64_t old_enabled_;
365 #else
366  CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
367  CheckPolicy check = kCheckSupported) {}
368  ~CpuFeatureScope() { // NOLINT (modernize-use-equals-default)
369  // Define a destructor to avoid unused variable warnings.
370  }
371 #endif
372 };
373 
374 // CpuFeatures keeps track of which features are supported by the target CPU.
375 // Supported features must be enabled by a CpuFeatureScope before use.
376 // Example:
377 // if (assembler->IsSupported(SSE3)) {
378 // CpuFeatureScope fscope(assembler, SSE3);
379 // // Generate code containing SSE3 instructions.
380 // } else {
381 // // Generate alternative code.
382 // }
383 class CpuFeatures : public AllStatic {
384  public:
385  static void Probe(bool cross_compile) {
386  STATIC_ASSERT(NUMBER_OF_CPU_FEATURES <= kBitsPerInt);
387  if (initialized_) return;
388  initialized_ = true;
389  ProbeImpl(cross_compile);
390  }
391 
392  static unsigned SupportedFeatures() {
393  Probe(false);
394  return supported_;
395  }
396 
397  static bool IsSupported(CpuFeature f) {
398  return (supported_ & (1u << f)) != 0;
399  }
400 
401  static inline bool SupportsOptimizer();
402 
403  static inline bool SupportsWasmSimd128();
404 
405  static inline unsigned icache_line_size() {
406  DCHECK_NE(icache_line_size_, 0);
407  return icache_line_size_;
408  }
409 
410  static inline unsigned dcache_line_size() {
411  DCHECK_NE(dcache_line_size_, 0);
412  return dcache_line_size_;
413  }
414 
415  static void PrintTarget();
416  static void PrintFeatures();
417 
418  private:
419  friend class ExternalReference;
420  friend class AssemblerBase;
421  // Flush instruction cache.
422  static void FlushICache(void* start, size_t size);
423 
424  // Platform-dependent implementation.
425  static void ProbeImpl(bool cross_compile);
426 
427  static unsigned supported_;
428  static unsigned icache_line_size_;
429  static unsigned dcache_line_size_;
430  static bool initialized_;
431  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
432 };
433 
434 // -----------------------------------------------------------------------------
435 // Utility functions
436 
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
441 
442 
443 // Base type for CPU Registers.
444 //
445 // 1) We would prefer to use an enum for registers, but enum values are
446 // assignment-compatible with int, which has caused code-generation bugs.
447 //
448 // 2) By not using an enum, we are possibly preventing the compiler from
449 // doing certain constant folds, which may significantly reduce the
450 // code generated for some assembly instructions (because they boil down
451 // to a few constants). If this is a problem, we could change the code
452 // such that we use an enum in optimized mode, and the class in debug
453 // mode. This way we get the compile-time error checking in debug mode
454 // and best performance in optimized code.
455 template <typename SubType, int kAfterLastRegister>
457  // Internal enum class; used for calling constexpr methods, where we need to
458  // pass an integral type as template parameter.
459  enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };
460 
461  public:
462  static constexpr int kCode_no_reg = -1;
463  static constexpr int kNumRegisters = kAfterLastRegister;
464 
465  static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
466 
467  template <int code>
468  static constexpr SubType from_code() {
469  static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
470  return SubType{code};
471  }
472 
473  constexpr operator RegisterCode() const {
474  return static_cast<RegisterCode>(reg_code_);
475  }
476 
477  template <RegisterCode reg_code>
478  static constexpr int code() {
479  static_assert(
480  reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
481  "must be valid reg");
482  return static_cast<int>(reg_code);
483  }
484 
485  template <RegisterCode reg_code>
486  static constexpr RegList bit() {
487  return RegList{1} << code<reg_code>();
488  }
489 
490  static SubType from_code(int code) {
491  DCHECK_LE(0, code);
492  DCHECK_GT(kNumRegisters, code);
493  return SubType{code};
494  }
495 
496  // Constexpr version (pass registers as template parameters).
497  template <RegisterCode... reg_codes>
498  static constexpr RegList ListOf() {
499  return CombineRegLists(RegisterBase::bit<reg_codes>()...);
500  }
501 
502  // Non-constexpr version (pass registers as method parameters).
503  template <typename... Register>
504  static RegList ListOf(Register... regs) {
505  return CombineRegLists(regs.bit()...);
506  }
507 
508  bool is_valid() const { return reg_code_ != kCode_no_reg; }
509 
510  int code() const {
511  DCHECK(is_valid());
512  return reg_code_;
513  }
514 
515  RegList bit() const { return RegList{1} << code(); }
516 
517  inline constexpr bool operator==(SubType other) const {
518  return reg_code_ == other.reg_code_;
519  }
520  inline constexpr bool operator!=(SubType other) const {
521  return reg_code_ != other.reg_code_;
522  }
523 
524  protected:
525  explicit constexpr RegisterBase(int code) : reg_code_(code) {}
526  int reg_code_;
527 };
528 
// Helper macros to define a {RegisterName} method based on a macro list
// containing all names.
#define DEFINE_REGISTER_NAMES_NAME(name) #name,
#define DEFINE_REGISTER_NAMES(RegType, LIST)                                   \
  inline const char* RegisterName(RegType reg) {                               \
    static constexpr const char* Names[] = {LIST(DEFINE_REGISTER_NAMES_NAME)}; \
    STATIC_ASSERT(arraysize(Names) == RegType::kNumRegisters);                 \
    return reg.is_valid() ? Names[reg.code()] : "invalid";                     \
  }
538 
// Streams a register's name. Participates in overload resolution only for
// types with a matching RegisterName() overload (SFINAE via the defaulted
// second template parameter).
template <typename RegType,
          typename = decltype(RegisterName(std::declval<RegType>()))>
inline std::ostream& operator<<(std::ostream& os, RegType reg) {
  return os << RegisterName(reg);
}
544 
545 } // namespace internal
546 } // namespace v8
547 #endif // V8_ASSEMBLER_H_
// Definition: libplatform.h:13