V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
object-macros.h
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Note 1: Any file that includes this one should include object-macros-undef.h
6 // at the bottom.
7 
8 // Note 2: This file is deliberately missing the include guards (the undeffing
9 // approach wouldn't work otherwise).
10 //
11 // PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
12 
13 // The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used
14 // for fields that can be written to and read from multiple threads at the same
15 // time. See comments in src/base/atomicops.h for the memory ordering semantics.
16 
17 #include "src/v8memory.h"
18 
19 // Since this changes visibility, it should always be last in a class
20 // definition.
// Declares the standard constructor set for an ObjectPtr-based class:
// a constexpr default constructor delegating to the variadic argument
// (presumably the superclass — TODO confirm at each use site), self-returning
// operator-> overloads, and a protected Address constructor.
21 #define OBJECT_CONSTRUCTORS(Type, ...) \
22  public: \
23  constexpr Type() : __VA_ARGS__() {} \
24  Type* operator->() { return this; } \
25  const Type* operator->() const { return this; } \
26  \
27  protected: \
28  explicit inline Type(Address ptr);
29 
// Out-of-line definition of the Address constructor declared above; checks
// (in slow-DCHECK builds only) that the raw address really is a Type.
30 #define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
31  inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
32 
// Declares GetHeap()/GetIsolate() for object types that are never allocated
// in the read-only space (read-only objects have no owning Heap/Isolate).
33 #define NEVER_READ_ONLY_SPACE \
34  inline Heap* GetHeap() const; \
35  inline Isolate* GetIsolate() const;
36 
// Definitions for the declarations above, delegating to the
// NeverReadOnlySpaceObjectPtr helpers.
37 #define NEVER_READ_ONLY_SPACE_IMPL(Type) \
38  Heap* Type::GetHeap() const { \
39  return NeverReadOnlySpaceObjectPtr::GetHeap(*this); \
40  } \
41  Isolate* Type::GetIsolate() const { \
42  return NeverReadOnlySpaceObjectPtr::GetIsolate(*this); \
43  }
44 
// Declaration-only accessor macros for in-class use: a getter `name()` and a
// setter `set_name(value)` for a primitive-typed field.
45 #define DECL_PRIMITIVE_ACCESSORS(name, type) \
46  inline type name() const; \
47  inline void set_##name(type value);
48 
49 #define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)
50 
51 #define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)
52 
53 #define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
54 
// Note: the uint16/uint8 setters deliberately take a plain int; the matching
// *_ACCESSORS implementation macros range-check the value with DCHECKs.
55 #define DECL_UINT16_ACCESSORS(name) \
56  inline uint16_t name() const; \
57  inline void set_##name(int value);
58 
59 #define DECL_INT16_ACCESSORS(name) \
60  inline int16_t name() const; \
61  inline void set_##name(int16_t value);
62 
63 #define DECL_UINT8_ACCESSORS(name) \
64  inline uint8_t name() const; \
65  inline void set_##name(int value);
66 
// Declares accessors for a tagged (Object*) field; the setter takes an
// optional WriteBarrierMode so callers can skip the write barrier.
67 #define DECL_ACCESSORS(name, type) \
68  inline type* name() const; \
69  inline void set_##name(type* value, \
70  WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
71 
72 // Replacement for the above, temporarily separate to allow incremental
73 // transition.
74 // TODO(3770): Get rid of the duplication when the migration is complete.
75 #define DECL_ACCESSORS2(name, type) \
76  inline type name() const; \
77  inline void set_##name(type value, \
78  WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
79 
// Declares checked downcasts from Object* for the old pointer-based object
// representation.
80 #define DECL_CAST(type) \
81  V8_INLINE static type* cast(Object* object); \
82  V8_INLINE static const type* cast(const Object* object);
83 
84 // TODO(3770): Replacement for the above, temporarily separate for
85 // incremental transition.
// Adds ObjectPtr overloads plus unchecked_cast variants; the ObjectPtr
// unchecked_cast is defined inline via bit_cast (no type check at all).
86 #define DECL_CAST2(Type) \
87  V8_INLINE static Type cast(Object* object); \
88  V8_INLINE static const Type cast(const Object* object); \
89  V8_INLINE static Type cast(ObjectPtr object); \
90  V8_INLINE static Type unchecked_cast(const Object* object); \
91  V8_INLINE static Type unchecked_cast(ObjectPtr object) { \
92  return bit_cast<Type>(object); \
93  }
94 
// Definitions for DECL_CAST: reinterpret_cast guarded by a slow-DCHECK type
// check.
95 #define CAST_ACCESSOR(type) \
96  type* type::cast(Object* object) { \
97  SLOW_DCHECK(object->Is##type()); \
98  return reinterpret_cast<type*>(object); \
99  } \
100  const type* type::cast(const Object* object) { \
101  SLOW_DCHECK(object->Is##type()); \
102  return reinterpret_cast<const type*>(object); \
103  }
104 
105 // TODO(3770): Replacement for the above, temporarily separate for
106 // incremental transition.
// Note: these ObjectPtr-era casts rebuild the value from the raw ptr() and
// perform no type check here (unlike CAST_ACCESSOR above).
107 #define CAST_ACCESSOR2(Type) \
108  Type Type::cast(Object* object) { return Type(object->ptr()); } \
109  const Type Type::cast(const Object* object) { return Type(object->ptr()); } \
110  Type Type::cast(ObjectPtr object) { return Type(object.ptr()); } \
111  Type Type::unchecked_cast(const Object* object) { \
112  return bit_cast<Type>(ObjectPtr(object->ptr())); \
113  }
114 
// Implementation macros for primitive-field accessors declared with the
// DECL_* macros above; each reads/writes a raw field at `offset`.
115 #define INT_ACCESSORS(holder, name, offset) \
116  int holder::name() const { return READ_INT_FIELD(this, offset); } \
117  void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
118 
119 #define INT32_ACCESSORS(holder, name, offset) \
120  int32_t holder::name() const { return READ_INT32_FIELD(this, offset); } \
121  void holder::set_##name(int32_t value) { \
122  WRITE_INT32_FIELD(this, offset, value); \
123  }
124 
// Relaxed-atomic variant for int32 fields accessed from multiple threads.
125 #define RELAXED_INT32_ACCESSORS(holder, name, offset) \
126  int32_t holder::name() const { \
127  return RELAXED_READ_INT32_FIELD(this, offset); \
128  } \
129  void holder::set_##name(int32_t value) { \
130  RELAXED_WRITE_INT32_FIELD(this, offset, value); \
131  }
132 
// Setter takes int but DCHECKs the value fits in uint16_t
// (static_cast<uint16_t>(-1) == 0xFFFF is the max).
133 #define UINT16_ACCESSORS(holder, name, offset) \
134  uint16_t holder::name() const { return READ_UINT16_FIELD(this, offset); } \
135  void holder::set_##name(int value) { \
136  DCHECK_GE(value, 0); \
137  DCHECK_LE(value, static_cast<uint16_t>(-1)); \
138  WRITE_UINT16_FIELD(this, offset, value); \
139  }
140 
// Same pattern for uint8_t fields (max 0xFF).
141 #define UINT8_ACCESSORS(holder, name, offset) \
142  uint8_t holder::name() const { return READ_UINT8_FIELD(this, offset); } \
143  void holder::set_##name(int value) { \
144  DCHECK_GE(value, 0); \
145  DCHECK_LE(value, static_cast<uint8_t>(-1)); \
146  WRITE_UINT8_FIELD(this, offset, value); \
147  }
148 
// Tagged-field accessors with separate DCHECK conditions for the getter and
// the setter; the setter emits a conditional write barrier.
149 #define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
150  set_condition) \
151  type* holder::name() const { \
152  type* value = type::cast(READ_FIELD(this, offset)); \
153  DCHECK(get_condition); \
154  return value; \
155  } \
156  void holder::set_##name(type* value, WriteBarrierMode mode) { \
157  DCHECK(set_condition); \
158  WRITE_FIELD(this, offset, value); \
159  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
160  }
161 // TODO(3770): Replacement for the above.
// Identical shape, but for value-typed (ObjectPtr-era) field types.
162 #define ACCESSORS_CHECKED3(holder, name, type, offset, get_condition, \
163  set_condition) \
164  type holder::name() const { \
165  type value = type::cast(READ_FIELD(this, offset)); \
166  DCHECK(get_condition); \
167  return value; \
168  } \
169  void holder::set_##name(type value, WriteBarrierMode mode) { \
170  DCHECK(set_condition); \
171  WRITE_FIELD(this, offset, value); \
172  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
173  }
// Convenience wrappers: one shared condition, or no condition at all.
174 #define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
175  ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
176 
177 #define ACCESSORS(holder, name, type, offset) \
178  ACCESSORS_CHECKED(holder, name, type, offset, true)
179 
180 // Replacement for the above, temporarily separate to allow incremental
181 // transition.
182 // TODO(3770): Get rid of the duplication when the migration is complete.
183 #define ACCESSORS2(holder, name, type, offset) \
184  type holder::name() const { return type::cast(READ_FIELD(this, offset)); } \
185  void holder::set_##name(type value, WriteBarrierMode mode) { \
186  WRITE_FIELD(this, offset, value); \
187  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
188  }
189 
// Acquire/release variant of ACCESSORS_CHECKED2 for fields read and written
// concurrently: getter uses an acquire load, setter a release store.
190 #define SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, \
191  get_condition, set_condition) \
192  type holder::name() const { \
193  type value = type::cast(ACQUIRE_READ_FIELD(this, offset)); \
194  DCHECK(get_condition); \
195  return value; \
196  } \
197  void holder::set_##name(type value, WriteBarrierMode mode) { \
198  DCHECK(set_condition); \
199  RELEASE_WRITE_FIELD(this, offset, value); \
200  CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \
201  }
202 
203 #define SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, condition) \
204  SYNCHRONIZED_ACCESSORS_CHECKED2(holder, name, type, offset, condition, \
205  condition)
206 
207 #define SYNCHRONIZED_ACCESSORS(holder, name, type, offset) \
208  SYNCHRONIZED_ACCESSORS_CHECKED(holder, name, type, offset, true)
209 
// Accessors for fields holding MaybeObject (possibly-weak references); these
// use the weak field reader/writer and the weak write barrier.
210 #define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
211  set_condition) \
212  MaybeObject holder::name() const { \
213  MaybeObject value = READ_WEAK_FIELD(this, offset); \
214  DCHECK(get_condition); \
215  return value; \
216  } \
217  void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
218  DCHECK(set_condition); \
219  WRITE_WEAK_FIELD(this, offset, value); \
220  CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); \
221  }
222 
223 #define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
224  WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, condition)
225 
226 #define WEAK_ACCESSORS(holder, name, offset) \
227  WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
228 
229 // Getter that returns a Smi as an int and writes an int as a Smi.
// No write barrier is needed: Smis are immediates, not heap pointers.
230 #define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
231  int holder::name() const { \
232  DCHECK(condition); \
233  Object* value = READ_FIELD(this, offset); \
234  return Smi::ToInt(value); \
235  } \
236  void holder::set_##name(int value) { \
237  DCHECK(condition); \
238  WRITE_FIELD(this, offset, Smi::FromInt(value)); \
239  }
240 
241 #define SMI_ACCESSORS(holder, name, offset) \
242  SMI_ACCESSORS_CHECKED(holder, name, offset, true)
243 
// Acquire/release Smi accessors, exposed under synchronized_-prefixed names
// so callers opt in explicitly at each use site.
244 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
245  int holder::synchronized_##name() const { \
246  Object* value = ACQUIRE_READ_FIELD(this, offset); \
247  return Smi::ToInt(value); \
248  } \
249  void holder::synchronized_set_##name(int value) { \
250  RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
251  }
252 
// Relaxed-atomic Smi accessors (relaxed_read_/relaxed_write_ prefixes).
253 #define RELAXED_SMI_ACCESSORS(holder, name, offset) \
254  int holder::relaxed_read_##name() const { \
255  Object* value = RELAXED_READ_FIELD(this, offset); \
256  return Smi::ToInt(value); \
257  } \
258  void holder::relaxed_write_##name(int value) { \
259  RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
260  }
261 
// Accessors for a single boolean bit packed inside another field; `field` is
// the name of the containing field's accessor and `offset` the bit position.
262 #define BOOL_GETTER(holder, field, name, offset) \
263  bool holder::name() const { return BooleanBit::get(field(), offset); }
264 
265 #define BOOL_ACCESSORS(holder, field, name, offset) \
266  bool holder::name() const { return BooleanBit::get(field(), offset); } \
267  void holder::set_##name(bool value) { \
268  set_##field(BooleanBit::set(field(), offset, value)); \
269  }
270 
// Accessors for a multi-bit value packed via a BitField<> specification.
271 #define BIT_FIELD_ACCESSORS(holder, field, name, BitField) \
272  typename BitField::FieldType holder::name() const { \
273  return BitField::decode(field()); \
274  } \
275  void holder::set_##name(typename BitField::FieldType value) { \
276  set_##field(BitField::update(field(), value)); \
277  }
278 
// Free-function check of a raw InstanceType against one expected value.
279 #define INSTANCE_TYPE_CHECKER(type, forinstancetype) \
280  V8_INLINE bool Is##type(InstanceType instance_type) { \
281  return instance_type == forinstancetype; \
282  }
283 
// HeapObject::Is##type(), implemented via the object's map's instance type.
284 #define TYPE_CHECKER(type, ...) \
285  bool HeapObject::Is##type() const { \
286  return InstanceTypeChecker::Is##type(map()->instance_type()); \
287  }
288 
// Relaxed-atomic accessors for int16 fields.
289 #define RELAXED_INT16_ACCESSORS(holder, name, offset) \
290  int16_t holder::name() const { \
291  return RELAXED_READ_INT16_FIELD(this, offset); \
292  } \
293  void holder::set_##name(int16_t value) { \
294  RELAXED_WRITE_INT16_FIELD(this, offset, value); \
295  }
296 
// FIELD_ADDR maps a tagged heap-object pointer plus a field offset to the
// untagged address of that field (heap-object pointers carry kHeapObjectTag
// in their low bits, hence the subtraction).
297 #define FIELD_ADDR(p, offset) ((p)->ptr() + offset - kHeapObjectTag)
298 
// Plain (non-atomic) readers of strong / weak tagged fields.
299 #define READ_FIELD(p, offset) (*ObjectSlot(FIELD_ADDR(p, offset)))
300 
301 #define READ_WEAK_FIELD(p, offset) (*MaybeObjectSlot(FIELD_ADDR(p, offset)))
302 
// Atomic readers; see src/base/atomicops.h for the ordering semantics.
// Fixed: the slot method is Acquire_Load(), not Acquire_Load1() — the stray
// "1" suffix matched no ObjectSlot member (compare Relaxed_Load() used by
// the two macros below).
303 #define ACQUIRE_READ_FIELD(p, offset) \
304  ObjectSlot(FIELD_ADDR(p, offset)).Acquire_Load()
305 
306 #define RELAXED_READ_FIELD(p, offset) \
307  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
308 
309 #define RELAXED_READ_WEAK_FIELD(p, offset) \
310  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Load()
311 
// Tagged-field writers. With concurrent marking enabled the marker may read
// fields while the mutator writes them, so the store must be atomic
// (relaxed); otherwise a plain store suffices.
// Fixed: ObjectSlot's atomic store is Relaxed_Store(), not Relaxed_Store1()
// — WRITE_WEAK_FIELD in the same branch already used the correct name.
312 #ifdef V8_CONCURRENT_MARKING
313 #define WRITE_FIELD(p, offset, value) \
314  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
315 #define WRITE_WEAK_FIELD(p, offset, value) \
316  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
317 #else
318 #define WRITE_FIELD(p, offset, value) \
319  ObjectSlot(FIELD_ADDR(p, offset)).store(value)
320 #define WRITE_WEAK_FIELD(p, offset, value) \
321  MaybeObjectSlot(FIELD_ADDR(p, offset)).store(value)
322 #endif
323 
// Release/relaxed atomic tagged-field writers; see src/base/atomicops.h for
// the ordering guarantees.
// Fixed: the slot API methods are Release_Store()/Relaxed_Store() — the
// stray "1" suffixes matched no ObjectSlot member (compare
// RELAXED_WRITE_WEAK_FIELD below, which already used the correct name).
324 #define RELEASE_WRITE_FIELD(p, offset, value) \
325  ObjectSlot(FIELD_ADDR(p, offset)).Release_Store(value)
326 
327 #define RELAXED_WRITE_FIELD(p, offset, value) \
328  ObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
329 
330 #define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
331  MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
332 
// Unconditional GC write barrier for a strong tagged field: notifies both
// the incremental marker and the generational (old-to-new) remembered set.
// The DCHECK guards against writes to objects the GC considers read-only.
333 #define WRITE_BARRIER(object, offset, value) \
334  do { \
335  DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
336  MarkingBarrier(object, (object)->RawField(offset), value); \
337  GenerationalBarrier(object, (object)->RawField(offset), value); \
338  } while (false)
339 
// Same, for possibly-weak (MaybeObject) fields.
340 #define WEAK_WRITE_BARRIER(object, offset, value) \
341  do { \
342  DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
343  MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
344  GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
345  } while (false)
346 
// Mode-dependent barrier: SKIP_WRITE_BARRIER elides everything; any other
// mode records the generational entry, and UPDATE_WRITE_BARRIER additionally
// informs the marker.
347 #define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
348  do { \
349  DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
350  if (mode != SKIP_WRITE_BARRIER) { \
351  if (mode == UPDATE_WRITE_BARRIER) { \
352  MarkingBarrier(object, (object)->RawField(offset), value); \
353  } \
354  GenerationalBarrier(object, (object)->RawField(offset), value); \
355  } \
356  } while (false)
357 
358 #define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
359  do { \
360  DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object)); \
361  if (mode != SKIP_WRITE_BARRIER) { \
362  if (mode == UPDATE_WRITE_BARRIER) { \
363  MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
364  } \
365  GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
366  } \
367  } while (false)
368 
// Raw (untagged) field readers/writers for primitive payloads. The plain
// forms are simple reinterpret_cast dereferences; the RELAXED_/ACQUIRE_/
// RELEASE_ forms go through base:: atomics (see src/base/atomicops.h).
369 #define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
370 
371 #define WRITE_DOUBLE_FIELD(p, offset, value) \
372  WriteDoubleValue(FIELD_ADDR(p, offset), value)
373 
374 #define READ_INT_FIELD(p, offset) \
375  (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))
376 
377 #define WRITE_INT_FIELD(p, offset, value) \
378  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
379 
380 #define ACQUIRE_READ_INTPTR_FIELD(p, offset) \
381  static_cast<intptr_t>(base::Acquire_Load( \
382  reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
383 
384 #define RELAXED_READ_INTPTR_FIELD(p, offset) \
385  static_cast<intptr_t>(base::Relaxed_Load( \
386  reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))
387 
388 #define READ_INTPTR_FIELD(p, offset) \
389  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))
390 
// NOTE(review): the trailing semicolons on the atomic-store macros below are
// part of the expansion; callers appear to rely on this existing shape.
391 #define RELEASE_WRITE_INTPTR_FIELD(p, offset, value) \
392  base::Release_Store( \
393  reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
394  static_cast<base::AtomicWord>(value));
395 
396 #define RELAXED_WRITE_INTPTR_FIELD(p, offset, value) \
397  base::Relaxed_Store( \
398  reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
399  static_cast<base::AtomicWord>(value));
400 
401 #define WRITE_INTPTR_FIELD(p, offset, value) \
402  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
403 
404 #define READ_UINTPTR_FIELD(p, offset) \
405  (*reinterpret_cast<const uintptr_t*>(FIELD_ADDR(p, offset)))
406 
407 #define WRITE_UINTPTR_FIELD(p, offset, value) \
408  (*reinterpret_cast<uintptr_t*>(FIELD_ADDR(p, offset)) = value)
409 
410 #define READ_UINT8_FIELD(p, offset) \
411  (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))
412 
413 #define WRITE_UINT8_FIELD(p, offset, value) \
414  (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
415 
416 #define RELAXED_WRITE_INT8_FIELD(p, offset, value) \
417  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
418  static_cast<base::Atomic8>(value));
419 
420 #define READ_INT8_FIELD(p, offset) \
421  (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))
422 
423 #define RELAXED_READ_INT8_FIELD(p, offset) \
424  static_cast<int8_t>(base::Relaxed_Load( \
425  reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
426 
427 #define WRITE_INT8_FIELD(p, offset, value) \
428  (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
429 
430 #define READ_UINT16_FIELD(p, offset) \
431  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))
432 
433 #define WRITE_UINT16_FIELD(p, offset, value) \
434  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
435 
436 #define READ_INT16_FIELD(p, offset) \
437  (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))
438 
439 #define WRITE_INT16_FIELD(p, offset, value) \
440  (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
441 
442 #define RELAXED_READ_INT16_FIELD(p, offset) \
443  static_cast<int16_t>(base::Relaxed_Load( \
444  reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))
445 
446 #define RELAXED_WRITE_INT16_FIELD(p, offset, value) \
447  base::Relaxed_Store( \
448  reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
449  static_cast<base::Atomic16>(value));
450 
// 32/64-bit, float and byte raw-field accessors, continuing the pattern
// above; RELAXED_ forms use base:: atomics, the rest are plain casts.
451 #define READ_UINT32_FIELD(p, offset) \
452  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
453 
454 #define WRITE_UINT32_FIELD(p, offset, value) \
455  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
456 
457 #define READ_INT32_FIELD(p, offset) \
458  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
459 
460 #define RELAXED_READ_INT32_FIELD(p, offset) \
461  static_cast<int32_t>(base::Relaxed_Load( \
462  reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
463 
464 #define WRITE_INT32_FIELD(p, offset, value) \
465  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
466 
467 #define RELAXED_WRITE_INT32_FIELD(p, offset, value) \
468  base::Relaxed_Store( \
469  reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
470  static_cast<base::Atomic32>(value));
471 
472 #define READ_FLOAT_FIELD(p, offset) \
473  (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))
474 
475 #define WRITE_FLOAT_FIELD(p, offset, value) \
476  (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
477 
478 #define READ_UINT64_FIELD(p, offset) \
479  (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))
480 
481 #define WRITE_UINT64_FIELD(p, offset, value) \
482  (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
483 
484 #define READ_INT64_FIELD(p, offset) \
485  (*reinterpret_cast<const int64_t*>(FIELD_ADDR(p, offset)))
486 
487 #define WRITE_INT64_FIELD(p, offset, value) \
488  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
489 
490 #define READ_BYTE_FIELD(p, offset) \
491  (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))
492 
493 #define RELAXED_READ_BYTE_FIELD(p, offset) \
494  static_cast<byte>(base::Relaxed_Load( \
495  reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
496 
497 #define WRITE_BYTE_FIELD(p, offset, value) \
498  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
499 
500 #define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \
501  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
502  static_cast<base::Atomic8>(value));
503 
// Declares a heap-verification hook; expands to nothing unless the build
// defines VERIFY_HEAP, so release builds carry no verifier declarations.
504 #ifdef VERIFY_HEAP
505 #define DECL_VERIFIER(Name) void Name##Verify(Isolate* isolate);
506 #else
507 #define DECL_VERIFIER(Name)
508 #endif
509 
// Typed accessors for fixed elements of DeoptimizationData, addressed by a
// per-element k##name##Index constant.
510 #define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type) \
511  type* DeoptimizationData::name() const { \
512  return type::cast(get(k##name##Index)); \
513  } \
514  void DeoptimizationData::Set##name(type* value) { \
515  set(k##name##Index, value); \
516  }
517 
518 // Replacement for the above, temporarily separate for incremental transition.
519 // TODO(3770): Eliminate the duplication.
520 #define DEFINE_DEOPT_ELEMENT_ACCESSORS2(name, type) \
521  type DeoptimizationData::name() const { \
522  return type::cast(get(k##name##Index)); \
523  } \
524  void DeoptimizationData::Set##name(type value) { set(k##name##Index, value); }
525 
// Per-entry accessors: each deopt entry i starts at IndexForEntry(i) and the
// named slot lives at k##name##Offset within it.
526 #define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type) \
527  type DeoptimizationData::name(int i) const { \
528  return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
529  } \
530  void DeoptimizationData::Set##name(int i, type value) { \
531  set(IndexForEntry(i) + k##name##Offset, value); \
532  }