#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"
#include "src/base/macros.h"

namespace v8 {
namespace base {

inline void SeqCst_MemoryFence() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038, where std::atomic_thread_fence was
  // declared but not defined, leading to unresolved references at link time.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

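// Usage sketch (hypothetical flags, not part of this header): a full fence
// can order a relaxed store before a later relaxed load, as in a
// Dekker-style handshake:
//
//   Relaxed_Store(&flag_a, 1);
//   SeqCst_MemoryFence();
//   Atomic32 b = Relaxed_Load(&flag_b);
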
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // On failure, __atomic_compare_exchange_n writes the current value of *ptr
  // back into old_value, so returning old_value always yields the previous
  // contents of *ptr, as the CompareAndSwap contract requires.
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

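// Caller-side sketch (hypothetical `counter` variable): the return value is
// the previous contents of *ptr, so success is detected by comparing it
// against the expected value:
//
//   Atomic32 prev = Relaxed_CompareAndSwap(&counter, expected, desired);
//   bool succeeded = (prev == expected);
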
inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

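// Note that both increment helpers return the *new* value: __atomic_fetch_add
// yields the previous value, to which `increment` is added back. Sketch with
// a hypothetical counter:
//
//   Atomic32 new_count = Barrier_AtomicIncrement(&counter, 1);
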
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
                                      Atomic8 new_value) {
  bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                                            __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  USE(result);  // Silence unused-variable warnings.
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

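// The failure ordering is __ATOMIC_RELAXED because a failed compare-exchange
// performs no store, and the builtin (like C++ compare_exchange) does not
// permit a release ordering on the failure path.
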
inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

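// Publication sketch (hypothetical `payload` and `ready` variables): an
// Acquire_Load that observes a value written by a Release_Store
// synchronizes-with that store, making earlier plain writes visible.
//
//   // Writer:
//   payload = 42;
//   Release_Store(&ready, 1);
//
//   // Reader:
//   if (Acquire_Load(&ready) == 1) {
//     // payload is guaranteed to read 42 here.
//   }
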
#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_