5 #include "src/arguments-inl.h" 6 #include "src/base/macros.h" 7 #include "src/base/platform/mutex.h" 8 #include "src/conversions-inl.h" 9 #include "src/counters.h" 10 #include "src/heap/factory.h" 11 #include "src/objects/js-array-buffer-inl.h" 12 #include "src/runtime/runtime-utils.h" 22 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ 23 V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X 31 #ifdef V8_TARGET_ARCH_32_BIT 32 #pragma GCC diagnostic push 33 #pragma GCC diagnostic ignored "-Wpragmas" 34 #pragma GCC diagnostic ignored "-Watomic-alignment" 35 #endif // V8_TARGET_ARCH_32_BIT 38 inline T LoadSeqCst(T* p) {
39 return __atomic_load_n(p, __ATOMIC_SEQ_CST);

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure, __atomic_compare_exchange_n stores the value actually found
  // at *p into |oldval|; on success |oldval| is left untouched. Returning it
  // therefore always yields the value the location held before the call.
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}
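
// A minimal illustration of how the helpers above compose (hypothetical
// standalone usage, not code that runs in V8):
//
//   int32_t cell = 0;
//   StoreSeqCst(&cell, 5);                      // cell == 5
//   int32_t prev = AddSeqCst(&cell, 2);         // prev == 5, cell == 7
//   prev = CompareExchangeSeqCst(&cell, 7, 0);  // prev == 7, cell == 0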

#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic pop
#endif  // V8_TARGET_ARCH_32_BIT

#elif V8_CC_MSVC

// MSVC has no __atomic builtins; map the operations onto the Interlocked*
// intrinsics, which are sequentially consistent. The aliases below give every
// width a uniformly suffixed name so ATOMIC_OPS can paste the suffix on.
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr64 _InterlockedOr64
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

#if defined(V8_HOST_ARCH_ARM64)
#define InterlockedExchange8 _InterlockedExchange8
#endif

#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }                                                                         \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)
ATOMIC_OPS(uint16_t, 16, short)
ATOMIC_OPS(int32_t, 32, long)
ATOMIC_OPS(uint32_t, 32, long)
ATOMIC_OPS(int64_t, 64, __int64)
ATOMIC_OPS(uint64_t, 64, __int64)
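
// Note that SubSeqCst is expressed as an InterlockedExchangeAdd of the
// two's-complement negation of the operand, which makes it behave as an
// atomic fetch-and-subtract.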

// MSVC has no intrinsic for a plain sequentially consistent load or store.
// These templates are needed for the code below to compile, but the
// platforms selected by the enclosing #if are not built with MSVC, so they
// are never reached at runtime.
template <typename T>
inline T LoadSeqCst(T* p) {
  UNIMPLEMENTED();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

#undef ATOMIC_OPS

#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
#undef InterlockedOr64
#undef InterlockedOr32
#undef InterlockedXor32

#if defined(V8_HOST_ARCH_ARM64)
#undef InterlockedExchange8
#endif

#else

#error Unsupported platform!

#endif  // V8_CC_GNU

// FromObject<T> extracts a raw integer from a JS value that the callers
// below have already normalized: a Number for the 32-bit-and-smaller
// specializations, a BigInt for the 64-bit ones.
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsUint64();
}

template <>
inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsInt64();
}
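
// Note the wrapping behaviour: FromObject<uint8_t> converts via
// NumberToUint32 and then truncates to 8 bits on return, so a JS value such
// as 257 is stored as 1, matching the usual typed-array conversion
// semantics.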

// ToObject boxes a raw integer result as a JS value. Everything up to 16
// bits always fits in a Smi; 32-bit values may need a heap number; 64-bit
// values become BigInts.
inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

inline Object* ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object* ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object* ToObject(Isolate* isolate, int64_t t) {
  return *BigInt::FromInt64(isolate, t);
}

inline Object* ToObject(Isolate* isolate, uint64_t t) {
  return *BigInt::FromUint64(isolate, t);
}
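
// For example, ToObject(isolate, uint32_t{0x80000000}) cannot return a Smi,
// since the value exceeds Smi::kMaxValue; the uint32_t overload above goes
// through factory()->NewNumber() and may allocate a heap number instead.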

template <typename T>
struct Load {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index) {
    T result = LoadSeqCst(static_cast<T*>(buffer) + index);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Store {
  static inline void Do(Isolate* isolate, void* buffer, size_t index,
                        Handle<Object> obj) {
    T value = FromObject<T>(obj);
    StoreSeqCst(static_cast<T*>(buffer) + index, value);
  }
};

template <typename T>
struct Exchange {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj,
                                 Handle<Object> newobj) {
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}
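
// Atomics.compareExchange returns the value read from memory whether or not
// the exchange took place, which is exactly the value CompareExchangeSeqCst
// hands back above.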

template <typename T>
struct Add {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Sub {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct And {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Or {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Xor {
  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};
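
// Each of the structs above exposes a uniform
// Do(isolate, buffer, index, value) entry point, so the generic driver below
// can be instantiated once per operation, e.g.
// GetModifySetValueInBuffer<Add>(args, isolate).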

// V is invoked with (Type, type, TYPE, C type) for every integer typed-array
// kind supported by the non-BigInt atomic operations.
#define INTEGER_TYPED_ARRAYS(V)       \
  V(Uint8, uint8, UINT8, uint8_t)     \
  V(Int8, int8, INT8, int8_t)         \
  V(Uint16, uint16, UINT16, uint16_t) \
  V(Int16, int16, INT16, int16_t)     \
  V(Uint32, uint32, UINT32, uint32_t) \
  V(Int32, int32, INT32, int32_t)

// Shared driver for Atomics.exchange/add/sub/and/or/xor: converts the value
// argument, checks bounds, and dispatches on the element type to Op<ctype>.
template <template <typename> class Op>
Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
  CHECK(sta->GetBuffer()->is_shared());

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                       BigInt::FromObject(isolate, value_obj));

    CHECK_LT(index, NumberToSize(sta->length()));
    if (sta->type() == kExternalBigInt64Array) {
      return Op<int64_t>::Do(isolate, source, index, bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return Op<uint64_t>::Do(isolate, source, index, bigint);
  }

  Handle<Object> value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value_obj));

  CHECK_LT(index, NumberToSize(sta->length()));

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return Op<ctype>::Do(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

}  // anonymous namespace

// Implements the 64-bit (BigInt64/BigUint64) case of Atomics.load; the
// smaller element sizes never reach the runtime here.
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CHECK(sta->GetBuffer()->is_shared());

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);

  CHECK_LT(index, NumberToSize(sta->length()));
  if (sta->type() == kExternalBigInt64Array) {
    return Load<int64_t>::Do(isolate, source, index);
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  return Load<uint64_t>::Do(isolate, source, index);
}
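
// Illustrative JS that reaches the 64-bit path above:
//
//   const i64 = new BigInt64Array(new SharedArrayBuffer(8));
//   Atomics.load(i64, 0);  // 0n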

// Implements the 64-bit (BigInt64/BigUint64) case of Atomics.store.
RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
  CHECK(sta->GetBuffer()->is_shared());

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  Handle<BigInt> bigint;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                     BigInt::FromObject(isolate, value_obj));

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);

  CHECK_LT(index, NumberToSize(sta->length()));
  if (sta->type() == kExternalBigInt64Array) {
    Store<int64_t>::Do(isolate, source, index, bigint);
    return *bigint;
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  Store<uint64_t>::Do(isolate, source, index, bigint);
  return *bigint;
}
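
// Per the spec, Atomics.store returns the value that was stored (after
// conversion), which is why the function above returns *bigint rather than
// undefined.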

// Implements Atomics.exchange(typedArray, index, value); returns the value
// previously held at the index.
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  return GetModifySetValueInBuffer<Exchange>(args, isolate);
}

// Implements Atomics.compareExchange(typedArray, index, expected, value).
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
  CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> old_bigint;
    Handle<BigInt> new_bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));

    CHECK_LT(index, NumberToSize(sta->length()));
    if (sta->type() == kExternalBigInt64Array) {
      return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
                                        new_bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
                                       new_bigint);
  }

  Handle<Object> old_value;
  Handle<Object> new_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
                                     Object::ToInteger(isolate, old_value_obj));
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
                                     Object::ToInteger(isolate, new_value_obj));

  CHECK_LT(index, NumberToSize(sta->length()));

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype)                  \
  case kExternal##Type##Array:                                         \
    return DoCompareExchange<ctype>(isolate, source, index, old_value, \
                                    new_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// Implements Atomics.add(typedArray, index, value) for integer element
// types; returns the value at the index before the addition.
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  return GetModifySetValueInBuffer<Add>(args, isolate);
}

// Implements Atomics.sub(typedArray, index, value); returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  return GetModifySetValueInBuffer<Sub>(args, isolate);
}

// Implements Atomics.and(typedArray, index, value); returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  return GetModifySetValueInBuffer<And>(args, isolate);
}

// Implements Atomics.or(typedArray, index, value); returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  return GetModifySetValueInBuffer<Or>(args, isolate);
}

// Implements Atomics.xor(typedArray, index, value); returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  return GetModifySetValueInBuffer<Xor>(args, isolate);
}

#undef INTEGER_TYPED_ARRAYS

#else

// On all other architectures these operations are implemented entirely in
// generated code, so the runtime entry points must never be reached.
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }

#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X

}  // namespace internal
}  // namespace v8