// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// runtime-atomics.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/arguments-inl.h"
6 #include "src/base/macros.h"
7 #include "src/base/platform/mutex.h"
8 #include "src/conversions-inl.h"
9 #include "src/counters.h"
10 #include "src/heap/factory.h"
11 #include "src/objects/js-array-buffer-inl.h"
12 #include "src/runtime/runtime-utils.h"
13 
14 // Implement Atomic accesses to SharedArrayBuffers as defined in the
15 // SharedArrayBuffer draft spec, found here
16 // https://github.com/tc39/ecmascript_sharedmem
17 
18 namespace v8 {
19 namespace internal {
20 
21 // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
22 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
23  V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
24 
25 namespace {
26 
27 #if V8_CC_GNU
28 
29 // GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms
30 // can be slow. Good to know, but we don't have a choice.
31 #ifdef V8_TARGET_ARCH_32_BIT
32 #pragma GCC diagnostic push
33 #pragma GCC diagnostic ignored "-Wpragmas"
34 #pragma GCC diagnostic ignored "-Watomic-alignment"
35 #endif // V8_TARGET_ARCH_32_BIT
36 
37 template <typename T>
38 inline T LoadSeqCst(T* p) {
39  return __atomic_load_n(p, __ATOMIC_SEQ_CST);
40 }
41 
42 template <typename T>
43 inline void StoreSeqCst(T* p, T value) {
44  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
45 }
46 
47 template <typename T>
48 inline T ExchangeSeqCst(T* p, T value) {
49  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
50 }
51 
52 template <typename T>
53 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
54  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
55  __ATOMIC_SEQ_CST);
56  return oldval;
57 }
58 
59 template <typename T>
60 inline T AddSeqCst(T* p, T value) {
61  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
62 }
63 
64 template <typename T>
65 inline T SubSeqCst(T* p, T value) {
66  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
67 }
68 
69 template <typename T>
70 inline T AndSeqCst(T* p, T value) {
71  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
72 }
73 
74 template <typename T>
75 inline T OrSeqCst(T* p, T value) {
76  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
77 }
78 
79 template <typename T>
80 inline T XorSeqCst(T* p, T value) {
81  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
82 }
83 
84 #ifdef V8_TARGET_ARCH_32_BIT
85 #pragma GCC diagnostic pop
86 #endif // V8_TARGET_ARCH_32_BIT
87 
88 #elif V8_CC_MSVC
89 
90 #define InterlockedExchange32 _InterlockedExchange
91 #define InterlockedCompareExchange32 _InterlockedCompareExchange
92 #define InterlockedCompareExchange8 _InterlockedCompareExchange8
93 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
94 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
95 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
96 #define InterlockedAnd32 _InterlockedAnd
97 #define InterlockedOr64 _InterlockedOr64
98 #define InterlockedOr32 _InterlockedOr
99 #define InterlockedXor32 _InterlockedXor
100 
101 #if defined(V8_HOST_ARCH_ARM64)
102 #define InterlockedExchange8 _InterlockedExchange8
103 #endif
104 
105 #define ATOMIC_OPS(type, suffix, vctype) \
106  inline type ExchangeSeqCst(type* p, type value) { \
107  return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
108  bit_cast<vctype>(value)); \
109  } \
110  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
111  return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
112  bit_cast<vctype>(newval), \
113  bit_cast<vctype>(oldval)); \
114  } \
115  inline type AddSeqCst(type* p, type value) { \
116  return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
117  bit_cast<vctype>(value)); \
118  } \
119  inline type SubSeqCst(type* p, type value) { \
120  return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
121  -bit_cast<vctype>(value)); \
122  } \
123  inline type AndSeqCst(type* p, type value) { \
124  return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
125  bit_cast<vctype>(value)); \
126  } \
127  inline type OrSeqCst(type* p, type value) { \
128  return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
129  bit_cast<vctype>(value)); \
130  } \
131  inline type XorSeqCst(type* p, type value) { \
132  return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
133  bit_cast<vctype>(value)); \
134  }
135 
136 ATOMIC_OPS(int8_t, 8, char)
137 ATOMIC_OPS(uint8_t, 8, char)
138 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
139 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
140 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
141 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
142 ATOMIC_OPS(int64_t, 64, __int64)
143 ATOMIC_OPS(uint64_t, 64, __int64)
144 
145 template <typename T>
146 inline T LoadSeqCst(T* p) {
147  UNREACHABLE();
148 }
149 
150 template <typename T>
151 inline void StoreSeqCst(T* p, T value) {
152  UNREACHABLE();
153 }
154 
155 #undef ATOMIC_OPS
156 
157 #undef InterlockedExchange32
158 #undef InterlockedCompareExchange32
159 #undef InterlockedCompareExchange8
160 #undef InterlockedExchangeAdd32
161 #undef InterlockedExchangeAdd16
162 #undef InterlockedExchangeAdd8
163 #undef InterlockedAnd32
164 #undef InterlockedOr64
165 #undef InterlockedOr32
166 #undef InterlockedXor32
167 
168 #if defined(V8_HOST_ARCH_ARM64)
169 #undef InterlockedExchange8
170 #endif
171 
172 #else
173 
174 #error Unsupported platform!
175 
176 #endif
177 
178 template <typename T>
179 T FromObject(Handle<Object> number);
180 
181 template <>
182 inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
183  return NumberToUint32(*number);
184 }
185 
186 template <>
187 inline int8_t FromObject<int8_t>(Handle<Object> number) {
188  return NumberToInt32(*number);
189 }
190 
191 template <>
192 inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
193  return NumberToUint32(*number);
194 }
195 
196 template <>
197 inline int16_t FromObject<int16_t>(Handle<Object> number) {
198  return NumberToInt32(*number);
199 }
200 
201 template <>
202 inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
203  return NumberToUint32(*number);
204 }
205 
206 template <>
207 inline int32_t FromObject<int32_t>(Handle<Object> number) {
208  return NumberToInt32(*number);
209 }
210 
211 template <>
212 inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
213  return Handle<BigInt>::cast(bigint)->AsUint64();
214 }
215 
216 template <>
217 inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
218  return Handle<BigInt>::cast(bigint)->AsInt64();
219 }
220 
221 inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
222 
223 inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
224 
225 inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
226 
227 inline Object* ToObject(Isolate* isolate, uint16_t t) {
228  return Smi::FromInt(t);
229 }
230 
231 inline Object* ToObject(Isolate* isolate, int32_t t) {
232  return *isolate->factory()->NewNumber(t);
233 }
234 
235 inline Object* ToObject(Isolate* isolate, uint32_t t) {
236  return *isolate->factory()->NewNumber(t);
237 }
238 
239 inline Object* ToObject(Isolate* isolate, int64_t t) {
240  return *BigInt::FromInt64(isolate, t);
241 }
242 
243 inline Object* ToObject(Isolate* isolate, uint64_t t) {
244  return *BigInt::FromUint64(isolate, t);
245 }
246 
247 template <typename T>
248 struct Load {
249  static inline Object* Do(Isolate* isolate, void* buffer, size_t index) {
250  T result = LoadSeqCst(static_cast<T*>(buffer) + index);
251  return ToObject(isolate, result);
252  }
253 };
254 
255 template <typename T>
256 struct Store {
257  static inline void Do(Isolate* isolate, void* buffer, size_t index,
258  Handle<Object> obj) {
259  T value = FromObject<T>(obj);
260  StoreSeqCst(static_cast<T*>(buffer) + index, value);
261  }
262 };
263 
264 template <typename T>
265 struct Exchange {
266  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
267  Handle<Object> obj) {
268  T value = FromObject<T>(obj);
269  T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
270  return ToObject(isolate, result);
271  }
272 };
273 
274 template <typename T>
275 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
276  Handle<Object> oldobj, Handle<Object> newobj) {
277  T oldval = FromObject<T>(oldobj);
278  T newval = FromObject<T>(newobj);
279  T result =
280  CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
281  return ToObject(isolate, result);
282 }
283 
284 template <typename T>
285 struct Add {
286  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
287  Handle<Object> obj) {
288  T value = FromObject<T>(obj);
289  T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
290  return ToObject(isolate, result);
291  }
292 };
293 
294 template <typename T>
295 struct Sub {
296  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
297  Handle<Object> obj) {
298  T value = FromObject<T>(obj);
299  T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
300  return ToObject(isolate, result);
301  }
302 };
303 
304 template <typename T>
305 struct And {
306  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
307  Handle<Object> obj) {
308  T value = FromObject<T>(obj);
309  T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
310  return ToObject(isolate, result);
311  }
312 };
313 
314 template <typename T>
315 struct Or {
316  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
317  Handle<Object> obj) {
318  T value = FromObject<T>(obj);
319  T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
320  return ToObject(isolate, result);
321  }
322 };
323 
324 template <typename T>
325 struct Xor {
326  static inline Object* Do(Isolate* isolate, void* buffer, size_t index,
327  Handle<Object> obj) {
328  T value = FromObject<T>(obj);
329  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
330  return ToObject(isolate, result);
331  }
332 };
333 
334 } // anonymous namespace
335 
336 // Duplicated from objects.h
337 // V has parameters (Type, type, TYPE, C type)
338 #define INTEGER_TYPED_ARRAYS(V) \
339  V(Uint8, uint8, UINT8, uint8_t) \
340  V(Int8, int8, INT8, int8_t) \
341  V(Uint16, uint16, UINT16, uint16_t) \
342  V(Int16, int16, INT16, int16_t) \
343  V(Uint32, uint32, UINT32, uint32_t) \
344  V(Int32, int32, INT32, int32_t)
345 
346 // This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer
347 // but also includes the ToInteger/ToBigInt conversion that's part of
348 // https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
349 template <template <typename> class Op>
350 Object* GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
351  HandleScope scope(isolate);
352  DCHECK_EQ(3, args.length());
353  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
354  CONVERT_SIZE_ARG_CHECKED(index, 1);
355  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
356  CHECK(sta->GetBuffer()->is_shared());
357 
358  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
359  sta->byte_offset();
360 
361  if (sta->type() >= kExternalBigInt64Array) {
362  Handle<BigInt> bigint;
363  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
364  BigInt::FromObject(isolate, value_obj));
365  // SharedArrayBuffers are not neuterable.
366  CHECK_LT(index, NumberToSize(sta->length()));
367  if (sta->type() == kExternalBigInt64Array) {
368  return Op<int64_t>::Do(isolate, source, index, bigint);
369  }
370  DCHECK(sta->type() == kExternalBigUint64Array);
371  return Op<uint64_t>::Do(isolate, source, index, bigint);
372  }
373 
374  Handle<Object> value;
375  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
376  Object::ToInteger(isolate, value_obj));
377  // SharedArrayBuffers are not neuterable.
378  CHECK_LT(index, NumberToSize(sta->length()));
379 
380  switch (sta->type()) {
381 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
382  case kExternal##Type##Array: \
383  return Op<ctype>::Do(isolate, source, index, value);
384 
385  INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
386 #undef TYPED_ARRAY_CASE
387 
388  default:
389  break;
390  }
391 
392  UNREACHABLE();
393 }
394 
395 RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
396  HandleScope scope(isolate);
397  DCHECK_EQ(2, args.length());
398  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
399  CONVERT_SIZE_ARG_CHECKED(index, 1);
400  CHECK(sta->GetBuffer()->is_shared());
401 
402  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
403  sta->byte_offset();
404 
405  DCHECK(sta->type() == kExternalBigInt64Array ||
406  sta->type() == kExternalBigUint64Array);
407  // SharedArrayBuffers are not neuterable.
408  CHECK_LT(index, NumberToSize(sta->length()));
409  if (sta->type() == kExternalBigInt64Array) {
410  return Load<int64_t>::Do(isolate, source, index);
411  }
412  DCHECK(sta->type() == kExternalBigUint64Array);
413  return Load<uint64_t>::Do(isolate, source, index);
414 }
415 
416 RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
417  HandleScope scope(isolate);
418  DCHECK_EQ(3, args.length());
419  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
420  CONVERT_SIZE_ARG_CHECKED(index, 1);
421  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);
422  CHECK(sta->GetBuffer()->is_shared());
423 
424  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
425  sta->byte_offset();
426 
427  Handle<BigInt> bigint;
428  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
429  BigInt::FromObject(isolate, value_obj));
430 
431  DCHECK(sta->type() == kExternalBigInt64Array ||
432  sta->type() == kExternalBigUint64Array);
433  // SharedArrayBuffers are not neuterable.
434  CHECK_LT(index, NumberToSize(sta->length()));
435  if (sta->type() == kExternalBigInt64Array) {
436  Store<int64_t>::Do(isolate, source, index, bigint);
437  return *bigint;
438  }
439  DCHECK(sta->type() == kExternalBigUint64Array);
440  Store<uint64_t>::Do(isolate, source, index, bigint);
441  return *bigint;
442 }
443 
444 RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
445  return GetModifySetValueInBuffer<Exchange>(args, isolate);
446 }
447 
448 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
449  HandleScope scope(isolate);
450  DCHECK_EQ(4, args.length());
451  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
452  CONVERT_SIZE_ARG_CHECKED(index, 1);
453  CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
454  CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
455  CHECK(sta->GetBuffer()->is_shared());
456  CHECK_LT(index, NumberToSize(sta->length()));
457 
458  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
459  sta->byte_offset();
460 
461  if (sta->type() >= kExternalBigInt64Array) {
462  Handle<BigInt> old_bigint;
463  Handle<BigInt> new_bigint;
464  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
465  isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
466  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
467  isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));
468  // SharedArrayBuffers are not neuterable.
469  CHECK_LT(index, NumberToSize(sta->length()));
470  if (sta->type() == kExternalBigInt64Array) {
471  return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
472  new_bigint);
473  }
474  DCHECK(sta->type() == kExternalBigUint64Array);
475  return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
476  new_bigint);
477  }
478 
479  Handle<Object> old_value;
480  Handle<Object> new_value;
481  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
482  Object::ToInteger(isolate, old_value_obj));
483  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
484  Object::ToInteger(isolate, new_value_obj));
485  // SharedArrayBuffers are not neuterable.
486  CHECK_LT(index, NumberToSize(sta->length()));
487 
488  switch (sta->type()) {
489 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
490  case kExternal##Type##Array: \
491  return DoCompareExchange<ctype>(isolate, source, index, old_value, \
492  new_value);
493 
494  INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
495 #undef TYPED_ARRAY_CASE
496 
497  default:
498  break;
499  }
500 
501  UNREACHABLE();
502 }
503 
504 // ES #sec-atomics.add
505 // Atomics.add( typedArray, index, value )
506 RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
507  return GetModifySetValueInBuffer<Add>(args, isolate);
508 }
509 
510 // ES #sec-atomics.sub
511 // Atomics.sub( typedArray, index, value )
512 RUNTIME_FUNCTION(Runtime_AtomicsSub) {
513  return GetModifySetValueInBuffer<Sub>(args, isolate);
514 }
515 
516 // ES #sec-atomics.and
517 // Atomics.and( typedArray, index, value )
518 RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
519  return GetModifySetValueInBuffer<And>(args, isolate);
520 }
521 
522 // ES #sec-atomics.or
523 // Atomics.or( typedArray, index, value )
524 RUNTIME_FUNCTION(Runtime_AtomicsOr) {
525  return GetModifySetValueInBuffer<Or>(args, isolate);
526 }
527 
528 // ES #sec-atomics.xor
529 // Atomics.xor( typedArray, index, value )
530 RUNTIME_FUNCTION(Runtime_AtomicsXor) {
531  return GetModifySetValueInBuffer<Xor>(args, isolate);
532 }
533 
534 #undef INTEGER_TYPED_ARRAYS
535 
536 #else
537 
538 RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }
539 
540 RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }
541 
542 RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }
543 
544 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }
545 
546 RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }
547 
548 RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }
549 
550 RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }
551 
552 RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }
553 
554 RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
555 
556 #endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
557  // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
558 
559 } // namespace internal
560 } // namespace v8
// Definition: libplatform.h:13