// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// builtins-sharedarraybuffer-gen.cc
1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/builtins/builtins-utils-gen.h"
6 #include "src/builtins/builtins.h"
7 #include "src/code-stub-assembler.h"
8 #include "src/objects.h"
9 
10 namespace v8 {
11 namespace internal {
12 
13 using compiler::Node;
14 template <typename T>
15 using TNode = compiler::TNode<T>;
16 
18  public:
21  : CodeStubAssembler(state) {}
22 
23  protected:
24  typedef Node* (CodeAssembler::*AssemblerFunction)(MachineType type,
25  Node* base, Node* offset,
26  Node* value,
27  Node* value_high);
28  void ValidateSharedTypedArray(Node* tagged, Node* context,
29  Node** out_instance_type,
30  Node** out_backing_store);
31  Node* ConvertTaggedAtomicIndexToWord32(Node* tagged, Node* context,
32  Node** number_index);
33  void ValidateAtomicIndex(Node* array, Node* index_word, Node* context);
34 #if DEBUG
35  void DebugSanityCheckAtomicIndex(Node* array, Node* index_word,
36  Node* context);
37 #endif
38  void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value,
39  Node* context, AssemblerFunction function,
40  Runtime::FunctionId runtime_function);
41 
42  // Create a BigInt from the result of a 64-bit atomic operation, using
43  // projections on 32-bit platforms.
44  TNode<BigInt> BigIntFromSigned64(Node* signed64);
45  TNode<BigInt> BigIntFromUnsigned64(Node* unsigned64);
46 };
47 
// https://tc39.github.io/ecma262/#sec-validatesharedintegertypedarray
// Throws a TypeError unless |tagged| is a JSTypedArray whose buffer is a
// SharedArrayBuffer and whose element type is an integer (not float, not
// Uint8Clamped) kind. On the success path, writes the elements' instance type
// to |*out_instance_type| and the byte-offset-adjusted data pointer to
// |*out_backing_store|.
void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
    Node* tagged, Node* context, Node** out_instance_type,
    Node** out_backing_store) {
  Label not_float_or_clamped(this), invalid(this);

  // Fail if it is not a heap object.
  GotoIf(TaggedIsSmi(tagged), &invalid);

  // Fail if the array's instance type is not JSTypedArray.
  GotoIfNot(InstanceTypeEqual(LoadInstanceType(tagged), JS_TYPED_ARRAY_TYPE),
            &invalid);

  // Fail if the array's JSArrayBuffer is not shared.
  TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged));
  TNode<Uint32T> bitfield = LoadJSArrayBufferBitField(array_buffer);
  GotoIfNot(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &invalid);

  // Fail if the array's element type is float32, float64 or clamped.
  // The static asserts pin the instance-type ordering that the two range
  // checks below depend on: every plain integer kind sorts below
  // FIXED_FLOAT32_ARRAY_TYPE, and both BigInt kinds sort above
  // FIXED_UINT8_CLAMPED_ARRAY_TYPE.
  Node* elements_instance_type = LoadInstanceType(LoadElements(tagged));
  STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
  GotoIf(Int32LessThan(elements_instance_type,
                       Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
         &not_float_or_clamped);
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
  Branch(Int32GreaterThan(elements_instance_type,
                          Int32Constant(FIXED_UINT8_CLAMPED_ARRAY_TYPE)),
         &not_float_or_clamped, &invalid);

  BIND(&invalid);
  {
    ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray,
                   tagged);
  }

  BIND(&not_float_or_clamped);
  *out_instance_type = elements_instance_type;

  // Fold the view's byte offset into the backing-store pointer so callers can
  // address elements with a (scaled) element index alone.
  TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer);
  TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged));
  *out_backing_store = IntPtrAdd(backing_store, byte_offset);
}
95 
// https://tc39.github.io/ecma262/#sec-validateatomicaccess
// Coerces |tagged| to an integer index, throwing a RangeError if it is not a
// valid Smi index. Returns the index as a word32; the coerced Smi is also
// stored into |*number_index|.
Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
    Node* tagged, Node* context, Node** number_index) {
  VARIABLE(var_result, MachineRepresentation::kWord32);
  Label done(this), range_error(this);

  // Returns word32 since index cannot be longer than a TypedArray length,
  // which has a uint32 maximum.
  // The |number_index| output parameter is used only for architectures that
  // don't currently have a TF implementation and forward to runtime functions
  // instead; they expect the value has already been coerced to an integer.
  *number_index = ToSmiIndex(CAST(tagged), CAST(context), &range_error);
  var_result.Bind(SmiToInt32(*number_index));
  Goto(&done);

  BIND(&range_error);
  { ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); }

  BIND(&done);
  return var_result.value();
}
117 
// Bounds half of https://tc39.github.io/ecma262/#sec-validateatomicaccess:
// throws a RangeError unless |index_word| < length of |array|.
void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
                                                             Node* index_word,
                                                             Node* context) {
  // Check if the index is in bounds. If not, throw RangeError.
  Label check_passed(this);
  Node* array_length_word32 =
      TruncateTaggedToWord32(context, LoadJSTypedArrayLength(CAST(array)));
  // Unsigned comparison: one check handles both the negative and the
  // too-large cases.
  GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);

  ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);

  BIND(&check_passed);
}
131 
#if DEBUG
// DEBUG-only re-validation of an already-checked atomic index, called after a
// user-visible coercion (ToInteger/ToBigInt) has had a chance to run JS.
void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
    Node* array, Node* index_word, Node* context) {
  // In Debug mode, we re-validate the index as a sanity check because
  // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
  // neutered and the TypedArray length can't change either, so skipping this
  // check in Release mode is safe.
  CSA_ASSERT(this,
             Uint32LessThan(index_word,
                            TruncateTaggedToWord32(
                                context, LoadJSTypedArrayLength(CAST(array)))));
}
#endif
145 
146 TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromSigned64(
147  Node* signed64) {
148  if (Is64()) {
149  return BigIntFromInt64(UncheckedCast<IntPtrT>(signed64));
150  } else {
151  TNode<IntPtrT> low = UncheckedCast<IntPtrT>(Projection(0, signed64));
152  TNode<IntPtrT> high = UncheckedCast<IntPtrT>(Projection(1, signed64));
153  return BigIntFromInt32Pair(low, high);
154  }
155 }
156 
157 TNode<BigInt> SharedArrayBufferBuiltinsAssembler::BigIntFromUnsigned64(
158  Node* unsigned64) {
159  if (Is64()) {
160  return BigIntFromUint64(UncheckedCast<UintPtrT>(unsigned64));
161  } else {
162  TNode<UintPtrT> low = UncheckedCast<UintPtrT>(Projection(0, unsigned64));
163  TNode<UintPtrT> high = UncheckedCast<UintPtrT>(Projection(1, unsigned64));
164  return BigIntFromUint32Pair(low, high);
165  }
166 }
167 
// Implements Atomics.load(typedArray, index): validates the array and index,
// then performs an atomic load of the addressed element, boxing the result as
// Smi, heap number, or BigInt depending on the element kind.
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);
  Node* index_word = ChangeUint32ToWord(index_word32);

  // Dispatch on element kind; each case scales the element index to a byte
  // offset (WordShl by log2 of the element size).
  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), other(this);
  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,     FIXED_UINT8_ARRAY_TYPE,
      FIXED_INT16_ARRAY_TYPE,    FIXED_UINT16_ARRAY_TYPE,
      FIXED_INT32_ARRAY_TYPE,    FIXED_UINT32_ARRAY_TYPE,
      FIXED_BIGINT64_ARRAY_TYPE, FIXED_BIGUINT64_ARRAY_TYPE,
  };
  Label* case_labels[] = {&i8, &u8, &i16, &u16, &i32, &u32, &i64, &u64};
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(
      SmiFromInt32(AtomicLoad(MachineType::Int8(), backing_store, index_word)));

  BIND(&u8);
  Return(SmiFromInt32(
      AtomicLoad(MachineType::Uint8(), backing_store, index_word)));

  BIND(&i16);
  Return(SmiFromInt32(
      AtomicLoad(MachineType::Int16(), backing_store, WordShl(index_word, 1))));

  BIND(&u16);
  Return(SmiFromInt32(AtomicLoad(MachineType::Uint16(), backing_store,
                                 WordShl(index_word, 1))));

  BIND(&i32);
  Return(ChangeInt32ToTagged(
      AtomicLoad(MachineType::Int32(), backing_store, WordShl(index_word, 2))));

  BIND(&u32);
  Return(ChangeUint32ToTagged(AtomicLoad(MachineType::Uint32(), backing_store,
                                         WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
  // No TF implementation of 64-bit atomic loads on this target; defer to the
  // runtime (which consumes the already-coerced |index_integer|).
  BIND(&i64);
  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));

  BIND(&u64);
  Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_integer));
#else
  BIND(&i64);
  // This uses Uint64() intentionally: AtomicLoad is not implemented for
  // Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                       WordShl(index_word, 3))));

  BIND(&u64);
  Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store,
                                         WordShl(index_word, 3))));
#endif
  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
}
240 
// Implements Atomics.store(typedArray, index, value): validates the array and
// index, coerces the value (ToInteger or ToBigInt depending on the element
// kind), performs an atomic store, and returns the coerced value.
TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* value = Parameter(Descriptor::kValue);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);
  Node* index_word = ChangeUint32ToWord(index_word32);

  Label u8(this), u16(this), u32(this), u64(this), other(this);
  // Both BigInt kinds sort above all word32 kinds, so a single comparison
  // routes them to the 64-bit path before ToInteger is applied.
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &u64);

  // ToInteger can call arbitrary JS (valueOf/toString); the DEBUG re-check
  // below guards the index afterwards.
  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);

#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif

  // A store only needs the element width, not signedness, so signed and
  // unsigned kinds share one label per width.
  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&u8);
  AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
              value_word32);
  Return(value_integer);

  BIND(&u16);
  AtomicStore(MachineRepresentation::kWord16, backing_store,
              WordShl(index_word, 1), value_word32);
  Return(value_integer);

  BIND(&u32);
  AtomicStore(MachineRepresentation::kWord32, backing_store,
              WordShl(index_word, 2), value_word32);
  Return(value_integer);

  BIND(&u64);
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
  // No TF implementation of 64-bit atomic stores on this target.
  Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_integer,
                     value));
#else
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  // On 64-bit platforms the entire value lives in |var_low|; the high word is
  // only passed on 32-bit platforms.
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  AtomicStore(MachineRepresentation::kWord64, backing_store,
              WordShl(index_word, 3), var_low.value(), high);
  Return(value_bigint);
#endif

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
}
316 
// Implements Atomics.exchange(typedArray, index, value): atomically replaces
// the addressed element with the coerced value and returns the old value.
TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* value = Parameter(Descriptor::kValue);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  // No TF atomic exchange on MIPS; the runtime performs the whole operation.
  Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
                     value));
#else
  Node* index_word = ChangeUint32ToWord(index_word32);

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);
  // Route the BigInt element kinds to |big| before applying ToInteger.
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &big);

  // ToInteger can call back into JS; re-validate the index in DEBUG builds.
  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);

  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32(AtomicExchange(MachineType::Int8(), backing_store,
                                     index_word, value_word32)));

  BIND(&u8);
  Return(SmiFromInt32(AtomicExchange(MachineType::Uint8(), backing_store,
                                     index_word, value_word32)));

  BIND(&i16);
  Return(SmiFromInt32(AtomicExchange(MachineType::Int16(), backing_store,
                                     WordShl(index_word, 1), value_word32)));

  BIND(&u16);
  Return(SmiFromInt32(AtomicExchange(MachineType::Uint16(), backing_store,
                                     WordShl(index_word, 1), value_word32)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(AtomicExchange(MachineType::Int32(), backing_store,
                                            WordShl(index_word, 2),
                                            value_word32)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(
      AtomicExchange(MachineType::Uint32(), backing_store,
                     WordShl(index_word, 2), value_word32)));

  // 64-bit kinds: coerce to BigInt, split into raw words, then branch on
  // signedness only for how the result is re-boxed.
  BIND(&big);
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  // High word is only passed on 32-bit platforms.
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
         &i64);
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
         &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: AtomicExchange is not implemented for
  // Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicExchange(MachineType::Uint64(), backing_store,
                                           WordShl(index_word, 3),
                                           var_low.value(), high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(
      AtomicExchange(MachineType::Uint64(), backing_store,
                     WordShl(index_word, 3), var_low.value(), high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
}
421 
// Implements Atomics.compareExchange(typedArray, index, expected, replacement):
// atomically stores the replacement iff the element equals the expected value,
// returning the previous element value in either case.
TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
  Node* array = Parameter(Descriptor::kArray);
  Node* index = Parameter(Descriptor::kIndex);
  Node* old_value = Parameter(Descriptor::kOldValue);
  Node* new_value = Parameter(Descriptor::kNewValue);
  Node* context = Parameter(Descriptor::kContext);

  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
  // No TF compare-exchange on these targets; defer to the runtime.
  Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
                     index_integer, old_value, new_value));
#else
  Node* index_word = ChangeUint32ToWord(index_word32);

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);
  // Route the BigInt element kinds to |big| before applying ToInteger.
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &big);

  // Both coercions can call back into JS; re-validate the index in DEBUG.
  Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
  Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  Node* old_value_word32 = TruncateTaggedToWord32(context, old_value_integer);
  Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer);

  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  BIND(&i8);
  Return(SmiFromInt32(AtomicCompareExchange(MachineType::Int8(), backing_store,
                                            index_word, old_value_word32,
                                            new_value_word32)));

  BIND(&u8);
  Return(SmiFromInt32(AtomicCompareExchange(MachineType::Uint8(), backing_store,
                                            index_word, old_value_word32,
                                            new_value_word32)));

  BIND(&i16);
  Return(SmiFromInt32(AtomicCompareExchange(
      MachineType::Int16(), backing_store, WordShl(index_word, 1),
      old_value_word32, new_value_word32)));

  BIND(&u16);
  Return(SmiFromInt32(AtomicCompareExchange(
      MachineType::Uint16(), backing_store, WordShl(index_word, 1),
      old_value_word32, new_value_word32)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(AtomicCompareExchange(
      MachineType::Int32(), backing_store, WordShl(index_word, 2),
      old_value_word32, new_value_word32)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(AtomicCompareExchange(
      MachineType::Uint32(), backing_store, WordShl(index_word, 2),
      old_value_word32, new_value_word32)));

  // 64-bit kinds: coerce both operands to BigInt, split them into raw words,
  // then branch on signedness only for how the result is re-boxed.
  BIND(&big);
  TNode<BigInt> old_value_bigint = ToBigInt(CAST(context), CAST(old_value));
  TNode<BigInt> new_value_bigint = ToBigInt(CAST(context), CAST(new_value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_old_low);
  TVARIABLE(UintPtrT, var_old_high);
  TVARIABLE(UintPtrT, var_new_low);
  TVARIABLE(UintPtrT, var_new_high);
  BigIntToRawBytes(old_value_bigint, &var_old_low, &var_old_high);
  BigIntToRawBytes(new_value_bigint, &var_new_low, &var_new_high);
  // High words are only passed on 32-bit platforms.
  Node* old_high = Is64() ? nullptr : static_cast<Node*>(var_old_high.value());
  Node* new_high = Is64() ? nullptr : static_cast<Node*>(var_new_high.value());
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
         &i64);
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
         &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: AtomicCompareExchange is not implemented
  // for Int64(), which is fine because the machine instruction only cares
  // about words.
  Return(BigIntFromSigned64(AtomicCompareExchange(
      MachineType::Uint64(), backing_store, WordShl(index_word, 3),
      var_old_low.value(), var_new_low.value(), old_high, new_high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(AtomicCompareExchange(
      MachineType::Uint64(), backing_store, WordShl(index_word, 3),
      var_old_low.value(), var_new_low.value(), old_high, new_high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
}
540 
// Stamps out one TF builtin per Atomics binary read-modify-write op
// (Atomics.add/sub/and/or/xor). Each expansion is a thin wrapper that
// forwards to AtomicBinopBuiltinCommon with the matching CodeAssembler
// atomic operation and runtime fallback.
#define BINOP_BUILTIN(op)                                       \
  TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \
    Node* array = Parameter(Descriptor::kArray);                \
    Node* index = Parameter(Descriptor::kIndex);                \
    Node* value = Parameter(Descriptor::kValue);                \
    Node* context = Parameter(Descriptor::kContext);            \
    AtomicBinopBuiltinCommon(array, index, value, context,      \
                             &CodeAssembler::Atomic##op,        \
                             Runtime::kAtomics##op);            \
  }
BINOP_BUILTIN(Add)
BINOP_BUILTIN(Sub)
BINOP_BUILTIN(And)
BINOP_BUILTIN(Or)
BINOP_BUILTIN(Xor)
#undef BINOP_BUILTIN
557 
// Shared implementation of the Atomics.add/sub/and/or/xor builtins: validates
// the array and index, coerces the value, then applies |function| atomically
// to the addressed element and returns the element's previous value. On
// architectures without a TF implementation, calls |runtime_function| instead.
void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
    Node* array, Node* index, Node* value, Node* context,
    AssemblerFunction function, Runtime::FunctionId runtime_function) {
  Node* instance_type;
  Node* backing_store;
  ValidateSharedTypedArray(array, context, &instance_type, &backing_store);

  Node* index_integer;
  Node* index_word32 =
      ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
  ValidateAtomicIndex(array, index_word32, context);

#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
  Return(CallRuntime(runtime_function, context, array, index_integer, value));
#else
  Node* index_word = ChangeUint32ToWord(index_word32);

  Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
      i64(this), u64(this), big(this), other(this);

  // Route the BigInt element kinds to |big| before applying ToInteger.
  STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
  GotoIf(
      Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
      &big);

  // ToInteger can call back into JS; re-validate the index in DEBUG builds.
  Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  Node* value_word32 = TruncateTaggedToWord32(context, value_integer);

  int32_t case_values[] = {
      FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
      FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
  };
  Label* case_labels[] = {
      &i8, &u8, &i16, &u16, &i32, &u32,
  };
  Switch(instance_type, &other, case_values, case_labels,
         arraysize(case_labels));

  // The trailing nullptr is |value_high|, unused for word32-sized operations.
  BIND(&i8);
  Return(SmiFromInt32((this->*function)(MachineType::Int8(), backing_store,
                                        index_word, value_word32, nullptr)));

  BIND(&u8);
  Return(SmiFromInt32((this->*function)(MachineType::Uint8(), backing_store,
                                        index_word, value_word32, nullptr)));

  BIND(&i16);
  Return(SmiFromInt32((this->*function)(MachineType::Int16(), backing_store,
                                        WordShl(index_word, 1), value_word32,
                                        nullptr)));

  BIND(&u16);
  Return(SmiFromInt32((this->*function)(MachineType::Uint16(), backing_store,
                                        WordShl(index_word, 1), value_word32,
                                        nullptr)));

  BIND(&i32);
  Return(ChangeInt32ToTagged(
      (this->*function)(MachineType::Int32(), backing_store,
                        WordShl(index_word, 2), value_word32, nullptr)));

  BIND(&u32);
  Return(ChangeUint32ToTagged(
      (this->*function)(MachineType::Uint32(), backing_store,
                        WordShl(index_word, 2), value_word32, nullptr)));

  // 64-bit kinds: coerce to BigInt, split into raw words, then branch on
  // signedness only for how the result is re-boxed.
  BIND(&big);
  TNode<BigInt> value_bigint = ToBigInt(CAST(context), CAST(value));
#if DEBUG
  DebugSanityCheckAtomicIndex(array, index_word32, context);
#endif
  TVARIABLE(UintPtrT, var_low);
  TVARIABLE(UintPtrT, var_high);
  BigIntToRawBytes(value_bigint, &var_low, &var_high);
  // High word is only passed on 32-bit platforms.
  Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
         &i64);
  GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
         &u64);
  Unreachable();

  BIND(&i64);
  // This uses Uint64() intentionally: Atomic* ops are not implemented for
  // Int64(), which is fine because the machine instructions only care
  // about words.
  Return(BigIntFromSigned64(
      (this->*function)(MachineType::Uint64(), backing_store,
                        WordShl(index_word, 3), var_low.value(), high)));

  BIND(&u64);
  Return(BigIntFromUnsigned64(
      (this->*function)(MachineType::Uint64(), backing_store,
                        WordShl(index_word, 3), var_low.value(), high)));

  // This shouldn't happen, we've already validated the type.
  BIND(&other);
  Unreachable();
#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
}
663 
664 } // namespace internal
665 } // namespace v8
// (doxygen cross-reference residue) Definition: libplatform.h:13