V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
futex-emulation.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/futex-emulation.h"
6 
7 #include <limits>
8 
9 #include "src/base/macros.h"
10 #include "src/base/platform/time.h"
11 #include "src/conversions.h"
12 #include "src/handles-inl.h"
13 #include "src/isolate.h"
14 #include "src/objects-inl.h"
15 #include "src/objects/js-array-buffer-inl.h"
16 
17 namespace v8 {
18 namespace internal {
19 
20 using AtomicsWaitEvent = v8::Isolate::AtomicsWaitEvent;
21 
22 base::LazyMutex FutexEmulation::mutex_ = LAZY_MUTEX_INITIALIZER;
23 base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
24  LAZY_INSTANCE_INITIALIZER;
25 
26 
27 void FutexWaitListNode::NotifyWake() {
28  // Lock the FutexEmulation mutex before notifying. We know that the mutex
29  // will have been unlocked if we are currently waiting on the condition
30  // variable. The mutex will not be locked if FutexEmulation::Wait hasn't
31  // locked it yet. In that case, we set the interrupted_
32  // flag to true, which will be tested after the mutex locked by a future wait.
33  base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
34  // if not waiting, this will not have any effect.
35  cond_.NotifyOne();
36  interrupted_ = true;
37 }
38 
39 
40 FutexWaitList::FutexWaitList() : head_(nullptr), tail_(nullptr) {}
41 
42 
43 void FutexWaitList::AddNode(FutexWaitListNode* node) {
44  DCHECK(node->prev_ == nullptr && node->next_ == nullptr);
45  if (tail_) {
46  tail_->next_ = node;
47  } else {
48  head_ = node;
49  }
50 
51  node->prev_ = tail_;
52  node->next_ = nullptr;
53  tail_ = node;
54 }
55 
56 
57 void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
58  if (node->prev_) {
59  node->prev_->next_ = node->next_;
60  } else {
61  head_ = node->next_;
62  }
63 
64  if (node->next_) {
65  node->next_->prev_ = node->prev_;
66  } else {
67  tail_ = node->prev_;
68  }
69 
70  node->prev_ = node->next_ = nullptr;
71 }
72 
// Requests that the isolate's pending Atomics.wait be stopped: sets the
// stopped_ flag under the FutexEmulation lock, then wakes the waiting node.
// FutexEmulation::Wait checks has_stopped() after each wakeup.
void AtomicsWaitWakeHandle::Wake() {
  // Adding a separate `NotifyWake()` variant that doesn't acquire the lock
  // itself would likely just add unnecessary complexity.
  // The split lock by itself isn't an issue, as long as the caller properly
  // synchronizes this with the closing `AtomicsWaitCallback`.
  {
    base::MutexGuard lock_guard(FutexEmulation::mutex_.Pointer());
    stopped_ = true;
  }
  isolate_->futex_wait_list_node()->NotifyWake();
}
84 
85 enum WaitReturnValue : int { kOk = 0, kNotEqual = 1, kTimedOut = 2 };
86 
87 Object* FutexEmulation::WaitJs(Isolate* isolate,
88  Handle<JSArrayBuffer> array_buffer, size_t addr,
89  int32_t value, double rel_timeout_ms) {
90  Object* res = Wait(isolate, array_buffer, addr, value, rel_timeout_ms);
91  if (res->IsSmi()) {
92  int val = Smi::ToInt(res);
93  switch (val) {
94  case WaitReturnValue::kOk:
95  return ReadOnlyRoots(isolate).ok();
96  case WaitReturnValue::kNotEqual:
97  return ReadOnlyRoots(isolate).not_equal();
98  case WaitReturnValue::kTimedOut:
99  return ReadOnlyRoots(isolate).timed_out();
100  default:
101  UNREACHABLE();
102  }
103  }
104  return res;
105 }
106 
// Implements the futex-style wait underlying Atomics.wait: blocks the
// current thread until the int32 at |addr| in |array_buffer| is no longer
// |value|, a Wake() targets this address, an interrupt terminates execution,
// or |rel_timeout_ms| elapses. Returns a Smi-boxed WaitReturnValue, or an
// exception object (from an interrupt or a scheduled exception raised by the
// AtomicsWaitCallback). The manual Lock()/Unlock() sequencing below is
// deliberate and order-sensitive — see the numbered comment inside the loop.
Object* FutexEmulation::Wait(Isolate* isolate,
                             Handle<JSArrayBuffer> array_buffer, size_t addr,
                             int32_t value, double rel_timeout_ms) {
  DCHECK_LT(addr, array_buffer->byte_length());

  bool use_timeout = rel_timeout_ms != V8_INFINITY;

  base::TimeDelta rel_timeout;
  if (use_timeout) {
    // Convert to nanoseconds.
    double rel_timeout_ns = rel_timeout_ms *
                            base::Time::kNanosecondsPerMicrosecond *
                            base::Time::kMicrosecondsPerMillisecond;
    if (rel_timeout_ns >
        static_cast<double>(std::numeric_limits<int64_t>::max())) {
      // 2**63 nanoseconds is 292 years. Let's just treat anything greater as
      // infinite.
      use_timeout = false;
    } else {
      rel_timeout = base::TimeDelta::FromNanoseconds(
          static_cast<int64_t>(rel_timeout_ns));
    }
  }

  // Handle through which an embedder's AtomicsWaitCallback may stop this
  // wait from another thread (see AtomicsWaitWakeHandle::Wake above).
  AtomicsWaitWakeHandle stop_handle(isolate);

  isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
                                  addr, value, rel_timeout_ms, &stop_handle);

  // The kStartWait callback may have scheduled an exception; bail out before
  // ever touching the wait list.
  if (isolate->has_scheduled_exception()) {
    return isolate->PromoteScheduledException();
  }

  // |result| is assigned on every path that breaks out of the do-block below.
  Object* result;
  AtomicsWaitEvent callback_result = AtomicsWaitEvent::kWokenUp;

  do {  // Not really a loop, just makes it easier to break out early.
    base::MutexGuard lock_guard(mutex_.Pointer());
    void* backing_store = array_buffer->backing_store();

    FutexWaitListNode* node = isolate->futex_wait_list_node();
    node->backing_store_ = backing_store;
    node->wait_addr_ = addr;
    node->waiting_ = true;

    // Reset node->waiting_ = false when leaving this scope (but while
    // still holding the lock).
    ResetWaitingOnScopeExit reset_waiting(node);

    // Check the wait condition under the lock: if the value already differs,
    // return "not-equal" without ever entering the wait list.
    int32_t* p =
        reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);
    if (*p != value) {
      result = Smi::FromInt(WaitReturnValue::kNotEqual);
      callback_result = AtomicsWaitEvent::kNotEqual;
      break;
    }

    base::TimeTicks timeout_time;
    base::TimeTicks current_time;

    if (use_timeout) {
      current_time = base::TimeTicks::Now();
      timeout_time = current_time + rel_timeout;
    }

    // Make this thread discoverable by Wake() / NumWaitersForTesting().
    wait_list_.Pointer()->AddNode(node);

    while (true) {
      // Consume any interrupt flagged by NotifyWake() before we released or
      // while we hold the lock.
      bool interrupted = node->interrupted_;
      node->interrupted_ = false;

      // Unlock the mutex here to prevent deadlock from lock ordering between
      // mutex_ and mutexes locked by HandleInterrupts.
      mutex_.Pointer()->Unlock();

      // Because the mutex is unlocked, we have to be careful about not dropping
      // an interrupt. The notification can happen in three different places:
      // 1) Before Wait is called: the notification will be dropped, but
      // interrupted_ will be set to 1. This will be checked below.
      // 2) After interrupted has been checked here, but before mutex_ is
      // acquired: interrupted is checked again below, with mutex_ locked.
      // Because the wakeup signal also acquires mutex_, we know it will not
      // be able to notify until mutex_ is released below, when waiting on
      // the condition variable.
      // 3) After the mutex is released in the call to WaitFor(): this
      // notification will wake up the condition variable. node->waiting() will
      // be false, so we'll loop and then check interrupts.
      if (interrupted) {
        Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
        if (interrupt_object->IsException(isolate)) {
          // Execution was terminated; re-acquire the lock so RemoveNode and
          // the ResetWaitingOnScopeExit destructor run under it.
          result = interrupt_object;
          callback_result = AtomicsWaitEvent::kTerminatedExecution;
          mutex_.Pointer()->Lock();
          break;
        }
      }

      mutex_.Pointer()->Lock();

      if (node->interrupted_) {
        // An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
        continue;
      }

      // The embedder asked to stop this wait via the wake handle.
      if (stop_handle.has_stopped()) {
        node->waiting_ = false;
        callback_result = AtomicsWaitEvent::kAPIStopped;
      }

      // waiting_ is cleared by Wake() (or just above); either way we're done.
      // Note: when stopped via the API, callback_result stays kAPIStopped but
      // the JS-visible result is still kOk.
      if (!node->waiting_) {
        result = Smi::FromInt(WaitReturnValue::kOk);
        break;
      }

      // No interrupts, now wait.
      if (use_timeout) {
        current_time = base::TimeTicks::Now();
        if (current_time >= timeout_time) {
          result = Smi::FromInt(WaitReturnValue::kTimedOut);
          callback_result = AtomicsWaitEvent::kTimedOut;
          break;
        }

        base::TimeDelta time_until_timeout = timeout_time - current_time;
        DCHECK_GE(time_until_timeout.InMicroseconds(), 0);
        bool wait_for_result =
            node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
        USE(wait_for_result);
      } else {
        node->cond_.Wait(mutex_.Pointer());
      }

      // Spurious wakeup, interrupt or timeout.
    }

    // Still under the lock (every break path above re-acquires it).
    wait_list_.Pointer()->RemoveNode(node);
  } while (false);

  isolate->RunAtomicsWaitCallback(callback_result, array_buffer, addr, value,
                                  rel_timeout_ms, nullptr);

  // The closing callback may itself schedule an exception, which then
  // replaces the computed result — except after termination, which must not
  // be masked.
  if (isolate->has_scheduled_exception()) {
    CHECK_NE(callback_result, AtomicsWaitEvent::kTerminatedExecution);
    result = isolate->PromoteScheduledException();
  }

  return result;
}
255 
256 Object* FutexEmulation::Wake(Handle<JSArrayBuffer> array_buffer, size_t addr,
257  uint32_t num_waiters_to_wake) {
258  DCHECK_LT(addr, array_buffer->byte_length());
259 
260  int waiters_woken = 0;
261  void* backing_store = array_buffer->backing_store();
262 
263  base::MutexGuard lock_guard(mutex_.Pointer());
264  FutexWaitListNode* node = wait_list_.Pointer()->head_;
265  while (node && num_waiters_to_wake > 0) {
266  if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
267  node->waiting_ = false;
268  node->cond_.NotifyOne();
269  if (num_waiters_to_wake != kWakeAll) {
270  --num_waiters_to_wake;
271  }
272  waiters_woken++;
273  }
274 
275  node = node->next_;
276  }
277 
278  return Smi::FromInt(waiters_woken);
279 }
280 
281 Object* FutexEmulation::NumWaitersForTesting(Handle<JSArrayBuffer> array_buffer,
282  size_t addr) {
283  DCHECK_LT(addr, array_buffer->byte_length());
284  void* backing_store = array_buffer->backing_store();
285 
286  base::MutexGuard lock_guard(mutex_.Pointer());
287 
288  int waiters = 0;
289  FutexWaitListNode* node = wait_list_.Pointer()->head_;
290  while (node) {
291  if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
292  node->waiting_) {
293  waiters++;
294  }
295 
296  node = node->next_;
297  }
298 
299  return Smi::FromInt(waiters);
300 }
301 
302 } // namespace internal
303 } // namespace v8
Definition: libplatform.h:13