// V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
// time.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/base/platform/time.h"
6 
7 #if V8_OS_POSIX
8 #include <fcntl.h> // for O_RDONLY
9 #include <sys/time.h>
10 #include <unistd.h>
11 #endif
12 #if V8_OS_MACOSX
13 #include <mach/mach.h>
14 #include <mach/mach_time.h>
15 #include <pthread.h>
16 #endif
17 
18 #include <cstring>
19 #include <ostream>
20 
21 #if V8_OS_WIN
22 #include "src/base/atomicops.h"
23 #include "src/base/lazy-instance.h"
24 #include "src/base/win32-headers.h"
25 #endif
26 #include "src/base/cpu.h"
27 #include "src/base/logging.h"
28 #include "src/base/platform/platform.h"
29 
30 namespace {
31 
32 #if V8_OS_MACOSX
// Returns the CPU time (user + system) consumed so far by the calling
// thread, in microseconds, as reported by the Mach thread_info() API.
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK_EQ(kr, KERN_SUCCESS);

  // CheckedNumeric guards the seconds-to-microseconds multiplication against
  // overflow; ValueOrDie() aborts instead of returning a wrapped value.
  v8::base::CheckedNumeric<int64_t> absolute_micros(
      thread_info_data.user_time.seconds +
      thread_info_data.system_time.seconds);
  absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
  absolute_micros += (thread_info_data.user_time.microseconds +
                      thread_info_data.system_time.microseconds);
  return absolute_micros.ValueOrDie();
}
51 #elif V8_OS_POSIX
52 // Helper function to get results from clock_gettime() and convert to a
53 // microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
54 // on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
55 // _POSIX_MONOTONIC_CLOCK to -1.
56 V8_INLINE int64_t ClockNow(clockid_t clk_id) {
57 #if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
58  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
59 // On AIX clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with
60 // resolution of 10ms. thread_cputime API provides the time in ns
61 #if defined(V8_OS_AIX)
62  thread_cputime_t tc;
63  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
64  if (thread_cputime(-1, &tc) != 0) {
65  UNREACHABLE();
66  }
67  }
68 #endif
69  struct timespec ts;
70  if (clock_gettime(clk_id, &ts) != 0) {
71  UNREACHABLE();
72  }
74  result *= v8::base::Time::kMicrosecondsPerSecond;
75 #if defined(V8_OS_AIX)
76  if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
77  result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
78  } else {
79  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
80  }
81 #else
82  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
83 #endif
84  return result.ValueOrDie();
85 #else // Monotonic clock not supported.
86  return 0;
87 #endif
88 }
89 
// Empirically decides whether |clk_id| ticks with roughly microsecond
// granularity by measuring the smallest observable clock increment.
V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
  // Limit duration of timer resolution measurement to 100 ms. If we cannot
  // measure timer resolution within this time, we assume a low resolution
  // timer.
  int64_t end =
      ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
  int64_t start, delta;
  do {
    start = ClockNow(clk_id);
    // Loop until we can detect that the clock has changed. Non-HighRes timers
    // will increment in chunks, i.e. 15ms. By spinning until we see a clock
    // change, we detect the minimum time between measurements.
    do {
      delta = ClockNow(clk_id) - start;
    } while (delta == 0);
  } while (delta > 1 && start < end);
  // A minimum observable increment of <= 1 us counts as high resolution.
  return delta <= 1;
}
108 
109 #elif V8_OS_WIN
110 V8_INLINE bool IsQPCReliable() {
111  v8::base::CPU cpu;
112  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
113  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
114 }
115 
// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  // DCHECK-only validation; USE() silences the unused warning in release.
  DCHECK(result);
  USE(result);
  return perf_counter_now.QuadPart;
}
127 #endif // V8_OS_MACOSX
128 
129 
130 } // namespace
131 
132 namespace v8 {
133 namespace base {
134 
135 int TimeDelta::InDays() const {
136  if (IsMax()) {
137  // Preserve max to prevent overflow.
138  return std::numeric_limits<int>::max();
139  }
140  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
141 }
142 
143 int TimeDelta::InHours() const {
144  if (IsMax()) {
145  // Preserve max to prevent overflow.
146  return std::numeric_limits<int>::max();
147  }
148  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
149 }
150 
151 int TimeDelta::InMinutes() const {
152  if (IsMax()) {
153  // Preserve max to prevent overflow.
154  return std::numeric_limits<int>::max();
155  }
156  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
157 }
158 
159 double TimeDelta::InSecondsF() const {
160  if (IsMax()) {
161  // Preserve max to prevent overflow.
162  return std::numeric_limits<double>::infinity();
163  }
164  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
165 }
166 
167 int64_t TimeDelta::InSeconds() const {
168  if (IsMax()) {
169  // Preserve max to prevent overflow.
170  return std::numeric_limits<int64_t>::max();
171  }
172  return delta_ / Time::kMicrosecondsPerSecond;
173 }
174 
175 double TimeDelta::InMillisecondsF() const {
176  if (IsMax()) {
177  // Preserve max to prevent overflow.
178  return std::numeric_limits<double>::infinity();
179  }
180  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
181 }
182 
183 int64_t TimeDelta::InMilliseconds() const {
184  if (IsMax()) {
185  // Preserve max to prevent overflow.
186  return std::numeric_limits<int64_t>::max();
187  }
188  return delta_ / Time::kMicrosecondsPerMillisecond;
189 }
190 
191 int64_t TimeDelta::InMillisecondsRoundedUp() const {
192  if (IsMax()) {
193  // Preserve max to prevent overflow.
194  return std::numeric_limits<int64_t>::max();
195  }
196  return (delta_ + Time::kMicrosecondsPerMillisecond - 1) /
197  Time::kMicrosecondsPerMillisecond;
198 }
199 
200 int64_t TimeDelta::InMicroseconds() const {
201  if (IsMax()) {
202  // Preserve max to prevent overflow.
203  return std::numeric_limits<int64_t>::max();
204  }
205  return delta_;
206 }
207 
208 int64_t TimeDelta::InNanoseconds() const {
209  if (IsMax()) {
210  // Preserve max to prevent overflow.
211  return std::numeric_limits<int64_t>::max();
212  }
213  return delta_ * Time::kNanosecondsPerMicrosecond;
214 }
215 
216 
217 #if V8_OS_MACOSX
218 
219 TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
220  DCHECK_GE(ts.tv_nsec, 0);
221  DCHECK_LT(ts.tv_nsec,
222  static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
223  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
224  ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
225 }
226 
227 
228 struct mach_timespec TimeDelta::ToMachTimespec() const {
229  struct mach_timespec ts;
230  DCHECK_GE(delta_, 0);
231  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
232  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
233  Time::kNanosecondsPerMicrosecond;
234  return ts;
235 }
236 
237 #endif // V8_OS_MACOSX
238 
239 
240 #if V8_OS_POSIX
241 
242 TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
243  DCHECK_GE(ts.tv_nsec, 0);
244  DCHECK_LT(ts.tv_nsec,
245  static_cast<long>(Time::kNanosecondsPerSecond)); // NOLINT
246  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
247  ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
248 }
249 
250 
251 struct timespec TimeDelta::ToTimespec() const {
252  struct timespec ts;
253  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
254  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
255  Time::kNanosecondsPerMicrosecond;
256  return ts;
257 }
258 
259 #endif // V8_OS_POSIX
260 
261 
262 #if V8_OS_WIN
263 
// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  // Returns initial_time_ plus the high-resolution ticks elapsed since the
  // last resync; resyncs against the system clock whenever time appears to
  // have gone backwards or more than a minute has elapsed.
  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    MutexGuard lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  // Forces an immediate resync against the system clock and returns it.
  Time NowFromSystemTime() {
    MutexGuard lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  // Tick/time pair captured at the last resync; both guarded by mutex_.
  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};
315 
316 
// Process-wide Clock singleton, created lazily with thread-safe init-once.
static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
                          ThreadSafeInitOnceTrait>::type clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;
320 
321 
// Current wall-clock time via the drift-corrected hybrid Clock above.
Time Time::Now() {
  return clock.Pointer()->Now();
}
325 
326 
// Current wall-clock time, forcing a resync against the system clock.
Time Time::NowFromSystemTime() {
  return clock.Pointer()->NowFromSystemTime();
}
330 
331 
// Time between windows epoch and standard epoch.
// (Microseconds from 1601-01-01, the FILETIME epoch, to 1970-01-01.)
static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};
334 
335 Time Time::FromFiletime(FILETIME ft) {
336  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
337  return Time();
338  }
339  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
340  ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
341  return Max();
342  }
343  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
344  (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
345  return Time(us - kTimeToEpochInMicroseconds);
346 }
347 
348 
349 FILETIME Time::ToFiletime() const {
350  DCHECK_GE(us_, 0);
351  FILETIME ft;
352  if (IsNull()) {
353  ft.dwLowDateTime = 0;
354  ft.dwHighDateTime = 0;
355  return ft;
356  }
357  if (IsMax()) {
358  ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
359  ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
360  return ft;
361  }
362  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
363  ft.dwLowDateTime = static_cast<DWORD>(us);
364  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
365  return ft;
366 }
367 
368 #elif V8_OS_POSIX
369 
370 Time Time::Now() {
371  struct timeval tv;
372  int result = gettimeofday(&tv, nullptr);
373  DCHECK_EQ(0, result);
374  USE(result);
375  return FromTimeval(tv);
376 }
377 
378 
// On POSIX Now() already reads the system clock, so no resync is needed.
Time Time::NowFromSystemTime() {
  return Now();
}
382 
383 
384 Time Time::FromTimespec(struct timespec ts) {
385  DCHECK_GE(ts.tv_nsec, 0);
386  DCHECK_LT(ts.tv_nsec, kNanosecondsPerSecond);
387  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
388  return Time();
389  }
390  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) && // NOLINT
391  ts.tv_sec == std::numeric_limits<time_t>::max()) {
392  return Max();
393  }
394  return Time(ts.tv_sec * kMicrosecondsPerSecond +
395  ts.tv_nsec / kNanosecondsPerMicrosecond);
396 }
397 
398 
399 struct timespec Time::ToTimespec() const {
400  struct timespec ts;
401  if (IsNull()) {
402  ts.tv_sec = 0;
403  ts.tv_nsec = 0;
404  return ts;
405  }
406  if (IsMax()) {
407  ts.tv_sec = std::numeric_limits<time_t>::max();
408  ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
409  return ts;
410  }
411  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
412  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
413  return ts;
414 }
415 
416 
417 Time Time::FromTimeval(struct timeval tv) {
418  DCHECK_GE(tv.tv_usec, 0);
419  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
420  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
421  return Time();
422  }
423  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
424  tv.tv_sec == std::numeric_limits<time_t>::max()) {
425  return Max();
426  }
427  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
428 }
429 
430 
431 struct timeval Time::ToTimeval() const {
432  struct timeval tv;
433  if (IsNull()) {
434  tv.tv_sec = 0;
435  tv.tv_usec = 0;
436  return tv;
437  }
438  if (IsMax()) {
439  tv.tv_sec = std::numeric_limits<time_t>::max();
440  tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
441  return tv;
442  }
443  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
444  tv.tv_usec = us_ % kMicrosecondsPerSecond;
445  return tv;
446 }
447 
448 #endif // V8_OS_WIN
449 
// static
TimeTicks TimeTicks::HighResolutionNow() {
  // a DCHECK of TimeTicks::IsHighResolution() was removed from here
  // as it turns out this path is used in the wild for logs and counters.
  //
  // TODO(hpayer) We may eventually want to split TimedHistograms based
  // on low resolution clocks to avoid polluting metrics
  return TimeTicks::Now();
}
459 
460 Time Time::FromJsTime(double ms_since_epoch) {
461  // The epoch is a valid time, so this constructor doesn't interpret
462  // 0 as the null time.
463  if (ms_since_epoch == std::numeric_limits<double>::max()) {
464  return Max();
465  }
466  return Time(
467  static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
468 }
469 
470 
471 double Time::ToJsTime() const {
472  if (IsNull()) {
473  // Preserve 0 so the invalid result doesn't depend on the platform.
474  return 0;
475  }
476  if (IsMax()) {
477  // Preserve max without offset to prevent overflow.
478  return std::numeric_limits<double>::max();
479  }
480  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
481 }
482 
483 
// Streams the time as its JavaScript millisecond representation.
std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}
487 
488 
489 #if V8_OS_WIN
490 
491 namespace {
492 
// We define a wrapper to adapt between the __stdcall and __cdecl call of the
// mock function, and to avoid a static constructor. Assigning an import to a
// function pointer directly would require setup code to fetch from the IAT.
DWORD timeGetTimeWrapper() { return timeGetTime(); }

// Indirection point for the millisecond tick source (replaceable for tests).
DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
499 
// A structure holding the most significant bits of "last seen" and a
// "rollover" counter.
union LastTimeAndRolloversState {
  // The state as a single 32-bit opaque value.
  base::Atomic32 as_opaque_32;

  // The state as usable values.
  struct {
    // The top 8-bits of the "last" time. This is enough to check for rollovers
    // and the small bit-size means fewer CompareAndSwap operations to store
    // changes in state, which in turn makes for fewer retries.
    uint8_t last_8;
    // A count of the number of detected rollovers. Using this as bits 47-32
    // of the upper half of a 64-bit value results in a 48-bit tick counter.
    // This extends the total rollover period from about 49 days to about 8800
    // years while still allowing it to be stored with last_8 in a single
    // 32-bit value.
    uint16_t rollovers;
  } as_values;
};
// Shared packed state, updated via compare-and-swap below.
base::Atomic32 g_last_time_and_rollovers = 0;
static_assert(sizeof(LastTimeAndRolloversState) <=
                  sizeof(g_last_time_and_rollovers),
              "LastTimeAndRolloversState does not fit in a single atomic word");
524 
// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days. We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
TimeTicks RolloverProtectedNow() {
  LastTimeAndRolloversState state;
  DWORD now;  // DWORD is always unsigned 32 bits.

  while (true) {
    // Fetch the "now" and "last" tick values, updating "last" with "now" and
    // incrementing the "rollovers" counter if the tick-value has wrapped back
    // around. Atomic operations ensure that both "last" and "rollovers" are
    // always updated together.
    int32_t original = base::Acquire_Load(&g_last_time_and_rollovers);
    state.as_opaque_32 = original;
    now = g_tick_function();
    uint8_t now_8 = static_cast<uint8_t>(now >> 24);
    if (now_8 < state.as_values.last_8) ++state.as_values.rollovers;
    state.as_values.last_8 = now_8;

    // If the state hasn't changed, exit the loop.
    if (state.as_opaque_32 == original) break;

    // Save the changed state. If the existing value is unchanged from the
    // original, exit the loop.
    int32_t check = base::Release_CompareAndSwap(&g_last_time_and_rollovers,
                                                 original, state.as_opaque_32);
    if (check == original) break;

    // Another thread has done something in between so retry from the top.
  }

  // Widen to 48 bits: "rollovers" supplies bits 47-32, timeGetTime() the rest.
  return TimeTicks() +
         TimeDelta::FromMilliseconds(
             now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
}
562 
563 // Discussion of tick counter options on Windows:
564 //
565 // (1) CPU cycle counter. (Retrieved via RDTSC)
566 // The CPU counter provides the highest resolution time stamp and is the least
567 // expensive to retrieve. However, on older CPUs, two issues can affect its
568 // reliability: First it is maintained per processor and not synchronized
569 // between processors. Also, the counters will change frequency due to thermal
570 // and power changes, and stop in some states.
571 //
572 // (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
573 // resolution (<1 microsecond) time stamp. On most hardware running today, it
574 // auto-detects and uses the constant-rate RDTSC counter to provide extremely
575 // efficient and reliable time stamps.
576 //
577 // On older CPUs where RDTSC is unreliable, it falls back to using more
578 // expensive (20X to 40X more costly) alternate clocks, such as HPET or the ACPI
579 // PM timer, and can involve system calls; and all this is up to the HAL (with
580 // some help from ACPI). According to
581 // http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx, in the
582 // worst case, it gets the counter from the rollover interrupt on the
583 // programmable interrupt timer. In best cases, the HAL may conclude that the
584 // RDTSC counter runs at a constant frequency, then it uses that instead. On
585 // multiprocessor machines, it will try to verify the values returned from
586 // RDTSC on each processor are consistent with each other, and apply a handful
587 // of workarounds for known buggy hardware. In other words, QPC is supposed to
588 // give consistent results on a multiprocessor computer, but for older CPUs it
589 // can be unreliable due bugs in BIOS or HAL.
590 //
591 // (3) System time. The system time provides a low-resolution (from ~1 to ~15.6
592 // milliseconds) time stamp but is comparatively less expensive to retrieve and
593 // more reliable. Time::EnableHighResolutionTimer() and
594 // Time::ActivateHighResolutionTimer() can be called to alter the resolution of
595 // this timer; and also other Windows applications can alter it, affecting this
596 // one.
597 
// Bootstrap Now() implementation, defined below.
TimeTicks InitialTimeTicksNowFunction();

// See "threading notes" in InitializeNowFunctionPointer() for details on how
// concurrent reads/writes to these globals has been made safe.
using TimeTicksNowFunction = decltype(&TimeTicks::Now);
// Starts at the bootstrap function; rewritten once the best clock is chosen.
TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction;
int64_t g_qpc_ticks_per_second = 0;

// As of January 2015, use of <atomic> is forbidden in Chromium code. This is
// what std::atomic_thread_fence does on Windows on all Intel architectures when
// the memory_order argument is anything but std::memory_order_seq_cst:
#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier();
610 
// Converts a raw QueryPerformanceCounter reading into a TimeDelta using the
// previously-published counter frequency.
TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) {
  // Ensure that the assignment to |g_qpc_ticks_per_second|, made in
  // InitializeNowFunctionPointer(), has happened by this point.
  ATOMIC_THREAD_FENCE(memory_order_acquire);

  DCHECK_GT(g_qpc_ticks_per_second, 0);

  // If the QPC Value is below the overflow threshold, we proceed with
  // simple multiply and divide.
  if (qpc_value < TimeTicks::kQPCOverflowThreshold) {
    return TimeDelta::FromMicroseconds(
        qpc_value * TimeTicks::kMicrosecondsPerSecond / g_qpc_ticks_per_second);
  }
  // Otherwise, calculate microseconds in a round about manner to avoid
  // overflow and precision issues.
  int64_t whole_seconds = qpc_value / g_qpc_ticks_per_second;
  int64_t leftover_ticks = qpc_value - (whole_seconds * g_qpc_ticks_per_second);
  return TimeDelta::FromMicroseconds(
      (whole_seconds * TimeTicks::kMicrosecondsPerSecond) +
      ((leftover_ticks * TimeTicks::kMicrosecondsPerSecond) /
       g_qpc_ticks_per_second));
}
633 
// High-resolution TimeTicks source backed by QueryPerformanceCounter.
TimeTicks QPCNow() { return TimeTicks() + QPCValueToTimeDelta(QPCNowRaw()); }
635 
636 bool IsBuggyAthlon(const CPU& cpu) {
637  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
638  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
639 }
640 
// Probes the hardware once and publishes the best TimeTicks::Now()
// implementation (QPC or rollover-protected timeGetTime) plus the QPC
// frequency it depends on.
void InitializeTimeTicksNowFunctionPointer() {
  LARGE_INTEGER ticks_per_sec = {};
  if (!QueryPerformanceFrequency(&ticks_per_sec)) ticks_per_sec.QuadPart = 0;

  // If Windows cannot provide a QPC implementation, TimeTicks::Now() must use
  // the low-resolution clock.
  //
  // If the QPC implementation is expensive and/or unreliable, TimeTicks::Now()
  // will still use the low-resolution clock. A CPU lacking a non-stop time
  // counter will cause Windows to provide an alternate QPC implementation that
  // works, but is expensive to use. Certain Athlon CPUs are known to make the
  // QPC implementation unreliable.
  //
  // Otherwise, Now uses the high-resolution QPC clock. As of 21 August 2015,
  // ~72% of users fall within this category.
  TimeTicksNowFunction now_function;
  CPU cpu;
  if (ticks_per_sec.QuadPart <= 0 || !cpu.has_non_stop_time_stamp_counter() ||
      IsBuggyAthlon(cpu)) {
    now_function = &RolloverProtectedNow;
  } else {
    now_function = &QPCNow;
  }

  // Threading note 1: In an unlikely race condition, it's possible for two or
  // more threads to enter InitializeNowFunctionPointer() in parallel. This is
  // not a problem since all threads should end up writing out the same values
  // to the global variables.
  //
  // Threading note 2: A release fence is placed here to ensure, from the
  // perspective of other threads using the function pointers, that the
  // assignment to |g_qpc_ticks_per_second| happens before the function pointers
  // are changed.
  g_qpc_ticks_per_second = ticks_per_sec.QuadPart;
  ATOMIC_THREAD_FENCE(memory_order_release);
  g_time_ticks_now_function = now_function;
}
678 
// Bootstrap implementation: selects the real clock function on first use,
// then tail-calls whatever was installed.
TimeTicks InitialTimeTicksNowFunction() {
  InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function();
}
683 
684 #undef ATOMIC_THREAD_FENCE
685 
686 } // namespace
687 
// static
TimeTicks TimeTicks::Now() {
  // Dispatch through the installed clock function (QPC or the
  // rollover-protected timeGetTime path).
  // Make sure we never return 0 here.
  TimeTicks ticks(g_time_ticks_now_function());
  DCHECK(!ticks.IsNull());
  return ticks;
}
695 
// static
bool TimeTicks::IsHighResolution() {
  // Ensure the clock source has been chosen, then report whether QPC won.
  if (g_time_ticks_now_function == &InitialTimeTicksNowFunction)
    InitializeTimeTicksNowFunctionPointer();
  return g_time_ticks_now_function == &QPCNow;
}
702 
703 #else // V8_OS_WIN
704 
// Monotonic tick count in microseconds from the platform's best source.
TimeTicks TimeTicks::Now() {
  int64_t ticks;
#if V8_OS_MACOSX
  // mach_absolute_time() returns platform-dependent units; the cached
  // timebase info rescales them to nanoseconds.
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
  ticks = ClockNow(CLOCK_MONOTONIC);
#else
#error platform does not implement TimeTicks::HighResolutionNow.
#endif  // V8_OS_MACOSX
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}
726 
// static
bool TimeTicks::IsHighResolution() {
#if V8_OS_MACOSX
  return true;
#elif V8_OS_POSIX
  // Measured once; function-local static caches the (expensive) probe.
  static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
  return is_high_resolution;
#else
  return true;
#endif
}
738 
739 #endif // V8_OS_WIN
740 
741 
// Whether a per-thread CPU-time clock is available on this platform.
bool ThreadTicks::IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
    defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID) || defined(V8_OS_SOLARIS)
  return true;
#elif defined(V8_OS_WIN)
  return IsSupportedWin();
#else
  return false;
#endif
}
752 
753 
// CPU time consumed by the current thread, in microseconds, from the
// platform-specific source selected at compile time.
ThreadTicks ThreadTicks::Now() {
#if V8_OS_MACOSX
  return ThreadTicks(ComputeThreadTicks());
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_SOLARIS
  return ThreadTicks(gethrvtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
#endif
}
768 
769 
770 #if V8_OS_WIN
771 ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
772  DCHECK(IsSupported());
773 
774  // Get the number of TSC ticks used by the current thread.
775  ULONG64 thread_cycle_time = 0;
776  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
777 
778  // Get the frequency of the TSC.
779  double tsc_ticks_per_second = TSCTicksPerSecond();
780  if (tsc_ticks_per_second == 0)
781  return ThreadTicks();
782 
783  // Return the CPU time of the current thread.
784  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
785  return ThreadTicks(
786  static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
787 }
788 
// static
bool ThreadTicks::IsSupportedWin() {
  // Requires a constant-rate, non-stop TSC and a CPU whose QPC is not the
  // known-buggy Athlon implementation (see IsQPCReliable()).
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
                             !IsQPCReliable();
  return is_supported;
}
795 
// static
void ThreadTicks::WaitUntilInitializedWin() {
  // TSCTicksPerSecond() returns 0 until enough time (~50 ms) has elapsed
  // between its internal readings; poll until the frequency is established.
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}
801 
802 #ifdef V8_HOST_ARCH_ARM64
803 #define ReadCycleCounter() _ReadStatusReg(ARM64_PMCCNTR_EL0)
804 #else
805 #define ReadCycleCounter() __rdtsc()
806 #endif
807 
// Measures the TSC frequency by comparing TSC deltas against elapsed time
// from the performance counter. Returns 0 until at least 50 ms separates the
// first and the latest reading; afterwards the result is cached.
double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduces the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter. (Static initializers run exactly once,
  // on the first call.)
  static const uint64_t tsc_initial = ReadCycleCounter();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make a another reading of the TSC and the performance counter every time
  // that this function is called.
  uint64_t tsc_now = ReadCycleCounter();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //   accurate the computed TSC frequency will be. The 50 ms value was
  //   chosen because local benchmarks show that it allows us to get a
  //   stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //   this will never fail on systems that run XP or later.
  //   https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
866 #undef ReadCycleCounter
867 #endif // V8_OS_WIN
868 
869 } // namespace base
870 } // namespace v8
// Definition: libplatform.h:13