V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
platform-posix.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Platform-specific code for POSIX goes here. This is not a platform on its
6 // own, but contains the parts which are the same across the POSIX platforms
7 // Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
8 
9 #include <errno.h>
10 #include <limits.h>
11 #include <pthread.h>
12 #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
13 #include <pthread_np.h> // for pthread_set_name_np
14 #endif
15 #include <sched.h> // for sched_yield
16 #include <stdio.h>
17 #include <time.h>
18 #include <unistd.h>
19 
20 #include <sys/mman.h>
21 #include <sys/stat.h>
22 #include <sys/time.h>
23 #include <sys/types.h>
24 #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
25  defined(__NetBSD__) || defined(__OpenBSD__)
26 #include <sys/sysctl.h> // NOLINT, for sysctl
27 #endif
28 
29 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
30 #define LOG_TAG "v8"
31 #include <android/log.h> // NOLINT
32 #endif
33 
34 #include <cmath>
35 #include <cstdlib>
36 
37 #include "src/base/platform/platform-posix.h"
38 
39 #include "src/base/lazy-instance.h"
40 #include "src/base/macros.h"
41 #include "src/base/platform/platform.h"
42 #include "src/base/platform/time.h"
43 #include "src/base/utils/random-number-generator.h"
44 
45 #ifdef V8_FAST_TLS_SUPPORTED
46 #include "src/base/atomicops.h"
47 #endif
48 
49 #if V8_OS_MACOSX
50 #include <dlfcn.h>
51 #endif
52 
53 #if V8_OS_LINUX
54 #include <sys/prctl.h> // NOLINT, for prctl
55 #endif
56 
57 #if defined(V8_OS_FUCHSIA)
58 #include <zircon/process.h>
59 #else
60 #include <sys/resource.h>
61 #endif
62 
63 #if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
64 #include <sys/syscall.h>
65 #endif
66 
67 #if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS
68 #define MAP_ANONYMOUS MAP_ANON
69 #endif
70 
71 #if defined(V8_OS_SOLARIS)
72 #if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
73 extern "C" int madvise(caddr_t, size_t, int);
74 #else
75 extern int madvise(caddr_t, size_t, int);
76 #endif
77 #endif
78 
79 #ifndef MADV_FREE
80 #define MADV_FREE MADV_DONTNEED
81 #endif
82 
83 namespace v8 {
84 namespace base {
85 
86 namespace {
87 
88 // 0 is never a valid thread id.
89 const pthread_t kNoThread = static_cast<pthread_t>(0);
90 
91 bool g_hard_abort = false;
92 
93 const char* g_gc_fake_mmap = nullptr;
94 
95 static LazyInstance<RandomNumberGenerator>::type
96  platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
97 static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
98 
99 #if !V8_OS_FUCHSIA
100 #if V8_OS_MACOSX
101 // kMmapFd is used to pass vm_alloc flags to tag the region with the user
102 // defined tag 255. This helps identify V8-allocated regions in memory analysis
103 // tools like vmmap(1).
104 const int kMmapFd = VM_MAKE_TAG(255);
105 #else // !V8_OS_MACOSX
106 const int kMmapFd = -1;
107 #endif // !V8_OS_MACOSX
108 
109 const int kMmapFdOffset = 0;
110 
111 int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
112  switch (access) {
113  case OS::MemoryPermission::kNoAccess:
114  return PROT_NONE;
115  case OS::MemoryPermission::kRead:
116  return PROT_READ;
117  case OS::MemoryPermission::kReadWrite:
118  return PROT_READ | PROT_WRITE;
119  case OS::MemoryPermission::kReadWriteExecute:
120  return PROT_READ | PROT_WRITE | PROT_EXEC;
121  case OS::MemoryPermission::kReadExecute:
122  return PROT_READ | PROT_EXEC;
123  }
124  UNREACHABLE();
125 }
126 
127 int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
128  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
129  if (access == OS::MemoryPermission::kNoAccess) {
130 #if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
131  flags |= MAP_NORESERVE;
132 #endif // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
133 #if V8_OS_QNX
134  flags |= MAP_LAZY;
135 #endif // V8_OS_QNX
136  }
137  return flags;
138 }
139 
140 void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
141  int prot = GetProtectionFromMemoryPermission(access);
142  int flags = GetFlagsForMemoryPermission(access);
143  void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
144  if (result == MAP_FAILED) return nullptr;
145  return result;
146 }
147 
148 #endif // !V8_OS_FUCHSIA
149 
150 } // namespace
151 
152 void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
153  g_hard_abort = hard_abort;
154  g_gc_fake_mmap = gc_fake_mmap;
155 }
156 
157 int OS::ActivationFrameAlignment() {
158 #if V8_TARGET_ARCH_ARM
159  // On EABI ARM targets this is required for fp correctness in the
160  // runtime system.
161  return 8;
162 #elif V8_TARGET_ARCH_MIPS
163  return 8;
164 #elif V8_TARGET_ARCH_S390
165  return 8;
166 #else
167  // Otherwise we just assume 16 byte alignment, i.e.:
168  // - With gcc 4.4 the tree vectorization optimizer can generate code
169  // that requires 16 byte alignment such as movdqa on x86.
170  // - Mac OS X, PPC and Solaris (64-bit) activation frames must
171  // be 16 byte-aligned; see "Mac OS X ABI Function Call Guide"
172  return 16;
173 #endif
174 }
175 
176 // static
177 size_t OS::AllocatePageSize() {
178  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
179 }
180 
181 // static
182 size_t OS::CommitPageSize() {
183  static size_t page_size = getpagesize();
184  return page_size;
185 }
186 
187 // static
188 void OS::SetRandomMmapSeed(int64_t seed) {
189  if (seed) {
190  MutexGuard guard(rng_mutex.Pointer());
191  platform_random_number_generator.Pointer()->SetSeed(seed);
192  }
193 }
194 
195 // static
196 void* OS::GetRandomMmapAddr() {
197  uintptr_t raw_addr;
198  {
199  MutexGuard guard(rng_mutex.Pointer());
200  platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
201  sizeof(raw_addr));
202  }
203 #if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
204  defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
205  // If random hint addresses interfere with address ranges hard coded in
206  // sanitizers, bad things happen. This address range is copied from TSAN
207  // source but works with all tools.
208  // See crbug.com/539863.
209  raw_addr &= 0x007fffff0000ULL;
210  raw_addr += 0x7e8000000000ULL;
211 #else
212 #if V8_TARGET_ARCH_X64
213  // Currently available CPUs have 48 bits of virtual addressing. Truncate
214  // the hint address to 46 bits to give the kernel a fighting chance of
215  // fulfilling our placement request.
216  raw_addr &= uint64_t{0x3FFFFFFFF000};
217 #elif V8_TARGET_ARCH_PPC64
218 #if V8_OS_AIX
219  // AIX: 64 bits of virtual addressing, but we limit address range to:
220  // a) minimize Segment Lookaside Buffer (SLB) misses and
221  raw_addr &= uint64_t{0x3FFFF000};
222  // Use extra address space to isolate the mmap regions.
223  raw_addr += uint64_t{0x400000000000};
224 #elif V8_TARGET_BIG_ENDIAN
225  // Big-endian Linux: 42 bits of virtual addressing.
226  raw_addr &= uint64_t{0x03FFFFFFF000};
227 #else
228  // Little-endian Linux: 46 bits of virtual addressing.
229  raw_addr &= uint64_t{0x3FFFFFFF0000};
230 #endif
231 #elif V8_TARGET_ARCH_S390X
232  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
233  // of virtual addressing. Truncate to 40 bits to allow kernel chance to
234  // fulfill request.
235  raw_addr &= uint64_t{0xFFFFFFF000};
236 #elif V8_TARGET_ARCH_S390
237  // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
238  // to fulfill request.
239  raw_addr &= 0x1FFFF000;
240 #elif V8_TARGET_ARCH_MIPS64
241  // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
242  // to fulfill request.
243  raw_addr &= uint64_t{0xFFFFFF0000};
244 #else
245  raw_addr &= 0x3FFFF000;
246 
247 #ifdef __sun
248  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
249  // half of the top half of the address space (that is, the third quarter).
250  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
251  // system will not fail to mmap() because something else happens to already
252  // be mapped at our random address. We deliberately set the hint high enough
253  // to get well above the system's break (that is, the heap); Solaris and
254  // illumos will try the hint and if that fails allocate as if there were
255  // no hint at all. The high hint prevents the break from getting hemmed in
256  // at low values, ceding half of the address space to the system heap.
257  raw_addr += 0x80000000;
258 #elif V8_OS_AIX
259  // The range 0x30000000 - 0xD0000000 is available on AIX;
260  // choose the upper range.
261  raw_addr += 0x90000000;
262 #else
263  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
264  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
265  // 10.6 and 10.7.
266  raw_addr += 0x20000000;
267 #endif
268 #endif
269 #endif
270  return reinterpret_cast<void*>(raw_addr);
271 }
272 
273 // TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
274 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
275 // static
276 void* OS::Allocate(void* address, size_t size, size_t alignment,
277  MemoryPermission access) {
278  size_t page_size = AllocatePageSize();
279  DCHECK_EQ(0, size % page_size);
280  DCHECK_EQ(0, alignment % page_size);
281  address = AlignedAddress(address, alignment);
282  // Add the maximum misalignment so we are guaranteed an aligned base address.
283  size_t request_size = size + (alignment - page_size);
284  request_size = RoundUp(request_size, OS::AllocatePageSize());
285  void* result = base::Allocate(address, request_size, access);
286  if (result == nullptr) return nullptr;
287 
288  // Unmap memory allocated before the aligned base address.
289  uint8_t* base = static_cast<uint8_t*>(result);
290  uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
291  RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
292  if (aligned_base != base) {
293  DCHECK_LT(base, aligned_base);
294  size_t prefix_size = static_cast<size_t>(aligned_base - base);
295  CHECK(Free(base, prefix_size));
296  request_size -= prefix_size;
297  }
298  // Unmap memory allocated after the potentially unaligned end.
299  if (size != request_size) {
300  DCHECK_LT(size, request_size);
301  size_t suffix_size = request_size - size;
302  CHECK(Free(aligned_base + size, suffix_size));
303  request_size -= suffix_size;
304  }
305 
306  DCHECK_EQ(size, request_size);
307  return static_cast<void*>(aligned_base);
308 }
309 
310 // static
311 bool OS::Free(void* address, const size_t size) {
312  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
313  DCHECK_EQ(0, size % AllocatePageSize());
314  return munmap(address, size) == 0;
315 }
316 
317 // static
318 bool OS::Release(void* address, size_t size) {
319  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
320  DCHECK_EQ(0, size % CommitPageSize());
321  return munmap(address, size) == 0;
322 }
323 
324 // static
325 bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
326  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
327  DCHECK_EQ(0, size % CommitPageSize());
328 
329  int prot = GetProtectionFromMemoryPermission(access);
330  int ret = mprotect(address, size, prot);
331  if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
332  // This is advisory; ignore errors and continue execution.
333  USE(DiscardSystemPages(address, size));
334  }
335 
336 // For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
337 // changing permissions away from OS::MemoryPermission::kNoAccess. Since this
338 // state is not kept at this layer, we always call this if access != kNoAccess.
339 // The cost is a syscall that effectively no-ops.
340 // TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
341 // https://crbug.com/823915
342 #if defined(OS_MACOSX)
343  if (access != OS::MemoryPermission::kNoAccess)
344  madvise(address, size, MADV_FREE_REUSE);
345 #endif
346 
347  return ret == 0;
348 }
349 
350 bool OS::DiscardSystemPages(void* address, size_t size) {
351  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
352  DCHECK_EQ(0, size % CommitPageSize());
353 #if defined(OS_MACOSX)
354  // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
355  // marks the pages with the reusable bit, which allows both Activity Monitor
356  // and memory-infra to correctly track the pages.
357  int ret = madvise(address, size, MADV_FREE_REUSABLE);
358 #elif defined(_AIX) || defined(V8_OS_SOLARIS)
359  int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
360 #else
361  int ret = madvise(address, size, MADV_FREE);
362 #endif
363  if (ret != 0 && errno == ENOSYS)
364  return true; // madvise is not available on all systems.
365  if (ret != 0 && errno == EINVAL) {
366 // MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
367 // MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
368 // imply runtime support.
369 #if defined(_AIX) || defined(V8_OS_SOLARIS)
370  ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
371 #else
372  ret = madvise(address, size, MADV_DONTNEED);
373 #endif
374  }
375  return ret == 0;
376 }
377 
378 // static
379 bool OS::HasLazyCommits() {
380 #if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
381  return true;
382 #else
383  // TODO(bbudge) Return true for all POSIX platforms.
384  return false;
385 #endif
386 }
387 #endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
388 
389 const char* OS::GetGCFakeMMapFile() {
390  return g_gc_fake_mmap;
391 }
392 
393 
394 void OS::Sleep(TimeDelta interval) {
395  usleep(static_cast<useconds_t>(interval.InMicroseconds()));
396 }
397 
398 
399 void OS::Abort() {
400  if (g_hard_abort) {
401  V8_IMMEDIATE_CRASH();
402  }
403  // Redirect to std abort to signal abnormal program termination.
404  abort();
405 }
406 
407 
408 void OS::DebugBreak() {
409 #if V8_HOST_ARCH_ARM
410  asm("bkpt 0");
411 #elif V8_HOST_ARCH_ARM64
412  asm("brk 0");
413 #elif V8_HOST_ARCH_MIPS
414  asm("break");
415 #elif V8_HOST_ARCH_MIPS64
416  asm("break");
417 #elif V8_HOST_ARCH_PPC
418  asm("twge 2,2");
419 #elif V8_HOST_ARCH_IA32
420  asm("int $3");
421 #elif V8_HOST_ARCH_X64
422  asm("int $3");
423 #elif V8_HOST_ARCH_S390
424  // Software breakpoint instruction is 0x0001
425  asm volatile(".word 0x0001");
426 #else
427 #error Unsupported host architecture.
428 #endif
429 }
430 
431 
432 class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
433  public:
434  PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
435  : file_(file), memory_(memory), size_(size) {}
436  ~PosixMemoryMappedFile() final;
437  void* memory() const final { return memory_; }
438  size_t size() const final { return size_; }
439 
440  private:
441  FILE* const file_;
442  void* const memory_;
443  size_t const size_;
444 };
445 
446 
447 // static
448 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
449  if (FILE* file = fopen(name, "r+")) {
450  if (fseek(file, 0, SEEK_END) == 0) {
451  long size = ftell(file); // NOLINT(runtime/int)
452  if (size >= 0) {
453  void* const memory =
454  mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
455  MAP_SHARED, fileno(file), 0);
456  if (memory != MAP_FAILED) {
457  return new PosixMemoryMappedFile(file, memory, size);
458  }
459  }
460  }
461  fclose(file);
462  }
463  return nullptr;
464 }
465 
466 
467 // static
468 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
469  size_t size, void* initial) {
470  if (FILE* file = fopen(name, "w+")) {
471  size_t result = fwrite(initial, 1, size, file);
472  if (result == size && !ferror(file)) {
473  void* memory = mmap(OS::GetRandomMmapAddr(), result,
474  PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
475  if (memory != MAP_FAILED) {
476  return new PosixMemoryMappedFile(file, memory, result);
477  }
478  }
479  fclose(file);
480  }
481  return nullptr;
482 }
483 
484 
485 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
486  if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize())));
487  fclose(file_);
488 }
489 
490 
491 int OS::GetCurrentProcessId() {
492  return static_cast<int>(getpid());
493 }
494 
495 
496 int OS::GetCurrentThreadId() {
497 #if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
498  return static_cast<int>(pthread_mach_thread_np(pthread_self()));
499 #elif V8_OS_LINUX
500  return static_cast<int>(syscall(__NR_gettid));
501 #elif V8_OS_ANDROID
502  return static_cast<int>(gettid());
503 #elif V8_OS_AIX
504  return static_cast<int>(thread_self());
505 #elif V8_OS_FUCHSIA
506  return static_cast<int>(zx_thread_self());
507 #elif V8_OS_SOLARIS
508  return static_cast<int>(pthread_self());
509 #else
510  return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
511 #endif
512 }
513 
514 void OS::ExitProcess(int exit_code) {
515  // Use _exit instead of exit to avoid races between isolate
516  // threads and static destructors.
517  fflush(stdout);
518  fflush(stderr);
519  _exit(exit_code);
520 }
521 
522 // ----------------------------------------------------------------------------
523 // POSIX date/time support.
524 //
525 
526 #if !defined(V8_OS_FUCHSIA)
527 int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
528  struct rusage usage;
529 
530  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
531  *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
532  *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
533  return 0;
534 }
535 #endif
536 
537 double OS::TimeCurrentMillis() {
538  return Time::Now().ToJsTime();
539 }
540 
541 double PosixTimezoneCache::DaylightSavingsOffset(double time) {
542  if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
543  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
544  struct tm tm;
545  struct tm* t = localtime_r(&tv, &tm);
546  if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
547  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
548 }
549 
550 
551 int OS::GetLastError() {
552  return errno;
553 }
554 
555 
556 // ----------------------------------------------------------------------------
557 // POSIX stdio support.
558 //
559 
560 FILE* OS::FOpen(const char* path, const char* mode) {
561  FILE* file = fopen(path, mode);
562  if (file == nullptr) return nullptr;
563  struct stat file_stat;
564  if (fstat(fileno(file), &file_stat) != 0) {
565  fclose(file);
566  return nullptr;
567  }
568  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
569  if (is_regular_file) return file;
570  fclose(file);
571  return nullptr;
572 }
573 
574 
575 bool OS::Remove(const char* path) {
576  return (remove(path) == 0);
577 }
578 
579 char OS::DirectorySeparator() { return '/'; }
580 
581 bool OS::isDirectorySeparator(const char ch) {
582  return ch == DirectorySeparator();
583 }
584 
585 
586 FILE* OS::OpenTemporaryFile() {
587  return tmpfile();
588 }
589 
590 
591 const char* const OS::LogFileOpenMode = "w";
592 
593 
594 void OS::Print(const char* format, ...) {
595  va_list args;
596  va_start(args, format);
597  VPrint(format, args);
598  va_end(args);
599 }
600 
601 
602 void OS::VPrint(const char* format, va_list args) {
603 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
604  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
605 #else
606  vprintf(format, args);
607 #endif
608 }
609 
610 
611 void OS::FPrint(FILE* out, const char* format, ...) {
612  va_list args;
613  va_start(args, format);
614  VFPrint(out, format, args);
615  va_end(args);
616 }
617 
618 
619 void OS::VFPrint(FILE* out, const char* format, va_list args) {
620 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
621  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
622 #else
623  vfprintf(out, format, args);
624 #endif
625 }
626 
627 
628 void OS::PrintError(const char* format, ...) {
629  va_list args;
630  va_start(args, format);
631  VPrintError(format, args);
632  va_end(args);
633 }
634 
635 
636 void OS::VPrintError(const char* format, va_list args) {
637 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
638  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
639 #else
640  vfprintf(stderr, format, args);
641 #endif
642 }
643 
644 
645 int OS::SNPrintF(char* str, int length, const char* format, ...) {
646  va_list args;
647  va_start(args, format);
648  int result = VSNPrintF(str, length, format, args);
649  va_end(args);
650  return result;
651 }
652 
653 
654 int OS::VSNPrintF(char* str,
655  int length,
656  const char* format,
657  va_list args) {
658  int n = vsnprintf(str, length, format, args);
659  if (n < 0 || n >= length) {
660  // If the length is zero, the assignment fails.
661  if (length > 0)
662  str[length - 1] = '\0';
663  return -1;
664  } else {
665  return n;
666  }
667 }
668 
669 
670 // ----------------------------------------------------------------------------
671 // POSIX string support.
672 //
673 
674 char* OS::StrChr(char* str, int c) {
675  return strchr(str, c);
676 }
677 
678 
679 void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
680  strncpy(dest, src, n);
681 }
682 
683 
684 // ----------------------------------------------------------------------------
685 // POSIX thread support.
686 //
687 
688 class Thread::PlatformData {
689  public:
690  PlatformData() : thread_(kNoThread) {}
691  pthread_t thread_; // Thread handle for pthread.
692  // Synchronizes thread creation
693  Mutex thread_creation_mutex_;
694 };
695 
696 Thread::Thread(const Options& options)
697  : data_(new PlatformData),
698  stack_size_(options.stack_size()),
699  start_semaphore_(nullptr) {
700  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
701  stack_size_ = PTHREAD_STACK_MIN;
702  }
703  set_name(options.name());
704 }
705 
706 
707 Thread::~Thread() {
708  delete data_;
709 }
710 
711 
712 static void SetThreadName(const char* name) {
713 #if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
714  pthread_set_name_np(pthread_self(), name);
715 #elif V8_OS_NETBSD
716  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
717  pthread_setname_np(pthread_self(), "%s", name);
718 #elif V8_OS_MACOSX
719  // pthread_setname_np is only available in 10.6 or later, so test
720  // for it at runtime.
721  int (*dynamic_pthread_setname_np)(const char*);
722  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
723  dlsym(RTLD_DEFAULT, "pthread_setname_np");
724  if (dynamic_pthread_setname_np == nullptr) return;
725 
726  // Mac OS X does not expose the length limit of the name, so hardcode it.
727  static const int kMaxNameLength = 63;
728  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
729  dynamic_pthread_setname_np(name);
730 #elif defined(PR_SET_NAME)
731  prctl(PR_SET_NAME,
732  reinterpret_cast<unsigned long>(name), // NOLINT
733  0, 0, 0);
734 #endif
735 }
736 
737 
738 static void* ThreadEntry(void* arg) {
739  Thread* thread = reinterpret_cast<Thread*>(arg);
740  // We take the lock here to make sure that pthread_create finished first since
741  // we don't know which thread will run first (the original thread or the new
742  // one).
743  { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
744  SetThreadName(thread->name());
745  DCHECK_NE(thread->data()->thread_, kNoThread);
746  thread->NotifyStartedAndRun();
747  return nullptr;
748 }
749 
750 
751 void Thread::set_name(const char* name) {
752  strncpy(name_, name, sizeof(name_));
753  name_[sizeof(name_) - 1] = '\0';
754 }
755 
756 
757 void Thread::Start() {
758  int result;
759  pthread_attr_t attr;
760  memset(&attr, 0, sizeof(attr));
761  result = pthread_attr_init(&attr);
762  DCHECK_EQ(0, result);
763  size_t stack_size = stack_size_;
764  if (stack_size == 0) {
765 #if V8_OS_MACOSX
766  // Default on Mac OS X is 512kB -- bump up to 1MB
767  stack_size = 1 * 1024 * 1024;
768 #elif V8_OS_AIX
769  // Default on AIX is 96kB -- bump up to 2MB
770  stack_size = 2 * 1024 * 1024;
771 #endif
772  }
773  if (stack_size > 0) {
774  result = pthread_attr_setstacksize(&attr, stack_size);
775  DCHECK_EQ(0, result);
776  }
777  {
778  MutexGuard lock_guard(&data_->thread_creation_mutex_);
779  result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
780  }
781  DCHECK_EQ(0, result);
782  result = pthread_attr_destroy(&attr);
783  DCHECK_EQ(0, result);
784  DCHECK_NE(data_->thread_, kNoThread);
785  USE(result);
786 }
787 
788 void Thread::Join() { pthread_join(data_->thread_, nullptr); }
789 
790 static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
791 #if V8_OS_CYGWIN
792  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
793  // because pthread_key_t is a pointer type on Cygwin. This will probably not
794  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
795  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
796  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
797  return static_cast<Thread::LocalStorageKey>(ptr_key);
798 #else
799  return static_cast<Thread::LocalStorageKey>(pthread_key);
800 #endif
801 }
802 
803 
804 static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
805 #if V8_OS_CYGWIN
806  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
807  intptr_t ptr_key = static_cast<intptr_t>(local_key);
808  return reinterpret_cast<pthread_key_t>(ptr_key);
809 #else
810  return static_cast<pthread_key_t>(local_key);
811 #endif
812 }
813 
814 
815 #ifdef V8_FAST_TLS_SUPPORTED
816 
817 static Atomic32 tls_base_offset_initialized = 0;
818 intptr_t kMacTlsBaseOffset = 0;
819 
820 // It's safe to do the initialization more than once, but it has to be
821 // done at least once.
822 static void InitializeTlsBaseOffset() {
823  const size_t kBufferSize = 128;
824  char buffer[kBufferSize];
825  size_t buffer_size = kBufferSize;
826  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
827  if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
828  FATAL("V8 failed to get kernel version");
829  }
830  // The buffer now contains a string of the form XX.YY.ZZ, where
831  // XX is the major kernel version component.
832  // Make sure the buffer is 0-terminated.
833  buffer[kBufferSize - 1] = '\0';
834  char* period_pos = strchr(buffer, '.');
835  *period_pos = '\0';
836  int kernel_version_major =
837  static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
838  // The constants below are taken from pthreads.s from the XNU kernel
839  // sources archive at www.opensource.apple.com.
840  if (kernel_version_major < 11) {
841  // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
842  // same offsets.
843 #if V8_HOST_ARCH_IA32
844  kMacTlsBaseOffset = 0x48;
845 #else
846  kMacTlsBaseOffset = 0x60;
847 #endif
848  } else {
849  // 11.x.x (Lion) changed the offset.
850  kMacTlsBaseOffset = 0;
851  }
852 
853  Release_Store(&tls_base_offset_initialized, 1);
854 }
855 
856 
857 static void CheckFastTls(Thread::LocalStorageKey key) {
858  void* expected = reinterpret_cast<void*>(0x1234CAFE);
859  Thread::SetThreadLocal(key, expected);
860  void* actual = Thread::GetExistingThreadLocal(key);
861  if (expected != actual) {
862  FATAL("V8 failed to initialize fast TLS on current kernel");
863  }
864  Thread::SetThreadLocal(key, nullptr);
865 }
866 
867 #endif // V8_FAST_TLS_SUPPORTED
868 
869 
870 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
871 #ifdef V8_FAST_TLS_SUPPORTED
872  bool check_fast_tls = false;
873  if (tls_base_offset_initialized == 0) {
874  check_fast_tls = true;
875  InitializeTlsBaseOffset();
876  }
877 #endif
878  pthread_key_t key;
879  int result = pthread_key_create(&key, nullptr);
880  DCHECK_EQ(0, result);
881  USE(result);
882  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
883 #ifdef V8_FAST_TLS_SUPPORTED
884  // If we just initialized fast TLS support, make sure it works.
885  if (check_fast_tls) CheckFastTls(local_key);
886 #endif
887  return local_key;
888 }
889 
890 
891 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
892  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
893  int result = pthread_key_delete(pthread_key);
894  DCHECK_EQ(0, result);
895  USE(result);
896 }
897 
898 
899 void* Thread::GetThreadLocal(LocalStorageKey key) {
900  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
901  return pthread_getspecific(pthread_key);
902 }
903 
904 
905 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
906  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
907  int result = pthread_setspecific(pthread_key, value);
908  DCHECK_EQ(0, result);
909  USE(result);
910 }
911 
912 #undef LOG_TAG
913 #undef MAP_ANONYMOUS
914 #undef MADV_FREE
915 
916 } // namespace base
917 } // namespace v8
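
The listing above only defines the POSIX backend; the sketch below shows one way an embedder might drive these primitives. It is illustrative only, not part of platform-posix.cc, and assumes the matching declarations in src/base/platform/platform.h (notably the Thread::Options constructor taking a name and the virtual Thread::Run() hook, neither of which appears in this file); all other calls use the signatures defined above.

// Minimal usage sketch for the POSIX platform layer (assumptions noted above).
#include "src/base/platform/platform.h"

namespace {

class WorkerThread : public v8::base::Thread {
 public:
  // Options("worker") is assumed from platform.h; stack size 0 picks the
  // per-OS default chosen in Thread::Start() above.
  WorkerThread() : Thread(Options("worker")) {}
  void Run() override {
    // Runs on the new pthread after ThreadEntry() has set the thread name.
    v8::base::OS::Print("worker tid: %d\n",
                        v8::base::OS::GetCurrentThreadId());
  }
};

}  // namespace

int main() {
  using v8::base::OS;

  // Initialize the platform layer: no hard abort, no GC fake mmap file.
  OS::Initialize(false, nullptr);

  // Reserve one read/write page. Size and alignment must be multiples of
  // AllocatePageSize(), as the DCHECKs in OS::Allocate() require.
  const size_t page = OS::AllocatePageSize();
  void* region = OS::Allocate(OS::GetRandomMmapAddr(), page, page,
                              OS::MemoryPermission::kReadWrite);
  if (region == nullptr) return 1;

  // Drop access again; on POSIX this is mprotect(PROT_NONE) followed by an
  // advisory DiscardSystemPages(), as implemented in OS::SetPermissions().
  OS::SetPermissions(region, page, OS::MemoryPermission::kNoAccess);
  OS::Free(region, page);

  // Spawn and join a worker using the pthread-based Thread implementation.
  WorkerThread worker;
  worker.Start();
  worker.Join();
  return 0;
}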