5 use crate::num::NonZeroUsize;
7 use crate::sys::{os, stack_overflow};
8 use crate::time::Duration;
10 #[cfg(all(target_os = "linux", target_env = "gnu"))]
11 use crate::sys::weak::dlsym;
12 #[cfg(any(target_os = "solaris", target_os = "illumos"))]
13 use crate::sys::weak::weak;
// Per-platform default stack size for spawned threads when the user does
// not request one explicitly.
#[cfg(not(any(target_os = "l4re", target_os = "vxworks", target_os = "espidf")))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
#[cfg(target_os = "l4re")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
#[cfg(target_os = "vxworks")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
#[cfg(target_os = "espidf")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF menuconfig system should be used
// Raw Zircon syscall declarations used by the Fuchsia `set_name` below.
#[cfg(target_os = "fuchsia")]
type zx_handle_t = u32;
type zx_status_t = i32;
// Property id selecting a kernel object's human-readable name.
pub const ZX_PROP_NAME: u32 = 3;
pub fn zx_object_set_property(
value: *const libc::c_void,
value_size: libc::size_t,
pub fn zx_thread_self() -> zx_handle_t;
// Some platforms may have pthread_t as a pointer in which case we still want
// a thread to be Send/Sync
// SAFETY: the contained pthread_t is only an opaque thread identifier;
// transferring or sharing it between threads is safe.
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}
// unsafe: see thread::Builder::spawn_unchecked for safety requirements
// Spawns a native pthread running `p` with a stack of at least `stack`
// bytes (clamped up to the platform minimum). Returns a `Thread` owning
// the raw pthread id, or the OS error if creation failed.
pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
// Double-box so a thin raw pointer can cross the C ABI into thread_start.
let p = Box::into_raw(box p);
let mut native: libc::pthread_t = mem::zeroed();
let mut attr: libc::pthread_attr_t = mem::zeroed();
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "espidf")]
// Only set the stack if a non-zero value is passed
// 0 is used as an indication that the default stack size configured in the ESP-IDF menuconfig system should be used
libc::pthread_attr_setstacksize(&mut attr, cmp::max(stack, min_stack_size(&attr))),
#[cfg(not(target_os = "espidf"))]
let stack_size = cmp::max(stack, min_stack_size(&attr));
match libc::pthread_attr_setstacksize(&mut attr, stack_size) {
// EINVAL is the only error documented for a too-small/misaligned size.
assert_eq!(n, libc::EINVAL);
// EINVAL means |stack_size| is either too small or not a
// multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = os::page_size();
// `-(page_size as isize - 1) as usize - 1` equals `!(page_size - 1)`,
// the mask that truncates to a page multiple, so this rounds up.
(stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _);
// Note: if the thread creation fails and this assert fails, then p will
// be leaked. However, an alternative design could cause double-free
// which is clearly worse.
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
// The thread failed to start and as a result p was not consumed. Therefore, it is
// safe to reconstruct the box so that it gets deallocated.
drop(Box::from_raw(p));
Err(io::Error::from_raw_os_error(ret))
Ok(Thread { id: native })
// C-ABI entry point passed to pthread_create; receives the boxed closure
// allocated in `Thread::new`.
extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
// Next, set up our stack overflow handler which may get triggered if we run
let _handler = stack_overflow::Handler::new();
// Finally, let's run some code.
// Reconstitutes the Box<Box<dyn FnOnce()>> from Thread::new and invokes it;
// the box is dropped afterwards, freeing the closure's allocation.
Box::from_raw(main as *mut Box<dyn FnOnce()>)();
// Ask the scheduler to run another thread; sched_yield is not expected to
// fail, so a debug assertion suffices.
let ret = unsafe { libc::sched_yield() };
debug_assert_eq!(ret, 0);
#[cfg(target_os = "android")]
// Names the calling thread via prctl(PR_SET_NAME).
pub fn set_name(name: &CStr) {
// Raw prctl option value — NOTE(review): presumably duplicated here
// because libc does not export PR_SET_NAME on this target; confirm.
const PR_SET_NAME: libc::c_int = 15;
#[cfg(target_os = "linux")]
// Names the calling thread via the glibc/musl pthread extension.
pub fn set_name(name: &CStr) {
// Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20.
libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
pub fn set_name(name: &CStr) {
// These BSDs spell the extension `pthread_set_name_np` (extra underscore).
libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
pub fn set_name(name: &CStr) {
// Apple's pthread_setname_np takes only the name and always applies it
// to the calling thread.
libc::pthread_setname_np(name.as_ptr());
#[cfg(target_os = "netbsd")]
pub fn set_name(name: &CStr) {
// NetBSD's pthread_setname_np takes a printf-style format plus an
// argument, so pass "%s" as the format and the name as its argument.
let cname = CStr::from_bytes_with_nul_unchecked(b"%s\0".as_slice());
libc::pthread_setname_np(
libc::pthread_self(),
name.as_ptr() as *mut libc::c_void,
#[cfg(any(target_os = "solaris", target_os = "illumos"))]
pub fn set_name(name: &CStr) {
// pthread_setname_np is resolved weakly at runtime (see the `weak`
// import at the top of the file) so the call is simply skipped on
// libc versions that lack the symbol.
fn pthread_setname_np(
libc::pthread_t, *const libc::c_char
if let Some(f) = pthread_setname_np.get() {
f(libc::pthread_self(), name.as_ptr());
#[cfg(target_os = "fuchsia")]
pub fn set_name(name: &CStr) {
// Sets the ZX_PROP_NAME property on the current thread's kernel object;
// the length passed excludes the NUL terminator.
zx_object_set_property(
name.as_ptr() as *const libc::c_void,
name.to_bytes().len(),
#[cfg(target_os = "haiku")]
pub fn set_name(name: &CStr) {
// Haiku names threads through its native kernel API rather than
// pthreads: look up the current thread id, then rename it.
let thread_self = libc::find_thread(ptr::null_mut());
libc::rename_thread(thread_self, name.as_ptr());
target_env = "newlib",
target_os = "emscripten",
target_os = "vxworks"
// No-op fallback: these targets expose no thread-naming API.
pub fn set_name(_name: &CStr) {
// Newlib, Emscripten, and VxWorks have no way to set a thread name.
// Blocks the calling thread for at least `dur`, restarting nanosleep after
// signal interruptions so the full duration is honored.
#[cfg(not(target_os = "espidf"))]
pub fn sleep(dur: Duration) {
let mut secs = dur.as_secs();
let mut nsecs = dur.subsec_nanos() as _;
// If we're awoken with a signal then the return value will be -1 and
// nanosleep will fill in `ts` with the remaining time.
while secs > 0 || nsecs > 0 {
let mut ts = libc::timespec {
// Clamp to time_t's range; any remainder is slept in later iterations.
tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t,
secs -= ts.tv_sec as u64;
let ts_ptr = &mut ts as *mut _;
if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
// Only EINTR is acceptable; re-add the unslept remainder and retry.
assert_eq!(os::errno(), libc::EINTR);
secs += ts.tv_sec as u64;
// ESP-IDF variant: sleeps the duration in u32-sized microsecond chunks.
#[cfg(target_os = "espidf")]
pub fn sleep(dur: Duration) {
let mut micros = dur.as_micros();
// Sleep at most u32::MAX microseconds per iteration; the remainder is
// handled on subsequent loop passes.
let st = if micros > u32::MAX as u128 { u32::MAX } else { micros as u32 };
micros -= st as u128;
// Blocks until the thread terminates; panics if pthread_join reports an error.
let ret = libc::pthread_join(self.id, ptr::null_mut());
assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
// Borrowing accessor for the raw pthread id.
pub fn id(&self) -> libc::pthread_t {
// Consumes the Thread, yielding the raw id. NOTE(review): body elided
// here; presumably forgets `self` so Drop's detach is skipped — confirm.
pub fn into_id(self) -> libc::pthread_t {
impl Drop for Thread {
// Detach the thread so the system reclaims its resources when it exits;
// a failure here is a bug, hence only a debug assertion.
let ret = unsafe { libc::pthread_detach(self.id) };
debug_assert_eq!(ret, 0);
// Estimates the number of hardware threads available to this process,
// honoring CPU affinity masks and cgroup quotas where the platform
// exposes them; errors when the count cannot be determined.
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
target_os = "android",
target_os = "emscripten",
target_os = "fuchsia",
target_os = "solaris",
target_os = "illumos",
#[cfg(any(target_os = "android", target_os = "linux"))]
// A cgroup CPU quota can restrict us below the affinity mask; clamp to
// at least 1 so the NonZeroUsize construction below stays sound.
let quota = cgroups::quota().max(1);
let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
if libc::sched_getaffinity(0, mem::size_of::<libc::cpu_set_t>(), &mut set) == 0 {
let count = libc::CPU_COUNT(&set) as usize;
let count = count.min(quota);
// SAFETY: affinity mask can't be empty and the quota gets clamped to a minimum of 1
return Ok(NonZeroUsize::new_unchecked(count));
// Fall back to the POSIX online-processor count when affinity is unavailable.
match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
-1 => Err(io::Error::last_os_error()),
0 => Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")),
cpus => Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) }),
} else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))] {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = crate::mem::size_of_val(&cpus);
cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
// Fallback approach in case of errors or no hardware threads.
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
// Handle errors if any.
return Err(io::Error::last_os_error());
} else if cpus == 0 {
return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) })
} else if #[cfg(target_os = "openbsd")] {
// Query the cpu count via the HW_NCPU sysctl.
let mut cpus: libc::c_uint = 0;
let mut cpus_size = crate::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
// Handle errors if any.
return Err(io::Error::last_os_error());
} else if cpus == 0 {
return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) })
} else if #[cfg(target_os = "haiku")] {
// system_info cpu_count field gets the static data set at boot time with `smp_set_num_cpus`
// `get_system_info` calls then `smp_get_num_cpus`
let mut sinfo: libc::system_info = crate::mem::zeroed();
let res = libc::get_system_info(&mut sinfo);
if res != libc::B_OK {
return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
Ok(NonZeroUsize::new_unchecked(sinfo.cpu_count as usize))
// FIXME: implement on vxWorks, Redox, l4re
Err(io::const_io_error!(io::ErrorKind::Unsupported, "Getting the number of hardware threads is not supported on the target platform"))
387 #[cfg(any(target_os = "android", target_os = "linux"))]
389 //! Currently not covered
390 //! * cgroup v2 in non-standard mountpoints
391 //! * paths containing control characters or spaces, since those would be escaped in procfs
392 //! output and we don't unescape
393 use crate::borrow::Cow;
394 use crate::ffi::OsString;
395 use crate::fs::{try_exists, File};
397 use crate::io::{BufRead, BufReader};
398 use crate::os::unix::ffi::OsStringExt;
399 use crate::path::Path;
400 use crate::path::PathBuf;
401 use crate::str::from_utf8;
/// Returns cgroup CPU quota in core-equivalents, rounded down or usize::MAX if the quota cannot
/// be determined or is not set.
pub(super) fn quota() -> usize {
let mut quota = usize::MAX;
// Attempting to open a file fails under default flags due to isolation.
// And Miri does not have parallelism anyway.
// Best-effort: any IO or parse failure below leaves `quota` at
// usize::MAX, i.e. "unlimited".
let _: Option<()> = try {
let mut buf = Vec::with_capacity(128);
// find our place in the cgroup hierarchy
File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
let (cgroup_path, version) =
buf.split(|&c| c == b'\n').fold(None, |previous, line| {
// Each line is "<id>:<controllers>:<path>".
let mut fields = line.splitn(3, |&c| c == b':');
// 2nd field is a list of controllers for v1 or empty for v2
let version = match fields.nth(1) {
Some(b"") => Cgroup::V2,
if from_utf8(controllers)
.is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
_ => return previous,
// already-found v1 trumps v2 since it explicitly specifies its controllers
if previous.is_some() && version == Cgroup::V2 {
let path = fields.last()?;
// skip leading slash
Some((path[1..].to_owned(), version))
let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));
// Dispatch to the version-specific quota reader.
quota = match version {
Cgroup::V1 => quota_v1(cgroup_path),
Cgroup::V2 => quota_v2(cgroup_path),
// Reads the cgroup v2 `cpu.max` limit for `group_path`, walking up the
// hierarchy and keeping the most restrictive quota found; returns
// usize::MAX when no limit applies.
fn quota_v2(group_path: PathBuf) -> usize {
let mut quota = usize::MAX;
let mut path = PathBuf::with_capacity(128);
let mut read_buf = String::with_capacity(20);
// standard mount location defined in file-hierarchy(7) manpage
let cgroup_mount = "/sys/fs/cgroup";
path.push(cgroup_mount);
path.push(&group_path);
path.push("cgroup.controllers");
// skip if we're not looking at cgroup2
if matches!(try_exists(&path), Err(_) | Ok(false)) {
let _: Option<()> = try {
// Walk from the leaf group up towards the mount root.
while path.starts_with(cgroup_mount) {
path.push("cpu.max");
if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
// First line is "<limit> <period>"; a non-numeric limit
// fails the parse below and imposes no quota.
let raw_quota = read_buf.lines().next()?;
let mut raw_quota = raw_quota.split(' ');
let limit = raw_quota.next()?;
let period = raw_quota.next()?;
match (limit.parse::<usize>(), period.parse::<usize>()) {
(Ok(limit), Ok(period)) => {
// Core-equivalents, rounded down.
quota = quota.min(limit / period);
path.pop(); // pop filename
path.pop(); // pop dir
// Reads the cgroup v1 cpu-controller quota (cpu.cfs_quota_us divided by
// cpu.cfs_period_us) for `group_path`, trying common mountpoints first
// and falling back to scanning mountinfo.
fn quota_v1(group_path: PathBuf) -> usize {
let mut quota = usize::MAX;
let mut path = PathBuf::with_capacity(128);
let mut read_buf = String::with_capacity(20);
// Hardcode commonly used locations mentioned in the cgroups(7) manpage
// if that doesn't work scan mountinfo and adjust `group_path` for bind-mounts
let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
|p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
|p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
// this can be expensive on systems with tons of mountpoints
// but we only get to this point when /proc/self/cgroups explicitly indicated
// this process belongs to a cpu-controller cgroup v1 and the defaults didn't work
for mount in mounts {
let Some((mount, group_path)) = mount(&group_path) else { continue };
path.push(mount.as_ref());
path.push(&group_path);
// skip if we guessed the mount incorrectly
if matches!(try_exists(&path), Err(_) | Ok(false)) {
// Walk from the leaf group up to the mountpoint, keeping the most
// restrictive quota found.
while path.starts_with(mount.as_ref()) {
// Reads one integer value from `path`/<name>, restoring `path` on exit.
let mut parse_file = |name| {
let f = File::open(&path);
path.pop(); // restore buffer before any early returns
f.ok()?.read_to_string(&mut read_buf).ok()?;
let parsed = read_buf.trim().parse::<usize>().ok()?;
let limit = parse_file("cpu.cfs_quota_us");
let period = parse_file("cpu.cfs_period_us");
match (limit, period) {
(Some(limit), Some(period)) => quota = quota.min(limit / period),
// we passed the try_exists above so we should have traversed the correct hierarchy
// when reaching this line
/// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
/// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
/// over the already-included prefix
fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
let mut reader = BufReader::new(File::open("/proc/self/mountinfo").ok()?);
let mut line = String::with_capacity(256);
// Read one mountinfo line per iteration; 0 bytes read means EOF with
// no matching mount found.
if reader.read_line(&mut line).ok()? == 0 {
let line = line.trim();
let mut items = line.split(' ');
// mountinfo fields: the 4th is the bind-mounted sub path, the 5th the
// mount point; filesystem type and superblock options sit near the end
// of the line, hence the back-iteration below.
let sub_path = items.nth(3)?;
let mount_point = items.next()?;
let mount_opts = items.next_back()?;
let filesystem_type = items.nth_back(1)?;
if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
// not a cgroup / not a cpu-controller
let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;
if !group_path.starts_with(sub_path) {
// this is a bind-mount and the bound subdirectory
// does not contain the cgroup this process belongs to
// Drop the prefix already provided by the bind mount itself.
let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;
return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
not(target_os = "linux"),
not(target_os = "freebsd"),
not(target_os = "macos"),
not(target_os = "netbsd"),
not(target_os = "openbsd"),
not(target_os = "solaris")
#[cfg_attr(test, allow(dead_code))]
use crate::ops::Range;
pub type Guard = Range<usize>;
// Stub guard implementation for targets without stack-guard support:
// no guard page is ever reported or installed.
pub unsafe fn current() -> Option<Guard> {
pub unsafe fn init() -> Option<Guard> {
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
target_os = "solaris"
#[cfg_attr(test, allow(dead_code))]
use libc::{mmap, mprotect};
use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
use crate::ops::Range;
use crate::sync::atomic::{AtomicUsize, Ordering};
// This is initialized in init() and only read from after
static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
// Address range occupied by the guard page(s) below a thread's stack.
pub type Guard = Range<usize>;
// Returns the base address of the current thread's stack via
// stack_getbounds.
#[cfg(target_os = "solaris")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
Some(current_stack.ss_sp)
#[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let th = libc::pthread_self();
// pthread_get_stackaddr_np returns the top of the stack, so subtract
// the stack size to get the base (lowest) address.
let stackptr = libc::pthread_get_stackaddr_np(th);
Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
#[cfg(target_os = "openbsd")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
// The base is derived by subtracting ss_size from ss_sp's address; the
// main thread additionally skips one page at the bottom.
let stack_ptr = current_stack.ss_sp;
let stackaddr = if libc::pthread_main_np() == 1 {
// main thread
stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
// new thread
stack_ptr.addr() - current_stack.ss_size
Some(stack_ptr.with_addr(stackaddr))
target_os = "android",
target_os = "freebsd",
target_os = "netbsd",
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
// Query the thread's attributes: FreeBSD requires an explicit attr init
// plus pthread_attr_get_np; the other targets use pthread_getattr_np.
let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
#[cfg(target_os = "freebsd")]
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "freebsd")]
let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
#[cfg(not(target_os = "freebsd"))]
let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
let mut stackaddr = crate::ptr::null_mut();
let mut stacksize = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
ret = Some(stackaddr);
// On FreeBSD the attr was initialized unconditionally above, so it must
// be destroyed even when pthread_attr_get_np reported an error.
if e == 0 || cfg!(target_os = "freebsd") {
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
// Precondition: PAGE_SIZE is initialized.
// Returns the stack base rounded up to the next page boundary when it is
// not already page-aligned.
unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
assert!(page_size != 0);
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
// Ensure stackaddr is page aligned! A parent process might
// have reset RLIMIT_STACK to be non-page aligned. The
// pthread_attr_getstack() reports the usable stack area
// stackaddr < stackaddr + stacksize, so if stackaddr is not
// page-aligned, calculate the fix such that stackaddr <
// new_page_aligned_stackaddr < stackaddr + stacksize
let remainder = stackaddr % page_size;
Some(if remainder == 0 {
stackptr.with_addr(stackaddr + page_size - remainder)
// Installs or locates the stack guard for the main thread, recording the
// page size for later guard computations. Returns the guard's address
// range when one exists or could be created.
pub unsafe fn init() -> Option<Guard> {
let page_size = os::page_size();
PAGE_SIZE.store(page_size, Ordering::Relaxed);
if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
// Linux doesn't allocate the whole stack right away, and
// the kernel has its own stack-guard mechanism to fault
// when growing too close to an existing mapping. If we map
// our own guard, then the kernel starts enforcing a rather
// large gap above that, rendering much of the possible
// stack space useless. See #43052.
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
// For the main thread, the musl's pthread_attr_getstack
// returns the current stack size, rather than maximum size
// it can eventually grow to. It cannot be used to determine
// the position of kernel's stack guard.
} else if cfg!(target_os = "freebsd") {
// FreeBSD's stack autogrows, and optionally includes a guard page
// at the bottom. If we try to remap the bottom of the stack
// ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page.
let stackptr = get_stack_start_aligned()?;
let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl, but there are
// few reasons to change it from the default. The default value has
// been 1 ever since FreeBSD 11.1 and 10.4.
const GUARD_PAGES: usize = 1;
let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
} else if cfg!(target_os = "openbsd") {
// OpenBSD stack already includes a guard page, and stack is
// We'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
// Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on
// Systems which enforce strict PAX MPROTECT do not allow
// to mprotect() a mapping with less restrictive permissions
// than the initial mmap() used, so we mmap() here with
// read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313.
let stackptr = get_stack_start_aligned()?;
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
if result != stackptr || result == MAP_FAILED {
panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
// Revoke all access so any touch of the guard page faults.
let result = mprotect(stackptr, page_size, PROT_NONE);
panic!("failed to protect the guard page: {}", io::Error::last_os_error());
let guardaddr = stackptr.addr();
Some(guardaddr..guardaddr + page_size)
// Reports the current thread's guard range as the single page directly
// below the stack base.
#[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
pub unsafe fn current() -> Option<Guard> {
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
target_os = "android",
target_os = "freebsd",
target_os = "netbsd",
// Computes the current thread's guard range from its pthread attributes.
// The guard's placement relative to the stack base varies by platform and
// libc version — see the per-branch comments below.
pub unsafe fn current() -> Option<Guard> {
let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
#[cfg(target_os = "freebsd")]
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "freebsd")]
let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
#[cfg(not(target_os = "freebsd"))]
let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
let mut guardsize = 0;
assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0);
if cfg!(all(target_os = "linux", target_env = "musl")) {
// musl versions before 1.1.19 always reported guard
// size obtained from pthread_attr_get_np as zero.
// Use page size as a fallback.
guardsize = PAGE_SIZE.load(Ordering::Relaxed);
panic!("there is no guard page");
let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
let stackaddr = stackptr.addr();
ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
// glibc used to include the guard area within the stack, as noted in the BUGS
// section of `man pthread_attr_getguardsize`. This has been corrected starting
// with glibc 2.27, and in some distro backports, so the guard is now placed at the
// end (below) the stack. There's no easy way for us to know which we have at
// runtime, so we'll just match any fault in the range right above or below the
// stack base to call that fault a stack overflow.
Some(stackaddr - guardsize..stackaddr + guardsize)
Some(stackaddr..stackaddr + guardsize)
// On FreeBSD the attr was explicitly initialized above, so destroy it
// even when pthread_attr_get_np failed.
if e == 0 || cfg!(target_os = "freebsd") {
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
// We need that information to avoid blowing up when a small stack
// is created in an application with big thread-local storage requirements.
// See #6233 for rationale and details.
#[cfg(all(target_os = "linux", target_env = "gnu"))]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
// We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
// We shouldn't really be using such an internal symbol, but there's currently
// no other way to account for the TLS size.
dlsym!(fn __pthread_get_minstack(*const libc::pthread_attr_t) -> libc::size_t);
// Fall back to the portable minimum when the private symbol is missing.
match __pthread_get_minstack.get() {
None => libc::PTHREAD_STACK_MIN,
Some(f) => unsafe { f(attr) },
// No point in looking up __pthread_get_minstack() on non-glibc platforms.
#[cfg(all(not(all(target_os = "linux", target_env = "gnu")), not(target_os = "netbsd")))]
fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
libc::PTHREAD_STACK_MIN
// NetBSD variant of the minimum-stack query.
#[cfg(target_os = "netbsd")]
fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {