Auto merge of #123265 - joboet:guardians_of_the_unix, r=ChrisDenton

Refactor stack overflow handling

Currently, every platform must implement a `Guard` that protects a thread from stack overflow. However, UNIX is the only platform that actually does so. Windows has a different mechanism for detecting stack overflow, while the other platforms don't detect it at all. Also, the UNIX stack overflow handling is split between `sys::pal::unix::stack_overflow`, which implements the signal handler, and `sys::pal::unix::thread`, which detects/installs guard pages.

This PR cleans this up by getting rid of `Guard` and unifying UNIX stack overflow handling inside `stack_overflow` (commit 1). Therefore we can get rid of `sys_common::thread_info`, which stores `Guard` and the current `Thread` handle, and move the `thread::current` TLS variable into `thread` (commit 2).

The second commit is not, strictly speaking, necessary. To keep the implementation clean, I've included it here, but if it causes too much noise, I can split it out without any trouble.
This commit is contained in:
bors 2024-04-01 14:35:38 +00:00
commit c518e5aeec
19 changed files with 327 additions and 500 deletions

View file

@ -21,7 +21,6 @@ use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::{PoisonError, RwLock};
use crate::sys::stdio::panic_output;
use crate::sys_common::backtrace;
use crate::sys_common::thread_info;
use crate::thread;
#[cfg(not(test))]
@ -256,7 +255,7 @@ fn default_hook(info: &PanicInfo<'_>) {
None => "Box<dyn Any>",
},
};
let thread = thread_info::current_thread();
let thread = thread::try_current();
let name = thread.as_ref().and_then(|t| t.name()).unwrap_or("<unnamed>");
let write = |err: &mut dyn crate::io::Write| {

View file

@ -24,8 +24,7 @@ pub use core::panicking::{panic_display, panic_fmt};
use crate::sync::Once;
use crate::sys;
use crate::sys_common::thread_info;
use crate::thread::Thread;
use crate::thread::{self, Thread};
// Prints to the "panic output", depending on the platform this may be:
// - the standard error output
@ -96,13 +95,9 @@ unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
unsafe {
sys::init(argc, argv, sigpipe);
let main_guard = sys::thread::guard::init();
// Next, set up the current Thread with the guard information we just
// created. Note that this isn't necessary in general for new threads,
// but we just do this to name the main thread and to give it correct
// info about the stack bounds.
// Set up the current thread to give it the right name.
let thread = Thread::new(Some(rtunwrap!(Ok, CString::new("main"))));
thread_info::set(main_guard, thread);
thread::set_current(thread);
}
}

View file

@ -104,13 +104,3 @@ impl Thread {
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
unsafe { Ok(NonZero::new_unchecked(abi::get_processor_count())) }
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -312,16 +312,6 @@ impl Drop for Thread {
}
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}
/// Terminate and delete the specified task.
///
/// This function will abort if `deleted_task` refers to the calling task.

View file

@ -149,13 +149,3 @@ impl Thread {
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
unsupported()
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -151,18 +151,6 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
))
}
// stub
pub mod guard {
use crate::ops::Range;
pub type Guard = Range<usize>;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}
fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
libc::PTHREAD_STACK_MIN.try_into().expect("Infallible")
}

View file

@ -52,13 +52,3 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
// UEFI is single threaded
Ok(NonZero::new(1).unwrap())
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -11,7 +11,7 @@ pub struct Handler {
impl Handler {
pub unsafe fn new() -> Handler {
make_handler()
make_handler(false)
}
fn null() -> Handler {
@ -29,34 +29,41 @@ impl Drop for Handler {
#[cfg(any(
target_os = "linux",
target_os = "macos",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "hurd",
target_os = "solaris",
target_os = "illumos",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd"
target_os = "openbsd",
target_os = "solaris"
))]
mod imp {
use super::Handler;
use crate::cell::Cell;
use crate::io;
use crate::mem;
use crate::ops::Range;
use crate::ptr;
use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
use crate::sys::pal::unix::os;
use crate::thread;
use libc::MAP_FAILED;
#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
use libc::{mmap as mmap64, munmap};
use libc::{mmap as mmap64, mprotect, munmap};
#[cfg(all(target_os = "linux", target_env = "gnu"))]
use libc::{mmap64, munmap};
use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIG_DFL};
use libc::{mmap64, mprotect, munmap};
use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIGSEGV, SIG_DFL};
use libc::{sigaltstack, SS_DISABLE};
use libc::{MAP_ANON, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SIGSEGV};
use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use crate::sys::pal::unix::os::page_size;
use crate::sys_common::thread_info;
// We use a TLS variable to store the address of the guard page. While TLS
// variables are not guaranteed to be signal-safe, this works out in practice
// since we make sure to write to the variable before the signal stack is
// installed, thereby ensuring that the variable is always allocated when
// the signal handler is called.
thread_local! {
// FIXME: use `Range` once that implements `Copy`.
static GUARD: Cell<(usize, usize)> = const { Cell::new((0, 0)) };
}
// Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages
// (unmapped pages) at the end of every thread's stack, so if a thread ends
@ -84,12 +91,12 @@ mod imp {
info: *mut libc::siginfo_t,
_data: *mut libc::c_void,
) {
let guard = thread_info::stack_guard().unwrap_or(0..0);
let (start, end) = GUARD.get();
let addr = (*info).si_addr() as usize;
// If the faulting address is within the guard page, then we print a
// message saying so and abort.
if guard.start <= addr && addr < guard.end {
if start <= addr && addr < end {
rtprintpanic!(
"\nthread '{}' has overflowed its stack\n",
thread::current().name().unwrap_or("<unknown>")
@ -105,10 +112,17 @@ mod imp {
}
}
static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
static MAIN_ALTSTACK: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut());
static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false);
pub unsafe fn init() {
PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);
// Always write to GUARD to ensure the TLS variable is allocated.
let guard = install_main_guard().unwrap_or(0..0);
GUARD.set((guard.start, guard.end));
let mut action: sigaction = mem::zeroed();
for &signal in &[SIGSEGV, SIGBUS] {
sigaction(signal, ptr::null_mut(), &mut action);
@ -121,7 +135,7 @@ mod imp {
}
}
let handler = make_handler();
let handler = make_handler(true);
MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
mem::forget(handler);
}
@ -150,7 +164,7 @@ mod imp {
let flags = MAP_PRIVATE | MAP_ANON;
let sigstack_size = sigstack_size();
let page_size = page_size();
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
let stackp = mmap64(
ptr::null_mut(),
@ -172,10 +186,17 @@ mod imp {
libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
}
pub unsafe fn make_handler() -> Handler {
pub unsafe fn make_handler(main_thread: bool) -> Handler {
if !NEED_ALTSTACK.load(Ordering::Relaxed) {
return Handler::null();
}
if !main_thread {
// Always write to GUARD to ensure the TLS variable is allocated.
let guard = current_guard().unwrap_or(0..0);
GUARD.set((guard.start, guard.end));
}
let mut stack = mem::zeroed();
sigaltstack(ptr::null(), &mut stack);
// Configure alternate signal stack, if one is not already set.
@ -191,7 +212,7 @@ mod imp {
pub unsafe fn drop_handler(data: *mut libc::c_void) {
if !data.is_null() {
let sigstack_size = sigstack_size();
let page_size = page_size();
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
let stack = libc::stack_t {
ss_sp: ptr::null_mut(),
ss_flags: SS_DISABLE,
@ -225,25 +246,266 @@ mod imp {
fn sigstack_size() -> usize {
libc::SIGSTKSZ
}
#[cfg(target_os = "solaris")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
Some(current_stack.ss_sp)
}
#[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let th = libc::pthread_self();
let stackptr = libc::pthread_get_stackaddr_np(th);
Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
}
#[cfg(target_os = "openbsd")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
let stack_ptr = current_stack.ss_sp;
let stackaddr = if libc::pthread_main_np() == 1 {
// main thread
stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
} else {
// new thread
stack_ptr.addr() - current_stack.ss_size
};
Some(stack_ptr.with_addr(stackaddr))
}
#[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "netbsd",
target_os = "hurd",
target_os = "linux",
target_os = "l4re"
))]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut ret = None;
let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
#[cfg(target_os = "freebsd")]
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "freebsd")]
let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
#[cfg(not(target_os = "freebsd"))]
let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
if e == 0 {
let mut stackaddr = crate::ptr::null_mut();
let mut stacksize = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
ret = Some(stackaddr);
}
if e == 0 || cfg!(target_os = "freebsd") {
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
}
ret
}
unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
// Ensure stackaddr is page aligned! A parent process might
// have reset RLIMIT_STACK to be non-page aligned. The
// pthread_attr_getstack() reports the usable stack area
// stackaddr < stackaddr + stacksize, so if stackaddr is not
// page-aligned, calculate the fix such that stackaddr <
// new_page_aligned_stackaddr < stackaddr + stacksize
let remainder = stackaddr % page_size;
Some(if remainder == 0 {
stackptr
} else {
stackptr.with_addr(stackaddr + page_size - remainder)
})
}
unsafe fn install_main_guard() -> Option<Range<usize>> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
// Linux doesn't allocate the whole stack right away, and
// the kernel has its own stack-guard mechanism to fault
// when growing too close to an existing mapping. If we map
// our own guard, then the kernel starts enforcing a rather
// large gap above that, rendering much of the possible
// stack space useless. See #43052.
//
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
// For the main thread, the musl's pthread_attr_getstack
// returns the current stack size, rather than maximum size
// it can eventually grow to. It cannot be used to determine
// the position of kernel's stack guard.
None
} else if cfg!(target_os = "freebsd") {
// FreeBSD's stack autogrows, and optionally includes a guard page
// at the bottom. If we try to remap the bottom of the stack
// ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page.
let stackptr = get_stack_start_aligned()?;
let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl.
// By default it is 1, checking once is enough since it is
// a boot time config value.
static PAGES: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();
let pages = PAGES.get_or_init(|| {
use crate::sys::weak::dlsym;
dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int);
let mut guard: usize = 0;
let mut size = crate::mem::size_of_val(&guard);
let oid = crate::ffi::CStr::from_bytes_with_nul(
b"security.bsd.stack_guard_page\0",
)
.unwrap();
match sysctlbyname.get() {
Some(fcn) => {
if fcn(oid.as_ptr(), core::ptr::addr_of_mut!(guard) as *mut _, core::ptr::addr_of_mut!(size) as *mut _, crate::ptr::null_mut(), 0) == 0 {
guard
} else {
1
}
},
_ => 1,
}
});
Some(guardaddr..guardaddr + pages * page_size)
} else if cfg!(any(target_os = "openbsd", target_os = "netbsd")) {
// OpenBSD stack already includes a guard page, and stack is
// immutable.
// NetBSD stack includes the guard page.
//
// We'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
} else {
// Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on
// stack overflow.
// Systems which enforce strict PAX MPROTECT do not allow
// to mprotect() a mapping with less restrictive permissions
// than the initial mmap() used, so we mmap() here with
// read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313.
let stackptr = get_stack_start_aligned()?;
let result = mmap64(
stackptr,
page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1,
0,
);
if result != stackptr || result == MAP_FAILED {
panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
}
let result = mprotect(stackptr, page_size, PROT_NONE);
if result != 0 {
panic!("failed to protect the guard page: {}", io::Error::last_os_error());
}
let guardaddr = stackptr.addr();
Some(guardaddr..guardaddr + page_size)
}
}
#[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
unsafe fn current_guard() -> Option<Range<usize>> {
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
}
#[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "hurd",
target_os = "linux",
target_os = "netbsd",
target_os = "l4re"
))]
unsafe fn current_guard() -> Option<Range<usize>> {
let mut ret = None;
let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
#[cfg(target_os = "freebsd")]
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "freebsd")]
let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
#[cfg(not(target_os = "freebsd"))]
let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
if e == 0 {
let mut guardsize = 0;
assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0);
if guardsize == 0 {
if cfg!(all(target_os = "linux", target_env = "musl")) {
// musl versions before 1.1.19 always reported guard
// size obtained from pthread_attr_get_np as zero.
// Use page size as a fallback.
guardsize = PAGE_SIZE.load(Ordering::Relaxed);
} else {
panic!("there is no guard page");
}
}
let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
let mut size = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
let stackaddr = stackptr.addr();
ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
{
// glibc used to include the guard area within the stack, as noted in the BUGS
// section of `man pthread_attr_getguardsize`. This has been corrected starting
// with glibc 2.27, and in some distro backports, so the guard is now placed at the
// end (below) the stack. There's no easy way for us to know which we have at
// runtime, so we'll just match any fault in the range right above or below the
// stack base to call that fault a stack overflow.
Some(stackaddr - guardsize..stackaddr + guardsize)
} else {
Some(stackaddr..stackaddr + guardsize)
};
}
if e == 0 || cfg!(target_os = "freebsd") {
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
}
ret
}
}
#[cfg(not(any(
target_os = "linux",
target_os = "macos",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "hurd",
target_os = "solaris",
target_os = "illumos",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd",
target_os = "solaris"
)))]
mod imp {
pub unsafe fn init() {}
pub unsafe fn cleanup() {}
pub unsafe fn make_handler() -> super::Handler {
pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
super::Handler::null()
}

View file

@ -754,302 +754,6 @@ mod cgroups {
}
}
#[cfg(all(
not(target_os = "linux"),
not(target_os = "freebsd"),
not(target_os = "hurd"),
not(target_os = "macos"),
not(target_os = "netbsd"),
not(target_os = "openbsd"),
not(target_os = "solaris")
))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
use crate::ops::Range;
pub type Guard = Range<usize>;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}
#[cfg(any(
target_os = "linux",
target_os = "freebsd",
target_os = "hurd",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd",
target_os = "solaris"
))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
#[cfg(not(all(target_os = "linux", target_env = "gnu")))]
use libc::{mmap as mmap64, mprotect};
#[cfg(all(target_os = "linux", target_env = "gnu"))]
use libc::{mmap64, mprotect};
use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
use crate::io;
use crate::ops::Range;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::os;
// This is initialized in init() and only read from after
static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
pub type Guard = Range<usize>;
#[cfg(target_os = "solaris")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
Some(current_stack.ss_sp)
}
#[cfg(target_os = "macos")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let th = libc::pthread_self();
let stackptr = libc::pthread_get_stackaddr_np(th);
Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
}
#[cfg(target_os = "openbsd")]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut current_stack: libc::stack_t = crate::mem::zeroed();
assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);
let stack_ptr = current_stack.ss_sp;
let stackaddr = if libc::pthread_main_np() == 1 {
// main thread
stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
} else {
// new thread
stack_ptr.addr() - current_stack.ss_size
};
Some(stack_ptr.with_addr(stackaddr))
}
#[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "netbsd",
target_os = "hurd",
target_os = "linux",
target_os = "l4re"
))]
unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
let mut ret = None;
let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
#[cfg(target_os = "freebsd")]
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "freebsd")]
let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
#[cfg(not(target_os = "freebsd"))]
let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
if e == 0 {
let mut stackaddr = crate::ptr::null_mut();
let mut stacksize = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
ret = Some(stackaddr);
}
if e == 0 || cfg!(target_os = "freebsd") {
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
}
ret
}
// Precondition: PAGE_SIZE is initialized.
unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
let page_size = PAGE_SIZE.load(Ordering::Relaxed);
assert!(page_size != 0);
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
// Ensure stackaddr is page aligned! A parent process might
// have reset RLIMIT_STACK to be non-page aligned. The
// pthread_attr_getstack() reports the usable stack area
// stackaddr < stackaddr + stacksize, so if stackaddr is not
// page-aligned, calculate the fix such that stackaddr <
// new_page_aligned_stackaddr < stackaddr + stacksize
let remainder = stackaddr % page_size;
Some(if remainder == 0 {
stackptr
} else {
stackptr.with_addr(stackaddr + page_size - remainder)
})
}
pub unsafe fn init() -> Option<Guard> {
let page_size = os::page_size();
PAGE_SIZE.store(page_size, Ordering::Relaxed);
if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
// Linux doesn't allocate the whole stack right away, and
// the kernel has its own stack-guard mechanism to fault
// when growing too close to an existing mapping. If we map
// our own guard, then the kernel starts enforcing a rather
// large gap above that, rendering much of the possible
// stack space useless. See #43052.
//
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
// For the main thread, the musl's pthread_attr_getstack
// returns the current stack size, rather than maximum size
// it can eventually grow to. It cannot be used to determine
// the position of kernel's stack guard.
None
} else if cfg!(target_os = "freebsd") {
// FreeBSD's stack autogrows, and optionally includes a guard page
// at the bottom. If we try to remap the bottom of the stack
// ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page.
let stackptr = get_stack_start_aligned()?;
let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl.
// By default it is 1, checking once is enough since it is
// a boot time config value.
static LOCK: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();
let guard = guardaddr
..guardaddr
+ *LOCK.get_or_init(|| {
use crate::sys::weak::dlsym;
dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int);
let mut guard: usize = 0;
let mut size = crate::mem::size_of_val(&guard);
let oid = crate::ffi::CStr::from_bytes_with_nul(
b"security.bsd.stack_guard_page\0",
)
.unwrap();
match sysctlbyname.get() {
Some(fcn) => {
if fcn(oid.as_ptr(), core::ptr::addr_of_mut!(guard) as *mut _, core::ptr::addr_of_mut!(size) as *mut _, crate::ptr::null_mut(), 0) == 0 {
return guard;
}
return 1;
},
_ => { return 1; }
}
}) * page_size;
Some(guard)
} else if cfg!(any(target_os = "openbsd", target_os = "netbsd")) {
// OpenBSD stack already includes a guard page, and stack is
// immutable.
// NetBSD stack includes the guard page.
//
// We'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
// trust that the kernel's own stack guard will work.
let stackptr = get_stack_start_aligned()?;
let stackaddr = stackptr.addr();
Some(stackaddr - page_size..stackaddr)
} else {
// Reallocate the last page of the stack.
// This ensures SIGBUS will be raised on
// stack overflow.
// Systems which enforce strict PAX MPROTECT do not allow
// to mprotect() a mapping with less restrictive permissions
// than the initial mmap() used, so we mmap() here with
// read/write permissions and only then mprotect() it to
// no permissions at all. See issue #50313.
let stackptr = get_stack_start_aligned()?;
let result = mmap64(
stackptr,
page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1,
0,
);
if result != stackptr || result == MAP_FAILED {
panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
}
let result = mprotect(stackptr, page_size, PROT_NONE);
if result != 0 {
panic!("failed to protect the guard page: {}", io::Error::last_os_error());
}
let guardaddr = stackptr.addr();
Some(guardaddr..guardaddr + page_size)
}
}
#[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
pub unsafe fn current() -> Option<Guard> {
let stackptr = get_stack_start()?;
let stackaddr = stackptr.addr();
Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
}
#[cfg(any(
target_os = "android",
target_os = "freebsd",
target_os = "hurd",
target_os = "linux",
target_os = "netbsd",
target_os = "l4re"
))]
pub unsafe fn current() -> Option<Guard> {
let mut ret = None;
let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
#[cfg(target_os = "freebsd")]
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
#[cfg(target_os = "freebsd")]
let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
#[cfg(not(target_os = "freebsd"))]
let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
if e == 0 {
let mut guardsize = 0;
assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0);
if guardsize == 0 {
if cfg!(all(target_os = "linux", target_env = "musl")) {
// musl versions before 1.1.19 always reported guard
// size obtained from pthread_attr_get_np as zero.
// Use page size as a fallback.
guardsize = PAGE_SIZE.load(Ordering::Relaxed);
} else {
panic!("there is no guard page");
}
}
let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
let mut size = 0;
assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);
let stackaddr = stackptr.addr();
ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", target_env = "musl")) {
Some(stackaddr - guardsize..stackaddr)
} else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
{
// glibc used to include the guard area within the stack, as noted in the BUGS
// section of `man pthread_attr_getguardsize`. This has been corrected starting
// with glibc 2.27, and in some distro backports, so the guard is now placed at the
// end (below) the stack. There's no easy way for us to know which we have at
// runtime, so we'll just match any fault in the range right above or below the
// stack base to call that fault a stack overflow.
Some(stackaddr - guardsize..stackaddr + guardsize)
} else {
Some(stackaddr..stackaddr + guardsize)
};
}
if e == 0 || cfg!(target_os = "freebsd") {
assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
}
ret
}
}
// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
// We need that information to avoid blowing up when a small stack

View file

@ -38,13 +38,3 @@ impl Thread {
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
unsupported()
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -193,13 +193,3 @@ impl Thread {
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
unsupported()
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -144,14 +144,3 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
cpus => Ok(unsafe { NonZero::new_unchecked(cpus) }),
}
}
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -140,13 +140,3 @@ pub fn available_parallelism() -> io::Result<NonZero<usize>> {
// We're unicore right now.
Ok(unsafe { NonZero::new_unchecked(1) })
}
pub mod guard {
pub type Guard = !;
pub unsafe fn current() -> Option<Guard> {
None
}
pub unsafe fn init() -> Option<Guard> {
None
}
}

View file

@ -115,8 +115,7 @@ use crate::sync::atomic::{
AtomicBool, AtomicPtr,
Ordering::{AcqRel, Acquire, Relaxed, Release},
};
use crate::sys_common::thread_info;
use crate::thread::Thread;
use crate::thread::{self, Thread};
// Locking uses exponential backoff. `SPIN_COUNT` indicates how many times the
// locking operation will be retried.
@ -203,8 +202,7 @@ impl Node {
fn prepare(&mut self) {
// Fall back to creating an unnamed `Thread` handle to allow locking in
// TLS destructors.
self.thread
.get_or_init(|| thread_info::current_thread().unwrap_or_else(|| Thread::new(None)));
self.thread.get_or_init(|| thread::try_current().unwrap_or_else(|| Thread::new(None)));
self.completed = AtomicBool::new(false);
}

View file

@ -26,7 +26,6 @@ pub mod io;
pub mod lazy_box;
pub mod process;
pub mod thread;
pub mod thread_info;
pub mod thread_local_dtor;
pub mod thread_parking;
pub mod wstr;

View file

@ -1,53 +0,0 @@
#![allow(dead_code)] // stack_guard isn't used right now on all platforms
use crate::cell::OnceCell;
use crate::sys;
use crate::sys::thread::guard::Guard;
use crate::thread::Thread;
struct ThreadInfo {
stack_guard: OnceCell<Guard>,
thread: OnceCell<Thread>,
}
thread_local! {
static THREAD_INFO: ThreadInfo = const { ThreadInfo {
stack_guard: OnceCell::new(),
thread: OnceCell::new()
} };
}
impl ThreadInfo {
fn with<R, F>(f: F) -> Option<R>
where
F: FnOnce(&Thread, &OnceCell<Guard>) -> R,
{
THREAD_INFO
.try_with(move |thread_info| {
let thread =
thread_info.thread.get_or_init(|| Thread::new(sys::thread::Thread::get_name()));
f(thread, &thread_info.stack_guard)
})
.ok()
}
}
pub fn current_thread() -> Option<Thread> {
ThreadInfo::with(|thread, _| thread.clone())
}
pub fn stack_guard() -> Option<Guard> {
ThreadInfo::with(|_, guard| guard.get().cloned()).flatten()
}
/// Set new thread info, panicking if it has already been initialized
#[allow(unreachable_code, unreachable_patterns)] // some platforms don't use stack_guard
pub fn set(stack_guard: Option<Guard>, thread: Thread) {
THREAD_INFO.with(move |thread_info| {
rtassert!(thread_info.stack_guard.get().is_none() && thread_info.thread.get().is_none());
if let Some(guard) = stack_guard {
thread_info.stack_guard.set(guard).unwrap();
}
thread_info.thread.set(thread).unwrap();
});
}

View file

@ -159,7 +159,7 @@
mod tests;
use crate::any::Any;
use crate::cell::UnsafeCell;
use crate::cell::{OnceCell, UnsafeCell};
use crate::ffi::{CStr, CString};
use crate::fmt;
use crate::io;
@ -174,7 +174,6 @@ use crate::str;
use crate::sync::Arc;
use crate::sys::thread as imp;
use crate::sys_common::thread;
use crate::sys_common::thread_info;
use crate::sys_common::thread_parking::Parker;
use crate::sys_common::{AsInner, IntoInner};
use crate::time::{Duration, Instant};
@ -518,12 +517,8 @@ impl Builder {
crate::io::set_output_capture(output_capture);
// SAFETY: we constructed `f` initialized.
let f = f.into_inner();
// SAFETY: the stack guard passed is the one for the current thread.
// This means the current thread's stack and the new thread's stack
// are properly set and protected from each other.
thread_info::set(unsafe { imp::guard::current() }, their_thread);
set_current(their_thread);
let try_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
crate::sys_common::backtrace::__rust_begin_short_backtrace(f)
}));
@ -683,6 +678,27 @@ where
Builder::new().spawn(f).expect("failed to spawn thread")
}
thread_local! {
static CURRENT: OnceCell<Thread> = const { OnceCell::new() };
}
/// Sets the thread handle for the current thread.
///
/// Panics if the handle has been set already or when called from a TLS destructor.
pub(crate) fn set_current(thread: Thread) {
CURRENT.with(|current| current.set(thread).unwrap());
}
/// Gets a handle to the thread that invokes it.
///
/// In contrast to the public `current` function, this will not panic if called
/// from inside a TLS destructor.
pub(crate) fn try_current() -> Option<Thread> {
CURRENT
.try_with(|current| current.get_or_init(|| Thread::new(imp::Thread::get_name())).clone())
.ok()
}
/// Gets a handle to the thread that invokes it.
///
/// # Examples
@ -705,7 +721,7 @@ where
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn current() -> Thread {
thread_info::current_thread().expect(
try_current().expect(
"use of std::thread::current() is not possible \
after the thread's local data has been destroyed",
)

View file

@ -1,4 +1,4 @@
fn main() {
let _ = std::thread::thread_info::current_thread();
//~^ERROR module `thread_info` is private
let _ = std::sys::os::errno();
//~^ERROR module `sys` is private
}

View file

@ -1,13 +1,13 @@
error[E0603]: module `thread_info` is private
--> $DIR/stability-in-private-module.rs:2:26
error[E0603]: module `sys` is private
--> $DIR/stability-in-private-module.rs:2:18
|
LL | let _ = std::thread::thread_info::current_thread();
| ^^^^^^^^^^^ -------------- function `current_thread` is not publicly re-exported
| |
| private module
LL | let _ = std::sys::os::errno();
| ^^^ ----- function `errno` is not publicly re-exported
| |
| private module
|
note: the module `thread_info` is defined here
--> $SRC_DIR/std/src/thread/mod.rs:LL:COL
note: the module `sys` is defined here
--> $SRC_DIR/std/src/lib.rs:LL:COL
error: aborting due to 1 previous error