Auto merge of #95727 - m-ou-se:futex-reentrantmutex, r=Amanieu
Replace ReentrantMutex by a futex-based one on Linux.

Tracking issue: https://github.com/rust-lang/rust/issues/93740

r? `@Amanieu`
Commit ab33f71a8b
4 changed files with 101 additions and 7 deletions
@@ -1,5 +1,6 @@
+use crate::cell::UnsafeCell;
 use crate::sync::atomic::{
-    AtomicI32,
+    AtomicI32, AtomicUsize,
     Ordering::{Acquire, Relaxed, Release},
 };
 use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
@@ -162,3 +163,98 @@ impl Condvar {
         r
     }
 }
+
+/// A reentrant mutex. Used by stdout().lock() and friends.
+///
+/// The 'owner' field tracks which thread has locked the mutex.
+///
+/// We use current_thread_unique_ptr() as the thread identifier,
+/// which is just the address of a thread local variable.
+///
+/// If `owner` is set to the identifier of the current thread,
+/// we assume the mutex is already locked and instead of locking it again,
+/// we increment `lock_count`.
+///
+/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
+/// it reaches zero.
+///
+/// `lock_count` is protected by the mutex and only accessed by the thread that has
+/// locked the mutex, so needs no synchronization.
+///
+/// `owner` can be checked by other threads that want to see if they already
+/// hold the lock, so needs to be atomic. If it compares equal, we're on the
+/// same thread that holds the mutex and memory access can use relaxed ordering
+/// since we're not dealing with multiple threads. If it compares unequal,
+/// synchronization is left to the mutex, making relaxed memory ordering for
+/// the `owner` field fine in all cases.
+pub struct ReentrantMutex {
+    mutex: Mutex,
+    owner: AtomicUsize,
+    lock_count: UnsafeCell<u32>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+impl ReentrantMutex {
+    #[inline]
+    pub const unsafe fn uninitialized() -> Self {
+        Self { mutex: Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0) }
+    }
+
+    #[inline]
+    pub unsafe fn init(&self) {}
+
+    #[inline]
+    pub unsafe fn destroy(&self) {}
+
+    pub unsafe fn try_lock(&self) -> bool {
+        let this_thread = current_thread_unique_ptr();
+        if self.owner.load(Relaxed) == this_thread {
+            self.increment_lock_count();
+            true
+        } else if self.mutex.try_lock() {
+            self.owner.store(this_thread, Relaxed);
+            debug_assert_eq!(*self.lock_count.get(), 0);
+            *self.lock_count.get() = 1;
+            true
+        } else {
+            false
+        }
+    }
+
+    pub unsafe fn lock(&self) {
+        let this_thread = current_thread_unique_ptr();
+        if self.owner.load(Relaxed) == this_thread {
+            self.increment_lock_count();
+        } else {
+            self.mutex.lock();
+            self.owner.store(this_thread, Relaxed);
+            debug_assert_eq!(*self.lock_count.get(), 0);
+            *self.lock_count.get() = 1;
+        }
+    }
+
+    unsafe fn increment_lock_count(&self) {
+        *self.lock_count.get() = (*self.lock_count.get())
+            .checked_add(1)
+            .expect("lock count overflow in reentrant mutex");
+    }
+
+    pub unsafe fn unlock(&self) {
+        *self.lock_count.get() -= 1;
+        if *self.lock_count.get() == 0 {
+            self.owner.store(0, Relaxed);
+            self.mutex.unlock();
+        }
+    }
+}
+
+/// Get an address that is unique per running thread.
+///
+/// This can be used as a non-null usize-sized ID.
+pub fn current_thread_unique_ptr() -> usize {
+    // Use a non-drop type to make sure it's still available during thread destruction.
+    thread_local! { static X: u8 = const { 0 } }
+    X.with(|x| <*const _>::addr(x))
+}
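For illustration only (not part of this commit): a standalone sketch of the thread-identification trick used by `current_thread_unique_ptr()` above, written against the public `thread_local!` API. The `thread_id_addr` name is made up here; the idea is simply that the address of a thread-local is stable within a thread and distinct between live threads.

```rust
use std::thread;

// Same trick as current_thread_unique_ptr(): use the address of a
// thread-local as a per-thread identifier.
fn thread_id_addr() -> usize {
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| x as *const u8 as usize)
}

fn main() {
    let main_id = thread_id_addr();
    // Stable within one thread.
    assert_eq!(main_id, thread_id_addr());

    // Distinct from another live thread's identifier.
    let other_id = thread::spawn(thread_id_addr).join().unwrap();
    assert_ne!(main_id, other_id);
}
```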
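And a usage-level illustration of why this lock needs to be reentrant in the first place, again not part of the commit: with a reentrant mutex behind `stdout()`, taking the lock twice on the same thread (for example, nested locking, or printing while already holding the lock) does not deadlock. This assumes a recent Rust where `Stdout::lock` returns a `'static` guard.

```rust
use std::io::{stdout, Write};

fn main() {
    let mut a = stdout().lock();
    // Locking stdout again on the same thread just bumps the lock count
    // instead of deadlocking, because the underlying mutex is reentrant.
    let mut b = stdout().lock();
    writeln!(a, "first handle").unwrap();
    writeln!(b, "second handle").unwrap();
}
```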
@@ -5,11 +5,7 @@ cfg_if::cfg_if! {
     ))] {
         mod futex;
         mod futex_rwlock;
-        #[allow(dead_code)]
-        mod pthread_mutex; // Only used for PthreadMutexAttr, needed by pthread_remutex.
-        mod pthread_remutex; // FIXME: Implement this using a futex
-        pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar};
-        pub use pthread_remutex::ReentrantMutex;
+        pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar, ReentrantMutex};
         pub use futex_rwlock::{RwLock, MovableRwLock};
     } else {
         mod pthread_mutex;
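The module selection above relies on the `cfg_if!` macro: the first branch whose `cfg` matches is compiled, the rest are discarded. A minimal sketch of that pattern, assuming the external `cfg_if` crate (the branch contents here are made up, not std's):

```rust
cfg_if::cfg_if! {
    if #[cfg(target_os = "linux")] {
        // Compiled only on Linux.
        fn backend() -> &'static str { "futex" }
    } else {
        // Compiled everywhere else.
        fn backend() -> &'static str { "pthread" }
    }
}

fn main() {
    println!("lock backend: {}", backend());
}
```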
@@ -215,6 +215,7 @@ where
     }
 }

+#[allow(dead_code)] // Not used on all platforms.
 pub fn cvt_nz(error: libc::c_int) -> crate::io::Result<()> {
     if error == 0 { Ok(()) } else { Err(crate::io::Error::from_raw_os_error(error)) }
 }
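For context, `cvt_nz` converts the return value of calls like the pthread functions, which report failure by returning a nonzero error code rather than setting `errno`, into an `io::Result`. A standalone sketch of the same idea, assuming the `libc` crate on a Unix target (not std's internal code):

```rust
use std::io;

// Nonzero return code -> io::Error, mirroring cvt_nz above.
fn cvt_nz(error: libc::c_int) -> io::Result<()> {
    if error == 0 { Ok(()) } else { Err(io::Error::from_raw_os_error(error)) }
}

fn main() -> io::Result<()> {
    let mut mutex = libc::PTHREAD_MUTEX_INITIALIZER;
    unsafe {
        // pthread_mutex_lock/unlock return an error code directly.
        cvt_nz(libc::pthread_mutex_lock(&mut mutex))?;
        cvt_nz(libc::pthread_mutex_unlock(&mut mutex))?;
    }
    Ok(())
}
```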
@@ -179,6 +179,7 @@ macro_rules! __thread_local_inner {
     // used to generate the `LocalKey` value for const-initialized thread locals
     (@key $t:ty, const $init:expr) => {{
         #[cfg_attr(not(windows), inline(always))] // see comments below
+        #[deny(unsafe_op_in_unsafe_fn)]
         unsafe fn __getit(
             _init: $crate::option::Option<&mut $crate::option::Option<$t>>,
         ) -> $crate::option::Option<&'static $t> {
@@ -193,7 +194,7 @@ macro_rules! __thread_local_inner {
             #[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
             {
                 static mut VAL: $t = INIT_EXPR;
-                $crate::option::Option::Some(&VAL)
+                unsafe { $crate::option::Option::Some(&VAL) }
             }

             // If the platform has support for `#[thread_local]`, use it.
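The `unsafe { ... }` wrapping above follows from `#[deny(unsafe_op_in_unsafe_fn)]`: once that lint is denied, the body of an `unsafe fn` is no longer an implicit unsafe block, so each unsafe operation needs its own explicit block. A small standalone illustration of the lint's effect (not from the commit):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

static mut COUNTER: u32 = 0;

unsafe fn bump() -> u32 {
    // Without this explicit block the static mut access would be rejected,
    // because the unsafe fn body no longer counts as an unsafe block.
    unsafe {
        COUNTER += 1;
        COUNTER
    }
}

fn main() {
    // Calling an unsafe fn still requires an unsafe block at the call site.
    let n = unsafe { bump() };
    println!("{n}");
}
```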