From 9c98dd74b6bc97259863aab206d63cfb17b10adc Mon Sep 17 00:00:00 2001
From: Wedson Almeida Filho
Date: Tue, 11 Apr 2023 02:45:32 -0300
Subject: [PATCH 01/10] rust: sync: introduce `Lock` and `Guard`

They are generic Rust implementations of a lock and a lock guard that
contain the code common to all locks. Different backends will be
introduced in subsequent commits.

Reviewed-by: Martin Rodriguez Reboredo
Suggested-by: Gary Guo
Signed-off-by: Wedson Almeida Filho
Link: https://lore.kernel.org/r/20230411054543.21278-2-wedsonaf@gmail.com
Signed-off-by: Miguel Ojeda
---
 rust/kernel/sync.rs      |   2 +-
 rust/kernel/sync/lock.rs | 162 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 163 insertions(+), 1 deletion(-)
 create mode 100644 rust/kernel/sync/lock.rs

diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 541d235ffbebe5..81b0998eaa18b6 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -8,6 +8,7 @@ use crate::types::Opaque;
 
 mod arc;
+pub mod lock;
 
 pub use arc::{Arc, ArcBorrow, UniqueArc};
 
@@ -25,7 +26,6 @@ impl LockClassKey {
         Self(Opaque::uninit())
     }
 
-    #[allow(dead_code)]
     pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
         self.0.get()
     }
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
new file mode 100644
index 00000000000000..1a8ecccf4f24e0
--- /dev/null
+++ b/rust/kernel/sync/lock.rs
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic kernel lock and guard.
+//!
+//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
+//! spinlocks, raw spinlocks) to be provided with minimal effort.
+
+use super::LockClassKey;
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
+use macros::pin_data;
+
+/// The "backend" of a lock.
+///
+/// It is the actual implementation of the lock, so that the patterns common to all locks do not
+/// need to be repeated in each one.
+///
+/// # Safety
+///
+/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
+///   is owned, that is, between calls to `lock` and `unlock`.
+pub unsafe trait Backend {
+    /// The state required by the lock.
+    type State;
+
+    /// The state required to be kept between lock and unlock.
+    type GuardState;
+
+    /// Initialises the lock.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be valid for writes for the duration of the call, while `name` and `key` must
+    /// remain valid for reads indefinitely.
+    unsafe fn init(
+        ptr: *mut Self::State,
+        name: *const core::ffi::c_char,
+        key: *mut bindings::lock_class_key,
+    );
+
+    /// Acquires the lock, making the caller its owner.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that [`Backend::init`] has been previously called.
+    #[must_use]
+    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;
+
+    /// Releases the lock, giving up its ownership.
+    ///
+    /// # Safety
+    ///
+    /// It must only be called by the current owner of the lock.
+    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);
+}
+
+/// A mutual exclusion primitive.
+///
+/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
+/// specified as the generic parameter `B`.
+#[pin_data]
+pub struct Lock<T: ?Sized, B: Backend> {
+    /// The kernel lock object.
+    #[pin]
+    state: Opaque<B::State>,
+
+    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
+    /// or config defined (e.g., spinlocks).
So we conservatively require them to be pinned in case + /// some architecture uses self-references now or in the future. + #[pin] + _pin: PhantomPinned, + + /// The data protected by the lock. + data: UnsafeCell, +} + +// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can. +unsafe impl Send for Lock {} + +// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the +// data it protects is `Send`. +unsafe impl Sync for Lock {} + +impl Lock { + /// Constructs a new lock initialiser. + #[allow(clippy::new_ret_no_self)] + pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit { + pin_init!(Self { + data: UnsafeCell::new(t), + _pin: PhantomPinned, + // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have + // static lifetimes so they live indefinitely. + state <- Opaque::ffi_init(|slot| unsafe { + B::init(slot, name.as_char_ptr(), key.as_ptr()) + }), + }) + } +} + +impl Lock { + /// Acquires the lock and gives the caller access to the data protected by it. + pub fn lock(&self) -> Guard<'_, T, B> { + // SAFETY: The constructor of the type calls `init`, so the existence of the object proves + // that `init` was called. + let state = unsafe { B::lock(self.state.get()) }; + // SAFETY: The lock was just acquired. + unsafe { Guard::new(self, state) } + } +} + +/// A lock guard. +/// +/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock +/// when a guard goes out of scope. It also provides a safe and convenient way to access the data +/// protected by the lock. +#[must_use = "the lock unlocks immediately when the guard is unused"] +pub struct Guard<'a, T: ?Sized, B: Backend> { + pub(crate) lock: &'a Lock, + pub(crate) state: B::GuardState, + _not_send: PhantomData<*mut ()>, +} + +// SAFETY: `Guard` is sync when the data protected by the lock is also sync. +unsafe impl Sync for Guard<'_, T, B> {} + +impl core::ops::Deref for Guard<'_, T, B> { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: The caller owns the lock, so it is safe to deref the protected data. + unsafe { &*self.lock.data.get() } + } +} + +impl core::ops::DerefMut for Guard<'_, T, B> { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: The caller owns the lock, so it is safe to deref the protected data. + unsafe { &mut *self.lock.data.get() } + } +} + +impl Drop for Guard<'_, T, B> { + fn drop(&mut self) { + // SAFETY: The caller owns the lock, so it is safe to unlock it. + unsafe { B::unlock(self.lock.state.get(), &self.state) }; + } +} + +impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> { + /// Constructs a new immutable lock guard. + /// + /// # Safety + /// + /// The caller must ensure that it owns the lock. + pub(crate) unsafe fn new(lock: &'a Lock, state: B::GuardState) -> Self { + Self { + lock, + state, + _not_send: PhantomData, + } + } +} From 540cbb2e0c247819946922590c84de61c8c188c2 Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Tue, 11 Apr 2023 02:45:33 -0300 Subject: [PATCH 02/10] rust: lock: introduce `Mutex` This is the `struct mutex` lock backend and allows Rust code to use the kernel mutex idiomatically. 
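For illustration, a minimal usage sketch (an editor's addition, not part of the patch; it assumes the doctest environment of the examples below, where `#[pin_data]` and the kernel prelude are in scope):

```rust
use kernel::{init::PinInit, new_mutex, pin_init, sync::Mutex};

#[pin_data]
struct Counter {
    #[pin]
    value: Mutex<u64>,
}

impl Counter {
    fn new() -> impl PinInit<Self> {
        pin_init!(Self {
            // `new_mutex!` supplies the name and static lock class that lockdep needs.
            value <- new_mutex!(0),
        })
    }

    fn inc(&self) -> u64 {
        // The guard derefs to the protected `u64` and unlocks when dropped.
        let mut guard = self.value.lock();
        *guard += 1;
        *guard
    }
}
```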
Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Will Deacon Cc: Waiman Long Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho Link: https://lore.kernel.org/r/20230411054543.21278-3-wedsonaf@gmail.com Signed-off-by: Miguel Ojeda --- rust/helpers.c | 7 ++ rust/kernel/sync.rs | 1 + rust/kernel/sync/lock.rs | 2 + rust/kernel/sync/lock/mutex.rs | 118 +++++++++++++++++++++++++++++++++ 4 files changed, 128 insertions(+) create mode 100644 rust/kernel/sync/lock/mutex.rs diff --git a/rust/helpers.c b/rust/helpers.c index 04b9be46e88778..86af099d2d6671 100644 --- a/rust/helpers.c +++ b/rust/helpers.c @@ -22,6 +22,7 @@ #include #include #include +#include __noreturn void rust_helper_BUG(void) { @@ -29,6 +30,12 @@ __noreturn void rust_helper_BUG(void) } EXPORT_SYMBOL_GPL(rust_helper_BUG); +void rust_helper_mutex_lock(struct mutex *lock) +{ + mutex_lock(lock); +} +EXPORT_SYMBOL_GPL(rust_helper_mutex_lock); + refcount_t rust_helper_REFCOUNT_INIT(int n) { return (refcount_t)REFCOUNT_INIT(n); diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index 81b0998eaa18b6..693f0b7f4e4fd7 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -11,6 +11,7 @@ mod arc; pub mod lock; pub use arc::{Arc, ArcBorrow, UniqueArc}; +pub use lock::mutex::Mutex; /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`. #[repr(transparent)] diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index 1a8ecccf4f24e0..98de109d9e40a7 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -10,6 +10,8 @@ use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque}; use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned}; use macros::pin_data; +pub mod mutex; + /// The "backend" of a lock. /// /// It is the actual implementation of the lock, without the need to repeat patterns used in all diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs new file mode 100644 index 00000000000000..923472f04af4bd --- /dev/null +++ b/rust/kernel/sync/lock/mutex.rs @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A kernel mutex. +//! +//! This module allows Rust code to use the kernel's `struct mutex`. + +use crate::bindings; + +/// Creates a [`Mutex`] initialiser with the given name and a newly-created lock class. +/// +/// It uses the name if one is given, otherwise it generates one based on the file name and line +/// number. +#[macro_export] +macro_rules! new_mutex { + ($inner:expr $(, $name:literal)? $(,)?) => { + $crate::sync::Mutex::new( + $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!()) + }; +} + +/// A mutual exclusion primitive. +/// +/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex, +/// only one at a time is allowed to progress, the others will block (sleep) until the mutex is +/// unlocked, at which point another thread will be allowed to wake up and make progress. +/// +/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts. +/// +/// Instances of [`Mutex`] need a lock class and to be pinned. The recommended way to create such +/// instances is with the [`pin_init`](crate::pin_init) and [`new_mutex`] macros. +/// +/// # Examples +/// +/// The following example shows how to declare, allocate and initialise a struct (`Example`) that +/// contains an inner struct (`Inner`) that is protected by a mutex. 
+/// +/// ``` +/// use kernel::{init::InPlaceInit, init::PinInit, new_mutex, pin_init, sync::Mutex}; +/// +/// struct Inner { +/// a: u32, +/// b: u32, +/// } +/// +/// #[pin_data] +/// struct Example { +/// c: u32, +/// #[pin] +/// d: Mutex, +/// } +/// +/// impl Example { +/// fn new() -> impl PinInit { +/// pin_init!(Self { +/// c: 10, +/// d <- new_mutex!(Inner { a: 20, b: 30 }), +/// }) +/// } +/// } +/// +/// // Allocate a boxed `Example`. +/// let e = Box::pin_init(Example::new())?; +/// assert_eq!(e.c, 10); +/// assert_eq!(e.d.lock().a, 20); +/// assert_eq!(e.d.lock().b, 30); +/// ``` +/// +/// The following example shows how to use interior mutability to modify the contents of a struct +/// protected by a mutex despite only having a shared reference: +/// +/// ``` +/// use kernel::sync::Mutex; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn example(m: &Mutex) { +/// let mut guard = m.lock(); +/// guard.a += 10; +/// guard.b += 20; +/// } +/// ``` +/// +/// [`struct mutex`]: ../../../../include/linux/mutex.h +pub type Mutex = super::Lock; + +/// A kernel `struct mutex` lock backend. +pub struct MutexBackend; + +// SAFETY: The underlying kernel `struct mutex` object ensures mutual exclusion. +unsafe impl super::Backend for MutexBackend { + type State = bindings::mutex; + type GuardState = (); + + unsafe fn init( + ptr: *mut Self::State, + name: *const core::ffi::c_char, + key: *mut bindings::lock_class_key, + ) { + // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and + // `key` are valid for read indefinitely. + unsafe { bindings::__mutex_init(ptr, name, key) } + } + + unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState { + // SAFETY: The safety requirements of this function ensure that `ptr` points to valid + // memory, and that it has been initialised before. + unsafe { bindings::mutex_lock(ptr) }; + } + + unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) { + // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the + // caller is the owner of the mutex. + unsafe { bindings::mutex_unlock(ptr) }; + } +} From 0af44c55f9682774a810a92017427496d30590b3 Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Tue, 11 Apr 2023 02:45:34 -0300 Subject: [PATCH 03/10] locking/spinlock: introduce spin_lock_init_with_key Rust cannot call C macros, so it has its own macro to create a new lock class when a spin lock is initialised. This new function allows Rust code to pass the lock class it generates to the C implementation. 
Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Will Deacon Cc: Waiman Long Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho Reviewed-by: Boqun Feng Link: https://lore.kernel.org/r/20230411054543.21278-4-wedsonaf@gmail.com Signed-off-by: Miguel Ojeda --- include/linux/spinlock.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index be48f1cb1878ba..cdc92d0951334d 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -327,12 +327,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) #ifdef CONFIG_DEBUG_SPINLOCK +static inline void spin_lock_init_with_key(spinlock_t *lock, const char *name, + struct lock_class_key *key) +{ + __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG); +} + # define spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ \ - __raw_spin_lock_init(spinlock_check(lock), \ - #lock, &__key, LD_WAIT_CONFIG); \ + spin_lock_init_with_key(lock, #lock, &__key); \ } while (0) #else From c1c1904675125a2a400bb70bbed0d4989ef75f96 Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Wed, 19 Apr 2023 14:44:26 -0300 Subject: [PATCH 04/10] rust: lock: introduce `SpinLock` This is the `spinlock_t` lock backend and allows Rust code to use the kernel spinlock idiomatically. Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Will Deacon Cc: Waiman Long Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho Link: https://lore.kernel.org/r/20230419174426.132207-1-wedsonaf@gmail.com Signed-off-by: Miguel Ojeda --- rust/helpers.c | 24 +++++++ rust/kernel/sync.rs | 2 +- rust/kernel/sync/lock.rs | 1 + rust/kernel/sync/lock/spinlock.rs | 116 ++++++++++++++++++++++++++++++ 4 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 rust/kernel/sync/lock/spinlock.rs diff --git a/rust/helpers.c b/rust/helpers.c index 86af099d2d6671..446e3cfdb935c1 100644 --- a/rust/helpers.c +++ b/rust/helpers.c @@ -23,6 +23,7 @@ #include #include #include +#include __noreturn void rust_helper_BUG(void) { @@ -36,6 +37,29 @@ void rust_helper_mutex_lock(struct mutex *lock) } EXPORT_SYMBOL_GPL(rust_helper_mutex_lock); +void rust_helper___spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG); +#else + spin_lock_init(lock); +#endif +} +EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init); + +void rust_helper_spin_lock(spinlock_t *lock) +{ + spin_lock(lock); +} +EXPORT_SYMBOL_GPL(rust_helper_spin_lock); + +void rust_helper_spin_unlock(spinlock_t *lock) +{ + spin_unlock(lock); +} +EXPORT_SYMBOL_GPL(rust_helper_spin_unlock); + refcount_t rust_helper_REFCOUNT_INIT(int n) { return (refcount_t)REFCOUNT_INIT(n); diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index 693f0b7f4e4fd7..c997ff7e951e5b 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -11,7 +11,7 @@ mod arc; pub mod lock; pub use arc::{Arc, ArcBorrow, UniqueArc}; -pub use lock::mutex::Mutex; +pub use lock::{mutex::Mutex, spinlock::SpinLock}; /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`. 
#[repr(transparent)] diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index 98de109d9e40a7..08adc37470331a 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -11,6 +11,7 @@ use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned}; use macros::pin_data; pub mod mutex; +pub mod spinlock; /// The "backend" of a lock. /// diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs new file mode 100644 index 00000000000000..a52d20fc975543 --- /dev/null +++ b/rust/kernel/sync/lock/spinlock.rs @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A kernel spinlock. +//! +//! This module allows Rust code to use the kernel's `spinlock_t`. + +use crate::bindings; + +/// Creates a [`SpinLock`] initialiser with the given name and a newly-created lock class. +/// +/// It uses the name if one is given, otherwise it generates one based on the file name and line +/// number. +#[macro_export] +macro_rules! new_spinlock { + ($inner:expr $(, $name:literal)? $(,)?) => { + $crate::sync::SpinLock::new( + $inner, $crate::optional_name!($($name)?), $crate::static_lock_class!()) + }; +} + +/// A spinlock. +/// +/// Exposes the kernel's [`spinlock_t`]. When multiple CPUs attempt to lock the same spinlock, only +/// one at a time is allowed to progress, the others will block (spinning) until the spinlock is +/// unlocked, at which point another CPU will be allowed to make progress. +/// +/// Instances of [`SpinLock`] need a lock class and to be pinned. The recommended way to create such +/// instances is with the [`pin_init`](crate::pin_init) and [`new_spinlock`] macros. +/// +/// # Examples +/// +/// The following example shows how to declare, allocate and initialise a struct (`Example`) that +/// contains an inner struct (`Inner`) that is protected by a spinlock. +/// +/// ``` +/// use kernel::{init::InPlaceInit, init::PinInit, new_spinlock, pin_init, sync::SpinLock}; +/// +/// struct Inner { +/// a: u32, +/// b: u32, +/// } +/// +/// #[pin_data] +/// struct Example { +/// c: u32, +/// #[pin] +/// d: SpinLock, +/// } +/// +/// impl Example { +/// fn new() -> impl PinInit { +/// pin_init!(Self { +/// c: 10, +/// d <- new_spinlock!(Inner { a: 20, b: 30 }), +/// }) +/// } +/// } +/// +/// // Allocate a boxed `Example`. +/// let e = Box::pin_init(Example::new())?; +/// assert_eq!(e.c, 10); +/// assert_eq!(e.d.lock().a, 20); +/// assert_eq!(e.d.lock().b, 30); +/// ``` +/// +/// The following example shows how to use interior mutability to modify the contents of a struct +/// protected by a spinlock despite only having a shared reference: +/// +/// ``` +/// use kernel::sync::SpinLock; +/// +/// struct Example { +/// a: u32, +/// b: u32, +/// } +/// +/// fn example(m: &SpinLock) { +/// let mut guard = m.lock(); +/// guard.a += 10; +/// guard.b += 20; +/// } +/// ``` +/// +/// [`spinlock_t`]: ../../../../include/linux/spinlock.h +pub type SpinLock = super::Lock; + +/// A kernel `spinlock_t` lock backend. +pub struct SpinLockBackend; + +// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. +unsafe impl super::Backend for SpinLockBackend { + type State = bindings::spinlock_t; + type GuardState = (); + + unsafe fn init( + ptr: *mut Self::State, + name: *const core::ffi::c_char, + key: *mut bindings::lock_class_key, + ) { + // SAFETY: The safety requirements ensure that `ptr` is valid for writes, and `name` and + // `key` are valid for read indefinitely. 
+        unsafe { bindings::__spin_lock_init(ptr, name, key) }
+    }
+
+    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
+        // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
+        // memory, and that it has been initialised before.
+        unsafe { bindings::spin_lock(ptr) }
+    }
+
+    unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
+        // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
+        // caller is the owner of the spinlock.
+        unsafe { bindings::spin_unlock(ptr) }
+    }
+}

From 39da92dc3f178c97e44cff632b2d6f25c7a9a29f Mon Sep 17 00:00:00 2001
From: Wedson Almeida Filho
Date: Tue, 11 Apr 2023 02:45:38 -0300
Subject: [PATCH 05/10] rust: introduce `ARef`

This is an owned reference to an object that is always ref-counted. It is
meant to be used in wrappers for C types that have their own ref counting
functions, for example, tasks, files, inodes, dentries, etc.

Reviewed-by: Martin Rodriguez Reboredo
Signed-off-by: Wedson Almeida Filho
Reviewed-by: Gary Guo
Link: https://lore.kernel.org/r/20230411054543.21278-8-wedsonaf@gmail.com
Signed-off-by: Miguel Ojeda
---
 rust/kernel/types.rs | 107 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)

diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index a4b1e3778da7d8..29db59d6119a9b 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -6,8 +6,10 @@ use crate::init::{self, PinInit};
 use alloc::boxed::Box;
 use core::{
     cell::UnsafeCell,
+    marker::PhantomData,
     mem::MaybeUninit,
     ops::{Deref, DerefMut},
+    ptr::NonNull,
 };
 
 /// Used to transfer ownership to and from foreign (non-Rust) languages.
@@ -268,6 +270,111 @@ impl<T> Opaque<T> {
     }
 }
 
+/// Types that are _always_ reference counted.
+///
+/// It allows such types to define their own custom ref increment and decrement functions.
+/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
+/// [`ARef`].
+///
+/// This is usually implemented by wrappers to existing structures on the C side of the code. For
+/// Rust code, the recommendation is to use [`Arc`](crate::sync::Arc) to create reference-counted
+/// instances of a type.
+///
+/// # Safety
+///
+/// Implementers must ensure that increments to the reference count keep the object alive in memory
+/// at least until matching decrements are performed.
+///
+/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
+/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
+/// alive.)
+pub unsafe trait AlwaysRefCounted {
+    /// Increments the reference count on the object.
+    fn inc_ref(&self);
+
+    /// Decrements the reference count on the object.
+    ///
+    /// Frees the object when the count reaches zero.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that there was a previous matching increment to the reference count,
+    /// and that the object is no longer used after its reference count is decremented (as it may
+    /// result in the object being freed), unless the caller owns another increment on the refcount
+    /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
+    /// [`AlwaysRefCounted::dec_ref`] once).
+    unsafe fn dec_ref(obj: NonNull<Self>);
+}
+
+/// An owned reference to an always-reference-counted object.
+///
+/// The object's reference count is automatically decremented when an instance of [`ARef`] is
+/// dropped.
It is also automatically incremented when a new instance is created via +/// [`ARef::clone`]. +/// +/// # Invariants +/// +/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In +/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count. +pub struct ARef { + ptr: NonNull, + _p: PhantomData, +} + +impl ARef { + /// Creates a new instance of [`ARef`]. + /// + /// It takes over an increment of the reference count on the underlying object. + /// + /// # Safety + /// + /// Callers must ensure that the reference count was incremented at least once, and that they + /// are properly relinquishing one increment. That is, if there is only one increment, callers + /// must not use the underlying object anymore -- it is only safe to do so via the newly + /// created [`ARef`]. + pub unsafe fn from_raw(ptr: NonNull) -> Self { + // INVARIANT: The safety requirements guarantee that the new instance now owns the + // increment on the refcount. + Self { + ptr, + _p: PhantomData, + } + } +} + +impl Clone for ARef { + fn clone(&self) -> Self { + self.inc_ref(); + // SAFETY: We just incremented the refcount above. + unsafe { Self::from_raw(self.ptr) } + } +} + +impl Deref for ARef { + type Target = T; + + fn deref(&self) -> &Self::Target { + // SAFETY: The type invariants guarantee that the object is valid. + unsafe { self.ptr.as_ref() } + } +} + +impl From<&T> for ARef { + fn from(b: &T) -> Self { + b.inc_ref(); + // SAFETY: We just incremented the refcount above. + unsafe { Self::from_raw(NonNull::from(b)) } + } +} + +impl Drop for ARef { + fn drop(&mut self) { + // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to + // decrement. + unsafe { T::dec_ref(self.ptr) }; + } +} + /// A sum type that always holds either a value of type `L` or `R`. pub enum Either { /// Constructs an instance of [`Either`] containing a value of type `L`. From fdc7f4bb7e06de9a00f65057b55245c70ef2ee93 Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Tue, 11 Apr 2023 02:45:39 -0300 Subject: [PATCH 06/10] rust: add basic `Task` It is an abstraction for C's `struct task_struct`. It implements `AlwaysRefCounted`, so the refcount of the wrapped object is managed safely on the Rust side. Cc: Ingo Molnar Cc: Peter Zijlstra Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho Link: https://lore.kernel.org/r/20230411054543.21278-9-wedsonaf@gmail.com Signed-off-by: Miguel Ojeda --- rust/bindings/bindings_helper.h | 1 + rust/helpers.c | 19 +++++++++ rust/kernel/lib.rs | 1 + rust/kernel/task.rs | 75 +++++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+) create mode 100644 rust/kernel/task.rs diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h index 75d85bd6c59211..03656a44a83f45 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -8,6 +8,7 @@ #include #include +#include /* `bindgen` gets confused at certain things. 
*/ const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL; diff --git a/rust/helpers.c b/rust/helpers.c index 446e3cfdb935c1..6e5b2c953d367d 100644 --- a/rust/helpers.c +++ b/rust/helpers.c @@ -24,6 +24,7 @@ #include #include #include +#include __noreturn void rust_helper_BUG(void) { @@ -60,6 +61,12 @@ void rust_helper_spin_unlock(spinlock_t *lock) } EXPORT_SYMBOL_GPL(rust_helper_spin_unlock); +int rust_helper_signal_pending(struct task_struct *t) +{ + return signal_pending(t); +} +EXPORT_SYMBOL_GPL(rust_helper_signal_pending); + refcount_t rust_helper_REFCOUNT_INIT(int n) { return (refcount_t)REFCOUNT_INIT(n); @@ -96,6 +103,18 @@ long rust_helper_PTR_ERR(__force const void *ptr) } EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR); +void rust_helper_get_task_struct(struct task_struct *t) +{ + get_task_struct(t); +} +EXPORT_SYMBOL_GPL(rust_helper_get_task_struct); + +void rust_helper_put_task_struct(struct task_struct *t) +{ + put_task_struct(t); +} +EXPORT_SYMBOL_GPL(rust_helper_put_task_struct); + /* * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type * as the Rust `usize` type, so we can use it in contexts where Rust diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 2d7606135ef61c..ee27e10da479b2 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -44,6 +44,7 @@ mod static_assert; pub mod std_vendor; pub mod str; pub mod sync; +pub mod task; pub mod types; #[doc(hidden)] diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs new file mode 100644 index 00000000000000..d70cad13195601 --- /dev/null +++ b/rust/kernel/task.rs @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Tasks (threads and processes). +//! +//! C header: [`include/linux/sched.h`](../../../../include/linux/sched.h). + +use crate::{bindings, types::Opaque}; +use core::ptr; + +/// Wraps the kernel's `struct task_struct`. +/// +/// # Invariants +/// +/// All instances are valid tasks created by the C portion of the kernel. +/// +/// Instances of this type are always ref-counted, that is, a call to `get_task_struct` ensures +/// that the allocation remains valid at least until the matching call to `put_task_struct`. +#[repr(transparent)] +pub struct Task(pub(crate) Opaque); + +// SAFETY: It's OK to access `Task` through references from other threads because we're either +// accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly +// synchronised by C code (e.g., `signal_pending`). +unsafe impl Sync for Task {} + +/// The type of process identifiers (PIDs). +type Pid = bindings::pid_t; + +impl Task { + /// Returns the group leader of the given task. + pub fn group_leader(&self) -> &Task { + // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always + // have a valid group_leader. + let ptr = unsafe { *ptr::addr_of!((*self.0.get()).group_leader) }; + + // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`, + // and given that a task has a reference to its group leader, we know it must be valid for + // the lifetime of the returned task reference. + unsafe { &*ptr.cast() } + } + + /// Returns the PID of the given task. + pub fn pid(&self) -> Pid { + // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always + // have a valid pid. + unsafe { *ptr::addr_of!((*self.0.get()).pid) } + } + + /// Determines whether the given task has pending signals. + pub fn signal_pending(&self) -> bool { + // SAFETY: By the type invariant, we know that `self.0` is valid. 
+ unsafe { bindings::signal_pending(self.0.get()) != 0 } + } + + /// Wakes up the task. + pub fn wake_up(&self) { + // SAFETY: By the type invariant, we know that `self.0.get()` is non-null and valid. + // And `wake_up_process` is safe to be called for any valid task, even if the task is + // running. + unsafe { bindings::wake_up_process(self.0.get()) }; + } +} + +// SAFETY: The type invariants guarantee that `Task` is always ref-counted. +unsafe impl crate::types::AlwaysRefCounted for Task { + fn inc_ref(&self) { + // SAFETY: The existence of a shared reference means that the refcount is nonzero. + unsafe { bindings::get_task_struct(self.0.get()) }; + } + + unsafe fn dec_ref(obj: ptr::NonNull) { + // SAFETY: The safety requirements guarantee that the refcount is nonzero. + unsafe { bindings::put_task_struct(obj.cast().as_ptr()) } + } +} From 3aed05199948c7fab467ed5d6a0098cef927e111 Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Tue, 11 Apr 2023 02:45:40 -0300 Subject: [PATCH 07/10] rust: introduce `current` This allows Rust code to get a reference to the current task without having to increment the refcount, but still guaranteeing memory safety. Cc: Ingo Molnar Cc: Peter Zijlstra Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho Link: https://lore.kernel.org/r/20230411054543.21278-10-wedsonaf@gmail.com Signed-off-by: Miguel Ojeda --- rust/helpers.c | 6 ++++ rust/kernel/prelude.rs | 2 ++ rust/kernel/task.rs | 82 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/rust/helpers.c b/rust/helpers.c index 6e5b2c953d367d..2cb7b82053d818 100644 --- a/rust/helpers.c +++ b/rust/helpers.c @@ -103,6 +103,12 @@ long rust_helper_PTR_ERR(__force const void *ptr) } EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR); +struct task_struct *rust_helper_get_current(void) +{ + return current; +} +EXPORT_SYMBOL_GPL(rust_helper_get_current); + void rust_helper_get_task_struct(struct task_struct *t) { get_task_struct(t); diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs index fcdc511d2ce837..c28587d68ebc1a 100644 --- a/rust/kernel/prelude.rs +++ b/rust/kernel/prelude.rs @@ -36,3 +36,5 @@ pub use super::error::{code::*, Error, Result}; pub use super::{str::CStr, ThisModule}; pub use super::init::{InPlaceInit, Init, PinInit}; + +pub use super::current; diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs index d70cad13195601..526d29a0ae2786 100644 --- a/rust/kernel/task.rs +++ b/rust/kernel/task.rs @@ -5,7 +5,17 @@ //! C header: [`include/linux/sched.h`](../../../../include/linux/sched.h). use crate::{bindings, types::Opaque}; -use core::ptr; +use core::{marker::PhantomData, ops::Deref, ptr}; + +/// Returns the currently running task. +#[macro_export] +macro_rules! current { + () => { + // SAFETY: Deref + addr-of below create a temporary `TaskRef` that cannot outlive the + // caller. + unsafe { &*$crate::task::Task::current() } + }; +} /// Wraps the kernel's `struct task_struct`. /// @@ -15,6 +25,42 @@ use core::ptr; /// /// Instances of this type are always ref-counted, that is, a call to `get_task_struct` ensures /// that the allocation remains valid at least until the matching call to `put_task_struct`. 
+/// +/// # Examples +/// +/// The following is an example of getting the PID of the current thread with zero additional cost +/// when compared to the C version: +/// +/// ``` +/// let pid = current!().pid(); +/// ``` +/// +/// Getting the PID of the current process, also zero additional cost: +/// +/// ``` +/// let pid = current!().group_leader().pid(); +/// ``` +/// +/// Getting the current task and storing it in some struct. The reference count is automatically +/// incremented when creating `State` and decremented when it is dropped: +/// +/// ``` +/// use kernel::{task::Task, types::ARef}; +/// +/// struct State { +/// creator: ARef, +/// index: u32, +/// } +/// +/// impl State { +/// fn new() -> Self { +/// Self { +/// creator: current!().into(), +/// index: 0, +/// } +/// } +/// } +/// ``` #[repr(transparent)] pub struct Task(pub(crate) Opaque); @@ -27,6 +73,40 @@ unsafe impl Sync for Task {} type Pid = bindings::pid_t; impl Task { + /// Returns a task reference for the currently executing task/thread. + /// + /// The recommended way to get the current task/thread is to use the + /// [`current`](crate::current) macro because it is safe. + /// + /// # Safety + /// + /// Callers must ensure that the returned object doesn't outlive the current task/thread. + pub unsafe fn current() -> impl Deref { + struct TaskRef<'a> { + task: &'a Task, + _not_send: PhantomData<*mut ()>, + } + + impl Deref for TaskRef<'_> { + type Target = Task; + + fn deref(&self) -> &Self::Target { + self.task + } + } + + // SAFETY: Just an FFI call with no additional safety requirements. + let ptr = unsafe { bindings::get_current() }; + + TaskRef { + // SAFETY: If the current thread is still running, the current task is valid. Given + // that `TaskRef` is not `Send`, we know it cannot be transferred to another thread + // (where it could potentially outlive the caller). + task: unsafe { &*ptr.cast() }, + _not_send: PhantomData, + } + } + /// Returns the group leader of the given task. pub fn group_leader(&self) -> &Task { // SAFETY: By the type invariant, we know that `self.0` is a valid task. Valid tasks always From fd9670cd5f76df067d8a4b27e62ca111bc311114 Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Tue, 11 Apr 2023 02:45:43 -0300 Subject: [PATCH 08/10] rust: sync: introduce `LockedBy` This allows us to have data protected by a lock despite not being wrapped by it. Access is granted by providing evidence that the lock is held by the caller. Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho Reviewed-by: Benno Lossin Link: https://lore.kernel.org/r/20230411054543.21278-13-wedsonaf@gmail.com Signed-off-by: Miguel Ojeda --- rust/kernel/sync.rs | 2 + rust/kernel/sync/lock.rs | 2 +- rust/kernel/sync/locked_by.rs | 156 ++++++++++++++++++++++++++++++++++ 3 files changed, 159 insertions(+), 1 deletion(-) create mode 100644 rust/kernel/sync/locked_by.rs diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index c997ff7e951e5b..5393b0d7661908 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -9,9 +9,11 @@ use crate::types::Opaque; mod arc; pub mod lock; +mod locked_by; pub use arc::{Arc, ArcBorrow, UniqueArc}; pub use lock::{mutex::Mutex, spinlock::SpinLock}; +pub use locked_by::LockedBy; /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`. 
#[repr(transparent)] diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index 08adc37470331a..07b6d6bda05588 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -74,7 +74,7 @@ pub struct Lock { _pin: PhantomPinned, /// The data protected by the lock. - data: UnsafeCell, + pub(crate) data: UnsafeCell, } // SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can. diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs new file mode 100644 index 00000000000000..b17ee5cd98f3eb --- /dev/null +++ b/rust/kernel/sync/locked_by.rs @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! A wrapper for data protected by a lock that does not wrap it. + +use super::{lock::Backend, lock::Lock}; +use crate::build_assert; +use core::{cell::UnsafeCell, mem::size_of, ptr}; + +/// Allows access to some data to be serialised by a lock that does not wrap it. +/// +/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g., +/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not +/// possible. For example, if a container has a lock and some data in the contained elements needs +/// to be protected by the same lock. +/// +/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it +/// when the caller shows evidence that the 'external' lock is locked. It panics if the evidence +/// refers to the wrong instance of the lock. +/// +/// # Examples +/// +/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an +/// aggregate of all `InnerFile::bytes_used` and must be kept consistent; so we wrap `InnerFile` in +/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at +/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also +/// locked; we enforce at run time that the right `InnerDirectory` is locked. +/// +/// ``` +/// use kernel::sync::{LockedBy, Mutex}; +/// +/// struct InnerFile { +/// bytes_used: u64, +/// } +/// +/// struct File { +/// _ino: u32, +/// inner: LockedBy, +/// } +/// +/// struct InnerDirectory { +/// /// The sum of the bytes used by all files. +/// bytes_used: u64, +/// _files: Vec, +/// } +/// +/// struct Directory { +/// _ino: u32, +/// inner: Mutex, +/// } +/// +/// /// Prints `bytes_used` from both the directory and file. +/// fn print_bytes_used(dir: &Directory, file: &File) { +/// let guard = dir.inner.lock(); +/// let inner_file = file.inner.access(&guard); +/// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used); +/// } +/// +/// /// Increments `bytes_used` for both the directory and file. +/// fn inc_bytes_used(dir: &Directory, file: &File) { +/// let mut guard = dir.inner.lock(); +/// guard.bytes_used += 10; +/// +/// let file_inner = file.inner.access_mut(&mut guard); +/// file_inner.bytes_used += 10; +/// } +/// +/// /// Creates a new file. +/// fn new_file(ino: u32, dir: &Directory) -> File { +/// File { +/// _ino: ino, +/// inner: LockedBy::new(&dir.inner, InnerFile { bytes_used: 0 }), +/// } +/// } +/// ``` +pub struct LockedBy { + owner: *const U, + data: UnsafeCell, +} + +// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can. +unsafe impl Send for LockedBy {} + +// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the +// data it protects is `Send`. 
+unsafe impl<T: ?Sized + Send, U: ?Sized> Sync for LockedBy<T, U> {}
+
+impl<T, U> LockedBy<T, U> {
+    /// Constructs a new instance of [`LockedBy`].
+    ///
+    /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure
+    /// that the right owner is being used to access the protected data. If the owner is freed, the
+    /// data becomes inaccessible; if another instance of the owner is allocated *on the same
+    /// memory location*, the data becomes accessible again: none of this affects memory safety
+    /// because in any case at most one thread (or CPU) can access the protected data at a time.
+    pub fn new<B: Backend>(owner: &Lock<U, B>, data: T) -> Self {
+        build_assert!(
+            size_of::<Lock<U, B>>() > 0,
+            "The lock type cannot be a ZST because it may be impossible to distinguish instances"
+        );
+        Self {
+            owner: owner.data.get(),
+            data: UnsafeCell::new(data),
+        }
+    }
+}
+
+impl<T: ?Sized, U> LockedBy<T, U> {
+    /// Returns a reference to the protected data when the caller provides evidence (via a
+    /// reference) that the owner is locked.
+    ///
+    /// `U` cannot be a zero-sized type (ZST) because there are ways to get an `&U` that matches
+    /// the data protected by the lock without actually holding it.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `owner` is different from the data protected by the lock used in
+    /// [`new`](LockedBy::new).
+    pub fn access<'a>(&'a self, owner: &'a U) -> &'a T {
+        build_assert!(
+            size_of::<U>() > 0,
+            "`U` cannot be a ZST because `owner` wouldn't be unique"
+        );
+        if !ptr::eq(owner, self.owner) {
+            panic!("mismatched owners");
+        }
+
+        // SAFETY: `owner` is evidence that the owner is locked.
+        unsafe { &*self.data.get() }
+    }
+
+    /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+    /// mutable owner) that the owner is locked mutably.
+    ///
+    /// `U` cannot be a zero-sized type (ZST) because there are ways to get an `&mut U` that
+    /// matches the data protected by the lock without actually holding it.
+    ///
+    /// Showing a mutable reference to the owner is sufficient because we know no other references
+    /// can exist to it.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `owner` is different from the data protected by the lock used in
+    /// [`new`](LockedBy::new).
+    pub fn access_mut<'a>(&'a self, owner: &'a mut U) -> &'a mut T {
+        build_assert!(
+            size_of::<U>() > 0,
+            "`U` cannot be a ZST because `owner` wouldn't be unique"
+        );
+        if !ptr::eq(owner, self.owner) {
+            panic!("mismatched owners");
+        }
+
+        // SAFETY: `owner` is evidence that there is only one reference to the owner.
+        unsafe { &mut *self.data.get() }
+    }
+}

From 21d4e1e1f7ec176d84c04f373abf070015456f92 Mon Sep 17 00:00:00 2001
From: Wedson Almeida Filho
Date: Mon, 27 Mar 2023 15:43:08 -0300
Subject: [PATCH 09/10] rust: lock: add `Guard::do_unlocked`

It releases the lock, executes some function provided by the caller, then
reacquires the lock. This is preparation for the implementation of
condvars, which will sleep between unlocking and relocking.

We need an explicit `relock` method for primitives like `SpinLock` that
have an irqsave variant: we use the guard state to determine if the lock
was originally acquired with the regular `lock` function or `lock_irqsave`.
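To make the intended use concrete, a rough sketch follows (an editor's addition, not part of the patch). It mirrors what the condvar patch below actually does: crate-internal code (`do_unlocked` is `pub(crate)`) releases the lock around a sleep and relies on `Backend::relock` to reacquire it the same way:

```rust
// Sketch: sleep with the lock released, then return with it held again.
fn sleep_unlocked<T: ?Sized, B: Backend>(guard: &mut Guard<'_, T, B>) {
    guard.do_unlocked(|| {
        // SAFETY: No arguments, switches to another thread.
        unsafe { bindings::schedule() };
    });
}
```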
Reviewed-by: Martin Rodriguez Reboredo Signed-off-by: Wedson Almeida Filho --- rust/kernel/sync/lock.rs | 29 ++++++++++++++++++++++++++++- rust/kernel/sync/lock/spinlock.rs | 3 ++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs index 07b6d6bda05588..9cc5410fd81064 100644 --- a/rust/kernel/sync/lock.rs +++ b/rust/kernel/sync/lock.rs @@ -6,7 +6,7 @@ //! spinlocks, raw spinlocks) to be provided with minimal effort. use super::LockClassKey; -use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque}; +use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard}; use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned}; use macros::pin_data; @@ -22,6 +22,8 @@ pub mod spinlock; /// /// - Implementers must ensure that only one thread/CPU may access the protected data once the lock /// is owned, that is, between calls to `lock` and `unlock`. +/// - Implementers must also ensure that `relock` uses the same locking method as the original +/// lock operation. pub unsafe trait Backend { /// The state required by the lock. type State; @@ -55,6 +57,17 @@ pub unsafe trait Backend { /// /// It must only be called by the current owner of the lock. unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState); + + /// Reacquires the lock, making the caller its owner. + /// + /// # Safety + /// + /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or + /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now. + unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) { + // SAFETY: The safety requirements ensure that the lock is initialised. + *guard_state = unsafe { Self::lock(ptr) }; + } } /// A mutual exclusion primitive. @@ -126,6 +139,20 @@ pub struct Guard<'a, T: ?Sized, B: Backend> { // SAFETY: `Guard` is sync when the data protected by the lock is also sync. unsafe impl Sync for Guard<'_, T, B> {} +impl Guard<'_, T, B> { + #[allow(dead_code)] + pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) { + // SAFETY: The caller owns the lock, so it is safe to unlock it. + unsafe { B::unlock(self.lock.state.get(), &self.state) }; + + // SAFETY: The lock was just unlocked above and is being relocked now. + let _relock = + ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) }); + + cb(); + } +} + impl core::ops::Deref for Guard<'_, T, B> { type Target = T; diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs index a52d20fc975543..979b56464a4e9f 100644 --- a/rust/kernel/sync/lock/spinlock.rs +++ b/rust/kernel/sync/lock/spinlock.rs @@ -87,7 +87,8 @@ pub type SpinLock = super::Lock; /// A kernel `spinlock_t` lock backend. pub struct SpinLockBackend; -// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. +// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the +// default implementation that always calls the same locking method. unsafe impl super::Backend for SpinLockBackend { type State = bindings::spinlock_t; type GuardState = (); From a03c1c137b15a6c4976b51477881bca67d893cdd Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Sun, 26 Mar 2023 00:57:38 -0300 Subject: [PATCH 10/10] rust: sync: introduce `CondVar` This is the traditional condition variable or monitor synchronisation primitive. It is implemented with C's `wait_queue_head_t`. 
It allows users to release a lock and go to sleep while guaranteeing that
notifications won't be missed. This is achieved by enqueuing a wait entry
before releasing the lock.

Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Will Deacon
Cc: Waiman Long
Reviewed-by: Martin Rodriguez Reboredo
Signed-off-by: Wedson Almeida Filho
---
 rust/bindings/bindings_helper.h |   1 +
 rust/helpers.c                  |   7 ++
 rust/kernel/sync.rs             |   2 +
 rust/kernel/sync/condvar.rs     | 174 ++++++++++++++++++++++++++++++++
 rust/kernel/sync/lock.rs        |   1 -
 5 files changed, 184 insertions(+), 1 deletion(-)
 create mode 100644 rust/kernel/sync/condvar.rs

diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 03656a44a83f45..50e7a76d545509 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/refcount.h>
+#include <linux/wait.h>
 #include <linux/sched.h>
 
 /* `bindgen` gets confused at certain things. */
diff --git a/rust/helpers.c b/rust/helpers.c
index 2cb7b82053d818..81e80261d597c6 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -25,6 +25,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/sched/signal.h>
+#include <linux/wait.h>
 
 __noreturn void rust_helper_BUG(void)
 {
@@ -61,6 +62,12 @@ void rust_helper_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
 
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+	init_wait(wq_entry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_wait);
+
 int rust_helper_signal_pending(struct task_struct *t)
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 5393b0d7661908..d219ee518eff15 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -8,10 +8,12 @@ use crate::types::Opaque;
 
 mod arc;
+mod condvar;
 pub mod lock;
 mod locked_by;
 
 pub use arc::{Arc, ArcBorrow, UniqueArc};
+pub use condvar::CondVar;
 pub use lock::{mutex::Mutex, spinlock::SpinLock};
 pub use locked_by::LockedBy;
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
new file mode 100644
index 00000000000000..ed353399c4e56f
--- /dev/null
+++ b/rust/kernel/sync/condvar.rs
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A condition variable.
+//!
+//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition
+//! variable.
+
+use super::{lock::Backend, lock::Guard, LockClassKey};
+use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
+use core::marker::PhantomPinned;
+use macros::pin_data;
+
+/// Creates a [`CondVar`] initialiser with the given name and a newly-created lock class.
+#[macro_export]
+macro_rules! new_condvar {
+    ($($name:literal)?) => {
+        $crate::sync::CondVar::new($crate::optional_name!($($name)?), $crate::static_lock_class!())
+    };
+}
+
+/// A condition variable.
+///
+/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller
+/// to atomically release the given lock and go to sleep, reacquiring the lock when it wakes up.
+/// It wakes up when notified by another thread (via [`CondVar::notify_one`] or
+/// [`CondVar::notify_all`]) or because the thread received a signal. It may also wake up
+/// spuriously.
+///
+/// Instances of [`CondVar`] need a lock class and to be pinned. The recommended way to create such
+/// instances is with the [`pin_init`](crate::pin_init) and [`new_condvar`] macros.
+/// +/// # Examples +/// +/// The following is an example of using a condvar with a mutex: +/// +/// ``` +/// use kernel::sync::{CondVar, Mutex}; +/// use kernel::{new_condvar, new_mutex}; +/// +/// #[pin_data] +/// pub struct Example { +/// #[pin] +/// value: Mutex, +/// +/// #[pin] +/// value_changed: CondVar, +/// } +/// +/// /// Waits for `e.value` to become `v`. +/// fn wait_for_value(e: &Example, v: u32) { +/// let mut guard = e.value.lock(); +/// while *guard != v { +/// e.value_changed.wait_uninterruptible(&mut guard); +/// } +/// } +/// +/// /// Increments `e.value` and notifies all potential waiters. +/// fn increment(e: &Example) { +/// *e.value.lock() += 1; +/// e.value_changed.notify_all(); +/// } +/// +/// /// Allocates a new boxed `Example`. +/// fn new_example() -> Result>> { +/// Box::pin_init(pin_init!(Example { +/// value <- new_mutex!(0), +/// value_changed <- new_condvar!(), +/// })) +/// } +/// ``` +/// +/// [`struct wait_queue_head`]: ../../../include/linux/wait.h +#[pin_data] +pub struct CondVar { + #[pin] + pub(crate) wait_list: Opaque, + + /// A condvar needs to be pinned because it contains a [`struct list_head`] that is + /// self-referential, so it cannot be safely moved once it is initialised. + #[pin] + _pin: PhantomPinned, +} + +// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread. +#[allow(clippy::non_send_fields_in_send_ty)] +unsafe impl Send for CondVar {} + +// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads +// concurrently. +unsafe impl Sync for CondVar {} + +impl CondVar { + /// Constructs a new condvar initialiser. + #[allow(clippy::new_ret_no_self)] + pub fn new(name: &'static CStr, key: &'static LockClassKey) -> impl PinInit { + pin_init!(Self { + _pin: PhantomPinned, + // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have + // static lifetimes so they live indefinitely. + wait_list <- Opaque::ffi_init(|slot| unsafe { + bindings::__init_waitqueue_head(slot, name.as_char_ptr(), key.as_ptr()) + }), + }) + } + + fn wait_internal(&self, wait_state: u32, guard: &mut Guard<'_, T, B>) { + let wait = Opaque::::uninit(); + + // SAFETY: `wait` points to valid memory. + unsafe { bindings::init_wait(wait.get()) }; + + // SAFETY: Both `wait` and `wait_list` point to valid memory. + unsafe { + bindings::prepare_to_wait_exclusive(self.wait_list.get(), wait.get(), wait_state as _) + }; + + // SAFETY: No arguments, switches to another thread. + guard.do_unlocked(|| unsafe { bindings::schedule() }); + + // SAFETY: Both `wait` and `wait_list` point to valid memory. + unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) }; + } + + /// Releases the lock and waits for a notification in interruptible mode. + /// + /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the + /// thread to sleep, reacquiring the lock on wake up. It wakes up when notified by + /// [`CondVar::notify_one`] or [`CondVar::notify_all`], or when the thread receives a signal. + /// It may also wake up spuriously. + /// + /// Returns whether there is a signal pending. + #[must_use = "wait returns if a signal is pending, so the caller must check the return value"] + pub fn wait(&self, guard: &mut Guard<'_, T, B>) -> bool { + self.wait_internal(bindings::TASK_INTERRUPTIBLE, guard); + crate::current!().signal_pending() + } + + /// Releases the lock and waits for a notification in uninterruptible mode. 
+    /// Similar to [`CondVar::wait`], except that the wait is not interruptible. That is, the
+    /// thread won't wake up due to signals. It may, however, wake up spuriously.
+    pub fn wait_uninterruptible<T: ?Sized, B: Backend>(&self, guard: &mut Guard<'_, T, B>) {
+        self.wait_internal(bindings::TASK_UNINTERRUPTIBLE, guard)
+    }
+
+    /// Calls the kernel function to notify the appropriate number of threads with the given flags.
+    fn notify(&self, count: i32, flags: u32) {
+        // SAFETY: `wait_list` points to valid memory.
+        unsafe {
+            bindings::__wake_up(
+                self.wait_list.get(),
+                bindings::TASK_NORMAL,
+                count,
+                flags as _,
+            )
+        };
+    }
+
+    /// Wakes a single waiter up, if any.
+    ///
+    /// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
+    /// completely (as opposed to automatically waking up the next waiter).
+    pub fn notify_one(&self) {
+        self.notify(1, 0);
+    }
+
+    /// Wakes all waiters up, if any.
+    ///
+    /// This is not 'sticky' in the sense that if no thread is waiting, the notification is lost
+    /// completely (as opposed to automatically waking up the next waiter).
+    pub fn notify_all(&self) {
+        self.notify(0, 0);
+    }
+}
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 9cc5410fd81064..45edc60aa27bc7 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -140,7 +140,6 @@ pub struct Guard<'a, T: ?Sized, B: Backend> {
 unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
 
 impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
-    #[allow(dead_code)]
     pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
         // SAFETY: The caller owns the lock, so it is safe to unlock it.
         unsafe { B::unlock(self.lock.state.get(), &self.state) };
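Editor's note: the `relock` hook exists precisely so a backend can reacquire a lock with the same variant originally used. A purely hypothetical sketch follows — nothing below is part of this series, and the `spin_lock_irqsave`/`spin_unlock_irqrestore` bindings are assumed to exist via C helpers like the ones added earlier (they are C macros, so bindgen cannot bind them directly):

```rust
/// Hypothetical irqsave-aware spinlock backend: the guard state records the
/// saved interrupt flags so `unlock` and `relock` can match the original
/// acquisition variant.
pub struct SpinLockIrqBackend;

// SAFETY (sketch): the underlying `spinlock_t` ensures mutual exclusion, and
// `relock` reacquires the lock with the variant recorded in the guard state.
unsafe impl super::Backend for SpinLockIrqBackend {
    type State = bindings::spinlock_t;

    // `Some(flags)` when acquired with interrupts disabled, `None` otherwise.
    type GuardState = Option<core::ffi::c_ulong>;

    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    ) {
        // SAFETY: Same requirements as `SpinLockBackend::init`.
        unsafe { bindings::__spin_lock_init(ptr, name, key) }
    }

    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
        // Assumed helper: saves interrupt state, disables interrupts, locks.
        Some(unsafe { bindings::spin_lock_irqsave(ptr) })
    }

    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState) {
        match *guard_state {
            // Assumed helper: unlocks and restores the saved interrupt state.
            Some(flags) => unsafe { bindings::spin_unlock_irqrestore(ptr, flags) },
            None => unsafe { bindings::spin_unlock(ptr) },
        }
    }

    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // Reacquire using the same variant recorded by the original acquisition.
        *guard_state = match *guard_state {
            Some(_) => Some(unsafe { bindings::spin_lock_irqsave(ptr) }),
            None => {
                // SAFETY: Per `Backend::relock`, the lock is initialised and unlocked.
                unsafe { bindings::spin_lock(ptr) };
                None
            }
        };
    }
}
```

A real version would also need a separate `lock_irqsave` entry point so that `GuardState` can actually be `None`; the sketch only shows how the state threads through `unlock` and `relock`.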