// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{
    init::PinInit,
    pin_init,
    str::CStr,
    types::{NotThreadSafe, Opaque, ScopeGuard},
};
use core::{cell::UnsafeCell, marker::PhantomPinned};
use macros::pin_data;

pub mod mutex;
pub mod spinlock;

pub(super) mod global;
pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
///   lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
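///
/// # Examples
///
/// A minimal sketch of what an implementation might look like, assuming a hypothetical C
/// primitive `struct foo_lock` with `foo_lock_init()`, `foo_lock()` and `foo_unlock()` helpers
/// exposed through `bindings` (none of these exist in the kernel; they only illustrate the shape
/// of a backend):
///
/// ```ignore
/// pub struct FooBackend;
///
/// // SAFETY: The hypothetical `foo_lock()`/`foo_unlock()` helpers provide mutual exclusion, and
/// // `relock` is not overridden, so the default implementation reuses the same locking method.
/// unsafe impl Backend for FooBackend {
///     type State = bindings::foo_lock;
///     // Nothing needs to be carried from `lock` to `unlock` for this primitive.
///     type GuardState = ();
///
///     unsafe fn init(
///         ptr: *mut Self::State,
///         name: *const crate::ffi::c_char,
///         key: *mut bindings::lock_class_key,
///     ) {
///         // SAFETY: The safety requirements of `init` guarantee that `ptr` is valid for writes
///         // and that `name` and `key` remain valid for reads.
///         unsafe { bindings::foo_lock_init(ptr, name, key) };
///     }
///
///     unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
///         // SAFETY: The safety requirements of `lock` guarantee that the lock is initialised.
///         unsafe { bindings::foo_lock(ptr) };
///     }
///
///     unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
///         // SAFETY: The safety requirements of `unlock` guarantee that the caller owns the lock.
///         unsafe { bindings::foo_unlock(ptr) };
///     }
///
///     // `try_lock` and `assert_is_held` would wrap the corresponding C helpers in the same way.
/// }
/// ```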
pub unsafe trait Backend {
    /// The state required by the lock.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const crate::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Tries to acquire the lock.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }

    /// Asserts that the lock is held using lockdep.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn assert_is_held(ptr: *mut Self::State);
}

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
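///
/// # Examples
///
/// A sketch of a lock protecting a field of a pinned struct, using the [`Mutex`] alias from the
/// [`mutex`] module (marked `ignore` here; see that module for the complete, tested example):
///
/// ```ignore
/// use kernel::sync::{new_mutex, Mutex};
///
/// #[pin_data]
/// struct Counter {
///     #[pin]
///     value: Mutex<u32>,
/// }
///
/// impl Counter {
///     fn new() -> impl PinInit<Self> {
///         pin_init!(Self {
///             // `new_mutex!` constructs the initialiser for a `Mutex<u32>`, i.e. a `Lock` with
///             // the mutex backend.
///             value <- new_mutex!(0),
///         })
///     }
///
///     fn increment(&self) -> u32 {
///         let mut guard = self.value.lock();
///         *guard += 1;
///         *guard
///     }
/// }
/// ```
///
/// [`Mutex`]: mutex::Mutex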
#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    pub(crate) data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
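    ///
    /// The returned initialiser must be pinned in place, for example on the stack or inside a
    /// pinned allocation. A sketch using the mutex backend and the hypothetical name `my_lock`
    /// (marked `ignore`):
    ///
    /// ```ignore
    /// stack_pin_init!(let my_lock = Lock::<_, MutexBackend>::new(
    ///     42,
    ///     c_str!("my_lock"),
    ///     static_lock_class!(),
    /// ));
    /// assert_eq!(*my_lock.lock(), 42);
    /// ```
    ///
    /// In practice, the `new_mutex!` and `new_spinlock!` macros are used instead of calling this
    /// function directly, as they supply the name and lock class automatically.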
    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
        pin_init!(Self {
            data: UnsafeCell::new(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}

impl<B: Backend> Lock<(), B> {
    /// Constructs a [`Lock`] from a raw pointer.
    ///
    /// This can be useful for interacting with a lock which was initialised outside of Rust.
    ///
    /// # Safety
    ///
    /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
    /// the whole lifetime of `'a`.
    ///
    /// [`State`]: Backend::State
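    ///
    /// # Examples
    ///
    /// A sketch of wrapping a C-managed spinlock; the `get_foo_lock()` binding is hypothetical
    /// and only illustrates where such a pointer might come from (marked `ignore`):
    ///
    /// ```ignore
    /// // `ptr` points to a `spinlock_t` that C code has already initialised and keeps alive.
    /// let ptr: *mut bindings::spinlock_t = unsafe { bindings::get_foo_lock() };
    ///
    /// // SAFETY: `ptr` is valid and initialised for the lifetime inferred for the reference.
    /// let lock = unsafe { SpinLock::<()>::from_raw(ptr) };
    /// let guard = lock.lock();
    /// ```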
    pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
        // SAFETY:
        // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
        // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
        //   the struct
        // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
        //   `B::State`.
        unsafe { &*ptr.cast() }
    }
}

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }

    /// Tries to acquire the lock.
    ///
    /// Returns a guard that can be used to access the data protected by the lock if successful.
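    ///
    /// # Examples
    ///
    /// A sketch of opportunistically updating the data only when the lock is free, assuming
    /// `my_lock` is a `Lock<u32, B>` for some backend `B` (marked `ignore`):
    ///
    /// ```ignore
    /// if let Some(mut guard) = my_lock.try_lock() {
    ///     // The lock was acquired; it is released when `guard` goes out of scope.
    ///     *guard += 1;
    /// } else {
    ///     // The lock is held by someone else; do not block.
    /// }
    /// ```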
    pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
    }
}

/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
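///
/// # Examples
///
/// A sketch of the usual guard lifecycle, assuming `my_lock` is a `Lock<u32, B>` (marked
/// `ignore`):
///
/// ```ignore
/// let mut guard = my_lock.lock();
/// // The guard derefs to the protected data.
/// *guard += 1;
/// // Dropping the guard (explicitly, or at the end of its scope) releases the lock.
/// drop(guard);
/// ```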
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
    _not_send: NotThreadSafe,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
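    /// Releases the lock, runs `cb`, then reacquires the lock before returning.
    ///
    /// This is useful for APIs (e.g., condition variable waits) that need to temporarily drop the
    /// lock while the guard is still alive.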
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        let _relock = ScopeGuard::new(||
            // SAFETY: The lock was just unlocked above and is being relocked now.
            unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }
}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new immutable lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
        unsafe { B::assert_is_held(lock.state.get()) };

        Self {
            lock,
            state,
            _not_send: NotThreadSafe,
        }
    }
}