esp_sync/lib.rs

//! Synchronization primitives for ESP32 devices
//!
//! ## Feature Flags
#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/46717278")]
#![cfg_attr(xtensa, feature(asm_experimental_arch))]
#![deny(missing_docs, rust_2018_idioms, rustdoc::all)]
// Don't trip up on broken/private links when running semver-checks
#![cfg_attr(
    semver_checks,
    allow(rustdoc::private_intra_doc_links, rustdoc::broken_intra_doc_links)
)]
#![no_std]

// MUST be the first module
mod fmt;

use core::{cell::UnsafeCell, marker::PhantomData};

pub mod raw;

use raw::{RawLock, SingleCoreInterruptLock};

/// Opaque token that can be used to release a lock.
// The interpretation of this value depends on the lock type that created it,
// but bit #31 is reserved for the reentry flag.
//
// Xtensa: PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can
// use bit #31 as our reentry flag.
// We can assume the reserved bit is 0, otherwise rsil - wsr pairings would be
// undefined behavior. Quoting the ISA summary, table 64:
// Writing a non-zero value to these fields results in undefined processor
// behavior.
//
// RISC-V: we either get the restore state from bit 3 of mstatus, or
// we create the restore state from the current Priority, which is at most 31.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32, PhantomData<*const ()>);

impl RestoreState {
    const REENTRY_FLAG: u32 = 1 << 31;

    /// Creates a new RestoreState from a raw inner state.
    ///
    /// # Safety
    ///
    /// The `inner` value must be appropriate for the [`RawLock`] implementation that creates it.
    pub const unsafe fn new(inner: u32) -> Self {
        Self(inner, PhantomData)
    }

    /// Returns an invalid RestoreState.
    ///
    /// Note that due to the safety contract of [`RawLock::enter`]/[`RawLock::exit`], you must not
    /// pass a `RestoreState` obtained from this method to [`RawLock::exit`].
    pub const fn invalid() -> Self {
        Self(0, PhantomData)
    }

    fn mark_reentry(&mut self) {
        self.0 |= Self::REENTRY_FLAG;
    }

    fn is_reentry(self) -> bool {
        self.0 & Self::REENTRY_FLAG != 0
    }

    /// Returns the raw value used to create this RestoreState.
    pub fn inner(self) -> u32 {
        self.0
    }
}

#[cfg(single_core)]
mod single_core {
    use core::cell::Cell;

    #[repr(transparent)]
    pub(super) struct LockedState {
        locked: Cell<bool>,
    }

    impl LockedState {
        pub const fn new() -> Self {
            Self {
                locked: Cell::new(false),
            }
        }

        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            let mut tkn = unsafe { lock.enter() };
            let was_locked = self.locked.replace(true);
            if was_locked {
                tkn.mark_reentry();
            }
            tkn
        }

        /// # Safety
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        pub unsafe fn unlock(&self) {
            self.locked.set(false)
        }
    }
}

#[cfg(multi_core)]
mod multi_core {
    use core::sync::atomic::{AtomicUsize, Ordering};

    // Safety: Ensure that when adding new chips `raw_core` doesn't return this
    // value.
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    #[inline]
    fn thread_id() -> usize {
        // This method must never return UNUSED_THREAD_ID_VALUE
        cfg_if::cfg_if! {
            if #[cfg(all(multi_core, riscv))] {
                riscv::register::mhartid::read()
            } else if #[cfg(all(multi_core, xtensa))] {
                (xtensa_lx::get_processor_id() & 0x2000) as usize
            } else {
                0
            }
        }
    }

    #[repr(transparent)]
    pub(super) struct LockedState {
        owner: AtomicUsize,
    }

    impl LockedState {
        #[inline]
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        #[inline]
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // We acquire the lock inside an interrupt-free context to prevent a subtle
            // race condition:
            // if an interrupt handler tries to lock the same resource, it could win while
            // the current thread holds the lock but isn't yet in an interrupt-free context.
            // If we maintain non-reentrant semantics, this situation would panic.
            // If we allow reentrancy, the interrupt handler would technically be a different
            // context with the same `current_thread_id`, so it would be allowed to lock the
            // resource in a theoretically incorrect way.
            let try_lock = || {
                let mut tkn = unsafe { lock.enter() };

                let current_thread_id = thread_id();

                let try_lock_result = self
                    .owner
                    .compare_exchange(
                        UNUSED_THREAD_ID_VALUE,
                        current_thread_id,
                        Ordering::Acquire,
                        Ordering::Relaxed,
                    )
                    .map(|_| ());

                match try_lock_result {
                    Ok(()) => Some(tkn),
                    Err(owner) if owner == current_thread_id => {
                        tkn.mark_reentry();
                        Some(tkn)
                    }
                    Err(_) => {
                        unsafe { lock.exit(tkn) };
                        None
                    }
                }
            };

            loop {
                if let Some(token) = try_lock() {
                    return token;
                }
            }
        }

        /// # Safety
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        #[inline]
        pub unsafe fn unlock(&self) {
            #[cfg(debug_assertions)]
            if self.owner.load(Ordering::Relaxed) != thread_id() {
                panic_attempt_unlock_not_owned();
            }
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }

    #[cfg(debug_assertions)]
    #[inline(never)]
    #[cold]
    fn panic_attempt_unlock_not_owned() -> ! {
        panic!("tried to unlock a mutex locked on a different thread");
    }
}

#[cfg(multi_core)]
use multi_core::LockedState;
#[cfg(single_core)]
use single_core::LockedState;

/// A generic lock that wraps a [`RawLock`] implementation and tracks
/// whether the caller has locked recursively.
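///
/// # Example
///
/// A minimal usage sketch wrapping this crate's `raw::SingleCoreInterruptLock` (any other
/// [`RawLock`] implementation works the same way); illustrative only, not compiled as a doctest:
///
/// ```ignore
/// use esp_sync::{GenericRawMutex, raw::SingleCoreInterruptLock};
///
/// static LOCK: GenericRawMutex<SingleCoreInterruptLock> =
///     GenericRawMutex::new(SingleCoreInterruptLock);
///
/// let answer = LOCK.lock(|| {
///     // The closure runs with the lock held.
///     21 * 2
/// });
/// assert_eq!(answer, 42);
/// ```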
pub struct GenericRawMutex<L: RawLock> {
    lock: L,
    inner: LockedState,
}

// Safety: LockedState ensures thread-safety
unsafe impl<L: RawLock> Sync for GenericRawMutex<L> {}

impl<L: RawLock> GenericRawMutex<L> {
    /// Create a new lock.
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            inner: LockedState::new(),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must release the locks in the reverse order they were acquired.
    unsafe fn acquire(&self) -> RestoreState {
        self.inner.lock(&self.lock)
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    unsafe fn release(&self, token: RestoreState) {
        if !token.is_reentry() {
            unsafe {
                self.inner.unlock();

                self.lock.exit(token)
            }
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_non_reentrant(self);
        f()
    }

    /// Runs the callback with this lock locked.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_reentrant(self);
        f()
    }
}

/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
    section, making it preferable for use in multi-core systems."#
)]
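///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// use esp_sync::RawMutex;
///
/// static LOCK: RawMutex = RawMutex::new();
///
/// let sum = LOCK.lock(|| {
///     // The closure runs with the lock held; interrupts are disabled on the
///     // current core for its duration.
///     1 + 2
/// });
/// assert_eq!(sum, 3);
/// ```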
pub struct RawMutex {
    inner: GenericRawMutex<SingleCoreInterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Create a new lock.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(SingleCoreInterruptLock),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must release the locks in the reverse order they were acquired.
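    ///
    /// # Example
    ///
    /// A sketch of manual acquire/release pairing (illustrative only, not compiled as a doctest):
    ///
    /// ```ignore
    /// use esp_sync::RawMutex;
    ///
    /// static LOCK: RawMutex = RawMutex::new();
    ///
    /// // SAFETY: the token is passed back to `release` on the same thread and the
    /// // acquire/release calls are strictly nested.
    /// unsafe {
    ///     let token = LOCK.acquire();
    ///     // ... access the resource protected by LOCK ...
    ///     LOCK.release(token);
    /// }
    /// ```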
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
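    ///
    /// # Example
    ///
    /// A sketch of the non-reentrant behaviour (illustrative only, not compiled as a doctest):
    ///
    /// ```ignore
    /// use esp_sync::RawMutex;
    ///
    /// static LOCK: RawMutex = RawMutex::new();
    ///
    /// LOCK.lock_non_reentrant(|| {
    ///     // Calling `LOCK.lock_non_reentrant(...)` again from inside this closure
    ///     // would panic; the reentrant `LOCK.lock(...)` would not.
    /// });
    /// ```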
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock_non_reentrant(f)
    }

    /// Runs the callback with this lock locked.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync_06::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync_07::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}
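
// The impls above let `RawMutex` back `embassy-sync`'s blocking mutex
// (`embassy_sync_06`/`embassy_sync_07` are this crate's renamed dependencies on the
// respective `embassy-sync` releases). Roughly, assuming the user depends on
// `embassy-sync` directly, this looks like:
//
//     use core::cell::Cell;
//     use embassy_sync::blocking_mutex::Mutex;
//     use esp_sync::RawMutex;
//
//     static COUNTER: Mutex<RawMutex, Cell<u32>> = Mutex::new(Cell::new(0));
//
//     fn bump() {
//         COUNTER.lock(|counter| counter.set(counter.get() + 1));
//     }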

/// A non-reentrant (panicking) mutex.
///
/// This is largely equivalent to a `critical_section::Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
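///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled as a doctest):
///
/// ```ignore
/// use esp_sync::NonReentrantMutex;
///
/// static COUNTER: NonReentrantMutex<u32> = NonReentrantMutex::new(0);
///
/// COUNTER.with(|counter| {
///     // The closure gets `&mut u32`; calling `COUNTER.with` again from inside
///     // this closure would panic.
///     *counter += 1;
/// });
/// ```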
pub struct NonReentrantMutex<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> NonReentrantMutex<T> {
    /// Create a new instance
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Provide exclusive access to the protected data to the given closure.
    ///
    /// Calling this reentrantly will panic.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        self.lock_state
            .lock_non_reentrant(|| f(unsafe { &mut *self.data.get() }))
    }
}

unsafe impl<T: Send> Send for NonReentrantMutex<T> {}
unsafe impl<T: Send> Sync for NonReentrantMutex<T> {}

struct LockGuard<'a, L: RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}

impl<'a, L: RawLock> LockGuard<'a, L> {
    fn new_non_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        if this.token.is_reentry() {
            panic_lock_not_reentrant();
        }
        this
    }

    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // SAFETY: the same lock will be released when dropping the guard.
            // This ensures that the lock is released on the same thread, in the reverse
            // order it was acquired.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}

#[inline(never)]
#[cold]
fn panic_lock_not_reentrant() -> ! {
    panic!("lock is not reentrant");
}