// esp_sync/lib.rs

//! Synchronization primitives for ESP32 devices
//!
//! ## Feature Flags
#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/46717278")]
#![cfg_attr(xtensa, feature(asm_experimental_arch))]
#![deny(missing_docs, rust_2018_idioms, rustdoc::all)]
// Don't trip up on broken/private links when running semver-checks
#![cfg_attr(
    semver_checks,
    allow(rustdoc::private_intra_doc_links, rustdoc::broken_intra_doc_links)
)]
#![no_std]

// MUST be the first module
mod fmt;

use core::{cell::UnsafeCell, marker::PhantomData};

pub mod raw;

use raw::{RawLock, SingleCoreInterruptLock};

/// Opaque token that can be used to release a lock.
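///
/// # Example
///
/// A minimal sketch of creating and inspecting a token. The raw value here is
/// illustrative only; real tokens come from [`RawLock::enter`]:
///
/// ```rust
/// use esp_sync::RestoreState;
///
/// // SAFETY: for illustration only; a real value must be produced by the
/// // RawLock implementation that will later consume it.
/// let token = unsafe { RestoreState::new(0) };
/// assert_eq!(token.inner(), 0);
/// ```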
// The interpretation of this value depends on the lock type that created it,
// but bit #31 is reserved for the reentry flag.
//
// Xtensa: PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can
// use bit #31 as our reentry flag.
// We can assume the reserved bit is 0, otherwise rsil - wsr pairings would be
// undefined behavior. Quoting the ISA summary, table 64:
// "Writing a non-zero value to these fields results in undefined processor
// behavior."
//
// RISC-V: we either get the restore state from bit 3 of mstatus, or
// we create the restore state from the current priority, which is at most 31.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32, PhantomData<*const ()>);

impl RestoreState {
    const REENTRY_FLAG: u32 = 1 << 31;

    /// Creates a new RestoreState from a raw inner state.
    ///
    /// # Safety
    ///
    /// The `inner` value must be appropriate for the [RawLock] implementation that creates it.
    pub const unsafe fn new(inner: u32) -> Self {
        Self(inner, PhantomData)
    }

    /// Returns an invalid RestoreState.
    ///
    /// Note that due to the safety contract of [`RawLock::enter`]/[`RawLock::exit`], you must not
    /// pass a `RestoreState` obtained from this method to [`RawLock::exit`].
    pub const fn invalid() -> Self {
        Self(0, PhantomData)
    }

    #[inline]
    fn mark_reentry(&mut self) {
        self.0 |= Self::REENTRY_FLAG;
    }

    #[inline]
    fn is_reentry(self) -> bool {
        self.0 & Self::REENTRY_FLAG != 0
    }

    /// Returns the raw value used to create this RestoreState.
    #[inline]
    pub fn inner(self) -> u32 {
        self.0
    }
}

#[cfg(single_core)]
mod single_core {
    use core::cell::Cell;

    #[repr(transparent)]
    pub(super) struct LockedState {
        locked: Cell<bool>,
    }

    impl LockedState {
        pub const fn new() -> Self {
            Self {
                locked: Cell::new(false),
            }
        }

        #[inline]
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            let mut tkn = unsafe { lock.enter() };
            let was_locked = self.locked.replace(true);
            if was_locked {
                tkn.mark_reentry();
            }
            tkn
        }

        /// # Safety
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        #[inline]
        pub unsafe fn unlock(&self) {
            self.locked.set(false)
        }
    }
}

#[cfg(multi_core)]
mod multi_core {
    use core::sync::atomic::{AtomicUsize, Ordering};

    // Safety: Ensure that when adding new chips `raw_core` doesn't return this
    // value.
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    #[inline]
    fn thread_id() -> usize {
        // This method must never return UNUSED_THREAD_ID_VALUE
        cfg_if::cfg_if! {
            if #[cfg(all(multi_core, riscv))] {
                riscv::register::mhartid::read()
            } else if #[cfg(all(multi_core, xtensa))] {
                // Keep only the bit of the processor ID that differs between
                // the two cores.
                (xtensa_lx::get_processor_id() & 0x2000) as usize
            } else {
                0
            }
        }
    }

    #[repr(transparent)]
    pub(super) struct LockedState {
        owner: AtomicUsize,
    }

    impl LockedState {
        #[inline]
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        #[inline]
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // We acquire the lock inside an interrupt-free context to prevent a subtle
            // race condition:
            // In case an interrupt handler tries to lock the same resource, it could win
            // if the current thread is holding the lock but isn't yet in an interrupt-free
            // context.
            // If we maintained non-reentrant semantics, this situation would panic.
            // If we allowed reentrancy, the interrupt handler would technically be a
            // different context with the same `current_thread_id`, so it would be allowed
            // to lock the resource in a theoretically incorrect way.
            let try_lock = || {
                let mut tkn = unsafe { lock.enter() };

                let current_thread_id = thread_id();

                let try_lock_result = self
                    .owner
                    .compare_exchange(
                        UNUSED_THREAD_ID_VALUE,
                        current_thread_id,
                        Ordering::Acquire,
                        Ordering::Relaxed,
                    )
                    .map(|_| ());

                match try_lock_result {
                    Ok(()) => Some(tkn),
                    Err(owner) if owner == current_thread_id => {
                        // The current thread already holds the lock; flag the token
                        // so that unlocking is skipped on release.
                        tkn.mark_reentry();
                        Some(tkn)
                    }
                    Err(_) => {
                        // Another thread holds the lock; back off and retry.
                        unsafe { lock.exit(tkn) };
                        None
                    }
                }
            };

            loop {
                if let Some(token) = try_lock() {
                    return token;
                }
            }
        }

        /// # Safety
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        #[inline]
        pub unsafe fn unlock(&self) {
            #[cfg(debug_assertions)]
            if self.owner.load(Ordering::Relaxed) != thread_id() {
                panic_attempt_unlock_not_owned();
            }
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }

    #[cfg(debug_assertions)]
    #[inline(never)]
    #[cold]
    fn panic_attempt_unlock_not_owned() -> ! {
        panic!("tried to unlock a mutex locked on a different thread");
    }
}

#[cfg(multi_core)]
use multi_core::LockedState;
#[cfg(single_core)]
use single_core::LockedState;

/// A generic lock that wraps a [`RawLock`] implementation and tracks
/// whether the caller has locked recursively.
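///
/// # Example
///
/// A minimal sketch, using [`SingleCoreInterruptLock`] as the raw lock:
///
/// ```rust
/// use esp_sync::{GenericRawMutex, raw::SingleCoreInterruptLock};
///
/// static LOCK: GenericRawMutex<SingleCoreInterruptLock> =
///     GenericRawMutex::new(SingleCoreInterruptLock);
///
/// let result = LOCK.lock(|| {
///     // The lock is held while this closure runs.
///     1 + 1
/// });
/// assert_eq!(result, 2);
/// ```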
pub struct GenericRawMutex<L: RawLock> {
    lock: L,
    inner: LockedState,
}

// Safety: LockedState ensures thread-safety
unsafe impl<L: RawLock> Sync for GenericRawMutex<L> {}

impl<L: RawLock> GenericRawMutex<L> {
    /// Create a new lock.
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            inner: LockedState::new(),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must release the locks in the reverse order they were acquired.
    #[inline]
    unsafe fn acquire(&self) -> RestoreState {
        self.inner.lock(&self.lock)
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    #[inline]
    unsafe fn release(&self, token: RestoreState) {
        if !token.is_reentry() {
            unsafe {
                self.inner.unlock();

                self.lock.exit(token)
            }
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    #[inline]
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_non_reentrant(self);
        f()
    }

    /// Runs the callback with this lock locked.
    #[inline]
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_reentrant(self);
        f()
    }
}

/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
    section, making it preferable for use in multi-core systems."#
)]
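/// # Example
///
/// A minimal usage sketch:
///
/// ```rust
/// use esp_sync::RawMutex;
///
/// static LOCK: RawMutex = RawMutex::new();
///
/// let value = LOCK.lock(|| {
///     // Interrupts are disabled on the current core while this closure runs.
///     40 + 2
/// });
/// assert_eq!(value, 42);
/// ```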
pub struct RawMutex {
    inner: GenericRawMutex<SingleCoreInterruptLock>,
}

impl Default for RawMutex {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Create a new lock.
    #[inline]
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(SingleCoreInterruptLock),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must release the locks in the reverse order they were acquired.
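    ///
    /// # Example
    ///
    /// A sketch of manual lock management; prefer [`RawMutex::lock`] where
    /// possible:
    ///
    /// ```rust
    /// use esp_sync::RawMutex;
    ///
    /// static LOCK: RawMutex = RawMutex::new();
    ///
    /// // SAFETY: the token is passed back to `release`, and no other lock is
    /// // acquired or released in between.
    /// unsafe {
    ///     let token = LOCK.acquire();
    ///     // ... critical section ...
    ///     LOCK.release(token);
    /// }
    /// ```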
    #[inline]
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    #[inline]
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    #[inline]
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock_non_reentrant(f)
    }

    /// Runs the callback with this lock locked.
    #[inline]
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

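// The implementations below let `RawMutex` back an embassy-sync blocking
// mutex, once per supported embassy-sync version. A minimal usage sketch,
// assuming the `embassy-sync` dependency is available under the name
// `embassy_sync`:
//
// ```rust,ignore
// use core::cell::RefCell;
//
// use embassy_sync::blocking_mutex::Mutex;
// use esp_sync::RawMutex;
//
// static SHARED: Mutex<RawMutex, RefCell<u32>> = Mutex::new(RefCell::new(0));
//
// SHARED.lock(|cell| *cell.borrow_mut() += 1);
// ```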
unsafe impl embassy_sync_06::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync_07::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync_08::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

/// A non-reentrant (panicking) mutex.
///
/// This is largely equivalent to a `critical_section::Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
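///
/// # Example
///
/// A minimal usage sketch:
///
/// ```rust
/// use esp_sync::NonReentrantMutex;
///
/// static COUNTER: NonReentrantMutex<u32> = NonReentrantMutex::new(0);
///
/// COUNTER.with(|counter| *counter += 1);
/// assert_eq!(COUNTER.with(|counter| *counter), 1);
/// ```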
pub struct NonReentrantMutex<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> NonReentrantMutex<T> {
    /// Create a new instance.
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Provide exclusive access to the protected data to the given closure.
    ///
    /// Calling this reentrantly will panic.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        self.lock_state
            .lock_non_reentrant(|| f(unsafe { &mut *self.data.get() }))
    }
}

unsafe impl<T: Send> Send for NonReentrantMutex<T> {}
unsafe impl<T: Send> Sync for NonReentrantMutex<T> {}

struct LockGuard<'a, L: RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}

impl<'a, L: RawLock> LockGuard<'a, L> {
    #[inline]
    fn new_non_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        if this.token.is_reentry() {
            panic_lock_not_reentrant();
        }
        this
    }

    #[inline]
    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // SAFETY: the same lock will be released when dropping the guard.
            // This ensures that the lock is released on the same thread, in the reverse
            // order it was acquired.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}

#[inline(never)]
#[cold]
fn panic_lock_not_reentrant() -> ! {
    panic!("lock is not reentrant");
}