esp_hal/sync.rs

//! Under construction: This is public only for tests, please avoid using it
//! directly.

#[cfg(single_core)]
use core::cell::Cell;
use core::cell::UnsafeCell;

use crate::interrupt::Priority;

/// Opaque token that can be used to release a lock.
// The interpretation of this value depends on the lock type that created it,
// but bit #31 is reserved for the reentry flag.
//
// Xtensa: PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can
// use bit #31 as our reentry flag.
// We can assume the reserved bit is 0, otherwise rsil - wsr pairings would be
// undefined behavior. Quoting the ISA summary, table 64: "Writing a non-zero
// value to these fields results in undefined processor behavior."
//
// Risc-V: we either get the restore state from bit 3 of mstatus, or
// we create the restore state from the current Priority, which is at most 31.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32);
impl RestoreState {
    const REENTRY_FLAG: u32 = 1 << 31;

    fn mark_reentry(&mut self) {
        self.0 |= Self::REENTRY_FLAG;
    }

    fn is_reentry(&self) -> bool {
        self.0 & Self::REENTRY_FLAG != 0
    }
}

impl From<Priority> for RestoreState {
    fn from(priority: Priority) -> Self {
        Self(priority as _)
    }
}

impl TryFrom<RestoreState> for Priority {
    type Error = crate::interrupt::Error;

    fn try_from(token: RestoreState) -> Result<Self, Self::Error> {
        Self::try_from(token.0)
    }
}

mod single_core {
    use core::sync::atomic::{Ordering, compiler_fence};

    use super::RestoreState;
    use crate::interrupt::Priority;

    /// Trait for single-core locks.
    pub trait RawLock {
        unsafe fn enter(&self) -> RestoreState;
        unsafe fn exit(&self, token: RestoreState);
    }

    /// A lock that disables interrupts below a certain priority.
    pub struct PriorityLock(pub Priority);

    impl PriorityLock {
        fn current_priority() -> Priority {
            crate::interrupt::current_runlevel()
        }

        /// Prevents interrupts above `level` from firing and returns the
        /// current run level.
        unsafe fn change_current_level(level: Priority) -> Priority {
            unsafe { crate::interrupt::change_current_runlevel(level) }
        }
    }

    impl RawLock for PriorityLock {
        unsafe fn enter(&self) -> RestoreState {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return unsafe { InterruptLock.enter() };
            }

            let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
            assert!(prev_interrupt_priority <= self.0);

            // Ensure no subsequent memory accesses are reordered to before interrupts are
            // disabled.
            compiler_fence(Ordering::SeqCst);

            RestoreState::from(prev_interrupt_priority)
        }

        unsafe fn exit(&self, token: RestoreState) {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return unsafe { InterruptLock.exit(token) };
            }
            assert!(Self::current_priority() <= self.0);
            // Ensure no preceding memory accesses are reordered to after interrupts are
            // enabled.
            compiler_fence(Ordering::SeqCst);

            let priority = unwrap!(Priority::try_from(token));
            unsafe {
                Self::change_current_level(priority);
            }
        }
    }

    /// A lock that disables interrupts.
    pub struct InterruptLock;

    impl RawLock for InterruptLock {
        unsafe fn enter(&self) -> RestoreState {
            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    let mut mstatus = 0u32;
                    unsafe { core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus); }
                    let token = mstatus & 0b1000;
                } else if #[cfg(xtensa)] {
                    let token: u32;
                    unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token); }
                } else {
                    compile_error!("Unsupported architecture")
                }
            };

            // Ensure no subsequent memory accesses are reordered to before interrupts are
            // disabled.
            compiler_fence(Ordering::SeqCst);

            RestoreState(token)
        }

        unsafe fn exit(&self, token: RestoreState) {
            // Ensure no preceding memory accesses are reordered to after interrupts are
            // enabled.
            compiler_fence(Ordering::SeqCst);

            let RestoreState(token) = token;

            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    if token != 0 {
                        unsafe {
                            esp_riscv_rt::riscv::interrupt::enable();
                        }
                    }
                } else if #[cfg(xtensa)] {
                    // Reserved bits in the PS register, these must be written as 0.
                    const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                    debug_assert!(token & RESERVED_MASK == 0);
                    unsafe {
                        core::arch::asm!(
                            "wsr.ps {0}",
                            "rsync", in(reg) token)
                    }
                } else {
                    compile_error!("Unsupported architecture")
                }
            }
        }
    }
}

#[cfg(multi_core)]
mod multicore {
    use portable_atomic::{AtomicUsize, Ordering};

    // Safety: Ensure that when adding new chips `raw_core` doesn't return this
    // value.
    // FIXME: ensure in HIL tests this is the case!
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    pub fn thread_id() -> usize {
        crate::system::raw_core()
    }

    pub(super) struct AtomicLock {
        owner: AtomicUsize,
    }

    impl AtomicLock {
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        pub fn is_owned_by_current_thread(&self) -> bool {
            self.is_owned_by(thread_id())
        }

        pub fn is_owned_by(&self, thread: usize) -> bool {
            self.owner.load(Ordering::Relaxed) == thread
        }

        pub fn try_lock(&self, new_owner: usize) -> Result<(), usize> {
            self.owner
                .compare_exchange(
                    UNUSED_THREAD_ID_VALUE,
                    new_owner,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .map(|_| ())
        }

        /// # Safety
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        pub unsafe fn unlock(&self) {
            debug_assert!(self.is_owned_by_current_thread());
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }
}

/// A generic lock that wraps [`single_core::RawLock`] and
/// [`multicore::AtomicLock`] and tracks whether the caller has locked
/// recursively.
struct GenericRawMutex<L: single_core::RawLock> {
    lock: L,
    #[cfg(multi_core)]
    inner: multicore::AtomicLock,
    #[cfg(single_core)]
    is_locked: Cell<bool>,
}

unsafe impl<L: single_core::RawLock> Sync for GenericRawMutex<L> {}

impl<L: single_core::RawLock> GenericRawMutex<L> {
    /// Create a new lock.
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            #[cfg(multi_core)]
            inner: multicore::AtomicLock::new(),
            #[cfg(single_core)]
            is_locked: Cell::new(false),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must ensure to release the locks in the reverse order they
    ///   were acquired.
    unsafe fn acquire(&self) -> RestoreState {
        cfg_if::cfg_if! {
            if #[cfg(single_core)] {
                let mut tkn = unsafe { self.lock.enter() };
                let was_locked = self.is_locked.replace(true);
                if was_locked {
                    tkn.mark_reentry();
                }
                tkn
            } else if #[cfg(multi_core)] {
                // We acquire the lock inside an interrupt-free context to prevent a subtle
                // race condition:
                // In case an interrupt handler tries to lock the same resource, it could win if
                // the current thread is holding the lock but isn't yet in interrupt-free context.
                // If we maintain non-reentrant semantics, this situation would panic.
                // If we allow reentrancy, the interrupt handler would technically be a different
                // context with the same `current_thread_id`, so it would be allowed to lock the
                // resource in a theoretically incorrect way.
                let try_lock = |current_thread_id| {
                    let mut tkn = unsafe { self.lock.enter() };

                    match self.inner.try_lock(current_thread_id) {
                        Ok(()) => Some(tkn),
                        Err(owner) if owner == current_thread_id => {
                            tkn.mark_reentry();
                            Some(tkn)
                        }
                        Err(_) => {
                            unsafe { self.lock.exit(tkn) };
                            None
                        }
                    }
                };

                let current_thread_id = multicore::thread_id();
                loop {
                    if let Some(token) = try_lock(current_thread_id) {
                        return token;
                    }
                }
            }
        }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the
    ///   current thread.
    /// - The caller must ensure to release the locks in the reverse order they
    ///   were acquired.
    /// - Each release call must be paired with an acquire call.
    unsafe fn release(&self, token: RestoreState) {
        unsafe {
            if !token.is_reentry() {
                #[cfg(multi_core)]
                self.inner.unlock();

                #[cfg(single_core)]
                self.is_locked.set(false);

                self.lock.exit(token)
            }
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new(self);
        f()
    }
}

/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
    section, making it preferable for use in multi-core systems."#
)]
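///
/// A minimal usage sketch (the `static` name is illustrative only):
///
/// ```rust,ignore
/// static DRIVER_LOCK: RawMutex = RawMutex::new();
///
/// // The closure runs with interrupts disabled on the current core; another
/// // core is only blocked if it contends for this same lock.
/// DRIVER_LOCK.lock(|| {
///     // access the shared resource here
/// });
/// ```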
pub struct RawMutex {
    inner: GenericRawMutex<single_core::InterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Create a new lock.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::InterruptLock),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must ensure to release the locks in the reverse order they
    ///   were acquired.
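    ///
    /// A sketch of the intended acquire/release pairing (the `static` name is
    /// illustrative only):
    ///
    /// ```rust,ignore
    /// static LOCK: RawMutex = RawMutex::new();
    ///
    /// // SAFETY: the token is passed back to `release` in the same context,
    /// // and locks are released in reverse acquisition order.
    /// let token = unsafe { LOCK.acquire() };
    /// // ... exclusive access to the protected resource ...
    /// unsafe { LOCK.release(token) };
    /// ```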
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the
    ///   current thread.
    /// - The caller must ensure to release the locks in the reverse order they
    ///   were acquired.
    /// - Each release call must be paired with an acquire call.
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // embassy_sync semantics allow reentrancy.
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

/// A mutual exclusion primitive that only disables a limited range of
/// interrupts.
///
/// Trying to acquire or release the lock at a higher priority level will panic.
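///
/// A minimal sketch (the `Priority::Priority3` ceiling and the `static` name
/// are assumptions for illustration; pick the level that matches the
/// interrupts sharing the data):
///
/// ```rust,ignore
/// static STATE_LOCK: RawPriorityLimitedMutex =
///     RawPriorityLimitedMutex::new(Priority::Priority3);
///
/// // Only interrupts at or below the configured priority are masked while the
/// // closure runs; higher-priority interrupts keep firing but must not touch
/// // this data.
/// STATE_LOCK.lock(|| {
///     // access the data shared with interrupts up to the configured priority
/// });
/// ```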
pub struct RawPriorityLimitedMutex {
    inner: GenericRawMutex<single_core::PriorityLock>,
}

impl RawPriorityLimitedMutex {
    /// Create a new lock that is accessible at or below the given `priority`.
    pub const fn new(priority: Priority) -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::PriorityLock(priority)),
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawPriorityLimitedMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new(Priority::max());

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // embassy_sync semantics allow reentrancy.
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

// Prefer this over a critical-section, as it allows multiple locks to be
// active at the same time instead of serializing everything through the single
// global critical-section mutex.
pub(crate) fn lock<T>(lock: &RawMutex, f: impl FnOnce() -> T) -> T {
    lock.lock(f)
}

/// Data protected by a [RawMutex].
///
/// This is largely equivalent to a `Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
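///
/// A minimal sketch (the counter type is an arbitrary example):
///
/// ```rust,ignore
/// static COUNTER: Locked<u32> = Locked::new(0);
///
/// // `with` locks the inner `RawMutex`, hands out `&mut u32`, and unlocks
/// // when the closure returns.
/// let new_value = COUNTER.with(|count| {
///     *count += 1;
///     *count
/// });
/// ```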
pub struct Locked<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> Locked<T> {
    /// Create a new instance
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Provide exclusive access to the protected data to the given closure.
    ///
    /// Calling this reentrantly will panic.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        lock(&self.lock_state, || f(unsafe { &mut *self.data.get() }))
    }
}

unsafe impl<T> Sync for Locked<T> {}

struct LockGuard<'a, L: single_core::RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}

impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
    fn new(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        assert!(!this.token.is_reentry(), "lock is not reentrant");
        this
    }

    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // SAFETY: the same lock will be released when dropping the guard.
            // This ensures that the lock is released on the same thread, in the reverse
            // order it was acquired.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}

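// With `impl_critical_section` enabled, the `critical-section` crate is backed
// by the `RawMutex` above, so `critical_section::with` disables interrupts on
// the current core and takes the one global lock. A usage sketch (this is the
// standard `critical-section` API, not something specific to this module):
//
// ```rust,ignore
// critical_section::with(|_cs| {
//     // interrupts are disabled on the current core here; keep this short
// });
// ```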
#[cfg(impl_critical_section)]
mod critical_section {
    struct CriticalSection;

    critical_section::set_impl!(CriticalSection);

    static CRITICAL_SECTION: super::RawMutex = super::RawMutex::new();

    unsafe impl critical_section::Impl for CriticalSection {
        unsafe fn acquire() -> critical_section::RawRestoreState {
            unsafe { CRITICAL_SECTION.acquire().0 }
        }

        unsafe fn release(token: critical_section::RawRestoreState) {
            unsafe {
                CRITICAL_SECTION.release(super::RestoreState(token));
            }
        }
    }
}