esp_hal/sync.rs

//! Under construction: This is public only for tests; please avoid using it
//! directly.

#[cfg(single_core)]
use core::cell::Cell;
use core::cell::UnsafeCell;

use crate::interrupt::Priority;

mod single_core {
    use core::sync::atomic::{compiler_fence, Ordering};

    use crate::interrupt::Priority;

    /// Trait for single-core locks.
    pub trait RawLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState;
        unsafe fn exit(&self, token: critical_section::RawRestoreState);
    }

    /// A lock that disables interrupts below a certain priority.
    pub struct PriorityLock(pub Priority);

    impl PriorityLock {
        fn current_priority() -> Priority {
            crate::interrupt::current_runlevel()
        }

        /// Prevents interrupts above `level` from firing and returns the
        /// current run level.
        unsafe fn change_current_level(level: Priority) -> Priority {
            crate::interrupt::change_current_runlevel(level)
        }
    }

    impl RawLock for PriorityLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return InterruptLock.enter();
            }

            let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
            assert!(prev_interrupt_priority <= self.0);

            // Ensure no subsequent memory accesses are reordered to before interrupts are
            // disabled.
            compiler_fence(Ordering::SeqCst);

            prev_interrupt_priority as _
        }

        unsafe fn exit(&self, token: critical_section::RawRestoreState) {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return InterruptLock.exit(token);
            }
            assert!(Self::current_priority() <= self.0);
            // Ensure no preceding memory accesses are reordered to after interrupts are
            // enabled.
            compiler_fence(Ordering::SeqCst);

            #[cfg(xtensa)]
            let token = token as u8;

            let priority = unwrap!(Priority::try_from(token));
            unsafe { Self::change_current_level(priority) };
        }
    }

    /// A lock that disables interrupts.
    pub struct InterruptLock;

    impl RawLock for InterruptLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState {
            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    let mut mstatus = 0u32;
                    core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);
                    let token = ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState;
                } else if #[cfg(xtensa)] {
                    let token: critical_section::RawRestoreState;
                    core::arch::asm!("rsil {0}, 5", out(reg) token);
                } else {
                    compile_error!("Unsupported architecture")
                }
            };

            // Ensure no subsequent memory accesses are reordered to before interrupts are
            // disabled.
            compiler_fence(Ordering::SeqCst);

            token
        }

        unsafe fn exit(&self, token: critical_section::RawRestoreState) {
            // Ensure no preceding memory accesses are reordered to after interrupts are
            // enabled.
            compiler_fence(Ordering::SeqCst);

            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    if token != 0 {
                        esp_riscv_rt::riscv::interrupt::enable();
                    }
                } else if #[cfg(xtensa)] {
                    // Reserved bits in the PS register; these must be written as 0.
                    const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                    debug_assert!(token & RESERVED_MASK == 0);
                    core::arch::asm!(
                        "wsr.ps {0}",
                        "rsync", in(reg) token)
                } else {
                    compile_error!("Unsupported architecture")
                }
            }
        }
    }
}

#[cfg(multi_core)]
mod multicore {
    use portable_atomic::{AtomicUsize, Ordering};

    // Safety: Ensure that, when adding new chips, `raw_core` doesn't return this
    // value.
    // FIXME: ensure in HIL tests this is the case!
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    pub fn thread_id() -> usize {
        crate::system::raw_core()
    }

    pub(super) struct AtomicLock {
        owner: AtomicUsize,
    }

    impl AtomicLock {
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        pub fn is_owned_by_current_thread(&self) -> bool {
            self.is_owned_by(thread_id())
        }

        pub fn is_owned_by(&self, thread: usize) -> bool {
            self.owner.load(Ordering::Relaxed) == thread
        }

        pub fn try_lock(&self, new_owner: usize) -> Result<(), usize> {
            self.owner
                .compare_exchange(
                    UNUSED_THREAD_ID_VALUE,
                    new_owner,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .map(|_| ())
        }

        /// # Safety
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        pub unsafe fn unlock(&self) {
            debug_assert!(self.is_owned_by_current_thread());
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }
}

cfg_if::cfg_if! {
    if #[cfg(riscv)] {
        // The restore state is a u8 that is cast from a bool, so it has a value of
        // 0x00 or 0x01 before we add the reentry flag to it.
        pub const REENTRY_FLAG: u8 = 1 << 7;
    } else if #[cfg(xtensa)] {
        // PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can use bit
        // #31 as our reentry flag.
        // We can assume the reserved bits are 0, otherwise `rsil` - `wsr` pairings would
        // be undefined behavior. Quoting the ISA summary, table 64:
        // Writing a non-zero value to these fields results in undefined processor behavior.
        pub const REENTRY_FLAG: u32 = 1 << 31;
    }
}

/// A generic lock that wraps [`single_core::RawLock`] and
/// [`multicore::AtomicLock`] and tracks whether the caller has locked
/// recursively.
struct GenericRawMutex<L: single_core::RawLock> {
    lock: L,
    #[cfg(multi_core)]
    inner: multicore::AtomicLock,
    #[cfg(single_core)]
    is_locked: Cell<bool>,
}

unsafe impl<L: single_core::RawLock> Sync for GenericRawMutex<L> {}

impl<L: single_core::RawLock> GenericRawMutex<L> {
    /// Create a new lock.
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            #[cfg(multi_core)]
            inner: multicore::AtomicLock::new(),
            #[cfg(single_core)]
            is_locked: Cell::new(false),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must release the locks in the reverse order they were
    ///   acquired.
    unsafe fn acquire(&self) -> critical_section::RawRestoreState {
        cfg_if::cfg_if! {
            if #[cfg(single_core)] {
                let mut tkn = unsafe { self.lock.enter() };
                let was_locked = self.is_locked.replace(true);
                if was_locked {
                    tkn |= REENTRY_FLAG;
                }
                tkn
            } else if #[cfg(multi_core)] {
                // We acquire the lock inside an interrupt-free context to prevent a subtle
                // race condition:
                // In case an interrupt handler tries to lock the same resource, it could win if
                // the current thread is holding the lock but isn't yet in interrupt-free context.
                // If we maintain non-reentrant semantics, this situation would panic.
                // If we allow reentrancy, the interrupt handler would technically be a different
                // context with the same `current_thread_id`, so it would be allowed to lock the
                // resource in a theoretically incorrect way.
                let try_lock = |current_thread_id| {
                    let mut tkn = unsafe { self.lock.enter() };

                    match self.inner.try_lock(current_thread_id) {
                        Ok(()) => Some(tkn),
                        Err(owner) if owner == current_thread_id => {
                            tkn |= REENTRY_FLAG;
                            Some(tkn)
                        }
                        Err(_) => {
                            unsafe { self.lock.exit(tkn) };
                            None
                        }
                    }
                };

                let current_thread_id = multicore::thread_id();
                loop {
                    if let Some(token) = try_lock(current_thread_id) {
                        return token;
                    }
                }
            }
        }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the
    ///   current thread.
    /// - The caller must release the locks in the reverse order they were
    ///   acquired.
    /// - Each release call must be paired with an acquire call.
    unsafe fn release(&self, token: critical_section::RawRestoreState) {
        if token & REENTRY_FLAG == 0 {
            #[cfg(multi_core)]
            self.inner.unlock();

            #[cfg(single_core)]
            self.is_locked.set(false);

            self.lock.exit(token)
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new(self);
        f()
    }
}

/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
    section, making it preferable for use in multi-core systems."#
)]
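///
/// # Example
///
/// A minimal usage sketch (illustrative only; it assumes this module is
/// exposed as `esp_hal::sync`):
///
/// ```rust, ignore
/// use esp_hal::sync::RawMutex;
///
/// static MUTEX: RawMutex = RawMutex::new();
///
/// fn with_shared_state() -> u32 {
///     // The closure runs with interrupts disabled on the current core.
///     MUTEX.lock(|| 21 * 2)
/// }
/// ```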
pub struct RawMutex {
    inner: GenericRawMutex<single_core::InterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Create a new lock.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::InterruptLock),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must release the locks in the reverse order they were
    ///   acquired.
    pub unsafe fn acquire(&self) -> critical_section::RawRestoreState {
        self.inner.acquire()
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the
    ///   current thread.
    /// - The caller must release the locks in the reverse order they were
    ///   acquired.
    /// - Each release call must be paired with an acquire call.
    pub unsafe fn release(&self, token: critical_section::RawRestoreState) {
        self.inner.release(token);
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // embassy_sync semantics allow reentrancy.
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}
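
// Illustrative sketch (not part of this module): because `RawMutex` implements
// `embassy_sync::blocking_mutex::raw::RawMutex`, it can back a blocking mutex,
// assuming this module is exposed as `esp_hal::sync`:
//
//     use core::cell::RefCell;
//     use embassy_sync::blocking_mutex::Mutex;
//     use esp_hal::sync::RawMutex;
//
//     static SHARED: Mutex<RawMutex, RefCell<u32>> = Mutex::new(RefCell::new(0));
//
//     fn bump() {
//         SHARED.lock(|cell| *cell.borrow_mut() += 1);
//     }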

/// A mutual exclusion primitive that only disables a limited range of
/// interrupts.
///
/// Trying to acquire or release the lock at a higher priority level will panic.
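///
/// # Example
///
/// A minimal sketch (illustrative only; it assumes this module is exposed as
/// `esp_hal::sync` and that the target defines `Priority::Priority1`):
///
/// ```rust, ignore
/// use esp_hal::{interrupt::Priority, sync::RawPriorityLimitedMutex};
///
/// // Accessible at or below `Priority1`; acquiring it from a higher priority
/// // level panics.
/// static LOCK: RawPriorityLimitedMutex = RawPriorityLimitedMutex::new(Priority::Priority1);
///
/// fn update_shared_state() {
///     LOCK.lock(|| {
///         // Access state shared with interrupt handlers that use this lock.
///     });
/// }
/// ```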
pub struct RawPriorityLimitedMutex {
    inner: GenericRawMutex<single_core::PriorityLock>,
}

impl RawPriorityLimitedMutex {
    /// Create a new lock that is accessible at or below the given `priority`.
    pub const fn new(priority: Priority) -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::PriorityLock(priority)),
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant; calling it reentrantly will
    /// panic.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawPriorityLimitedMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new(Priority::max());

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // embassy_sync semantics allow reentrancy.
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

// Prefer this over a critical section, as it allows multiple independent locks
// to be held at the same time instead of serializing everything on the single
// global critical-section mutex.
pub(crate) fn lock<T>(lock: &RawMutex, f: impl FnOnce() -> T) -> T {
    lock.lock(f)
}

/// Data protected by a [RawMutex].
///
/// This is largely equivalent to a `Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
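///
/// # Example
///
/// A minimal sketch (illustrative only; it assumes this module is exposed as
/// `esp_hal::sync`):
///
/// ```rust, ignore
/// use esp_hal::sync::Locked;
///
/// static COUNTER: Locked<u32> = Locked::new(0);
///
/// fn increment() {
///     // The closure receives `&mut u32`; calling `with` reentrantly panics.
///     COUNTER.with(|counter| *counter += 1);
/// }
/// ```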
pub struct Locked<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> Locked<T> {
    /// Create a new instance
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Provide exclusive access to the protected data to the given closure.
    ///
    /// Calling this reentrantly will panic.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        lock(&self.lock_state, || f(unsafe { &mut *self.data.get() }))
    }
}

unsafe impl<T> Sync for Locked<T> {}

struct CriticalSection;

critical_section::set_impl!(CriticalSection);

static CRITICAL_SECTION: RawMutex = RawMutex::new();

unsafe impl critical_section::Impl for CriticalSection {
    unsafe fn acquire() -> critical_section::RawRestoreState {
        CRITICAL_SECTION.acquire()
    }

    unsafe fn release(token: critical_section::RawRestoreState) {
        CRITICAL_SECTION.release(token);
    }
}
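
// Illustrative note (not part of the implementation): with the `critical_section::Impl`
// above registered via `set_impl!`, a `critical_section::with` call is served by the
// global `CRITICAL_SECTION` RawMutex, e.g.:
//
//     critical_section::with(|_cs| {
//         // Runs with interrupts disabled on the current core (and, on multi-core
//         // chips, with the shared multi-core lock held).
//     });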

struct LockGuard<'a, L: single_core::RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: critical_section::RawRestoreState,
}

impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
    fn new(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        assert!(this.token & REENTRY_FLAG == 0, "lock is not reentrant");
        this
    }

    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // SAFETY: the same lock will be released when dropping the guard.
            // This ensures that the lock is released on the same thread, in the reverse
            // order it was acquired.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}