#[cfg(single_core)]
use core::cell::Cell;
use core::cell::UnsafeCell;

use crate::interrupt::Priority;

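// Single-core locking: primitives that lock by masking interrupts or raising
// the interrupt priority threshold on the current core.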
mod single_core {
    use core::sync::atomic::{compiler_fence, Ordering};

    use crate::interrupt::Priority;

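    /// A lock that can be taken by disabling some set of interrupts on the
    /// current core.
    ///
    /// `enter` returns a restore token that must be passed to the matching
    /// `exit` call to restore the previous interrupt state.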
    pub trait RawLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState;
        unsafe fn exit(&self, token: critical_section::RawRestoreState);
    }

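    /// A lock that masks interrupts at or below the given priority while
    /// held.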
    pub struct PriorityLock(pub Priority);

    impl PriorityLock {
        fn current_priority() -> Priority {
            crate::interrupt::current_runlevel()
        }

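        /// Prevents interrupts at or below `level` from firing and returns
        /// the previous run level.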
        unsafe fn change_current_level(level: Priority) -> Priority {
            crate::interrupt::change_current_runlevel(level)
        }
    }

    impl RawLock for PriorityLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return InterruptLock.enter();
            }

            let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
            assert!(prev_interrupt_priority <= self.0);

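            // Ensure no subsequent memory accesses are reordered to before
            // interrupts are masked.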
            compiler_fence(Ordering::SeqCst);

            prev_interrupt_priority as _
        }

        unsafe fn exit(&self, token: critical_section::RawRestoreState) {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return InterruptLock.exit(token);
            }
            assert!(Self::current_priority() <= self.0);
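            // Ensure no preceding memory accesses are reordered to after
            // interrupts are unmasked.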
            compiler_fence(Ordering::SeqCst);

            #[cfg(xtensa)]
            let token = token as u8;

            let priority = unwrap!(Priority::try_from(token));
            unsafe { Self::change_current_level(priority) };
        }
    }

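    /// A lock that disables all maskable interrupts on the current core while
    /// held.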
    pub struct InterruptLock;

    impl RawLock for InterruptLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState {
            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    let mut mstatus = 0u32;
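                    // Atomically clear MIE (bit 3 of mstatus) and read the
                    // previous value.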
                    core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);
                    let token = ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState;
                } else if #[cfg(xtensa)] {
                    let token: critical_section::RawRestoreState;
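                    // Raise the interrupt level to 5 and read the previous
                    // PS value.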
                    core::arch::asm!("rsil {0}, 5", out(reg) token);
                } else {
                    compile_error!("Unsupported architecture")
                }
            };

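            // Ensure no subsequent memory accesses are reordered to before
            // interrupts are disabled.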
            compiler_fence(Ordering::SeqCst);

            token
        }

        unsafe fn exit(&self, token: critical_section::RawRestoreState) {
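            // Ensure no preceding memory accesses are reordered to after
            // interrupts are re-enabled.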
            compiler_fence(Ordering::SeqCst);

            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    if token != 0 {
                        esp_riscv_rt::riscv::interrupt::enable();
                    }
                } else if #[cfg(xtensa)] {
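                    // Reserved bits in the PS register; these must be written
                    // as zeroes.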
                    const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                    debug_assert!(token & RESERVED_MASK == 0);
                    core::arch::asm!(
                        "wsr.ps {0}",
                        "rsync", in(reg) token)
                } else {
                    compile_error!("Unsupported architecture")
                }
            }
        }
    }
}

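// Multi-core support: an atomic owner field records which core holds a lock,
// so a second acquisition by the same core can be detected.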
#[cfg(multi_core)]
mod multicore {
    use portable_atomic::{AtomicUsize, Ordering};

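    // Sentinel stored in `owner` while the lock is free. It must never equal
    // a value that `thread_id()` can return.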
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    pub fn thread_id() -> usize {
        crate::system::raw_core()
    }

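    /// A simple spinlock that records which core currently owns it, allowing
    /// reentrant acquisitions to be detected.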
    pub(super) struct AtomicLock {
        owner: AtomicUsize,
    }

    impl AtomicLock {
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        pub fn is_owned_by_current_thread(&self) -> bool {
            self.is_owned_by(thread_id())
        }

        pub fn is_owned_by(&self, thread: usize) -> bool {
            self.owner.load(Ordering::Relaxed) == thread
        }

        pub fn try_lock(&self, new_owner: usize) -> Result<(), usize> {
            self.owner
                .compare_exchange(
                    UNUSED_THREAD_ID_VALUE,
                    new_owner,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .map(|_| ())
        }

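        /// # Safety
        ///
        /// This must only be called by the thread that currently owns the
        /// lock.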
        pub unsafe fn unlock(&self) {
            debug_assert!(self.is_owned_by_current_thread());
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }
}

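// The reentry flag is stored in a bit of the restore token that the
// architecture leaves unused: on RISC-V the token is a bool cast to `u8`, so
// bit 7 is free; on Xtensa the token holds the PS register, whose bit 31 is
// reserved.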
cfg_if::cfg_if! {
    if #[cfg(riscv)] {
        pub const REENTRY_FLAG: u8 = 1 << 7;
    } else if #[cfg(xtensa)] {
        pub const REENTRY_FLAG: u32 = 1 << 31;
    }
}

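/// A lock that combines an architecture-specific [`single_core::RawLock`]
/// with, on multi-core chips, an ownership-tracking [`multicore::AtomicLock`].
/// Reentrant acquisitions are flagged in the returned token so that only the
/// outermost release actually unlocks.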
struct GenericRawMutex<L: single_core::RawLock> {
    lock: L,
    #[cfg(multi_core)]
    inner: multicore::AtomicLock,
    #[cfg(single_core)]
    is_locked: Cell<bool>,
}

unsafe impl<L: single_core::RawLock> Sync for GenericRawMutex<L> {}

impl<L: single_core::RawLock> GenericRawMutex<L> {
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            #[cfg(multi_core)]
            inner: multicore::AtomicLock::new(),
            #[cfg(single_core)]
            is_locked: Cell::new(false),
        }
    }

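    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each `acquire` call must be paired with a `release` call that is
    ///   passed the returned token.
    /// - Locks must be released in the reverse order they were acquired.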
    unsafe fn acquire(&self) -> critical_section::RawRestoreState {
        cfg_if::cfg_if! {
            if #[cfg(single_core)] {
                let mut tkn = unsafe { self.lock.enter() };
                let was_locked = self.is_locked.replace(true);
                if was_locked {
                    tkn |= REENTRY_FLAG;
                }
                tkn
            } else if #[cfg(multi_core)] {
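                // Take the interrupt lock before touching the owner atomic.
                // This way, once this core becomes the owner it cannot be
                // preempted, and a reentrant acquisition from an interrupt
                // handler on the same core is detected via the owner check
                // instead of spinning forever.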
                let try_lock = |current_thread_id| {
                    let mut tkn = unsafe { self.lock.enter() };

                    match self.inner.try_lock(current_thread_id) {
                        Ok(()) => Some(tkn),
                        Err(owner) if owner == current_thread_id => {
                            tkn |= REENTRY_FLAG;
                            Some(tkn)
                        }
                        Err(_) => {
                            unsafe { self.lock.exit(tkn) };
                            None
                        }
                    }
                };

                let current_thread_id = multicore::thread_id();
                loop {
                    if let Some(token) = try_lock(current_thread_id) {
                        return token;
                    }
                }
            }
        }
    }

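    /// Releases the lock.
    ///
    /// If the reentry flag is set in `token`, an outer guard still holds the
    /// lock and nothing is released.
    ///
    /// # Safety
    ///
    /// `token` must be the value returned by the corresponding `acquire`
    /// call.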
    unsafe fn release(&self, token: critical_section::RawRestoreState) {
        if token & REENTRY_FLAG == 0 {
            #[cfg(multi_core)]
            self.inner.unlock();

            #[cfg(single_core)]
            self.is_locked.set(false);

            self.lock.exit(token)
        }
    }

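    /// Runs the callback with this lock locked.
    ///
    /// This function is not reentrant: locking it again from within the
    /// callback will panic.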
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new(self);
        f()
    }
}

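/// A mutual exclusion primitive.
///
/// While locked, this disables interrupts on the current core.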
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
    section, making it preferable for use in multi-core systems."#
)]
pub struct RawMutex {
    inner: GenericRawMutex<single_core::InterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::InterruptLock),
        }
    }

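    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each `acquire` call must be paired with a `release` call that is
    ///   passed the returned token.
    /// - Locks must be released in the reverse order they were acquired.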
    pub unsafe fn acquire(&self) -> critical_section::RawRestoreState {
        self.inner.acquire()
    }

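    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// `token` must be the value returned by the matching `acquire` call.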
    pub unsafe fn release(&self, token: critical_section::RawRestoreState) {
        self.inner.release(token);
    }

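    /// Runs the callback with this lock locked.
    ///
    /// This function is not reentrant: locking it again from within the
    /// callback will panic.
    ///
    /// A minimal usage sketch (the static and the computed value are
    /// illustrative, not part of this module):
    ///
    /// ```rust,ignore
    /// static MUTEX: RawMutex = RawMutex::new();
    ///
    /// let sum = MUTEX.lock(|| {
    ///     // Interrupts are disabled on this core while the closure runs.
    ///     1 + 2
    /// });
    /// ```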
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

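/// A mutual exclusion primitive that only masks interrupts up to the given
/// priority.
///
/// Trying to acquire or release the lock from a higher-priority context will
/// panic.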
pub struct RawPriorityLimitedMutex {
    inner: GenericRawMutex<single_core::PriorityLock>,
}

impl RawPriorityLimitedMutex {
    pub const fn new(priority: Priority) -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::PriorityLock(priority)),
        }
    }

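    /// Runs the callback with this lock locked.
    ///
    /// This function is not reentrant: locking it again from within the
    /// callback will panic.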
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawPriorityLimitedMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new(Priority::max());

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

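// Prefer this helper over a global critical section: each `RawMutex` is its
// own lock, so unrelated users do not contend with each other.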
pub(crate) fn lock<T>(lock: &RawMutex, f: impl FnOnce() -> T) -> T {
    lock.lock(f)
}

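/// Data protected by a [`RawMutex`].
///
/// Roughly equivalent to a `Mutex<RefCell<T>>`, but exposes the protected
/// value through a callback instead of a guard.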
pub struct Locked<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> Locked<T> {
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

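    /// Runs the callback with mutable access to the protected data.
    ///
    /// A minimal usage sketch (the static counter is illustrative):
    ///
    /// ```rust,ignore
    /// static COUNTER: Locked<u32> = Locked::new(0);
    ///
    /// let new_value = COUNTER.with(|counter| {
    ///     *counter += 1;
    ///     *counter
    /// });
    /// ```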
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        lock(&self.lock_state, || f(unsafe { &mut *self.data.get() }))
    }
}

unsafe impl<T> Sync for Locked<T> {}

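// Provide the `critical-section` implementation for this chip: a global
// `RawMutex` backs every `critical_section::with` call.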
struct CriticalSection;

critical_section::set_impl!(CriticalSection);

static CRITICAL_SECTION: RawMutex = RawMutex::new();

unsafe impl critical_section::Impl for CriticalSection {
    unsafe fn acquire() -> critical_section::RawRestoreState {
        CRITICAL_SECTION.acquire()
    }

    unsafe fn release(token: critical_section::RawRestoreState) {
        CRITICAL_SECTION.release(token);
    }
}

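/// A RAII guard that releases its lock when dropped.
///
/// `new` panics on reentrant acquisition; `new_reentrant` permits it, in
/// which case only the outermost guard performs the actual release.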
struct LockGuard<'a, L: single_core::RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: critical_section::RawRestoreState,
}

impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
    fn new(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        assert!(this.token & REENTRY_FLAG == 0, "lock is not reentrant");
        this
    }

    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe { lock.acquire() };

        Self { lock, token }
    }
}

impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}