#[cfg(single_core)]
use core::cell::Cell;
use core::cell::UnsafeCell;

use crate::interrupt::Priority;

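/// Opaque restore token returned by `RawMutex::acquire`.
///
/// It carries the interrupt state (or priority level) to restore on release,
/// plus a marker bit for reentrant acquisitions.
///
/// A minimal usage sketch (illustrative only; the closure-based `RawMutex::lock`
/// is usually preferable to manual acquire/release):
///
/// ```rust,ignore
/// let mutex = RawMutex::new();
///
/// // SAFETY: the token is passed back to `release` on the same mutex below.
/// let token: RestoreState = unsafe { mutex.acquire() };
/// // ... critical section ...
/// unsafe { mutex.release(token) };
/// ```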
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32);
impl RestoreState {
    /// Bit 31 marks a reentrant acquisition; the lower bits hold the state to
    /// restore (e.g. the previous priority level).
    const REENTRY_FLAG: u32 = 1 << 31;

    fn mark_reentry(&mut self) {
        self.0 |= Self::REENTRY_FLAG;
    }

    fn is_reentry(&self) -> bool {
        self.0 & Self::REENTRY_FLAG != 0
    }
}

impl From<Priority> for RestoreState {
    fn from(priority: Priority) -> Self {
        Self(priority as _)
    }
}

impl TryFrom<RestoreState> for Priority {
    type Error = crate::interrupt::Error;

    fn try_from(token: RestoreState) -> Result<Self, Self::Error> {
        Self::try_from(token.0)
    }
}

mod single_core {
    use core::sync::atomic::{Ordering, compiler_fence};

    use super::RestoreState;
    use crate::interrupt::Priority;

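    /// Low-level, single-core lock primitive: `enter` masks interrupts (fully
    /// or up to a priority level) and returns the state to restore; `exit`
    /// restores that state.
    ///
    /// `exit` must be called exactly once, on the same core, with the token
    /// returned by the matching `enter`.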
    pub trait RawLock {
        unsafe fn enter(&self) -> RestoreState;
        unsafe fn exit(&self, token: RestoreState);
    }

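    /// A lock that raises the current interrupt priority ("runlevel") to the
    /// wrapped `Priority` while held, masking interrupts configured at or
    /// below that priority.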
    pub struct PriorityLock(pub Priority);

    impl PriorityLock {
        fn current_priority() -> Priority {
            crate::interrupt::current_runlevel()
        }

        unsafe fn change_current_level(level: Priority) -> Priority {
            unsafe { crate::interrupt::change_current_runlevel(level) }
        }
    }

    impl RawLock for PriorityLock {
        unsafe fn enter(&self) -> RestoreState {
            // On RISC-V, a maximum-priority lock is implemented by disabling
            // interrupts entirely.
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return unsafe { InterruptLock.enter() };
            }

            let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
            assert!(prev_interrupt_priority <= self.0);

            // Prevent the compiler from moving memory accesses to before the
            // priority was raised.
            compiler_fence(Ordering::SeqCst);

            RestoreState::from(prev_interrupt_priority)
        }

        unsafe fn exit(&self, token: RestoreState) {
            #[cfg(riscv)]
            if self.0 == Priority::max() {
                return unsafe { InterruptLock.exit(token) };
            }
            assert!(Self::current_priority() <= self.0);
            // Prevent the compiler from moving memory accesses to after the
            // previous priority is restored.
            compiler_fence(Ordering::SeqCst);

            let priority = unwrap!(Priority::try_from(token));
            unsafe {
                Self::change_current_level(priority);
            }
        }
    }

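    /// A lock that disables all maskable interrupts on the current core while
    /// held, and restores the previous interrupt state on exit.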
    pub struct InterruptLock;

    impl RawLock for InterruptLock {
        unsafe fn enter(&self) -> RestoreState {
            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    // Atomically read `mstatus` and clear MIE (bit 3), disabling
                    // interrupts. The token records the previous MIE bit.
                    let mut mstatus = 0u32;
                    unsafe { core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus); }
                    let token = mstatus & 0b1000;
                } else if #[cfg(xtensa)] {
                    // Set PS.INTLEVEL to 5, masking interrupts. The token
                    // records the previous PS register value.
                    let token: u32;
                    unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token); }
                } else {
                    compile_error!("Unsupported architecture")
                }
            };

            // Prevent the compiler from moving memory accesses to before
            // interrupts are disabled.
            compiler_fence(Ordering::SeqCst);

            RestoreState(token)
        }

        unsafe fn exit(&self, token: RestoreState) {
            // Prevent the compiler from moving memory accesses to after
            // interrupts are re-enabled.
            compiler_fence(Ordering::SeqCst);

            let RestoreState(token) = token;

            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    // Re-enable interrupts only if they were enabled before `enter`.
                    if token != 0 {
                        unsafe {
                            esp_riscv_rt::riscv::interrupt::enable();
                        }
                    }
                } else if #[cfg(xtensa)] {
                    // Reserved bits in the PS register must be written as 0.
                    const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                    debug_assert!(token & RESERVED_MASK == 0);
                    // Restore the previous PS value and synchronize.
                    unsafe {
                        core::arch::asm!(
                            "wsr.ps {0}",
                            "rsync",
                            in(reg) token
                        )
                    }
                } else {
                    compile_error!("Unsupported architecture")
                }
            }
        }
    }
}


#[cfg(multi_core)]
mod multicore {
    use portable_atomic::{AtomicUsize, Ordering};

    // Value stored in the owner field while the lock is not held. It must not
    // collide with any value returned by `thread_id()`.
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

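    /// Returns the ID of the core this function is called on.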
    pub fn thread_id() -> usize {
        crate::system::raw_core()
    }

    pub(super) struct AtomicLock {
        owner: AtomicUsize,
    }

    impl AtomicLock {
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        pub fn is_owned_by_current_thread(&self) -> bool {
            self.is_owned_by(thread_id())
        }

        pub fn is_owned_by(&self, thread: usize) -> bool {
            self.owner.load(Ordering::Relaxed) == thread
        }

        /// Tries to claim the lock for `new_owner`. On failure, returns the ID
        /// of the core that currently holds it.
        pub fn try_lock(&self, new_owner: usize) -> Result<(), usize> {
            self.owner
                .compare_exchange(
                    UNUSED_THREAD_ID_VALUE,
                    new_owner,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                )
                .map(|_| ())
        }

        /// # Safety
        ///
        /// The lock must currently be owned by the calling core.
        pub unsafe fn unlock(&self) {
            debug_assert!(self.is_owned_by_current_thread());
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }
}

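/// A reentrancy-aware mutex built on top of a `single_core::RawLock`.
///
/// On single-core chips it pairs the raw lock with a core-local `is_locked`
/// flag; on multi-core chips it pairs it with a cross-core `AtomicLock` that
/// records the owning core. Reentrant acquisitions are detected and marked in
/// the returned `RestoreState` so that only the outermost release unlocks.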
struct GenericRawMutex<L: single_core::RawLock> {
    lock: L,
    #[cfg(multi_core)]
    inner: multicore::AtomicLock,
    #[cfg(single_core)]
    is_locked: Cell<bool>,
}

unsafe impl<L: single_core::RawLock> Sync for GenericRawMutex<L> {}

impl<L: single_core::RawLock> GenericRawMutex<L> {
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            #[cfg(multi_core)]
            inner: multicore::AtomicLock::new(),
            #[cfg(single_core)]
            is_locked: Cell::new(false),
        }
    }

    /// Acquires the lock and returns the token needed to release it.
    ///
    /// Reentrant acquisitions are detected and marked in the token; only the
    /// outermost release actually unlocks.
    unsafe fn acquire(&self) -> RestoreState {
        cfg_if::cfg_if! {
            if #[cfg(single_core)] {
                // Mask interrupts first, then detect reentrancy via the
                // core-local flag.
                let mut tkn = unsafe { self.lock.enter() };
                let was_locked = self.is_locked.replace(true);
                if was_locked {
                    tkn.mark_reentry();
                }
                tkn
            } else if #[cfg(multi_core)] {
                // Mask interrupts on this core, then try to claim the
                // cross-core lock. If the other core holds it, release the
                // inner lock and retry; if this core already holds it, mark
                // the token as reentrant.
                let try_lock = |current_thread_id| {
                    let mut tkn = unsafe { self.lock.enter() };

                    match self.inner.try_lock(current_thread_id) {
                        Ok(()) => Some(tkn),
                        Err(owner) if owner == current_thread_id => {
                            tkn.mark_reentry();
                            Some(tkn)
                        }
                        Err(_) => {
                            unsafe { self.lock.exit(tkn) };
                            None
                        }
                    }
                };

                let current_thread_id = multicore::thread_id();
                loop {
                    if let Some(token) = try_lock(current_thread_id) {
                        return token;
                    }
                }
            }
        }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// `token` must be the value returned by the matching `acquire` call on
    /// the current core.
    unsafe fn release(&self, token: RestoreState) {
        unsafe {
            if !token.is_reentry() {
                #[cfg(multi_core)]
                self.inner.unlock();

                #[cfg(single_core)]
                self.is_locked.set(false);

                self.lock.exit(token)
            }
        }
    }

    /// Runs `f` with the lock held.
    ///
    /// Panics if the lock is already held by the current core: this entry
    /// point is not reentrant.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new(self);
        f()
    }
}

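/// A mutual exclusion primitive that disables interrupts on the current core
/// while it is held.
///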
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
section, making it preferable for use in multi-core systems."#
)]
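/// A minimal usage sketch (assuming this module is exposed as `esp_hal::sync`):
///
/// ```rust,ignore
/// use esp_hal::sync::RawMutex;
///
/// static LOCK: RawMutex = RawMutex::new();
///
/// let value = LOCK.lock(|| {
///     // Interrupts are masked on this core while the closure runs.
///     21 * 2
/// });
/// assert_eq!(value, 42);
/// ```
///
/// This type also implements `embassy_sync::blocking_mutex::raw::RawMutex`, so
/// it can back an `embassy_sync` blocking mutex.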
pub struct RawMutex {
    inner: GenericRawMutex<single_core::InterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Creates a new mutex.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::InterruptLock),
        }
    }

    /// Acquires the lock and returns the state needed to release it.
    ///
    /// # Safety
    ///
    /// The returned `RestoreState` must be passed to exactly one matching
    /// `release` call, and acquire/release pairs must be properly nested.
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// `token` must be the value returned by the matching `acquire` call.
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs `f` with the mutex locked.
    ///
    /// Panics if the mutex is already locked by the current core.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // Allow reentrant locking here; nested acquisitions are marked so that
        // only the outermost release unlocks.
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

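/// A mutual exclusion primitive that masks interrupts only up to a configured
/// `Priority`, instead of disabling them entirely.
///
/// Interrupt handlers with a higher priority than the configured level keep
/// running, and must not attempt to take the same lock.
///
/// A minimal usage sketch (paths assumed as in the `RawMutex` example;
/// `Priority::max()` is used here for brevity):
///
/// ```rust,ignore
/// use esp_hal::interrupt::Priority;
/// use esp_hal::sync::RawPriorityLimitedMutex;
///
/// static LOCK: RawPriorityLimitedMutex =
///     RawPriorityLimitedMutex::new(Priority::max());
///
/// LOCK.lock(|| {
///     // Interrupts at or below the configured priority are masked here.
/// });
/// ```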
pub struct RawPriorityLimitedMutex {
    inner: GenericRawMutex<single_core::PriorityLock>,
}

impl RawPriorityLimitedMutex {
    /// Creates a new mutex that masks interrupts up to `priority`.
    pub const fn new(priority: Priority) -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::PriorityLock(priority)),
        }
    }

    /// Runs `f` with the mutex locked.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawPriorityLimitedMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new(Priority::max());

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // Allow reentrant locking here; nested acquisitions are marked so that
        // only the outermost release unlocks.
        let _token = LockGuard::new_reentrant(&self.inner);
        f()
    }
}

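/// Runs `f` while holding the given `RawMutex`. Crate-internal shorthand for
/// `RawMutex::lock`.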
pub(crate) fn lock<T>(lock: &RawMutex, f: impl FnOnce() -> T) -> T {
    lock.lock(f)
}

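/// A value protected by a `RawMutex`.
///
/// `Locked::with` grants mutable access to the wrapped data while the lock is
/// held.
///
/// A minimal sketch of guarding a shared counter (illustrative only):
///
/// ```rust,ignore
/// static COUNTER: Locked<u32> = Locked::new(0);
///
/// COUNTER.with(|counter| {
///     *counter += 1;
/// });
/// ```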
pub struct Locked<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> Locked<T> {
    /// Wraps `data` behind a new `RawMutex`.
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Runs `f` with exclusive, mutable access to the wrapped data.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        lock(&self.lock_state, || f(unsafe { &mut *self.data.get() }))
    }
}

unsafe impl<T> Sync for Locked<T> {}

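/// RAII guard for a `GenericRawMutex`: acquires the lock on construction and
/// releases it when dropped, using the stored `RestoreState`.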
struct LockGuard<'a, L: single_core::RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}

impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
    /// Acquires the lock, panicking if it is already held by the current core.
    fn new(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        assert!(!this.token.is_reentry(), "lock is not reentrant");
        this
    }

    /// Acquires the lock, allowing reentrant acquisitions.
    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // SAFETY: the token is released exactly once, when the guard is
            // dropped.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}

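// Integration with the `critical_section` crate: a single global `RawMutex`
// implements `critical_section::Impl`, and the raw value inside `RestoreState`
// is passed through as the crate's `RawRestoreState`.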
#[cfg(impl_critical_section)]
mod critical_section {
    struct CriticalSection;

    critical_section::set_impl!(CriticalSection);

    static CRITICAL_SECTION: super::RawMutex = super::RawMutex::new();

    unsafe impl critical_section::Impl for CriticalSection {
        unsafe fn acquire() -> critical_section::RawRestoreState {
            unsafe { CRITICAL_SECTION.acquire().0 }
        }

        unsafe fn release(token: critical_section::RawRestoreState) {
            unsafe {
                CRITICAL_SECTION.release(super::RestoreState(token));
            }
        }
    }
}