1#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
5#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/46717278")]
6#![cfg_attr(xtensa, feature(asm_experimental_arch))]
7#![deny(missing_docs, rust_2018_idioms, rustdoc::all)]
8#![cfg_attr(
10 semver_checks,
11 allow(rustdoc::private_intra_doc_links, rustdoc::broken_intra_doc_links)
12)]
13#![no_std]
14
15mod fmt;
17
18use core::{cell::UnsafeCell, marker::PhantomData};
19
20pub mod raw;
21
22use raw::{RawLock, SingleCoreInterruptLock};
23
/// Opaque token returned when acquiring a lock, to be passed back when
/// releasing it.
///
/// The most significant bit is reserved internally as a re-entry marker; the
/// remaining bits carry the lock implementation's saved restore state. The
/// `PhantomData<*const ()>` field makes the token `!Send`/`!Sync`, so it
/// cannot leave the core/thread that acquired the lock.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32, PhantomData<*const ()>);
40
impl RestoreState {
    // Set on a token when the lock was already held at acquisition time;
    // `GenericRawMutex::release` skips the actual unlock for such tokens.
    const REENTRY_FLAG: u32 = 1 << 31;

    /// Creates a `RestoreState` from a raw inner value.
    ///
    /// # Safety
    ///
    /// `inner` must be a value previously obtained via [`Self::inner`] (or
    /// otherwise valid for the lock implementation in use); passing an
    /// arbitrary value may corrupt the saved restore state.
    pub const unsafe fn new(inner: u32) -> Self {
        Self(inner, PhantomData)
    }

    /// Creates a placeholder token that does not correspond to any
    /// acquired lock.
    pub const fn invalid() -> Self {
        Self(0, PhantomData)
    }

    /// Marks this token as a re-entrant acquisition.
    #[inline]
    fn mark_reentry(&mut self) {
        self.0 |= Self::REENTRY_FLAG;
    }

    /// Returns `true` if this token was produced by a re-entrant
    /// acquisition.
    #[inline]
    fn is_reentry(self) -> bool {
        self.0 & Self::REENTRY_FLAG != 0
    }

    /// Returns the raw inner value, suitable for [`Self::new`].
    #[inline]
    pub fn inner(self) -> u32 {
        self.0
    }
}
77
#[cfg(single_core)]
mod single_core {
    use core::cell::Cell;

    /// Tracks whether the lock is currently held. On a single core a plain
    /// `Cell` suffices: the critical section entered via the raw lock
    /// prevents concurrent access.
    #[repr(transparent)]
    pub(super) struct LockedState {
        locked: Cell<bool>,
    }

    impl LockedState {
        /// Creates a new, unlocked state.
        pub const fn new() -> Self {
            Self {
                locked: Cell::new(false),
            }
        }

        /// Acquires the lock, marking the returned token as a re-entry if
        /// the lock was already held.
        #[inline]
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // Enter the critical section first so the flag update below
            // cannot be preempted.
            let mut token = unsafe { lock.enter() };
            if self.locked.replace(true) {
                token.mark_reentry();
            }
            token
        }

        /// Releases the lock.
        #[inline]
        pub unsafe fn unlock(&self) {
            self.locked.set(false)
        }
    }
}
114
#[cfg(multi_core)]
mod multi_core {
    use core::sync::atomic::{AtomicUsize, Ordering};

    // Sentinel "owner" value that no real core/thread id can take, used to
    // mark the lock as unowned.
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    /// Returns an identifier for the current core/thread.
    #[inline]
    fn thread_id() -> usize {
        cfg_if::cfg_if! {
            if #[cfg(all(multi_core, riscv))] {
                riscv::register::mhartid::read()
            } else if #[cfg(all(multi_core, xtensa))] {
                // NOTE(review): presumably bit 13 of the PRID register
                // distinguishes the two cores — confirm against the TRM.
                (xtensa_lx::get_processor_id() & 0x2000) as usize
            } else {
                0
            }
        }
    }

    /// Tracks which core/thread currently owns the lock.
    #[repr(transparent)]
    pub(super) struct LockedState {
        owner: AtomicUsize,
    }

    impl LockedState {
        /// Creates a new, unowned state.
        #[inline]
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        /// Spins until the lock is acquired (or re-entered by its current
        /// owner), returning the restore token.
        #[inline]
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            let try_lock = || {
                // Enter the critical section before inspecting the owner so
                // the check-and-claim below cannot be preempted locally.
                let mut tkn = unsafe { lock.enter() };

                let current_thread_id = thread_id();

                // Claim the lock only if it is currently unowned. `Acquire`
                // on success pairs with the `Release` store in `unlock`.
                let try_lock_result = self
                    .owner
                    .compare_exchange(
                        UNUSED_THREAD_ID_VALUE,
                        current_thread_id,
                        Ordering::Acquire,
                        Ordering::Relaxed,
                    )
                    .map(|_| ());

                match try_lock_result {
                    Ok(()) => Some(tkn),
                    // Already owned by this core: flag the token as a
                    // re-entry so the eventual release of this inner token
                    // skips the unlock.
                    Err(owner) if owner == current_thread_id => {
                        tkn.mark_reentry();
                        Some(tkn)
                    }
                    // Owned by another core: undo the `enter` before
                    // retrying so the critical section is not held while
                    // spinning.
                    Err(_) => {
                        unsafe { lock.exit(tkn) };
                        None
                    }
                }
            };

            loop {
                if let Some(token) = try_lock() {
                    return token;
                }
            }
        }

        /// Releases the lock.
        ///
        /// # Safety
        ///
        /// Must only be called while the current core/thread owns the lock.
        #[inline]
        pub unsafe fn unlock(&self) {
            // Debug-only ownership check; releasing a lock owned by another
            // core would corrupt the lock state.
            #[cfg(debug_assertions)]
            if self.owner.load(Ordering::Relaxed) != thread_id() {
                panic_attempt_unlock_not_owned();
            }
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }

    // Out-of-line, cold panic path so the hot unlock path stays small.
    #[cfg(debug_assertions)]
    #[inline(never)]
    #[cold]
    fn panic_attempt_unlock_not_owned() -> ! {
        panic!("tried to unlock a mutex locked on a different thread");
    }
}
216
217#[cfg(multi_core)]
218use multi_core::LockedState;
219#[cfg(single_core)]
220use single_core::LockedState;
221
/// A mutual-exclusion primitive built on top of a [`RawLock`]
/// implementation.
///
/// Re-entrant acquisition on the same core/thread is detected and handled:
/// [`Self::lock`] allows it, while [`Self::lock_non_reentrant`] panics.
pub struct GenericRawMutex<L: RawLock> {
    lock: L,
    inner: LockedState,
}

// SAFETY: access to the shared `inner` state is serialized by the lock
// protocol itself (critical section entry plus, on multi-core, atomic owner
// tracking), so sharing the mutex between contexts is sound.
unsafe impl<L: RawLock> Sync for GenericRawMutex<L> {}
232impl<L: RawLock> GenericRawMutex<L> {
233 pub const fn new(lock: L) -> Self {
235 Self {
236 lock,
237 inner: LockedState::new(),
238 }
239 }
240
241 #[inline]
249 unsafe fn acquire(&self) -> RestoreState {
250 self.inner.lock(&self.lock)
251 }
252
253 #[inline]
261 unsafe fn release(&self, token: RestoreState) {
262 if !token.is_reentry() {
263 unsafe {
264 self.inner.unlock();
265
266 self.lock.exit(token)
267 }
268 }
269 }
270
271 #[inline]
276 pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
277 let _token = LockGuard::new_non_reentrant(self);
278 f()
279 }
280
281 #[inline]
283 pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
284 let _token = LockGuard::new_reentrant(self);
285 f()
286 }
287}
288
/// A raw mutex based on [`SingleCoreInterruptLock`].
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
section, making it preferable for use in multi-core systems."#
)]
pub struct RawMutex {
    inner: GenericRawMutex<SingleCoreInterruptLock>,
}
300
301impl Default for RawMutex {
302 #[inline]
303 fn default() -> Self {
304 Self::new()
305 }
306}
307
impl RawMutex {
    /// Creates a new, unlocked mutex.
    #[inline]
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(SingleCoreInterruptLock),
        }
    }

    /// Manually acquires the lock.
    ///
    /// # Safety
    ///
    /// The returned token must be passed to [`Self::release`] on this same
    /// mutex. The token is `!Send`, so release happens on the acquiring
    /// core/thread; nested acquisitions should be released in reverse
    /// order.
    #[inline]
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Manually releases the lock.
    ///
    /// # Safety
    ///
    /// `token` must be the value returned by the matching
    /// [`Self::acquire`] call on this mutex.
    #[inline]
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs `f` with the lock held, panicking on re-entrant use.
    #[inline]
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock_non_reentrant(f)
    }

    /// Runs `f` with the lock held, allowing re-entrant use from the same
    /// context.
    #[inline]
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}
358
// SAFETY: `RawMutex::lock` provides the mutual exclusion required by the
// embassy-sync 0.6 `RawMutex` contract.
unsafe impl embassy_sync_06::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // Delegates to the re-entrant variant.
        self.inner.lock(f)
    }
}
367
// SAFETY: `RawMutex::lock` provides the mutual exclusion required by the
// embassy-sync 0.7 `RawMutex` contract.
unsafe impl embassy_sync_07::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // Delegates to the re-entrant variant.
        self.inner.lock(f)
    }
}
376
// SAFETY: `RawMutex::lock` provides the mutual exclusion required by the
// embassy-sync 0.8 `RawMutex` contract.
unsafe impl embassy_sync_08::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        // Delegates to the re-entrant variant.
        self.inner.lock(f)
    }
}
385
/// A mutex that owns its data and grants exclusive (`&mut`) access through
/// [`Self::with`].
///
/// Re-entrant use (calling [`Self::with`] again from within the closure)
/// panics instead of granting aliasing mutable access.
pub struct NonReentrantMutex<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}
394
395impl<T> NonReentrantMutex<T> {
396 pub const fn new(data: T) -> Self {
398 Self {
399 lock_state: RawMutex::new(),
400 data: UnsafeCell::new(data),
401 }
402 }
403
404 pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
408 self.lock_state
409 .lock_non_reentrant(|| f(unsafe { &mut *self.data.get() }))
410 }
411}
412
// SAFETY: the wrapped value is only reachable through the mutex, so moving
// the mutex to another context is sound whenever `T: Send`.
unsafe impl<T: Send> Send for NonReentrantMutex<T> {}
// SAFETY: `with` hands out `&mut T` only under the non-reentrant lock, so
// shared references to the mutex never permit concurrent access to `T`.
unsafe impl<T: Send> Sync for NonReentrantMutex<T> {}
415
// RAII guard: acquires the lock on construction and releases it via `Drop`,
// so the lock is freed even if the locked closure panics/unwinds.
struct LockGuard<'a, L: RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}
420
421impl<'a, L: RawLock> LockGuard<'a, L> {
422 #[inline]
423 fn new_non_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
424 let this = Self::new_reentrant(lock);
425 if this.token.is_reentry() {
426 panic_lock_not_reentrant();
427 }
428 this
429 }
430
431 #[inline]
432 fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
433 let token = unsafe {
434 lock.acquire()
438 };
439
440 Self { lock, token }
441 }
442}
443
impl<L: RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        // SAFETY: `token` was produced by `acquire` on this same mutex in
        // `LockGuard::new_reentrant` and is released exactly once here.
        unsafe { self.lock.release(self.token) };
    }
}
449
// Out-of-line, cold panic path so the hot lock path stays small.
#[inline(never)]
#[cold]
fn panic_lock_not_reentrant() -> ! {
    panic!("lock is not reentrant");
}