#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/46717278")]
#![cfg_attr(xtensa, feature(asm_experimental_arch))]
#![deny(missing_docs, rust_2018_idioms, rustdoc::all)]
#![cfg_attr(
    semver_checks,
    allow(rustdoc::private_intra_doc_links, rustdoc::broken_intra_doc_links)
)]
#![no_std]

mod fmt;

use core::{cell::UnsafeCell, marker::PhantomData};

pub mod raw;

use raw::{RawLock, SingleCoreInterruptLock};

/// Opaque token returned when a lock is acquired.
///
/// It carries the state that must be restored when the lock is released, and
/// is `!Send` so it cannot leave the context in which the lock was taken.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32, PhantomData<*const ()>);

impl RestoreState {
    /// Bit used to mark a token that was created by a reentrant acquisition.
    const REENTRY_FLAG: u32 = 1 << 31;

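    /// Creates a new `RestoreState` from a raw value.
    ///
    /// A round-trip sketch (illustrative only; assumes a `mutex` whose
    /// `acquire`/`release` take and return this token):
    ///
    /// ```rust,ignore
    /// let token = unsafe { mutex.acquire() };
    /// let raw: u32 = token.inner();
    /// // ... e.g. hand `raw` across an FFI boundary ...
    /// let token = unsafe { RestoreState::new(raw) };
    /// unsafe { mutex.release(token) };
    /// ```
    ///
    /// # Safety
    ///
    /// The caller must ensure that `inner` encodes a state that is valid to
    /// restore for the lock implementation it will be passed to.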
    pub const unsafe fn new(inner: u32) -> Self {
        Self(inner, PhantomData)
    }

    /// Creates a placeholder `RestoreState` that does not correspond to any
    /// lock acquisition.
    pub const fn invalid() -> Self {
        Self(0, PhantomData)
    }

    /// Marks the token as having been created by a reentrant acquisition.
    fn mark_reentry(&mut self) {
        self.0 |= Self::REENTRY_FLAG;
    }

    /// Returns whether the token was created by a reentrant acquisition.
    fn is_reentry(self) -> bool {
        self.0 & Self::REENTRY_FLAG != 0
    }

    /// Returns the raw value of the token.
    pub fn inner(self) -> u32 {
        self.0
    }
}

#[cfg(single_core)]
mod single_core {
    use core::cell::Cell;

    #[repr(transparent)]
    pub(super) struct LockedState {
        locked: Cell<bool>,
    }

    impl LockedState {
        pub const fn new() -> Self {
            Self {
                locked: Cell::new(false),
            }
        }

        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // On a single core, entering the raw lock is enough to guarantee
            // exclusive access; we only need to detect reentry.
            let mut tkn = unsafe { lock.enter() };
            let was_locked = self.locked.replace(true);
            if was_locked {
                tkn.mark_reentry();
            }
            tkn
        }

        /// # Safety
        ///
        /// This function must only be called while the lock is held.
        pub unsafe fn unlock(&self) {
            self.locked.set(false)
        }
    }
}

#[cfg(multi_core)]
mod multi_core {
    use core::sync::atomic::{AtomicUsize, Ordering};

    // A value that `thread_id` can never return, used to mark the mutex as
    // currently unowned.
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    /// Returns an identifier for the current core.
    #[inline]
    fn thread_id() -> usize {
        cfg_if::cfg_if! {
            if #[cfg(all(multi_core, riscv))] {
                riscv::register::mhartid::read()
            } else if #[cfg(all(multi_core, xtensa))] {
                (xtensa_lx::get_processor_id() & 0x2000) as usize
            } else {
                0
            }
        }
    }

    #[repr(transparent)]
    pub(super) struct LockedState {
        owner: AtomicUsize,
    }

    impl LockedState {
        #[inline]
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        #[inline]
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // Enter the raw lock first, then try to claim ownership. If the
            // mutex is already owned by this core, mark the token as a
            // reentry; if it is owned by another core, back out and retry.
            let try_lock = || {
                let mut tkn = unsafe { lock.enter() };

                let current_thread_id = thread_id();

                let try_lock_result = self
                    .owner
                    .compare_exchange(
                        UNUSED_THREAD_ID_VALUE,
                        current_thread_id,
                        Ordering::Acquire,
                        Ordering::Relaxed,
                    )
                    .map(|_| ());

                match try_lock_result {
                    Ok(()) => Some(tkn),
                    Err(owner) if owner == current_thread_id => {
                        tkn.mark_reentry();
                        Some(tkn)
                    }
                    Err(_) => {
                        unsafe { lock.exit(tkn) };
                        None
                    }
                }
            };

            loop {
                if let Some(token) = try_lock() {
                    return token;
                }
            }
        }

        /// # Safety
        ///
        /// This function must only be called by the core that currently owns
        /// the lock.
        #[inline]
        pub unsafe fn unlock(&self) {
            #[cfg(debug_assertions)]
            if self.owner.load(Ordering::Relaxed) != thread_id() {
                panic_attempt_unlock_not_owned();
            }
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }

    #[cfg(debug_assertions)]
    #[inline(never)]
    #[cold]
    fn panic_attempt_unlock_not_owned() -> ! {
        panic!("tried to unlock a mutex locked on a different thread");
    }
}

#[cfg(multi_core)]
use multi_core::LockedState;
#[cfg(single_core)]
use single_core::LockedState;

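/// A mutual exclusion primitive that is generic over the raw locking
/// mechanism used to guard it.
///
/// A minimal usage sketch (illustrative; the `esp_sync` crate path and a
/// single-core target are assumed):
///
/// ```rust,ignore
/// use esp_sync::{raw::SingleCoreInterruptLock, GenericRawMutex};
///
/// static LOCK: GenericRawMutex<SingleCoreInterruptLock> =
///     GenericRawMutex::new(SingleCoreInterruptLock);
///
/// // The closure runs with interrupts masked on the current core.
/// let answer = LOCK.lock(|| 40 + 2);
/// assert_eq!(answer, 42);
/// ```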
pub struct GenericRawMutex<L: RawLock> {
    lock: L,
    inner: LockedState,
}

// Safety: the `RawLock` implementation and `LockedState` together provide
// the synchronization that makes sharing this type sound.
unsafe impl<L: RawLock> Sync for GenericRawMutex<L> {}

impl<L: RawLock> GenericRawMutex<L> {
    /// Creates a new instance that uses the given raw lock.
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            inner: LockedState::new(),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// The returned token must be passed to exactly one call of
    /// [`Self::release`].
    unsafe fn acquire(&self) -> RestoreState {
        self.inner.lock(&self.lock)
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// The token must have been returned by [`Self::acquire`] on this
    /// instance.
    unsafe fn release(&self, token: RestoreState) {
        // A reentrant acquisition did not take the underlying lock, so it
        // must not release it either; the outermost guard does that.
        if !token.is_reentry() {
            unsafe {
                self.inner.unlock();

                self.lock.exit(token)
            }
        }
    }

    /// Runs the callback with this lock taken, panicking if the lock is
    /// already taken by the current thread or core.
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_non_reentrant(self);
        f()
    }

    /// Runs the callback with this lock taken. The lock may be taken
    /// reentrantly from the same thread or core.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_reentrant(self);
        f()
    }
}

/// A mutual exclusion primitive.
///
/// While the lock is held, interrupts are disabled on the current core.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
section, making it preferable for use in multi-core systems."#
)]
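/// A minimal usage sketch (illustrative; the `esp_sync` crate path is
/// assumed):
///
/// ```rust,ignore
/// use esp_sync::RawMutex;
///
/// static MUTEX: RawMutex = RawMutex::new();
///
/// // Interrupts on the current core are masked while the closure runs.
/// MUTEX.lock(|| {
///     // ... access shared state ...
/// });
/// ```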
pub struct RawMutex {
    inner: GenericRawMutex<SingleCoreInterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Creates a new instance.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(SingleCoreInterruptLock),
        }
    }

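    /// Acquires the lock and returns a token that must be passed to
    /// [`Self::release`].
    ///
    /// A sketch of manual pairing (illustrative; prefer [`Self::lock`],
    /// which handles the pairing automatically):
    ///
    /// ```rust,ignore
    /// let mutex = esp_sync::RawMutex::new();
    /// let token = unsafe { mutex.acquire() };
    /// // ... critical section ...
    /// unsafe { mutex.release(token) };
    /// ```
    ///
    /// # Safety
    ///
    /// Each call to `acquire` must be paired with exactly one call to
    /// [`Self::release`], passing the token returned here.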
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// The token must have been returned by [`Self::acquire`] on this
    /// instance.
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs the callback with this lock taken, panicking if the lock is
    /// already taken by the current thread or core.
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock_non_reentrant(f)
    }

    /// Runs the callback with this lock taken. The lock may be taken
    /// reentrantly from the same thread or core.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

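// `RawMutex` can serve as the raw mutex type of `embassy-sync`'s blocking
// mutexes. A sketch (illustrative; assumes the `embassy-sync` and `esp_sync`
// crate names):
//
//     use embassy_sync::blocking_mutex::Mutex;
//
//     static SHARED: Mutex<esp_sync::RawMutex, u32> = Mutex::new(0);
//     let value = SHARED.lock(|v| *v);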
unsafe impl embassy_sync_06::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync_07::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

/// A mutex that grants mutable access to the data it protects, panicking on
/// reentrant locking instead of deadlocking.
pub struct NonReentrantMutex<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> NonReentrantMutex<T> {
    /// Creates a new instance protecting the given data.
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

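    /// Provides exclusive, mutable access to the protected data for the
    /// duration of the callback.
    ///
    /// A minimal sketch (illustrative; the `esp_sync` crate path is
    /// assumed):
    ///
    /// ```rust,ignore
    /// use esp_sync::NonReentrantMutex;
    ///
    /// static COUNTER: NonReentrantMutex<u32> = NonReentrantMutex::new(0);
    ///
    /// COUNTER.with(|c| *c += 1);
    /// let current = COUNTER.with(|c| *c);
    /// ```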
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        self.lock_state
            .lock_non_reentrant(|| f(unsafe { &mut *self.data.get() }))
    }
}

// Safety: access to the inner data is serialized by the non-reentrant lock.
unsafe impl<T: Send> Send for NonReentrantMutex<T> {}
unsafe impl<T: Send> Sync for NonReentrantMutex<T> {}

/// RAII guard that releases the lock when dropped.
struct LockGuard<'a, L: RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}

impl<'a, L: RawLock> LockGuard<'a, L> {
    fn new_non_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        if this.token.is_reentry() {
            panic_lock_not_reentrant();
        }
        this
    }

    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // Safety: the token is released exactly once, when the guard is
            // dropped.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        unsafe { self.lock.release(self.token) };
    }
}

#[inline(never)]
#[cold]
fn panic_lock_not_reentrant() -> ! {
    panic!("lock is not reentrant");
}