esp_hal_embassy/executor/thread.rs

//! Multicore-aware thread-mode embassy executor.

use core::marker::PhantomData;

use embassy_executor::Spawner;
#[cfg(all(low_power_wait, multi_core))]
use esp_hal::interrupt::software::SoftwareInterrupt;
use esp_hal::{interrupt::Priority, system::Cpu};
#[cfg(low_power_wait)]
use portable_atomic::{AtomicBool, Ordering};

use super::InnerExecutor;

pub(crate) const THREAD_MODE_CONTEXT: usize = 16;

/// Global atomic flags (one per core) used to keep track of whether there is
/// work to do, since `sev()` is not available on either Xtensa or RISC-V.
#[cfg(low_power_wait)]
static SIGNAL_WORK_THREAD_MODE: [AtomicBool; Cpu::COUNT] =
    [const { AtomicBool::new(false) }; Cpu::COUNT];

pub(crate) fn pend_thread_mode(_core: usize) {
    #[cfg(low_power_wait)]
    {
        // Signal that there is work to be done.
        SIGNAL_WORK_THREAD_MODE[_core].store(true, Ordering::Relaxed);

        // If we are pending a task on the current core, we're done. Otherwise, we
        // need to make sure the other core wakes up.
        #[cfg(multi_core)]
        if _core != Cpu::current() as usize {
            // We don't actually need the interrupt handler to do any useful
            // work; raising the software interrupt is enough to kick `waiti`
            // into returning (the handler just clears the interrupt source).
            unsafe { SoftwareInterrupt::<3>::steal().raise() };
        }
    }
}

/// Thread mode executor.
///
/// This is the simplest and most common kind of executor. It runs in thread
/// mode (at the lowest priority level).
#[cfg_attr(multi_core, doc = "")]
#[cfg_attr(
    multi_core,
    doc = "This executor is safe to use on multiple cores. You need to
create one instance per core. The executors don't steal tasks from each other."
)]
pub struct Executor {
    inner: InnerExecutor,
    not_send: PhantomData<*mut ()>,
}

impl Executor {
    /// Create a new Executor.
    #[cfg_attr(
        all(multi_core, low_power_wait),
        doc = r#"

This uses software interrupt 3 to wake the other core(s), so that interrupt is not available for any other use."#
    )]
    pub fn new() -> Self {
        Self {
            inner: InnerExecutor::new(
                // Priority 1 means the timer queue can be accessed at interrupt priority 1 - for
                // the thread mode executor it needs to be one higher than the base run level, to
                // allow alarm interrupts to be handled.
                Priority::Priority1,
                (THREAD_MODE_CONTEXT + Cpu::current() as usize) as *mut (),
            ),
            not_send: PhantomData,
        }
    }

    /// Run the executor.
    ///
    /// The `init` closure is called with a [`Spawner`] that spawns tasks on
    /// this executor. Use it to spawn the initial task(s). After `init`
    /// returns, the executor starts running the tasks.
    ///
    /// To spawn more tasks later, you may keep copies of the [`Spawner`] (it is
    /// `Copy`), for example by passing it as an argument to the initial
    /// tasks.
    ///
    /// This function requires `&'static mut self`. This means you have to store
    /// the Executor instance in a place where it lives forever and grants you
    /// mutable access. There are a few ways to do this:
    ///
    /// - a [StaticCell](https://docs.rs/static_cell/latest/static_cell/) (safe, see the example below)
    /// - a `static mut` (unsafe, not recommended)
    /// - a local variable in a function you know never returns (like `fn main()
    ///   -> !`), upgrading its lifetime with `transmute`. (unsafe)
    ///
    /// This function never returns.
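    ///
    /// # Example
    ///
    /// A minimal sketch of the `StaticCell` approach. The `static_cell`
    /// dependency, the `example_task` task and the `start` function are
    /// illustrative assumptions, not items provided by this crate.
    ///
    /// ```rust,ignore
    /// use esp_hal_embassy::Executor;
    /// use static_cell::StaticCell;
    ///
    /// #[embassy_executor::task]
    /// async fn example_task() {
    ///     // Your async work goes here.
    /// }
    ///
    /// fn start() -> ! {
    ///     // StaticCell hands out a `&'static mut Executor` without `unsafe`.
    ///     static EXECUTOR: StaticCell<Executor> = StaticCell::new();
    ///     let executor = EXECUTOR.init(Executor::new());
    ///
    ///     executor.run(|spawner| {
    ///         // Spawn the initial task(s); more can be spawned later through
    ///         // copies of `spawner`.
    ///         spawner.must_spawn(example_task());
    ///     })
    /// }
    /// ```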
    pub fn run(&'static mut self, init: impl FnOnce(Spawner)) -> ! {
        #[cfg(all(multi_core, low_power_wait))]
        unwrap!(esp_hal::interrupt::enable(
            esp_hal::peripherals::Interrupt::FROM_CPU_INTR3,
            Priority::min(),
        ));

        self.inner.init();

        init(self.inner.inner.spawner());

        #[cfg(low_power_wait)]
        let cpu = Cpu::current() as usize;

        loop {
            unsafe { self.inner.inner.poll() };

            #[cfg(low_power_wait)]
            Self::wait_impl(cpu);
        }
    }

    #[cfg(all(xtensa, low_power_wait))]
    // This function must be in RAM. Loading parts of it from flash can cause a race
    // that results in the core not waking up. Placing `wait_impl` in RAM ensures that
    // it takes less time to run than the interrupt handler that would clear the
    // interrupt source.
    #[macros::ram]
    fn wait_impl(cpu: usize) {
        // Manual critical section implementation that only masks interrupt handlers.
        // We must not acquire the cross-core lock on dual-core systems because that
        // would prevent the other core from doing useful work while this core is
        // sleeping.
        let token: u32;
        unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token) };

        // We do not care about race conditions between the load and store operations;
        // interrupts will only ever set this value to true.
        // Acquire is not semantically necessary here, but at the time of writing it is
        // slightly faster than Relaxed.
        if SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Acquire) {
            // If there is work to do, exit the critical section and loop back to
            // polling.
            unsafe {
                core::arch::asm!(
                    "wsr.ps {0}",
                    "rsync",
                    in(reg) token
                );
            }
        } else {
            // `waiti` sets `PS.INTLEVEL` when slipping into sleep. Because critical
            // sections on Xtensa are implemented by raising `PS.INTLEVEL`, the critical
            // section effectively ends here. Take care not to add code after `waiti`
            // if it needs to be inside the CS.
            // Do not lower INTLEVEL below the current value.
            match token & 0x0F {
                0 => unsafe { core::arch::asm!("waiti 0") },
                1 => unsafe { core::arch::asm!("waiti 1") },
                2 => unsafe { core::arch::asm!("waiti 2") },
                3 => unsafe { core::arch::asm!("waiti 3") },
                4 => unsafe { core::arch::asm!("waiti 4") },
                _ => unsafe { core::arch::asm!("waiti 5") },
            }
        }
        // If this races and some waker sets the signal, we'll reset it, but still poll.
        SIGNAL_WORK_THREAD_MODE[cpu].store(false, Ordering::Relaxed);
    }

    #[cfg(all(riscv, low_power_wait))]
    fn wait_impl(cpu: usize) {
        // We do not care about race conditions between the load and store operations;
        // interrupts will only ever set this value to true.
        critical_section::with(|_| {
            // If there is work to do, loop back to polling.
            if !SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Relaxed) {
                // If not, wait for an interrupt.
                unsafe { core::arch::asm!("wfi") };
            }
        });
        // If an interrupt occurred while waiting, it will be serviced here.
        // If this races and some waker sets the signal, we'll reset it, but still poll.
        SIGNAL_WORK_THREAD_MODE[cpu].store(false, Ordering::Relaxed);
    }
}

impl Default for Executor {
    fn default() -> Self {
        Self::new()
    }
}