esp_hal_embassy/executor/thread.rs

//! Multicore-aware thread-mode embassy executor.

use core::marker::PhantomData;

use embassy_executor::Spawner;
#[cfg(all(low_power_wait, multi_core))]
use esp_hal::interrupt::software::SoftwareInterrupt;
use esp_hal::{interrupt::Priority, system::Cpu};
use portable_atomic::{AtomicBool, Ordering};

use super::InnerExecutor;

/// Context value marking thread-mode executors. Each core's executor passes
/// `THREAD_MODE_CONTEXT + core index` to the inner executor (see
/// [`Executor::new`]).
pub(crate) const THREAD_MODE_CONTEXT: usize = 16;

/// Global atomics used to keep track of whether there is work to do, since
/// `sev()` is not available on either Xtensa or RISC-V.
static SIGNAL_WORK_THREAD_MODE: [AtomicBool; Cpu::COUNT] =
    [const { AtomicBool::new(false) }; Cpu::COUNT];

pub(crate) fn pend_thread_mode(core: usize) {
    // Signal that there is work to be done.
    SIGNAL_WORK_THREAD_MODE[core].store(true, Ordering::Relaxed);

    // If we are pending a task on the current core, we're done. Otherwise, we
    // need to make sure the other core wakes up.
    #[cfg(all(low_power_wait, multi_core))]
    if core != Cpu::current() as usize {
        // Wake the other core by raising software interrupt 3. We don't need
        // the interrupt handler to do any actual work; raising the interrupt
        // is enough to kick `waiti` so the other core resumes polling.
        unsafe { SoftwareInterrupt::<3>::steal().raise() };
    }
}

/// Callbacks to run code before/after polling the task queue.
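///
/// A minimal sketch of an implementation. The `Hooks` name and the hook bodies
/// are placeholders, and the import path assumes the trait is re-exported at
/// the crate root:
///
/// ```ignore
/// use esp_hal_embassy::Callbacks;
///
/// struct Hooks;
///
/// impl Callbacks for Hooks {
///     fn before_poll(&mut self) {
///         // e.g. note the time before the executor polls its tasks
///     }
///
///     fn on_idle(&mut self) {
///         // e.g. prepare the chip for light sleep while no task is ready
///     }
/// }
/// ```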
pub trait Callbacks {
    /// Called just before polling the executor.
    fn before_poll(&mut self);

    /// Called after the executor is polled, if there is no work scheduled.
    ///
    /// Note that tasks can become ready at any point during the execution
    /// of this function.
    fn on_idle(&mut self);
}

/// Thread mode executor.
///
/// This is the simplest and most common kind of executor. It runs on thread
/// mode (at the lowest priority level).
#[cfg_attr(multi_core, doc = "")]
#[cfg_attr(
    multi_core,
    doc = "This executor is safe to use on multiple cores. You need to
create one instance per core. The executors don't steal tasks from each other."
)]
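#[cfg_attr(
    multi_core,
    doc = r#"

A sketch of the one-executor-per-core pattern. The `CpuControl`/`Stack` names
follow esp-hal's dual-core examples; exact module paths differ between esp-hal
versions, so treat this as an outline rather than exact API:

```ignore
use esp_hal::system::{CpuControl, Stack};
use esp_hal_embassy::Executor;
use static_cell::StaticCell;

static APP_CORE_STACK: StaticCell<Stack<8192>> = StaticCell::new();
static APP_CORE_EXECUTOR: StaticCell<Executor> = StaticCell::new();

let mut cpu_control = CpuControl::new(peripherals.CPU_CTRL);
let _guard = cpu_control
    .start_app_core(APP_CORE_STACK.init(Stack::new()), || {
        // The second core runs its own executor; tasks are not shared.
        let executor = APP_CORE_EXECUTOR.init(Executor::new());
        executor.run(|spawner| {
            // spawn this core's tasks here
        });
    })
    .unwrap();
```"#
)]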
pub struct Executor {
    inner: InnerExecutor,
    cpu: Cpu,
    not_send: PhantomData<*mut ()>,
}

impl Executor {
    /// Create a new Executor.
    #[cfg_attr(
        all(multi_core, low_power_wait),
        doc = r#"

This will use software interrupt 3 to wake the other core(s), so that interrupt isn't available for anything else."#
    )]
    pub fn new() -> Self {
        let cpu = Cpu::current();
        Self {
            inner: InnerExecutor::new(
                // Priority 1 means the timer queue can be accessed at interrupt priority 1 - for
                // the thread mode executor it needs to be one higher than the base run level, to
                // allow alarm interrupts to be handled.
                Priority::Priority1,
                (THREAD_MODE_CONTEXT + cpu as usize) as *mut (),
            ),
            cpu,
            not_send: PhantomData,
        }
    }

    /// Run the executor.
    ///
    /// The `init` closure is called with a [`Spawner`] that spawns tasks on
    /// this executor. Use it to spawn the initial task(s). After `init`
    /// returns, the executor starts running the tasks.
    ///
    /// To spawn more tasks later, you may keep copies of the [`Spawner`] (it is
    /// `Copy`), for example by passing it as an argument to the initial
    /// tasks.
    ///
    /// This function requires `&'static mut self`. This means you have to store
    /// the Executor instance in a place where it'll live forever and that grants
    /// you mutable access. There are a few ways to do this:
    ///
    /// - a [StaticCell](https://docs.rs/static_cell/latest/static_cell/) (safe)
    /// - a `static mut` (unsafe, not recommended)
    /// - a local variable in a function you know never returns (like `fn main() -> !`), upgrading
    ///   its lifetime with `transmute`. (unsafe)
    ///
    /// This function never returns.
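    ///
    /// A minimal sketch of the `StaticCell` approach (`blink` is a made-up
    /// placeholder task):
    ///
    /// ```ignore
    /// use esp_hal_embassy::Executor;
    /// use static_cell::StaticCell;
    ///
    /// #[embassy_executor::task]
    /// async fn blink() {
    ///     // ... this core's initial task ...
    /// }
    ///
    /// static EXECUTOR: StaticCell<Executor> = StaticCell::new();
    ///
    /// let executor = EXECUTOR.init(Executor::new());
    /// executor.run(|spawner| {
    ///     spawner.spawn(blink()).unwrap();
    /// });
    /// ```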
    pub fn run(&'static mut self, init: impl FnOnce(Spawner)) -> ! {
        #[cfg_attr(not(low_power_wait), expect(dead_code, reason = "cpu index is unused"))]
        struct NoHooks(usize);

        impl Callbacks for NoHooks {
            fn before_poll(&mut self) {
                // Clear the pending-work flag before polling; anything that
                // becomes ready during the poll will set it again.
                #[cfg(low_power_wait)]
                SIGNAL_WORK_THREAD_MODE[self.0].store(false, Ordering::Relaxed);
            }

            fn on_idle(&mut self) {}
        }

        self.run_inner(init, NoHooks(self.cpu as usize))
    }

    /// Run the executor with callbacks.
    ///
    /// See [Callbacks] for details on when the callbacks are called.
    ///
    /// See [Self::run] for more information about running the executor.
    ///
    /// This function never returns.
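    ///
    /// A sketch of the intended use, assuming a `Hooks` type implementing
    /// [Callbacks] (see the trait docs) and an `EXECUTOR` static as in
    /// [Self::run]:
    ///
    /// ```ignore
    /// let executor = EXECUTOR.init(Executor::new());
    /// executor.run_with_callbacks(
    ///     |spawner| {
    ///         spawner.spawn(blink()).unwrap();
    ///     },
    ///     Hooks,
    /// );
    /// ```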
    pub fn run_with_callbacks(
        &'static mut self,
        init: impl FnOnce(Spawner),
        callbacks: impl Callbacks,
    ) -> ! {
        struct Hooks<'a, CB: Callbacks>(CB, &'a AtomicBool);

        impl<CB: Callbacks> Callbacks for Hooks<'_, CB> {
            fn before_poll(&mut self) {
                // Clear the flag unconditionally since we'll use it to decide
                // if on_idle should be called.
                self.1.store(false, Ordering::Relaxed);

                self.0.before_poll()
            }

            fn on_idle(&mut self) {
                // Make sure we only call on_idle if the executor would otherwise go to sleep.
                if !self.1.load(Ordering::Acquire) {
                    self.0.on_idle();
                }
            }
        }

        self.run_inner(
            init,
            Hooks(callbacks, &SIGNAL_WORK_THREAD_MODE[self.cpu as usize]),
        )
    }

    fn run_inner(&'static mut self, init: impl FnOnce(Spawner), mut hooks: impl Callbacks) -> ! {
        // Enable the software interrupt that `pend_thread_mode` raises to wake
        // this core from `waiti`/`wfi`.
        #[cfg(all(multi_core, low_power_wait))]
        unwrap!(esp_hal::interrupt::enable(
            esp_hal::peripherals::Interrupt::FROM_CPU_INTR3,
            Priority::min(),
        ));

        self.inner.init();

        init(self.inner.inner.spawner());

        loop {
            hooks.before_poll();

            unsafe { self.inner.inner.poll() };

            hooks.on_idle();

            // Sleep unless new work arrived while polling.
            #[cfg(low_power_wait)]
            Self::wait_impl(self.cpu as usize);
        }
    }

    #[cfg(all(xtensa, low_power_wait))]
    // This function must be in RAM. Loading parts of it from flash can cause a race
    // that results in the core not waking up. Placing `wait_impl` in RAM ensures that
    // it executes faster than the interrupt handler that would clear the interrupt
    // source.
    #[macros::ram]
    fn wait_impl(cpu: usize) {
        // Manual critical section implementation that only masks interrupt handlers.
        // We must not acquire the cross-core lock on dual-core systems because that
        // would prevent the other core from doing useful work while this core is
        // sleeping.
        let token: u32;
        unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token) };

        // We do not care about race conditions between the load and store operations;
        // interrupts will only set this value to true.
        // Acquire is not semantically necessary here, but at this time it's slightly
        // faster than Relaxed.
        if SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Acquire) {
            // If there is work to do, exit the critical section and loop back to
            // polling. Restoring the saved PS value re-enables interrupts.
            unsafe {
                core::arch::asm!(
                    "wsr.ps {0}",
                    "rsync",
                    in(reg) token
                );
            }
        } else {
            // `waiti` sets `PS.INTLEVEL` when slipping into sleep because critical
            // sections on Xtensa are implemented by raising `PS.INTLEVEL`.
            // The critical section ends here. Take care not to add code after
            // `waiti` if it needs to be inside the CS.
            // Do not lower INTLEVEL below the current value.
            match token & 0x0F {
                0 => unsafe { core::arch::asm!("waiti 0") },
                1 => unsafe { core::arch::asm!("waiti 1") },
                2 => unsafe { core::arch::asm!("waiti 2") },
                3 => unsafe { core::arch::asm!("waiti 3") },
                4 => unsafe { core::arch::asm!("waiti 4") },
                _ => unsafe { core::arch::asm!("waiti 5") },
            }
        }
    }

    #[cfg(all(riscv, low_power_wait))]
    fn wait_impl(cpu: usize) {
        // We do not care about race conditions between the load and store operations;
        // interrupts will only set this value to true.
        critical_section::with(|_| {
            // If there is work to do, skip the wait and loop back to polling.
            if !SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Relaxed) {
                // If not, wait for an interrupt. `wfi` resumes once an interrupt
                // becomes pending, even while interrupts are masked by the
                // critical section.
                unsafe { core::arch::asm!("wfi") };
            }
        });
    }
}

impl Default for Executor {
    fn default() -> Self {
        Self::new()
    }
}