esp_hal_embassy/executor/thread.rs

//! Multicore-aware thread-mode embassy executor.

use core::marker::PhantomData;

use embassy_executor::Spawner;
#[cfg(all(low_power_wait, multi_core))]
use esp_hal::interrupt::software::SoftwareInterrupt;
use esp_hal::{interrupt::Priority, system::Cpu};
#[cfg(low_power_wait)]
use portable_atomic::{AtomicBool, Ordering};

use super::InnerExecutor;

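/// Base context value for thread-mode executors. The current CPU index is added to it
/// (see `Executor::new` and `pend_thread_mode`) so each core's executor gets a distinct
/// raw-executor context.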
pub(crate) const THREAD_MODE_CONTEXT: usize = 16;

/// Global atomic used to keep track of whether there is work to do, since
/// `sev()` is not available on either Xtensa or RISC-V.
#[cfg(low_power_wait)]
static SIGNAL_WORK_THREAD_MODE: [AtomicBool; Cpu::COUNT] =
    [const { AtomicBool::new(false) }; Cpu::COUNT];

pub(crate) fn pend_thread_mode(_core: usize) {
    #[cfg(low_power_wait)]
    {
        // Signal that there is work to be done.
        SIGNAL_WORK_THREAD_MODE[_core].store(true, Ordering::Relaxed);

        // If we are pending a task on the current core, we're done. Otherwise, we
        // need to make sure the other core wakes up.
        #[cfg(multi_core)]
        if _core != Cpu::current() as usize {
            // We don't actually need the interrupt handler to do any work (it only
            // clears the interrupt); raising it just kicks `waiti` on the other
            // core so it returns.
            unsafe { SoftwareInterrupt::<3>::steal().raise() };
        }
    }
}

/// Thread mode executor.
///
/// This is the simplest and most common kind of executor. It runs in thread
/// mode (at the lowest priority level).
#[cfg_attr(multi_core, doc = "")]
#[cfg_attr(
    multi_core,
    doc = "This executor is safe to use on multiple cores. You need to
create one instance per core. The executors don't steal tasks from each other."
)]
pub struct Executor {
    inner: InnerExecutor,
    not_send: PhantomData<*mut ()>,
}

impl Executor {
    /// Create a new Executor.
    #[cfg_attr(
        all(multi_core, low_power_wait),
        doc = r#"

This will use software interrupt 3 to wake the other core(s); that interrupt is then not available for anything else."#
    )]
    pub fn new() -> Self {
        Self {
            inner: InnerExecutor::new(
                // Priority 1 means the timer queue can be accessed at interrupt priority 1 - for
                // the thread mode executor it needs to be one higher than the base run level, to
                // allow alarm interrupts to be handled.
                Priority::Priority1,
                (THREAD_MODE_CONTEXT + Cpu::current() as usize) as *mut (),
            ),
            not_send: PhantomData,
        }
    }

    /// Run the executor.
    ///
    /// The `init` closure is called with a [`Spawner`] that spawns tasks on
    /// this executor. Use it to spawn the initial task(s). After `init`
    /// returns, the executor starts running the tasks.
    ///
    /// To spawn more tasks later, you may keep copies of the [`Spawner`] (it is
    /// `Copy`), for example by passing it as an argument to the initial
    /// tasks.
    ///
    /// This function requires `&'static mut self`. This means you have to store
    /// the Executor instance in a place where it'll live forever and that
    /// grants you mutable access. There are a few ways to do this:
    ///
    /// - a [StaticCell](https://docs.rs/static_cell/latest/static_cell/) (safe)
    /// - a `static mut` (unsafe, not recommended)
    /// - a local variable in a function you know never returns (like `fn main()
    ///   -> !`), upgrading its lifetime with `transmute`. (unsafe)
    ///
    /// This function never returns.
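    ///
    /// A minimal usage sketch with [StaticCell](https://docs.rs/static_cell/latest/static_cell/)
    /// (the `my_task` task is illustrative, not part of this crate):
    ///
    /// ```rust,ignore
    /// use static_cell::StaticCell;
    ///
    /// #[embassy_executor::task]
    /// async fn my_task() {
    ///     // ...
    /// }
    ///
    /// static EXECUTOR: StaticCell<Executor> = StaticCell::new();
    ///
    /// let executor = EXECUTOR.init(Executor::new());
    /// executor.run(|spawner| {
    ///     spawner.spawn(my_task()).unwrap();
    /// });
    /// ```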
    pub fn run(&'static mut self, init: impl FnOnce(Spawner)) -> ! {
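        // Enable the software interrupt used for cross-core wake-ups, so that another
        // core raising it (see `pend_thread_mode`) kicks this core out of `waiti`/`wfi`.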
        #[cfg(all(multi_core, low_power_wait))]
        unwrap!(esp_hal::interrupt::enable(
            esp_hal::peripherals::Interrupt::FROM_CPU_INTR3,
            Priority::min(),
        ));

        self.inner.init();

        init(self.inner.inner.spawner());

        #[cfg(low_power_wait)]
        let cpu = Cpu::current() as usize;

        loop {
            unsafe { self.inner.inner.poll() };

            #[cfg(low_power_wait)]
            Self::wait_impl(cpu);
        }
    }

    #[cfg(all(xtensa, low_power_wait))]
    fn wait_impl(cpu: usize) {
        // Manual critical section implementation that only masks interrupt handlers.
        // We must not take the cross-core lock on dual-core systems because that would
        // prevent the other core from doing useful work while this core is sleeping.
        let token: critical_section::RawRestoreState;
        unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token) };

        // We do not care about race conditions between the load and store operations;
        // interrupts will only set this value to true.
        // Acquire makes no sense here, but at this time it's slightly faster than Relaxed.
        if SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Acquire) {
            // If there is work to do, exit the critical section and loop back to polling.
            unsafe {
                core::arch::asm!(
                    "wsr.ps {0}",
                    "rsync",
                    in(reg) token
                );
            }
        } else {
            // `waiti` sets `PS.INTLEVEL` to its operand when slipping into sleep. Because
            // critical sections on Xtensa are implemented by raising `PS.INTLEVEL`, the
            // critical section ends here; take care not to add code after `waiti` that
            // needs to be inside the CS.
            // Do not lower INTLEVEL below the current value.
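            // `waiti` takes its interrupt level as an immediate operand, so dispatch on
            // the saved level rather than passing it in a register.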
            match token & 0x0F {
                0 => unsafe { core::arch::asm!("waiti 0") },
                1 => unsafe { core::arch::asm!("waiti 1") },
                2 => unsafe { core::arch::asm!("waiti 2") },
                3 => unsafe { core::arch::asm!("waiti 3") },
                4 => unsafe { core::arch::asm!("waiti 4") },
                _ => unsafe { core::arch::asm!("waiti 5") },
            }
        }
        // If this races and some waker sets the signal, we'll reset it, but still poll.
        SIGNAL_WORK_THREAD_MODE[cpu].store(false, Ordering::Relaxed);
    }

    #[cfg(all(riscv, low_power_wait))]
    fn wait_impl(cpu: usize) {
        // We do not care about race conditions between the load and store operations;
        // interrupts will only set this value to true.
        critical_section::with(|_| {
            // If there is work to do, skip the wait and loop back to polling.
            if !SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Relaxed) {
                // If not, wait for an interrupt.
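                // `wfi` returns once an interrupt becomes pending, even though interrupts
                // are masked inside the critical section, so a wake-up that arrives between
                // the check above and the `wfi` is not lost.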
                unsafe { core::arch::asm!("wfi") };
            }
        });
        // If an interrupt occurred while waiting, it will be serviced here, once the
        // critical section has ended.
        // If this races and some waker sets the signal, we'll reset it, but still poll.
        SIGNAL_WORK_THREAD_MODE[cpu].store(false, Ordering::Relaxed);
    }
}

impl Default for Executor {
    fn default() -> Self {
        Self::new()
    }
}