1use core::{cell::UnsafeCell, mem::MaybeUninit, ptr::NonNull, sync::atomic::Ordering};
4
5use embassy_executor::{SendSpawner, Spawner, raw};
6use esp_hal::{
7 interrupt::{InterruptHandler, Priority, software::SoftwareInterrupt},
8 system::Cpu,
9 time::{Duration, Instant},
10};
11use esp_sync::NonReentrantMutex;
12use macros::ram;
13use portable_atomic::AtomicPtr;
14
15use crate::{
16 SCHEDULER,
17 task::{TaskExt, TaskPtr},
18};
19
20pub(crate) struct FlagsInner {
21 owner: TaskPtr,
22 waiting: Option<TaskPtr>,
23 set: bool,
24}
25impl FlagsInner {
26 fn take(&mut self) -> bool {
27 if self.set {
28 self.set = false;
30 true
31 } else {
32 self.waiting = Some(self.owner);
36
37 false
38 }
39 }
40}
41
/// A one-shot wakeup flag used to block a thread-mode executor's task while
/// it has no work, and to wake it from the `__pender` hook.
struct ThreadFlag {
    // Lock-protected flag state; see `FlagsInner`.
    inner: NonReentrantMutex<FlagsInner>,
}
50
impl ThreadFlag {
    /// Creates a flag owned by the task currently running on this CPU,
    /// falling back to the CPU's main task when no task is current.
    fn new() -> Self {
        let owner = SCHEDULER.with(|scheduler| {
            let current_cpu = Cpu::current() as usize;
            if let Some(current_task) = scheduler.per_cpu[current_cpu].current_task {
                current_task
            } else {
                NonNull::from(&scheduler.per_cpu[current_cpu].main_task)
            }
        });
        Self {
            inner: NonReentrantMutex::new(FlagsInner {
                owner,
                waiting: None,
                set: false,
            }),
        }
    }

    /// Runs `f` with the inner flag state locked.
    fn with<R>(&self, f: impl FnOnce(&mut FlagsInner) -> R) -> R {
        self.inner.with(|inner| f(inner))
    }

    /// Raises the flag. If the owner is currently blocked in [`Self::wait`],
    /// it is resumed immediately instead of latching the flag.
    fn set(&self) {
        self.with(|inner| {
            if let Some(waiting) = inner.waiting.take() {
                waiting.resume();
            } else {
                inner.set = true;
            }
        });
    }

    /// Returns whether the flag is currently raised, without consuming it.
    fn get(&self) -> bool {
        self.with(|inner| inner.set)
    }

    /// Consumes the flag, blocking the owning task until it is raised.
    ///
    /// When the flag is not yet set, the owner is put to sleep with no
    /// deadline (`Instant::EPOCH + Duration::MAX`) and is only woken by
    /// `set` resuming it.
    //
    // NOTE(review): the sleep + yield happens while the flag's lock is still
    // held — presumably `NonReentrantMutex` and the scheduler tolerate a
    // context switch at this point; confirm against the scheduler/lock
    // implementation.
    fn wait(&self) {
        self.with(|inner| {
            if !inner.take() {
                SCHEDULER.with(|scheduler| {
                    scheduler.sleep_task_until(inner.owner, Instant::EPOCH + Duration::MAX);
                    crate::task::yield_task();
                });
            }
        });
    }
}
105
/// Embassy pender hook: wakes the executor identified by `context`.
///
/// Values `0..=3` identify an [`InterruptExecutor`] by its software-interrupt
/// number (see `InterruptExecutor::start`, which passes `SWI` as the
/// context); any other value is treated as a pointer to a thread-mode
/// executor's `ThreadFlag` (see `Executor::run_inner`).
#[unsafe(export_name = "__pender")]
#[ram]
fn __pender(context: *mut ()) {
    match context as usize {
        // SAFETY: only `raise()` is called on the stolen peripheral, which
        // pends the corresponding software interrupt.
        0 => unsafe { SoftwareInterrupt::<0>::steal().raise() },
        1 => unsafe { SoftwareInterrupt::<1>::steal().raise() },
        2 => unsafe { SoftwareInterrupt::<2>::steal().raise() },
        3 => unsafe { SoftwareInterrupt::<3>::steal().raise() },
        _ => {
            // SAFETY: thread-mode executors register a pointer to a
            // `ThreadFlag` that stays alive for as long as the executor runs
            // (`run_inner` never returns).
            let flags = unwrap!(unsafe { context.cast::<ThreadFlag>().as_ref() });
            flags.set();
        }
    }
}
122
/// Hooks invoked by [`Executor::run_with_callbacks`] around each poll cycle.
pub trait Callbacks {
    /// Called immediately before the executor polls its tasks.
    fn before_poll(&mut self);

    /// Called after polling, before the executor blocks waiting for work.
    fn on_idle(&mut self);
}
134
/// A thread-mode Embassy executor driven by this crate's scheduler.
#[cfg_attr(
    multi_core,
    doc = r"

If you want to start the executor on the second core, you will need to start the second core using [`crate::start_second_core`].
If you are looking for a way to run code on the second core without the scheduler, use the [`InterruptExecutor`].
"
)]
pub struct Executor {
    // Lazily-initialized raw executor; written exactly once in `run_inner`.
    executor: UnsafeCell<MaybeUninit<raw::Executor>>,
}
151
impl Executor {
    /// Creates a new, not-yet-running executor.
    pub const fn new() -> Self {
        Self {
            executor: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    /// Initializes and runs the executor on the current task. Never returns.
    ///
    /// `init` is called once with the executor's [`Spawner`] so the caller
    /// can spawn its initial tasks.
    pub fn run(&'static mut self, init: impl FnOnce(Spawner)) -> ! {
        let flags = ThreadFlag::new();
        // No-op callbacks for the plain `run` entry point.
        struct NoHooks;

        impl Callbacks for NoHooks {
            fn before_poll(&mut self) {}

            fn on_idle(&mut self) {}
        }

        self.run_inner(init, &flags, NoHooks)
    }

    /// Like [`Self::run`], but invokes `callbacks` around each poll cycle.
    ///
    /// The user `on_idle` hook is suppressed when the wakeup flag is already
    /// raised (more work is pending), so it only fires when the executor is
    /// genuinely about to block.
    pub fn run_with_callbacks(
        &'static mut self,
        init: impl FnOnce(Spawner),
        callbacks: impl Callbacks,
    ) -> ! {
        let flags = ThreadFlag::new();
        // Wraps the user callbacks, gating `on_idle` on the wakeup flag.
        struct Hooks<'a, CB: Callbacks>(CB, &'a ThreadFlag);

        impl<CB: Callbacks> Callbacks for Hooks<'_, CB> {
            fn before_poll(&mut self) {
                self.0.before_poll()
            }

            fn on_idle(&mut self) {
                // Skip the user idle hook if a wakeup is already pending.
                if !self.1.get() {
                    self.0.on_idle();
                }
            }
        }

        self.run_inner(init, &flags, Hooks(callbacks, &flags))
    }

    /// Shared poll loop: initializes the raw executor, runs `init`, then
    /// polls forever, blocking on `flags` between polls.
    fn run_inner(
        &'static self,
        init: impl FnOnce(Spawner),
        flags: &ThreadFlag,
        mut hooks: impl Callbacks,
    ) -> ! {
        // The flag's address becomes the pender `context` (see `__pender`).
        // SAFETY: `self.executor` is written exactly once here, and `flags`
        // outlives the executor because this function never returns.
        let executor = unsafe {
            (&mut *self.executor.get()).write(raw::Executor::new(
                (flags as *const ThreadFlag).cast::<()>().cast_mut(),
            ))
        };

        #[cfg(multi_core)]
        if Cpu::current() != Cpu::ProCpu
            && crate::SCHEDULER
                .with(|scheduler| !scheduler.per_cpu[Cpu::current() as usize].initialized)
        {
            panic!("Executor cannot be started: the scheduler is not running on the current CPU.");
        }

        init(executor.spawner());

        loop {
            hooks.before_poll();

            // SAFETY: this loop is the only caller of `poll` for this
            // executor instance, so `poll` is never re-entered.
            unsafe { executor.poll() };

            hooks.on_idle();

            // Sleep until `__pender` raises the flag again.
            flags.wait();
        }
    }
}
258
259impl Default for Executor {
260 fn default() -> Self {
261 Self::new()
262 }
263}
264
/// An Embassy executor that runs inside the handler of software interrupt
/// `SWI`, at a configurable interrupt priority.
pub struct InterruptExecutor<const SWI: u8> {
    // Lazily-initialized raw executor; written exactly once in `start`.
    executor: UnsafeCell<MaybeUninit<raw::Executor>>,
    // The software interrupt whose handler polls this executor.
    interrupt: SoftwareInterrupt<'static, SWI>,
}
277
// One slot per software interrupt (0..=3).
const COUNT: usize = 4;
// Registry mapping a SWI number to the raw executor its handler must poll.
static INTERRUPT_EXECUTORS: [InterruptExecutorStorage; COUNT] =
    [const { InterruptExecutorStorage::new() }; COUNT];
281
// SAFETY: NOTE(review): the inner `UnsafeCell` is only written in `start`,
// which requires `&'static mut self` (exclusive access); afterwards the
// cell is only read. Cross-thread access therefore appears to be serialized
// by that exclusive borrow — confirm no other mutation path exists.
unsafe impl<const SWI: u8> Send for InterruptExecutor<SWI> {}
unsafe impl<const SWI: u8> Sync for InterruptExecutor<SWI> {}
284
285struct InterruptExecutorStorage {
286 raw_executor: AtomicPtr<raw::Executor>,
287}
288
289impl InterruptExecutorStorage {
290 const fn new() -> Self {
291 Self {
292 raw_executor: AtomicPtr::new(core::ptr::null_mut()),
293 }
294 }
295
296 unsafe fn get(&self) -> &raw::Executor {
300 unsafe { &*self.raw_executor.load(Ordering::Relaxed) }
301 }
302
303 fn set(&self, executor: *mut raw::Executor) {
304 self.raw_executor.store(executor, Ordering::Relaxed);
305 }
306}
307
/// Interrupt handler for software interrupt `NUM`: acknowledges the
/// interrupt and polls the executor registered for that SWI.
extern "C" fn handle_interrupt<const NUM: u8>() {
    // SAFETY: this handler is only installed by `InterruptExecutor::start`,
    // which owns the corresponding `SoftwareInterrupt`; it is only used to
    // clear the pending interrupt.
    let swi = unsafe { SoftwareInterrupt::<NUM>::steal() };
    swi.reset();

    // SAFETY: `start` stores the executor pointer before installing this
    // handler, so the slot is non-null and points to a live executor.
    unsafe {
        let executor = INTERRUPT_EXECUTORS[NUM as usize].get();
        executor.poll();
    }
}
318
impl<const SWI: u8> InterruptExecutor<SWI> {
    /// Creates a new interrupt-mode executor bound to software interrupt
    /// `SWI`. The executor does not run until [`Self::start`] is called.
    #[inline]
    pub const fn new(interrupt: SoftwareInterrupt<'static, SWI>) -> Self {
        Self {
            executor: UnsafeCell::new(MaybeUninit::uninit()),
            interrupt,
        }
    }

    /// Starts the executor: initializes it, publishes it in the global slot,
    /// and installs the SWI handler at `priority`. Returns a [`SendSpawner`]
    /// for spawning tasks onto it.
    pub fn start(&'static mut self, priority: Priority) -> SendSpawner {
        // SAFETY: `&'static mut self` guarantees exclusive access; the
        // executor is fully written before its pointer is published and
        // before the interrupt handler is installed.
        unsafe {
            // The SWI number doubles as the pender `context` (see `__pender`).
            (*self.executor.get()).write(raw::Executor::new((SWI as usize) as *mut ()));

            INTERRUPT_EXECUTORS[SWI as usize].set((*self.executor.get()).as_mut_ptr());
        }

        let swi_handler = match SWI {
            0 => handle_interrupt::<0>,
            1 => handle_interrupt::<1>,
            2 => handle_interrupt::<2>,
            3 => handle_interrupt::<3>,
            // Software interrupts only exist for 0..=3.
            _ => unreachable!(),
        };

        self.interrupt
            .set_interrupt_handler(InterruptHandler::new(swi_handler, priority));

        // SAFETY: initialized by the `write` above.
        let executor = unsafe { (*self.executor.get()).assume_init_ref() };
        executor.spawner().make_send()
    }

    /// Returns a [`SendSpawner`] for an already-started executor.
    ///
    /// # Panics
    ///
    /// Panics if [`Self::start`] has not been called yet.
    pub fn spawner(&'static self) -> SendSpawner {
        if INTERRUPT_EXECUTORS[SWI as usize]
            .raw_executor
            .load(Ordering::Acquire)
            .is_null()
        {
            panic!("InterruptExecutor::spawner() called on uninitialized executor.");
        }
        // SAFETY: the non-null check above proves `start` already ran and
        // initialized `self.executor`.
        let executor = unsafe { (*self.executor.get()).assume_init_ref() };
        executor.spawner().make_send()
    }
}
382}