// esp_rtos/task/mod.rs
1#[cfg_attr(riscv, path = "riscv.rs")]
2#[cfg_attr(xtensa, path = "xtensa.rs")]
3pub(crate) mod arch_specific;
4
5#[cfg(feature = "esp-radio")]
6use core::ffi::c_void;
7use core::{marker::PhantomData, mem::MaybeUninit, ptr::NonNull};
8
9#[cfg(feature = "alloc")]
10use allocator_api2::{
11    alloc::{Allocator, Layout},
12    boxed::Box,
13};
14pub(crate) use arch_specific::*;
15use esp_hal::{
16    system::Cpu,
17    time::{Duration, Instant},
18};
19#[cfg(feature = "esp-radio")]
20use esp_radio_rtos_driver::semaphore::{SemaphoreHandle, SemaphorePtr};
21#[cfg(feature = "rtos-trace")]
22use rtos_trace::TaskInfo;
23
24#[cfg(feature = "alloc")]
25use crate::InternalMemory;
26#[cfg(feature = "esp-radio")]
27use crate::wait_queue::WaitQueue;
28use crate::{
29    SCHEDULER,
30    run_queue::{Priority, RunQueue, RunSchedulerOn},
31    scheduler::SchedulerState,
32};
33
/// Signature of an idle task entry point: a divergent `extern "C"` function.
pub type IdleFn = extern "C" fn() -> !;

/// Default idle task: puts the CPU to sleep until the next interrupt, forever.
pub(crate) extern "C" fn idle_hook() -> ! {
    loop {
        esp_hal::interrupt::wait_for_interrupt();
    }
}
41
/// The scheduling state of a task.
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub(crate) enum TaskState {
    /// The task is runnable (or currently running).
    Ready,
    /// The task is blocked and must be woken before it can run again.
    Sleeping,
    /// The task has been marked for deletion and must not be scheduled again.
    Deleted,
}
49
/// A non-null raw pointer to a task control block.
pub(crate) type TaskPtr = NonNull<Task>;
/// A link in an intrusive task list; `None` marks the end of the list.
pub(crate) type TaskListItem = Option<TaskPtr>;
52
/// An abstraction that allows the task to contain multiple different queue pointers.
///
/// Each implementor selects one of the intrusive link fields stored in [`Task`], so a
/// single task can be a member of several lists at the same time.
pub(crate) trait TaskListElement: Default {
    /// Returns the pointer to the next element in the list.
    fn next(task: TaskPtr) -> Option<TaskPtr>;

    /// Sets the pointer to the next element in the list.
    fn set_next(task: TaskPtr, next: Option<TaskPtr>);

    /// Returns whether the task is in the list. If this function returns `None`, we don't know.
    fn is_in_queue(_task: TaskPtr) -> Option<bool> {
        // By default we don't store this information, so we return "Don't know".
        None
    }

    /// Marks whether the task is in the list.
    fn mark_in_queue(_task: TaskPtr, _in_queue: bool) {}
}
70
/// Generates a marker type implementing [`TaskListElement`] for one of the intrusive
/// link fields of [`Task`].
///
/// The optional `$in_queue_field` names a `bool` field on [`Task`] that caches list
/// membership, enabling O(1) "already queued?" checks for that list.
macro_rules! task_list_item {
    ($struct:ident, $field:ident $(, $in_queue_field:ident)?) => {
        #[derive(Default)]
        pub(crate) struct $struct;
        impl TaskListElement for $struct {
            fn next(task: TaskPtr) -> Option<TaskPtr> {
                unsafe { task.as_ref().$field }
            }

            fn set_next(mut task: TaskPtr, next: Option<TaskPtr>) {
                unsafe {
                    task.as_mut().$field = next;
                }
            }

            $(
                fn is_in_queue(task: TaskPtr) -> Option<bool> {
                    Some(unsafe { task.as_ref().$in_queue_field })
                }

                fn mark_in_queue(mut task: TaskPtr, in_queue: bool) {
                    unsafe {
                        task.as_mut().$in_queue_field = in_queue;
                    }
                }
            )?
        }
    };
}
100
// Run/wait-queue links cache membership in `in_run_or_wait_queue` for fast checks.
task_list_item!(
    TaskReadyQueueElement,
    ready_queue_item,
    in_run_or_wait_queue
);
// Timer-queue links cache membership in `timer_queued`.
task_list_item!(TaskTimerQueueElement, timer_queue_item, timer_queued);
// These aren't perf critical, no need to waste memory on caching list status:
task_list_item!(TaskAllocListElement, alloc_list_item);
task_list_item!(TaskDeleteListElement, delete_list_item);
110
/// Extension trait for common task operations. These should be inherent methods but we can't
/// implement stuff for NonNull.
pub(crate) trait TaskExt {
    /// Returns the identifier used to refer to this task in trace output.
    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_id(self) -> u32;
    /// Collects the task metadata reported to the tracing backend.
    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_info(self, run_queue: &mut RunQueue) -> TaskInfo;

    /// Returns the task's current priority.
    fn priority(self, _: &mut RunQueue) -> Priority;
    /// Returns the task's scheduling state.
    fn state(self) -> TaskState;
    /// Updates the task's scheduling state (and reports the transition to tracing).
    fn set_state(self, state: TaskState);
}
123
impl TaskExt for TaskPtr {
    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_id(self) -> u32 {
        // The task's address is unique for the task's lifetime, so it doubles as a trace ID.
        self.addr().get() as u32
    }

    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_info(self, run_queue: &mut RunQueue) -> TaskInfo {
        TaskInfo {
            name: "<todo>",
            priority: self.priority(run_queue).get() as u32,
            stack_base: unsafe { self.as_ref().stack.addr() },
            // NOTE(review): `stack` is a slice of u32 words, so `len()` is in words, not
            // bytes — confirm `TaskInfo::stack_size` expects words.
            stack_size: unsafe { self.as_ref().stack.len() },
        }
    }

    fn priority(self, _: &mut RunQueue) -> Priority {
        unsafe { self.as_ref().priority }
    }

    fn state(self) -> TaskState {
        unsafe { self.as_ref().state }
    }

    fn set_state(mut self, state: TaskState) {
        trace!("Task {:?} state changed to {:?}", self, state);

        // Report the transition to the tracing backend before overwriting the state.
        #[cfg(feature = "rtos-trace")]
        match state {
            TaskState::Ready => rtos_trace::trace::task_ready_begin(self.rtos_trace_id()),
            TaskState::Sleeping => rtos_trace::trace::task_ready_end(self.rtos_trace_id()),
            TaskState::Deleted => rtos_trace::trace::task_terminate(self.rtos_trace_id()),
        }

        unsafe { self.as_mut().state = state };
    }
}
161
/// A singly linked list of tasks.
///
/// Use this where you don't care about the order of list elements.
///
/// The `E` type parameter is used to access the data in the task object that belongs to this list.
#[derive(Default)]
pub(crate) struct TaskList<E> {
    // First element of the list; `None` when the list is empty.
    head: Option<TaskPtr>,
    // Zero-sized marker selecting which intrusive link field of `Task` this list uses.
    _item: PhantomData<E>,
}
172
impl<E: TaskListElement> TaskList<E> {
    /// Creates an empty list.
    pub const fn new() -> Self {
        Self {
            head: None,
            _item: PhantomData,
        }
    }

    /// Pushes a task to the front of the list.
    ///
    /// No-op if the cached membership flag reports the task is already in this list.
    pub fn push(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(true) {
            return;
        }
        E::mark_in_queue(task, true);

        // A task must not be linked into the same list twice.
        debug_assert!(E::next(task).is_none());
        E::set_next(task, self.head);
        self.head = Some(task);
    }

    /// Removes and returns the first task, unlinking it and clearing its membership flag.
    pub fn pop(&mut self) -> Option<TaskPtr> {
        let popped = self.head.take();

        if let Some(task) = popped {
            self.head = E::next(task);
            E::set_next(task, None);
            E::mark_in_queue(task, false);
        }

        popped
    }

    /// Removes `task` from the list, if present.
    ///
    /// Implemented by draining the list and re-pushing every other element. This reverses
    /// the element order, which is acceptable because this list is unordered.
    #[cfg(feature = "esp-radio")]
    pub fn remove(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(false) {
            return;
        }

        // TODO: maybe this (and TaskQueue::remove) may prove too expensive.
        let mut list = core::mem::take(self);
        while let Some(popped) = list.pop() {
            if popped != task {
                self.push(popped);
            } else {
                E::mark_in_queue(task, false);
            }
        }
    }

    /// Iterates over the tasks in the list, front to back.
    #[cfg(feature = "rtos-trace")]
    pub fn iter(&self) -> impl Iterator<Item = TaskPtr> {
        let mut current = self.head;
        core::iter::from_fn(move || {
            let task = current?;
            current = E::next(task);
            Some(task)
        })
    }

    /// Returns whether the list contains no tasks.
    pub(crate) fn is_empty(&self) -> bool {
        self.head.is_none()
    }
}
235
/// A singly linked queue of tasks.
///
/// Use this where you care about the order of list elements. Elements are popped from the front,
/// and pushed to the back.
///
/// The `E` type parameter is used to access the data in the task object that belongs to this list.
#[derive(Default)]
pub(crate) struct TaskQueue<E> {
    // Front of the queue (pop side); `None` when the queue is empty.
    head: Option<TaskPtr>,
    // Back of the queue (push side); `None` when the queue is empty.
    tail: Option<TaskPtr>,
    // Zero-sized marker selecting which intrusive link field of `Task` this queue uses.
    _item: PhantomData<E>,
}
248
impl<E: TaskListElement> TaskQueue<E> {
    /// Creates an empty queue.
    pub const fn new() -> Self {
        Self {
            head: None,
            tail: None,
            _item: PhantomData,
        }
    }

    /// Appends a task to the back of the queue.
    ///
    /// No-op if the cached membership flag reports the task is already in this queue.
    pub fn push(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(true) {
            return;
        }
        E::mark_in_queue(task, true);

        // A task must not be linked into the same queue twice.
        debug_assert!(E::next(task).is_none());
        if let Some(tail) = self.tail {
            E::set_next(tail, Some(task));
        } else {
            // Queue was empty; the new task is also the head.
            self.head = Some(task);
        }
        self.tail = Some(task);
    }

    /// Removes and returns the task at the front of the queue.
    pub fn pop(&mut self) -> Option<TaskPtr> {
        let popped = self.head.take();

        if let Some(task) = popped {
            self.head = E::next(task);
            E::set_next(task, None);
            if self.head.is_none() {
                // Popped the last element; the tail must be cleared too.
                self.tail = None;
            }
            E::mark_in_queue(task, false);
        }

        popped
    }

    /// Removes and returns the first task satisfying `cond`, preserving the relative
    /// order of the remaining tasks (the queue is drained and rebuilt).
    pub fn pop_if(&mut self, cond: impl Fn(&Task) -> bool) -> Option<TaskPtr> {
        let mut popped = None;

        let mut list = core::mem::take(self);
        while let Some(task) = list.pop() {
            if popped.is_none() && cond(unsafe { task.as_ref() }) {
                E::mark_in_queue(task, false);
                popped = Some(task);
            } else {
                self.push(task);
            }
        }

        popped
    }

    /// Removes `task` from the queue, if present.
    pub fn remove(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(false) {
            return;
        }

        _ = self.pop_if(|t| NonNull::from(t) == task);
    }

    /// Returns whether the queue contains no tasks.
    pub(crate) fn is_empty(&self) -> bool {
        self.head.is_none()
    }
}
316
/// Per-task data that external code expects to be thread-local.
pub(crate) struct ThreadLocalData {
    // Semaphore associated with this thread (used via esp-radio); released when the
    // task's thread-local data is dropped.
    #[cfg(feature = "esp-radio")]
    pub thread_semaphore: Option<SemaphorePtr>,

    // The _reent struct is rarely needed, but big. Let's heap-allocate it, to save a bit of RAM.
    #[cfg(feature = "alloc")]
    pub reent: Option<Box<esp_rom_sys::_reent>>,
}
impl ThreadLocalData {
    /// Creates an instance with all fields unset.
    pub const fn new() -> Self {
        Self {
            #[cfg(feature = "esp-radio")]
            thread_semaphore: None,

            #[cfg(feature = "alloc")]
            reent: None,
        }
    }
}
336
impl Drop for ThreadLocalData {
    fn drop(&mut self) {
        // Reclaim ownership of the thread semaphore so it is freed with the task.
        #[cfg(feature = "esp-radio")]
        if let Some(semaphore_ptr) = self.thread_semaphore.take() {
            // SAFETY: we take the pointer out of the Option, so the handle is reconstructed
            // and dropped exactly once. Assumes the pointer originally came from a
            // `SemaphoreHandle` — TODO confirm at the point where it is stored.
            core::mem::drop(unsafe { SemaphoreHandle::from_ptr(semaphore_ptr) });
        }
        // `reent` (when present) is a Box and is freed automatically after this runs.
    }
}
345
/// The task control block.
// NOTE(review): `#[repr(C)]` with `cpu_context` first suggests the context-switch code
// relies on this layout — keep the field order stable; confirm against arch_specific.
#[repr(C)]
pub(crate) struct Task {
    /// Saved CPU registers used to resume the task on a context switch.
    pub cpu_context: CpuContext,

    /// Per-task ("thread-local") data.
    pub thread_local: ThreadLocalData,

    /// Current scheduling state of the task.
    pub state: TaskState,
    /// The task's stack, viewed as a raw slice of 32-bit words.
    pub stack: *mut [MaybeUninit<u32>],

    /// Location of the stack guard (canary word / watchpoint address) within the stack.
    #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
    pub stack_guard: *mut u32,
    /// The canary value expected at `stack_guard` while the stack is intact.
    #[cfg(sw_task_overflow_detection)]
    pub(crate) stack_guard_value: u32,

    /// Scheduling priority of the task.
    pub priority: Priority,
    /// The CPU this task must run on; `None` if it may run on either core.
    #[cfg(multi_core)]
    pub pinned_to: Option<Cpu>,

    /// Timestamp at which a sleeping task should be woken.
    // NOTE(review): units not visible here — confirm where the timer queue compares this.
    pub wakeup_at: u64,

    /// Whether the task is currently queued in the run queue or a wait queue.
    pub in_run_or_wait_queue: bool,
    /// Whether the task is currently queued in the timer queue.
    pub timer_queued: bool,

    /// The current wait queue this task is in.
    #[cfg(feature = "esp-radio")]
    pub(crate) current_wait_queue: Option<NonNull<WaitQueue>>,

    // Lists a task can be in:
    /// The list of all allocated tasks
    pub alloc_list_item: TaskListItem,

    /// Either the RunQueue or the WaitQueue
    pub ready_queue_item: TaskListItem,

    /// The timer queue
    pub timer_queue_item: TaskListItem,

    /// The list of tasks scheduled for deletion
    pub delete_list_item: TaskListItem,

    /// Whether the task was allocated on the heap.
    #[cfg(feature = "alloc")]
    pub(crate) heap_allocated: bool,
}
392
/// Architecture-independent accessors for the saved CPU context.
pub(crate) trait ContextExt {
    /// Sets the saved thread-pointer register (used to locate the current task).
    fn set_tp(&mut self, tp: u32);

    /// Returns the saved stack pointer.
    fn sp(&self) -> u32;

    /// Sets the saved stack pointer.
    fn set_sp(&mut self, sp: u32);
}
400
impl ContextExt for CpuContext {
    fn set_tp(&mut self, tp: u32) {
        cfg_if::cfg_if! {
            if #[cfg(xtensa)] {
                // Xtensa keeps the thread pointer in the THREADPTR special register.
                self.THREADPTR = tp;
            } else if #[cfg(riscv)] {
                // RISC-V keeps the thread pointer in the `tp` register.
                self.tp = tp as usize;
            }
        }
    }

    fn sp(&self) -> u32 {
        cfg_if::cfg_if! {
            if #[cfg(xtensa)] {
                // Xtensa uses A1 as the stack pointer.
                self.A1
            } else {
                self.sp as u32
            }
        }
    }

    fn set_sp(&mut self, sp: u32) {
        cfg_if::cfg_if! {
            if #[cfg(xtensa)] {
                self.A1 = sp;
            } else {
                self.sp = sp as usize;
            }
        }
    }
}
432
/// Entry trampoline for tasks: runs the task function and, when it returns, schedules
/// the (now finished) current task for deletion.
#[cfg(feature = "esp-radio")]
extern "C" fn task_wrapper(task_fn: extern "C" fn(*mut c_void), param: *mut c_void) {
    task_fn(param);
    // `None` means "the current task"; this call does not return for the current task.
    schedule_task_deletion(None);
}
438
impl Task {
    /// Allocates and initializes a new task.
    ///
    /// The stack is allocated from internal memory. `task_stack_size` is grown to make
    /// room for the stack guard (plus extra headroom in debug builds) and rounded up to a
    /// multiple of 16 bytes.
    ///
    /// # Panics
    ///
    /// Panics if the stack cannot be allocated.
    #[cfg(feature = "esp-radio")]
    pub(crate) fn new(
        name: &str,
        task_fn: extern "C" fn(*mut c_void),
        param: *mut c_void,
        task_stack_size: usize,
        priority: usize,
        pinned_to: Option<Cpu>,
    ) -> Self {
        debug!(
            "task_create {} {:?}({:?}) stack_size = {} priority = {} pinned_to = {:?}",
            name, task_fn, param, task_stack_size, priority, pinned_to
        );

        // Make sure the stack guard doesn't eat into the stack size.
        let extra_stack = if cfg!(any(hw_task_overflow_detection, sw_task_overflow_detection)) {
            4 + esp_config::esp_config_int!(usize, "ESP_HAL_CONFIG_STACK_GUARD_OFFSET")
        } else {
            0
        };

        #[cfg(debug_build)]
        // This is a lot, but debug builds fail in different ways without.
        let extra_stack = extra_stack.max(6 * 1024);

        let task_stack_size = task_stack_size + extra_stack;

        // Make sure stack size is also aligned to 16 bytes.
        // NOTE(review): this adds a full 16 bytes even when the size is already aligned;
        // `(size + 15) & !15` would not — confirm whether the extra padding is intentional.
        const MIN_STACK_ALIGNMENT: usize = 16;
        let task_stack_size = (task_stack_size & !(MIN_STACK_ALIGNMENT - 1)) + MIN_STACK_ALIGNMENT;

        let stack = unwrap!(
            Layout::from_size_align(task_stack_size, MIN_STACK_ALIGNMENT)
                .ok()
                .and_then(|layout| InternalMemory.allocate(layout).ok()),
            "Failed to allocate stack",
        )
        .as_ptr();

        let stack_bottom = stack.cast::<MaybeUninit<u32>>();
        let stack_len_bytes = stack.len();

        let stack_guard_offset =
            esp_config::esp_config_int!(usize, "ESP_HAL_CONFIG_STACK_GUARD_OFFSET");

        // View the allocation as a slice of 32-bit words; the initial stack pointer is
        // one past its end (stacks grow downward).
        let stack_words = core::ptr::slice_from_raw_parts_mut(stack_bottom, stack_len_bytes / 4);
        let stack_top = unsafe { stack_bottom.add(stack_words.len()).cast() };

        let mut task = Task {
            cpu_context: new_task_context(task_fn, param, stack_top),
            thread_local: ThreadLocalData::new(),
            state: TaskState::Ready,
            stack: stack_words,
            // Placeholder; the real guard address is set by `set_up_stack_guard` below.
            #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
            stack_guard: stack_words.cast(),
            #[cfg(sw_task_overflow_detection)]
            stack_guard_value: 0,
            #[cfg(feature = "esp-radio")]
            current_wait_queue: None,
            priority: Priority::new(priority),
            #[cfg(multi_core)]
            pinned_to,

            wakeup_at: 0,
            timer_queued: false,
            in_run_or_wait_queue: false,

            alloc_list_item: TaskListItem::None,
            ready_queue_item: TaskListItem::None,
            timer_queue_item: TaskListItem::None,
            delete_list_item: TaskListItem::None,

            #[cfg(feature = "alloc")]
            heap_allocated: false,
        };

        task.set_up_stack_guard(stack_guard_offset, 0xDEED_BAAD);

        task
    }

    /// Places the stack guard canary `offset` bytes above the stack bottom, and records
    /// its location (and, for software detection, the expected value).
    fn set_up_stack_guard(&mut self, offset: usize, _value: u32) {
        let stack_bottom = self.stack.cast::<MaybeUninit<u32>>();
        let stack_guard = unsafe { stack_bottom.byte_add(offset) };

        #[cfg(sw_task_overflow_detection)]
        unsafe {
            // avoid touching the main stack's canary on the first core
            if stack_guard.read().assume_init() != _value {
                stack_guard.write(MaybeUninit::new(_value));
            }
            self.stack_guard_value = _value;
        }

        #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
        {
            self.stack_guard = stack_guard.cast();
        }
    }

    /// Panics if the task's stack shows signs of an overflow: a clobbered canary word
    /// (software detection) or, when enabled, a stack pointer outside the stack range.
    pub(crate) fn ensure_no_stack_overflow(&self, _sp: usize) {
        #[cfg(sw_task_overflow_detection)]
        assert_eq!(
            // This cast is safe to do from MaybeUninit<u32> because this is the word we've written
            // during initialization.
            unsafe { self.stack_guard.read() },
            self.stack_guard_value,
            "Stack overflow detected in {:?}",
            self as *const Task
        );

        #[cfg(stack_pointer_range_check)]
        {
            let len = self.stack.len();
            let data_ptr = self.stack.cast::<MaybeUninit<u32>>();
            let stack_bottom = data_ptr as usize;
            let stack_top = data_ptr.wrapping_add(len) as usize;
            assert!(
                _sp > stack_bottom && _sp <= stack_top,
                "Stack overflow detected in {:?}. Stack pointer: {:x}, Task stack range: {:x} ..= {:x}",
                self as *const Task,
                _sp,
                stack_bottom,
                stack_top
            );
        }
    }

    /// Arms the hardware watchpoint on the stack guard address, when hardware overflow
    /// detection is enabled.
    pub(crate) fn set_up_stack_watchpoint(&self) {
        #[cfg(hw_task_overflow_detection)]
        unsafe {
            esp_hal::debugger::set_stack_watchpoint(self.stack_guard as usize);
        }
    }
}
575
impl Drop for Task {
    fn drop(&mut self) {
        debug!("Dropping task: {:?}", self as *mut Task);

        // Free the stack of heap-allocated tasks. The layout mirrors allocation time:
        // `stack.len()` 4-byte words, 16-byte aligned.
        #[cfg(feature = "alloc")]
        if self.heap_allocated {
            let layout = unwrap!(
                Layout::from_size_align(self.stack.len() * 4, 16).ok(),
                "Cannot compute Layout for stack"
            );
            unsafe { InternalMemory.deallocate(unwrap!(NonNull::new(self.stack.cast())), layout) };
        }
    }
}
590
/// Registers the already-running startup code on the current CPU as that CPU's "main task".
///
/// `stack` describes the existing main stack; `stack_guard_offset` and
/// `stack_guard_value` configure its overflow canary. Must be called at most once per CPU.
pub(super) fn allocate_main_task(
    scheduler: &mut SchedulerState,
    stack: *mut [MaybeUninit<u32>],
    stack_guard_offset: usize,
    stack_guard_value: u32,
) {
    let cpu = Cpu::current();
    let current_cpu = cpu as usize;

    debug_assert!(
        !scheduler.per_cpu[current_cpu].initialized,
        "Tried to allocate main task multiple times"
    );

    scheduler.per_cpu[current_cpu].initialized = true;

    // Reset main task properties. The rest should be cleared when the task is deleted.
    scheduler.per_cpu[current_cpu].main_task.priority = Priority::ZERO;
    scheduler.per_cpu[current_cpu].main_task.state = TaskState::Ready;
    scheduler.per_cpu[current_cpu].main_task.stack = stack;
    scheduler.per_cpu[current_cpu]
        .main_task
        .in_run_or_wait_queue = false;
    scheduler.per_cpu[current_cpu].main_task.timer_queued = false;
    // The main task stays on the core it booted on.
    #[cfg(multi_core)]
    {
        scheduler.per_cpu[current_cpu].main_task.pinned_to = Some(cpu);
    }
    scheduler.per_cpu[current_cpu].main_task.thread_local = ThreadLocalData::new();

    scheduler.per_cpu[current_cpu]
        .main_task
        .set_up_stack_guard(stack_guard_offset, stack_guard_value);

    scheduler.per_cpu[current_cpu]
        .main_task
        .set_up_stack_watchpoint();

    // This is slightly questionable as we don't ensure SchedulerState is pinned, but it's always
    // part of a static object so taking the pointer is fine.
    let main_task_ptr = NonNull::from(&scheduler.per_cpu[current_cpu].main_task);

    // Store the task pointer in the saved thread-pointer register and in the live one,
    // so the current task can be located from now on.
    scheduler.per_cpu[current_cpu]
        .main_task
        .cpu_context
        .set_tp(main_task_ptr.as_ptr() as u32);

    write_thread_pointer(main_task_ptr.as_ptr());

    debug!("Main task created: {:?}", main_task_ptr);

    #[cfg(feature = "rtos-trace")]
    rtos_trace::trace::task_new(main_task_ptr.rtos_trace_id());

    // The main task is already running, no need to add it to the ready queue.
    scheduler.all_tasks.push(main_task_ptr);
    #[cfg(multi_core)]
    scheduler.set_current_task(cpu, Some(main_task_ptr));
    scheduler
        .run_queue
        .mark_task_ready(&scheduler.per_cpu, main_task_ptr);
}
653
/// A handle to the current thread.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct CurrentThreadHandle {
    // The task that was current when the handle was created.
    task: TaskPtr,
}
660
661impl CurrentThreadHandle {
662    /// Retrieves a handle to the current task.
663    pub fn get() -> Self {
664        Self {
665            task: SCHEDULER.current_task(),
666        }
667    }
668
669    /// Delays the current task for the specified duration.
670    pub fn delay(self, duration: Duration) {
671        self.delay_until(Instant::now() + duration);
672    }
673
674    /// Delays the current task until the specified deadline.
675    pub fn delay_until(self, deadline: Instant) {
676        SCHEDULER.sleep_until(deadline);
677    }
678
679    /// Sets the priority of the current task.
680    pub fn set_priority(self, priority: usize) {
681        let priority = Priority::new(priority);
682        SCHEDULER.with(|scheduler| {
683            let old = self.task.priority(&mut scheduler.run_queue);
684            scheduler.set_priority(self.task, priority);
685
686            // If we're dropping in priority, trigger a context switch in case another task can be
687            // scheduled or time slicing needs to be started.
688            if old > priority {
689                yield_task();
690            }
691        });
692    }
693}
694
/// Marks `task` (or the current task, when `None`) for deletion.
///
/// When the scheduler reports `true` — presumably meaning the *current* task was the one
/// scheduled for deletion (TODO confirm against `SchedulerState::schedule_task_deletion`)
/// — this never returns: the task yields until the scheduler destroys it.
#[cfg(feature = "esp-radio")]
pub(super) fn schedule_task_deletion(task: Option<NonNull<Task>>) {
    trace!("schedule_task_deletion {:?}", task);
    if SCHEDULER.with(|scheduler| scheduler.schedule_task_deletion(task)) {
        loop {
            yield_task();
        }
    }
}
704
/// Triggers a context switch on the core requested by `run_scheduler`, if any.
pub(crate) fn trigger_scheduler(run_scheduler: RunSchedulerOn) {
    match run_scheduler {
        RunSchedulerOn::DontRun => {}
        RunSchedulerOn::RunOnCore(_core) => {
            cfg_if::cfg_if! {
                if #[cfg(multi_core)] {
                    // Yield locally when the target is this core; otherwise signal the
                    // other core via its software interrupt.
                    if _core == Cpu::current() {
                        yield_task()
                    } else {
                        schedule_other_core()
                    }
                } else {
                    // Single core: the request can only target this core.
                    yield_task()
                }
            }
        }
    }
}
723
/// Raises a software interrupt to make the *other* core run its scheduler.
#[inline]
#[cfg(multi_core)]
pub(crate) fn schedule_other_core() {
    use esp_hal::interrupt::software::SoftwareInterrupt;
    // NOTE(review): assumes SW interrupt 1 is handled by the APP CPU and SW interrupt 0
    // by the PRO CPU — confirm against the interrupt setup code.
    match Cpu::current() {
        Cpu::ProCpu => unsafe { SoftwareInterrupt::<'static, 1>::steal() }.raise(),
        Cpu::AppCpu => unsafe { SoftwareInterrupt::<'static, 0>::steal() }.raise(),
    }
}