#[cfg_attr(riscv, path = "riscv.rs")]
#[cfg_attr(xtensa, path = "xtensa.rs")]
pub(crate) mod arch_specific;

#[cfg(feature = "esp-radio")]
use core::ffi::c_void;
use core::{marker::PhantomData, mem::MaybeUninit, ptr::NonNull};

#[cfg(feature = "alloc")]
use allocator_api2::alloc::{Allocator, Layout};
pub(crate) use arch_specific::*;
use esp_hal::{
    system::Cpu,
    time::{Duration, Instant},
};
#[cfg(feature = "rtos-trace")]
use rtos_trace::TaskInfo;

#[cfg(feature = "alloc")]
use crate::InternalMemory;
#[cfg(feature = "esp-radio")]
use crate::semaphore::Semaphore;
use crate::{
    SCHEDULER,
    run_queue::{Priority, RunQueue},
    scheduler::SchedulerState,
    wait_queue::WaitQueue,
};

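/// Signature of an idle task function. An idle function is not expected to return.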
pub type IdleFn = extern "C" fn() -> !;

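/// Scheduling state of a task.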
#[derive(Clone, Copy, PartialEq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub(crate) enum TaskState {
    Ready,
    Sleeping,
    Deleted,
}

pub(crate) type TaskPtr = NonNull<Task>;
pub(crate) type TaskListItem = Option<TaskPtr>;

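/// Accessor for one of the intrusive list hooks embedded in [`Task`].
///
/// Each implementor reads and writes a single `TaskListItem` field of the task. The optional
/// `is_in_queue`/`mark_in_queue` pair exposes a bookkeeping flag that lets lists and queues
/// skip duplicate insertions and no-op removals.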
pub(crate) trait TaskListElement: Default {
    fn next(task: TaskPtr) -> Option<TaskPtr>;

    fn set_next(task: TaskPtr, next: Option<TaskPtr>);

    fn is_in_queue(_task: TaskPtr) -> Option<bool> {
        None
    }

    fn mark_in_queue(_task: TaskPtr, _in_queue: bool) {}
}

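// Implements `TaskListElement` for a marker type by forwarding to the given `Task` field.
// The optional third argument names the `bool` field used to track queue membership.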
macro_rules! task_list_item {
    ($struct:ident, $field:ident $(, $in_queue_field:ident)?) => {
        #[derive(Default)]
        pub(crate) struct $struct;
        impl TaskListElement for $struct {
            fn next(task: TaskPtr) -> Option<TaskPtr> {
                unsafe { task.as_ref().$field }
            }

            fn set_next(mut task: TaskPtr, next: Option<TaskPtr>) {
                unsafe {
                    task.as_mut().$field = next;
                }
            }

            $(
                fn is_in_queue(task: TaskPtr) -> Option<bool> {
                    Some(unsafe { task.as_ref().$in_queue_field })
                }

                fn mark_in_queue(mut task: TaskPtr, in_queue: bool) {
                    unsafe {
                        task.as_mut().$in_queue_field = in_queue;
                    }
                }
            )?
        }
    };
}

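// Marker types for the intrusive lists a task can be linked into. The ready and timer queues
// track membership with a flag; the allocation and deletion lists do not.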
task_list_item!(TaskReadyQueueElement, ready_queue_item, run_queued);
task_list_item!(TaskTimerQueueElement, timer_queue_item, timer_queued);
task_list_item!(TaskAllocListElement, alloc_list_item);
task_list_item!(TaskDeleteListElement, delete_list_item);

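/// Convenience accessors implemented on raw task pointers ([`TaskPtr`]).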
pub(crate) trait TaskExt {
    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_id(self) -> u32;
    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_info(self, run_queue: &mut RunQueue) -> TaskInfo;

    #[cfg(any(feature = "esp-radio", feature = "embassy"))]
    fn resume(self);
    fn priority(self, _: &mut RunQueue) -> Priority;
    fn set_priority(self, _: &mut RunQueue, new_priority: Priority);
    fn state(self) -> TaskState;
    fn set_state(self, state: TaskState);
}

impl TaskExt for TaskPtr {
    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_id(self) -> u32 {
        self.addr().get() as u32
    }

    #[cfg(feature = "rtos-trace")]
    fn rtos_trace_info(self, run_queue: &mut RunQueue) -> TaskInfo {
        TaskInfo {
            name: "<todo>",
            priority: self.priority(run_queue).get() as u32,
            stack_base: unsafe { self.as_ref().stack.addr() },
            stack_size: unsafe { self.as_ref().stack.len() },
        }
    }

    #[cfg(any(feature = "esp-radio", feature = "embassy"))]
    #[esp_hal::ram]
    fn resume(self) {
        SCHEDULER.with(|scheduler| scheduler.resume_task(self))
    }

    fn priority(self, _: &mut RunQueue) -> Priority {
        unsafe { self.as_ref().priority }
    }

    fn set_priority(mut self, run_queue: &mut RunQueue, new_priority: Priority) {
        run_queue.remove(self);
        unsafe { self.as_mut().priority = new_priority };
    }

    fn state(self) -> TaskState {
        unsafe { self.as_ref().state }
    }

    fn set_state(mut self, state: TaskState) {
        trace!("Task {:?} state changed to {:?}", self, state);

        #[cfg(feature = "rtos-trace")]
        match state {
            TaskState::Ready => rtos_trace::trace::task_ready_begin(self.rtos_trace_id()),
            TaskState::Sleeping => rtos_trace::trace::task_ready_end(self.rtos_trace_id()),
            TaskState::Deleted => rtos_trace::trace::task_terminate(self.rtos_trace_id()),
        }

        unsafe { self.as_mut().state = state };
    }
}

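/// A singly linked, LIFO list of tasks, threaded through the [`Task`] field selected by `E`.
///
/// `remove` rebuilds the list by popping every element and re-pushing the survivors, so the
/// relative order of the remaining tasks is not preserved.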
#[derive(Default)]
pub(crate) struct TaskList<E> {
    head: Option<TaskPtr>,
    _item: PhantomData<E>,
}

impl<E: TaskListElement> TaskList<E> {
    pub const fn new() -> Self {
        Self {
            head: None,
            _item: PhantomData,
        }
    }

    pub fn push(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(true) {
            return;
        }
        E::mark_in_queue(task, true);

        debug_assert!(E::next(task).is_none());
        E::set_next(task, self.head);
        self.head = Some(task);
    }

    pub fn pop(&mut self) -> Option<TaskPtr> {
        let popped = self.head.take();

        if let Some(task) = popped {
            self.head = E::next(task);
            E::set_next(task, None);
            E::mark_in_queue(task, false);
        }

        popped
    }

    pub fn remove(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(false) {
            return;
        }
        E::mark_in_queue(task, false);

        let mut list = core::mem::take(self);
        while let Some(popped) = list.pop() {
            if popped != task {
                self.push(popped);
            }
        }
    }

    #[cfg(feature = "rtos-trace")]
    pub fn iter(&self) -> impl Iterator<Item = TaskPtr> {
        let mut current = self.head;
        core::iter::from_fn(move || {
            let task = current?;
            current = E::next(task);
            Some(task)
        })
    }

    pub(crate) fn is_empty(&self) -> bool {
        self.head.is_none()
    }
}

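/// A singly linked FIFO queue of tasks with head and tail pointers, threaded through the
/// [`Task`] field selected by `E`.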
#[derive(Default)]
pub(crate) struct TaskQueue<E> {
    head: Option<TaskPtr>,
    tail: Option<TaskPtr>,
    _item: PhantomData<E>,
}

impl<E: TaskListElement> TaskQueue<E> {
    pub const fn new() -> Self {
        Self {
            head: None,
            tail: None,
            _item: PhantomData,
        }
    }

    pub fn push(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(true) {
            return;
        }
        E::mark_in_queue(task, true);

        debug_assert!(E::next(task).is_none());
        if let Some(tail) = self.tail {
            E::set_next(tail, Some(task));
        } else {
            self.head = Some(task);
        }
        self.tail = Some(task);
    }

    pub fn pop(&mut self) -> Option<TaskPtr> {
        let popped = self.head.take();

        if let Some(task) = popped {
            self.head = E::next(task);
            E::set_next(task, None);
            if self.head.is_none() {
                self.tail = None;
            }
            E::mark_in_queue(task, false);
        }

        popped
    }

    #[cfg(multi_core)]
    pub fn pop_if(&mut self, cond: impl Fn(&Task) -> bool) -> Option<TaskPtr> {
        let mut popped = None;

        let mut list = core::mem::take(self);
        while let Some(task) = list.pop() {
            if popped.is_none() && cond(unsafe { task.as_ref() }) {
                E::mark_in_queue(task, false);
                popped = Some(task);
            } else {
                self.push(task);
            }
        }

        popped
    }

    pub fn remove(&mut self, task: TaskPtr) {
        if E::is_in_queue(task) == Some(false) {
            return;
        }

        let mut list = core::mem::take(self);
        while let Some(popped) = list.pop() {
            if popped == task {
                E::mark_in_queue(task, false);
            } else {
                self.push(popped);
            }
        }
    }

    pub(crate) fn is_empty(&self) -> bool {
        self.head.is_none()
    }
}

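/// A task control block: the saved CPU context, stack, scheduling metadata and the intrusive
/// list hooks used by the scheduler's queues.
///
/// The struct is `#[repr(C)]`, which keeps `cpu_context` at offset zero; the architecture-specific
/// context-switch code presumably relies on that fixed layout.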
#[repr(C)]
pub(crate) struct Task {
    pub cpu_context: CpuContext,
    #[cfg(feature = "esp-radio")]
    pub thread_semaphore: Option<Semaphore>,
    pub state: TaskState,
    pub stack: *mut [MaybeUninit<u32>],

    #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
    pub stack_guard: *mut u32,
    #[cfg(sw_task_overflow_detection)]
    pub(crate) stack_guard_value: u32,

    pub priority: Priority,
    #[cfg(multi_core)]
    pub pinned_to: Option<Cpu>,

    pub wakeup_at: u64,

    pub run_queued: bool,
    pub timer_queued: bool,

    pub(crate) current_queue: Option<NonNull<WaitQueue>>,

    pub alloc_list_item: TaskListItem,

    pub ready_queue_item: TaskListItem,

    pub timer_queue_item: TaskListItem,

    pub delete_list_item: TaskListItem,

    #[cfg(feature = "alloc")]
    pub(crate) heap_allocated: bool,
}

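/// Entry trampoline for spawned tasks: runs `task_fn(param)`, then schedules the calling task
/// for deletion (the null pointer selects the current task) once the function returns.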
#[cfg(feature = "esp-radio")]
extern "C" fn task_wrapper(task_fn: extern "C" fn(*mut c_void), param: *mut c_void) {
    task_fn(param);
    schedule_task_deletion(core::ptr::null_mut());
}

impl Task {
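    /// Allocates a 16-byte-aligned stack from internal memory and builds a ready-to-run task
    /// that will execute `task_fn(param)` at the given priority.
    ///
    /// The requested stack size is grown to make room for the stack guard word (plus extra
    /// headroom in debug builds) and rounded up to a multiple of 16 bytes.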
    #[cfg(feature = "esp-radio")]
    pub(crate) fn new(
        name: &str,
        task_fn: extern "C" fn(*mut c_void),
        param: *mut c_void,
        task_stack_size: usize,
        priority: usize,
        pinned_to: Option<Cpu>,
    ) -> Self {
        debug!(
            "task_create {} {:?}({:?}) stack_size = {} priority = {} pinned_to = {:?}",
            name, task_fn, param, task_stack_size, priority, pinned_to
        );

        let extra_stack = if cfg!(any(hw_task_overflow_detection, sw_task_overflow_detection)) {
            4 + esp_config::esp_config_int!(usize, "ESP_HAL_CONFIG_STACK_GUARD_OFFSET")
        } else {
            0
        };

        #[cfg(debug_build)]
        let extra_stack = extra_stack.max(6 * 1024);

        let task_stack_size = task_stack_size + extra_stack;

        let task_stack_size = (task_stack_size & !0xF) + 16;

        let stack = unwrap!(
            Layout::from_size_align(task_stack_size, 16)
                .ok()
                .and_then(|layout| InternalMemory.allocate(layout).ok()),
            "Failed to allocate stack",
        )
        .as_ptr();

        let stack_bottom = stack.cast::<MaybeUninit<u32>>();
        let stack_len_bytes = stack.len();

        let stack_guard_offset =
            esp_config::esp_config_int!(usize, "ESP_HAL_CONFIG_STACK_GUARD_OFFSET");

        let stack_words = core::ptr::slice_from_raw_parts_mut(stack_bottom, stack_len_bytes / 4);
        let stack_top = unsafe { stack_bottom.add(stack_words.len()).cast() };

        let mut task = Task {
            cpu_context: new_task_context(task_fn, param, stack_top),
            #[cfg(feature = "esp-radio")]
            thread_semaphore: None,
            state: TaskState::Ready,
            stack: stack_words,
            #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
            stack_guard: stack_words.cast(),
            #[cfg(sw_task_overflow_detection)]
            stack_guard_value: 0,
            current_queue: None,
            priority: Priority::new(priority),
            #[cfg(multi_core)]
            pinned_to,

            wakeup_at: 0,
            timer_queued: false,
            run_queued: false,

            alloc_list_item: TaskListItem::None,
            ready_queue_item: TaskListItem::None,
            timer_queue_item: TaskListItem::None,
            delete_list_item: TaskListItem::None,

            #[cfg(feature = "alloc")]
            heap_allocated: false,
        };

        task.set_up_stack_guard(stack_guard_offset, 0xDEED_BAAD);

        task
    }

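    /// Places the stack guard word `offset` bytes above the stack bottom and records its
    /// address (and, for software overflow detection, its expected value).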
    fn set_up_stack_guard(&mut self, offset: usize, _value: u32) {
        let stack_bottom = self.stack.cast::<MaybeUninit<u32>>();
        let stack_guard = unsafe { stack_bottom.byte_add(offset) };

        #[cfg(sw_task_overflow_detection)]
        unsafe {
            if stack_guard.read().assume_init() != _value {
                stack_guard.write(MaybeUninit::new(_value));
            }
            self.stack_guard_value = _value;
        }

        #[cfg(any(hw_task_overflow_detection, sw_task_overflow_detection))]
        {
            self.stack_guard = stack_guard.cast();
        }
    }

    pub(crate) fn ensure_no_stack_overflow(&self) {
        #[cfg(sw_task_overflow_detection)]
        assert_eq!(
            unsafe { self.stack_guard.read() },
            self.stack_guard_value,
            "Stack overflow detected in {:?}",
            self as *const Task
        );
    }

    pub(crate) fn set_up_stack_watchpoint(&self) {
        #[cfg(hw_task_overflow_detection)]
        unsafe {
            esp_hal::debugger::set_stack_watchpoint(self.stack_guard as usize);
        }
    }
}

impl Drop for Task {
    fn drop(&mut self) {
        debug!("Dropping task: {:?}", self as *mut Task);

        #[cfg(feature = "esp-radio")]
        let _ = self.thread_semaphore.take();

        #[cfg(feature = "alloc")]
        if self.heap_allocated {
            let layout = unwrap!(
                Layout::from_size_align(self.stack.len() * 4, 16).ok(),
                "Cannot compute Layout for stack"
            );
            unsafe { InternalMemory.deallocate(unwrap!(NonNull::new(self.stack.cast())), layout) };
        }
    }
}

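/// Turns the code already running on the current CPU into that CPU's "main" task.
///
/// The main task lives inside the per-CPU scheduler state; this fills in its stack and stack
/// guard, registers it in the global task list and marks it ready on the run queue.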
pub(super) fn allocate_main_task(
    scheduler: &mut SchedulerState,
    stack: *mut [MaybeUninit<u32>],
    stack_guard_offset: usize,
    stack_guard_value: u32,
) {
    let cpu = Cpu::current();
    let current_cpu = cpu as usize;

    debug_assert!(
        !scheduler.per_cpu[current_cpu].initialized,
        "Tried to allocate main task multiple times"
    );

    scheduler.per_cpu[current_cpu].initialized = true;

    scheduler.per_cpu[current_cpu].main_task.priority = Priority::ZERO;
    scheduler.per_cpu[current_cpu].main_task.state = TaskState::Ready;
    scheduler.per_cpu[current_cpu].main_task.stack = stack;
    scheduler.per_cpu[current_cpu].main_task.run_queued = false;
    scheduler.per_cpu[current_cpu].main_task.timer_queued = false;
    #[cfg(multi_core)]
    {
        scheduler.per_cpu[current_cpu].main_task.pinned_to = Some(cpu);
    }

    scheduler.per_cpu[current_cpu]
        .main_task
        .set_up_stack_guard(stack_guard_offset, stack_guard_value);

    scheduler.per_cpu[current_cpu]
        .main_task
        .set_up_stack_watchpoint();

    let main_task_ptr = NonNull::from(&scheduler.per_cpu[current_cpu].main_task);
    debug!("Main task created: {:?}", main_task_ptr);

    #[cfg(feature = "rtos-trace")]
    rtos_trace::trace::task_new(main_task_ptr.rtos_trace_id());

    scheduler.all_tasks.push(main_task_ptr);
    scheduler.per_cpu[current_cpu].current_task = Some(main_task_ptr);
    scheduler
        .run_queue
        .mark_task_ready(&scheduler.per_cpu, main_task_ptr);
}

pub(super) fn with_current_task<R>(mut cb: impl FnMut(&mut Task) -> R) -> R {
    SCHEDULER.with(|state| {
        cb(unsafe {
            let current_cpu = Cpu::current() as usize;
            unwrap!(state.per_cpu[current_cpu].current_task).as_mut()
        })
    })
}

pub(super) fn current_task() -> TaskPtr {
    with_current_task(|task| NonNull::from(task))
}

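/// Handle to the task currently executing on this CPU.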
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct CurrentThreadHandle {
    task: TaskPtr,
}

impl CurrentThreadHandle {
    pub fn get() -> Self {
        Self {
            task: current_task(),
        }
    }

    pub fn delay(self, duration: Duration) {
        self.delay_until(Instant::now() + duration);
    }

    pub fn delay_until(self, deadline: Instant) {
        SCHEDULER.sleep_until(deadline);
    }

    pub fn set_priority(self, priority: usize) {
        let priority = Priority::new(priority);
        SCHEDULER.with(|state| {
            let old = self.task.priority(&mut state.run_queue);
            self.task.set_priority(&mut state.run_queue, priority);

            // Lowering the current task's priority may mean another task should run now;
            // yield so the scheduler can re-evaluate.
            if old > priority {
                crate::task::yield_task();
            }
        });
    }
}

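/// Asks the scheduler to delete `task`; a null pointer selects the current task.
///
/// If the scheduler's `schedule_task_deletion` returns `true` (presumably because the calling
/// task is the one being deleted), this yields in a loop and never returns.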
#[cfg(feature = "esp-radio")]
pub(super) fn schedule_task_deletion(task: *mut Task) {
    trace!("schedule_task_deletion {:?}", task);
    if SCHEDULER.with(|scheduler| scheduler.schedule_task_deletion(task)) {
        loop {
            yield_task();
        }
    }
}

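/// Raises the other core's software interrupt so its scheduler gets a chance to run.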
#[inline]
#[cfg(multi_core)]
pub(crate) fn schedule_other_core() {
    use esp_hal::interrupt::software::SoftwareInterrupt;
    match Cpu::current() {
        Cpu::ProCpu => unsafe { SoftwareInterrupt::<'static, 1>::steal() }.raise(),
        Cpu::AppCpu => unsafe { SoftwareInterrupt::<'static, 0>::steal() }.raise(),
    }
}