//! Multi-core support
2
use core::{
    marker::PhantomData,
    mem::{ManuallyDrop, MaybeUninit},
    sync::atomic::{AtomicPtr, Ordering},
};

#[instability::unstable]
pub use crate::soc::cpu_control::is_running;
use crate::{
    peripherals::CPU_CTRL,
    soc::cpu_control::{internal_park_core, start_core1_init},
    system::Cpu,
};
16
/// Data type for a properly aligned stack of N bytes
// Xtensa ISA 10.5: [B]y default, the
// stack frame is 16-byte aligned. However, the maximal alignment allowed for a
// TIE ctype is 64-bytes. If a function has any wide-aligned (>16-byte aligned)
// data type for their arguments or the return values, the caller has to ensure
// that the SP is aligned to the largest alignment right before the call.
//
// ^ this means that we should be able to get away with 16 bytes of alignment
// because our root stack frame has no arguments and no return values.
//
// This alignment also doesn't align the stack frames, only the end of stack.
// Stack frame alignment depends on the SIZE as well as the placement of the
// array.
#[repr(C, align(16))]
#[instability::unstable]
pub struct Stack<const SIZE: usize> {
    /// Memory to be used for the stack.
    ///
    /// Deliberately uninitialized: the memory is only ever written by the core
    /// that runs on this stack, never read beforehand.
    pub mem: MaybeUninit<[u8; SIZE]>,
}
36
37impl<const SIZE: usize> Default for Stack<SIZE> {
38    fn default() -> Self {
39        Self::new()
40    }
41}
42
43#[allow(clippy::len_without_is_empty)]
44impl<const SIZE: usize> Stack<SIZE> {
45    /// Construct a stack of length SIZE, uninitialized
46    #[instability::unstable]
47    pub const fn new() -> Stack<SIZE> {
48        const {
49            // Make sure stack top is aligned, too.
50            ::core::assert!(SIZE.is_multiple_of(16));
51        }
52
53        Stack {
54            mem: MaybeUninit::uninit(),
55        }
56    }
57
58    /// Returns the length of the stack in bytes.
59    #[instability::unstable]
60    pub const fn len(&self) -> usize {
61        SIZE
62    }
63
64    /// Returns a mutable pointer to the bottom of the stack.
65    #[instability::unstable]
66    pub fn bottom(&mut self) -> *mut u32 {
67        self.mem.as_mut_ptr() as *mut u32
68    }
69
70    /// Returns a mutable pointer to the top of the stack.
71    #[instability::unstable]
72    pub fn top(&mut self) -> *mut u32 {
73        unsafe { self.bottom().add(SIZE / 4) }
74    }
75}
76
// Pointer to the closure that will be executed on the second core. The closure
// is copied to the core's stack.
pub(crate) static START_CORE1_FUNCTION: AtomicPtr<()> = AtomicPtr::new(core::ptr::null_mut());
// Top of the stack the APP core will run on; published by
// `setup_second_core_stack` before the core is unparked.
pub(crate) static APP_CORE_STACK_TOP: AtomicPtr<u32> = AtomicPtr::new(core::ptr::null_mut());
// Address of the APP core's stack-guard word, or null when no guard offset was
// configured.
pub(crate) static APP_CORE_STACK_GUARD: AtomicPtr<u32> = AtomicPtr::new(core::ptr::null_mut());
82
/// Will park the APP (second) core when dropped
#[must_use = "Dropping this guard will park the APP core"]
#[instability::unstable]
pub struct AppCoreGuard<'a> {
    // Carries the `'a` lifetime of the closure passed to `start_app_core`
    // without storing any data, so the guard cannot outlive the closure.
    phantom: PhantomData<&'a ()>,
}
89
impl Drop for AppCoreGuard<'_> {
    /// Parks the APP core when the guard goes out of scope.
    fn drop(&mut self) {
        // SAFETY: assumes the guard is held by code running on the other
        // (PRO) core, so we never park the core currently executing this
        // drop — NOTE(review): confirm guards cannot migrate to the APP core.
        unsafe { internal_park_core(Cpu::AppCpu, true) };
    }
}
95
96/// Represents errors that can occur while working with the core.
97#[derive(Debug, Clone, Copy, PartialEq)]
98#[cfg_attr(feature = "defmt", derive(defmt::Format))]
99#[instability::unstable]
100pub enum Error {
101    /// The core is already running.
102    CoreAlreadyRunning,
103}
104
#[procmacros::doc_replace]
/// Control CPU Cores
///
/// ## Examples
/// ```rust, no_run
/// # {before_snippet}
/// # use esp_hal::delay::Delay;
/// # use esp_hal::system::{CpuControl, Stack};
/// # use core::{cell::RefCell, ptr::addr_of_mut};
/// # use critical_section::Mutex;
/// # let delay = Delay::new();
/// static mut APP_CORE_STACK: Stack<8192> = Stack::new();
///
/// let counter = Mutex::new(RefCell::new(0));
///
/// let mut cpu_control = CpuControl::new(peripherals.CPU_CTRL);
/// let cpu1_fnctn = || {
///     cpu1_task(&delay, &counter);
/// };
/// let _guard =
///     cpu_control.start_app_core(unsafe { &mut *addr_of_mut!(APP_CORE_STACK) }, cpu1_fnctn)?;
///
/// loop {
///     delay.delay(Duration::from_secs(1));
///     let count = critical_section::with(|cs| *counter.borrow_ref(cs));
/// }
/// # }
///
/// // Where `cpu1_task()` may be defined as:
/// # use esp_hal::delay::Delay;
/// # use core::cell::RefCell;
///
/// fn cpu1_task(delay: &Delay, counter: &critical_section::Mutex<RefCell<i32>>) -> ! {
///     loop {
///         delay.delay(Duration::from_millis(500));
///
///         critical_section::with(|cs| {
///             let mut val = counter.borrow_ref_mut(cs);
///             *val = val.wrapping_add(1);
///         });
///     }
/// }
/// ```
#[instability::unstable]
pub struct CpuControl<'d> {
    // Held only for exclusive ownership of the CPU_CTRL peripheral; never
    // accessed directly after construction.
    _cpu_control: CPU_CTRL<'d>,
}
152
153impl<'d> CpuControl<'d> {
154    /// Creates a new instance of `CpuControl`.
155    #[instability::unstable]
156    pub fn new(cpu_control: CPU_CTRL<'d>) -> CpuControl<'d> {
157        CpuControl {
158            _cpu_control: cpu_control,
159        }
160    }
161
162    /// Park the given core
163    ///
164    /// # Safety
165    ///
166    /// The user must ensure that the core being parked is not the core which is
167    /// currently executing their code.
168    #[instability::unstable]
169    pub unsafe fn park_core(&mut self, core: Cpu) {
170        unsafe { internal_park_core(core, true) };
171    }
172
173    /// Unpark the given core
174    #[instability::unstable]
175    pub fn unpark_core(&mut self, core: Cpu) {
176        unsafe { internal_park_core(core, false) };
177    }
178
179    /// Run the core1 closure.
180    #[inline(never)]
181    pub(crate) unsafe fn start_core1_run<F>() -> !
182    where
183        F: FnOnce(),
184    {
185        let entry = START_CORE1_FUNCTION.load(Ordering::Acquire);
186        debug_assert!(!entry.is_null());
187
188        unsafe {
189            let entry = ManuallyDrop::take(&mut *entry.cast::<ManuallyDrop<F>>());
190            entry();
191            loop {
192                internal_park_core(Cpu::current(), true);
193            }
194        }
195    }
196
197    /// Start the APP (second) core.
198    ///
199    /// The second core will start running the closure `entry`. Note that if the
200    /// closure exits, the core will be parked.
201    ///
202    /// Dropping the returned guard will park the core.
203    #[instability::unstable]
204    pub fn start_app_core<'a, const SIZE: usize, F>(
205        &mut self,
206        stack: &'static mut Stack<SIZE>,
207        entry: F,
208    ) -> Result<AppCoreGuard<'a>, Error>
209    where
210        F: FnOnce(),
211        F: Send + 'a,
212    {
213        cfg_if::cfg_if! {
214            if #[cfg(all(stack_guard_monitoring))] {
215                let stack_guard_offset = Some(esp_config::esp_config_int!(
216                    usize,
217                    "ESP_HAL_CONFIG_STACK_GUARD_OFFSET"
218                ));
219            } else {
220                let stack_guard_offset = None;
221            }
222        };
223
224        self.start_app_core_with_stack_guard_offset(stack, stack_guard_offset, entry)
225    }
226
227    /// Start the APP (second) core.
228    ///
229    /// The second core will start running the closure `entry`. Note that if the
230    /// closure exits, the core will be parked.
231    ///
232    /// Dropping the returned guard will park the core.
233    #[instability::unstable]
234    pub fn start_app_core_with_stack_guard_offset<'a, const SIZE: usize, F>(
235        &mut self,
236        stack: &'static mut Stack<SIZE>,
237        stack_guard_offset: Option<usize>,
238        entry: F,
239    ) -> Result<AppCoreGuard<'a>, Error>
240    where
241        F: FnOnce(),
242        F: Send + 'a,
243    {
244        if !crate::debugger::debugger_connected() && is_running(Cpu::AppCpu) {
245            return Err(Error::CoreAlreadyRunning);
246        }
247
248        setup_second_core_stack(stack, stack_guard_offset, entry);
249
250        crate::soc::cpu_control::start_core1(start_core1_init::<F> as *const u32);
251
252        self.unpark_core(Cpu::AppCpu);
253
254        Ok(AppCoreGuard {
255            phantom: PhantomData,
256        })
257    }
258}
259
// Prepares the shared state the APP core needs before it is started: writes
// the `entry` closure onto the new core's stack and publishes the closure
// pointer, stack top and guard address through the atomics above.
fn setup_second_core_stack<'a, F, const SIZE: usize>(
    stack: &'static mut Stack<SIZE>,
    stack_guard_offset: Option<usize>,
    entry: F,
) where
    F: FnOnce(),
    F: Send + 'a,
{
    // We don't want to drop this, since it's getting moved to the other core.
    let entry = ManuallyDrop::new(entry);

    unsafe {
        let stack_bottom = stack.bottom().cast::<u8>();
        // With a guard offset, the 4-byte guard word sits at
        // `stack_bottom + offset` and usable memory starts right above it;
        // without one, the guard pointer is null and the whole stack is usable.
        let (stack_guard, stack_bottom_above_guard) =
            if let Some(stack_guard_offset) = stack_guard_offset {
                // Guard word must be word-aligned and fit inside the stack.
                assert!(stack_guard_offset.is_multiple_of(4));
                assert!(stack_guard_offset <= stack.len() - 4);
                (
                    stack_bottom.byte_add(stack_guard_offset),
                    stack_bottom.byte_add(stack_guard_offset).byte_add(4),
                )
            } else {
                (core::ptr::null_mut(), stack_bottom)
            };

        // Push `entry` to an aligned address at the (physical) bottom of the stack, but above
        // the stack guard. The second core will copy it into its proper place, then
        // calls it.
        let align_offset = stack_bottom_above_guard.align_offset(core::mem::align_of::<F>());
        let entry_dst = stack_bottom_above_guard
            .add(align_offset)
            .cast::<ManuallyDrop<F>>();

        entry_dst.write(entry);

        let entry_fn = entry_dst.cast::<()>();
        // Release stores pair with the Acquire load in `start_core1_run`,
        // making the closure bytes written above visible to the APP core.
        START_CORE1_FUNCTION.store(entry_fn, Ordering::Release);
        APP_CORE_STACK_TOP.store(stack.top(), Ordering::Release);
        APP_CORE_STACK_GUARD.store(stack_guard.cast(), Ordering::Release);
    }
}