esp_hal/reg_access.rs

//! Utils
//!
//! # Overview
//!
//! Collection of structs that help you write byte data to word-aligned
//! (`u32`) registers.

use core::marker::PhantomData;

const U32_ALIGN_SIZE: usize = core::mem::size_of::<u32>();

pub(crate) trait EndianessConverter {
    fn u32_from_bytes(bytes: [u8; 4]) -> u32;
    fn u32_to_bytes(word: u32) -> [u8; 4];
}

/// Always use native endianness.
#[allow(unused)] // only used in AES driver for now
pub(crate) struct NativeEndianess;

impl EndianessConverter for NativeEndianess {
    fn u32_from_bytes(bytes: [u8; 4]) -> u32 {
        u32::from_ne_bytes(bytes)
    }

    fn u32_to_bytes(word: u32) -> [u8; 4] {
        u32::to_ne_bytes(word)
    }
}

/// Use big endian for the ESP32, native endianness otherwise.
#[derive(Debug, Clone)]
pub(crate) struct SocDependentEndianess;

#[cfg(not(esp32))]
impl EndianessConverter for SocDependentEndianess {
    fn u32_from_bytes(bytes: [u8; 4]) -> u32 {
        u32::from_ne_bytes(bytes)
    }

    fn u32_to_bytes(word: u32) -> [u8; 4] {
        u32::to_ne_bytes(word)
    }
}

#[cfg(esp32)]
impl EndianessConverter for SocDependentEndianess {
    fn u32_from_bytes(bytes: [u8; 4]) -> u32 {
        u32::from_be_bytes(bytes)
    }

    fn u32_to_bytes(word: u32) -> [u8; 4] {
        u32::to_be_bytes(word)
    }
}
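
// For example, assuming a little-endian CPU (which all supported chips are),
// `NativeEndianess::u32_from_bytes([0x01, 0x02, 0x03, 0x04])` yields
// 0x0403_0201, while the ESP32 flavour of `SocDependentEndianess` yields
// 0x0102_0304, the byte-swapped layout its peripherals expect
// (illustrative values).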

// The alignment helper aids writing to registers that only accept u32 words
// using regular u8 bytes. It keeps a write buffer of 4 u8 (it could in theory
// be 3, but that is less convenient), and if the incoming data cannot be
// converted to whole u32 words (i.e. its length is not a multiple of 4) it
// stores the remainder in the buffer until the next call.
//
// It assumes incoming `dst` pointers are aligned to the desired layout (in
// the future `ptr.is_aligned` can be used). It also assumes that writes are
// done in FIFO order.
#[derive(Debug, Clone)]
pub(crate) struct AlignmentHelper<E: EndianessConverter> {
    buf: [u8; U32_ALIGN_SIZE],
    buf_fill: usize,
    phantom: PhantomData<E>,
}
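
// A typical lifecycle, as a sketch (illustrative only; `dst` stands in for
// the base pointer of a peripheral register block):
//
//     let mut helper = AlignmentHelper::default();
//     // 6 bytes: one full word goes out immediately, [5, 6] stays buffered.
//     let (rest, bounded) = helper.aligned_volatile_copy(dst, &[1, 2, 3, 4, 5, 6], 4, 0);
//     // Zero-pad the buffered remainder and write it as the next word.
//     helper.flush_to(dst, 1);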

impl AlignmentHelper<SocDependentEndianess> {
    pub fn default() -> AlignmentHelper<SocDependentEndianess> {
        AlignmentHelper {
            buf: [0u8; U32_ALIGN_SIZE],
            buf_fill: 0,
            phantom: PhantomData,
        }
    }
}

// only used by AES
#[cfg(aes)]
impl AlignmentHelper<NativeEndianess> {
    pub fn native_endianess() -> AlignmentHelper<NativeEndianess> {
        AlignmentHelper {
            buf: [0u8; U32_ALIGN_SIZE],
            buf_fill: 0,
            phantom: PhantomData,
        }
    }
}

impl<E: EndianessConverter> AlignmentHelper<E> {
    pub fn reset(&mut self) {
        self.buf_fill = 0;
    }

    pub const fn align_size(&self) -> usize {
        U32_ALIGN_SIZE
    }

    // This function writes any remaining buffered bytes to dst and returns
    // the number of '0' padding bytes it appended (0 means nothing was
    // flushed). If the buffered data does not fill a whole register word,
    // the missing bytes are padded with '0'.
    pub fn flush_to(&mut self, dst_ptr: *mut u32, offset: usize) -> usize {
        if self.buf_fill != 0 {
            for i in self.buf_fill..U32_ALIGN_SIZE {
                self.buf[i] = 0;
            }

            unsafe {
                dst_ptr
                    .add(offset)
                    .write_volatile(E::u32_from_bytes(self.buf));
            }

            let ret = self.align_size() - self.buf_fill;
            self.buf_fill = 0;

            ret
        } else {
            0
        }
    }
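
    // E.g. with two bytes [0xAA, 0xBB] pending, `flush_to(reg, 1)` writes the
    // word built from [0xAA, 0xBB, 0x00, 0x00] to `reg.add(1)` and returns 2,
    // the number of padding bytes appended (illustrative values; `reg` is a
    // hypothetical register pointer).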

    // This function is similar to `volatile_set_memory`, but it prepends data
    // that was previously ingested and ensures aligned (u32) writes.
    pub fn volatile_write(&mut self, dst_ptr: *mut u32, val: u8, count: usize, offset: usize) {
        let dst_ptr = unsafe { dst_ptr.add(offset) };

        let mut cursor = if self.buf_fill != 0 {
            // Complete the partially filled word with `val` and write it out.
            for i in self.buf_fill..U32_ALIGN_SIZE {
                self.buf[i] = val;
            }

            unsafe {
                dst_ptr.write_volatile(E::u32_from_bytes(self.buf));
            }

            self.buf_fill = 0;

            1
        } else {
            0
        };

        // Fill the remaining words entirely with `val`.
        while cursor < count {
            unsafe {
                dst_ptr
                    .add(cursor)
                    .write_volatile(E::u32_from_bytes([val; U32_ALIGN_SIZE]));
            }
            cursor += 1;
        }
    }
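
    // E.g. `volatile_write(reg, 0, 2, 0)` zero-fills two whole register
    // words, as a driver might do when padding the tail of a message block
    // (illustrative call; `reg` is a hypothetical register pointer).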

    // This function is similar to `volatile_copy_nonoverlapping_memory`,
    // however it buffers up to a u32 in order to always write to registers in
    // an aligned way. Additionally it stops writing at the end of the register
    // block (defined by `dst_bound` relative to `dst_ptr`) and returns the
    // remaining data (if it was not possible to write everything), along with
    // a flag telling whether it wrote up to `dst_bound` or exited early (due
    // to lack of data).
    pub fn aligned_volatile_copy<'a>(
        &mut self,
        dst_ptr: *mut u32,
        src: &'a [u8],
        dst_bound: usize,
        offset: usize,
    ) -> (&'a [u8], bool) {
        assert!(dst_bound > 0);

        let dst_ptr = unsafe { dst_ptr.add(offset) };

        let mut nsrc = src;
        let mut cursor = 0;

        if self.buf_fill != 0 {
            // First prepend existing data
            let max_fill = U32_ALIGN_SIZE - self.buf_fill;
            let (nbuf, src) = src.split_at(core::cmp::min(src.len(), max_fill));
            nsrc = src;

            for i in 0..max_fill {
                match nbuf.get(i) {
                    Some(v) => {
                        self.buf[self.buf_fill] = *v;
                        self.buf_fill += 1;
                    }
                    None => return (&[], false), // Source exhausted before the buffer was filled
                }
            }

            unsafe {
                dst_ptr.write_volatile(E::u32_from_bytes(self.buf));
            }
            cursor += 1;

            self.buf_fill = 0;
        }

        if dst_bound <= offset + cursor {
            return (nsrc, true);
        }

        let (to_write, remaining) = nsrc.split_at(core::cmp::min(
            (dst_bound - offset - cursor) * U32_ALIGN_SIZE,
            (nsrc.len() / U32_ALIGN_SIZE) * U32_ALIGN_SIZE,
        ));

        if !to_write.is_empty() {
            for (i, v) in to_write.chunks_exact(U32_ALIGN_SIZE).enumerate() {
                unsafe {
                    dst_ptr
                        .add(i + cursor)
                        .write_volatile(E::u32_from_bytes(v.try_into().unwrap()));
                }
            }
        }

        // If the leftover data cannot fill a whole word, there is no need to
        // try to align it; just buffer it for the next write. Generally this
        // applies when src.len() is not a multiple of 4.
        let was_bounded = (offset + cursor + to_write.len() / U32_ALIGN_SIZE) == dst_bound;

        if !remaining.is_empty() && remaining.len() < U32_ALIGN_SIZE {
            self.buf[..remaining.len()].copy_from_slice(remaining);
            self.buf_fill = remaining.len();

            return (&[], was_bounded);
        }

        (remaining, was_bounded)
    }
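
    // E.g. copying 20 bytes into a 4-word block (`dst_bound == 4`) writes 16
    // bytes, then returns the 4 unwritten bytes together with `true`, because
    // the block was filled up to its bound (illustrative values).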

    #[allow(dead_code)]
    pub fn volatile_write_regset(&mut self, dst_ptr: *mut u32, src: &[u8], dst_bound: usize) {
        assert!(dst_bound > 0);
        assert!(src.len() <= dst_bound * U32_ALIGN_SIZE);

        if !src.is_empty() {
            for (i, v) in src.chunks_exact(U32_ALIGN_SIZE).enumerate() {
                unsafe {
                    dst_ptr
                        .add(i)
                        .write_volatile(E::u32_from_bytes(v.try_into().unwrap()));
                }
            }
        }
    }

    pub fn volatile_read_regset(&self, src_ptr: *const u32, dst: &mut [u8], dst_bound: usize) {
        assert!(dst.len() >= dst_bound * U32_ALIGN_SIZE);

        // Read at most `dst_bound` words so we never touch registers past the
        // end of the block, even if `dst` is larger.
        let chunks = dst.chunks_exact_mut(U32_ALIGN_SIZE).take(dst_bound);
        for (i, chunk) in chunks.enumerate() {
            let read_val: [u8; U32_ALIGN_SIZE] =
                unsafe { E::u32_to_bytes(src_ptr.add(i).read_volatile()) };
            chunk.copy_from_slice(&read_val);
        }
    }
}
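
// A minimal host-side sketch of the intended behaviour, exercising the helper
// against a plain in-memory word array instead of real peripheral registers
// (illustrative only; on-target code writes to register blocks instead):
#[cfg(test)]
mod tests {
    use core::marker::PhantomData;

    use super::*;

    #[test]
    fn partial_words_are_buffered_then_flushed_with_padding() {
        let mut words = [0u32; 4];
        let mut helper: AlignmentHelper<NativeEndianess> = AlignmentHelper {
            buf: [0u8; U32_ALIGN_SIZE],
            buf_fill: 0,
            phantom: PhantomData,
        };

        // 6 bytes: one full word lands in words[0], [5, 6] stays buffered.
        let (rest, bounded) =
            helper.aligned_volatile_copy(words.as_mut_ptr(), &[1, 2, 3, 4, 5, 6], 4, 0);
        assert!(rest.is_empty());
        assert!(!bounded);
        assert_eq!(words[0], u32::from_ne_bytes([1, 2, 3, 4]));

        // Flushing zero-pads the remainder and reports the 2 padding bytes.
        let padding = helper.flush_to(words.as_mut_ptr(), 1);
        assert_eq!(padding, 2);
        assert_eq!(words[1], u32::from_ne_bytes([5, 6, 0, 0]));
    }
}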