1use core::convert::Infallible;
38
39use crate::{
40 pac,
41 peripherals::HMAC,
42 reg_access::{AlignmentHelper, SocDependentEndianess},
43 system::{GenericPeripheralGuard, Peripheral as PeripheralEnable},
44};
45
/// HMAC peripheral driver.
///
/// Streams a message into the hardware in 64-byte SHA blocks and reads the
/// resulting MAC back out. Typical flow: [`Self::init`] → [`Self::configure`]
/// → repeated [`Self::update`] → [`Self::finalize`].
pub struct Hmac<'d> {
    // Owned handle to the HMAC peripheral's register block.
    hmac: HMAC<'d>,
    // Buffers sub-word bytes and handles SoC-dependent endianness when
    // copying to/from the peripheral's word-sized message/result registers.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Total bytes accounted to the hardware so far. Starts at 64 —
    // presumably the first 64-byte block is occupied by the key the
    // hardware mixes in (see `new`/`padding`); TODO confirm against TRM.
    byte_written: usize,
    // Hardware command deferred until the next data transfer.
    next_command: NextCommand,
    // Keeps the HMAC peripheral clocked/enabled while the driver exists.
    _guard: GenericPeripheralGuard<{ PeripheralEnable::Hmac as u8 }>,
}
56
/// Errors reported by the HMAC peripheral.
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The hardware's error-query bit was set after configuration,
    /// indicating the selected key's configured purpose does not match the
    /// purpose requested in [`Hmac::configure`].
    KeyPurposeMismatch,
}
65
/// Purpose for which the HMAC output is used.
///
/// The discriminants are the raw purpose values written to the
/// `set_para_purpose` register; they must match the purpose configured for
/// the selected key, or [`Error::KeyPurposeMismatch`] is returned.
/// (Exact downstream semantics are defined by the chip — see the TRM.)
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[allow(clippy::enum_variant_names, reason = "peripheral is unstable")]
pub enum HmacPurpose {
    /// Output is routed to the JTAG subsystem (purpose value 6).
    ToJtag = 6,
    /// Output is routed to the digital-signature peripheral (purpose value 7).
    ToDs = 7,
    /// Output is returned to the user, readable via [`Hmac::finalize`]
    /// (purpose value 8).
    ToUser = 8,
    /// Output may be used for either JTAG or the digital-signature
    /// peripheral (purpose value 5).
    ToDsOrJtag = 5,
}
82
/// Selects which of the six hardware key slots the HMAC operation uses.
///
/// The discriminant is written verbatim to the `set_para_key` register;
/// presumably each slot corresponds to an eFuse key block — confirm
/// against the chip's TRM.
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum KeyId {
    /// Hardware key slot 0.
    Key0 = 0,
    /// Hardware key slot 1.
    Key1 = 1,
    /// Hardware key slot 2.
    Key2 = 2,
    /// Hardware key slot 3.
    Key3 = 3,
    /// Hardware key slot 4.
    Key4 = 4,
    /// Hardware key slot 5.
    Key5 = 5,
}
100
/// Hardware command recorded at a 64-byte block boundary and issued by
/// `Hmac::next_command` before the next data transfer.
enum NextCommand {
    /// Nothing pending.
    None,
    /// Continue ingesting message blocks (`set_message_ing`).
    MessageIng,
    /// Begin the padding phase (`set_message_pad`).
    MessagePad,
}
106
impl<'d> Hmac<'d> {
    /// Creates a new HMAC driver, taking ownership of the peripheral.
    ///
    /// The `GenericPeripheralGuard` acquired here keeps the HMAC peripheral
    /// enabled for the lifetime of the driver.
    pub fn new(hmac: HMAC<'d>) -> Self {
        let guard = GenericPeripheralGuard::new();

        Self {
            hmac,
            alignment_helper: AlignmentHelper::default(),
            // Cursor starts at 64: the first SHA block is presumably
            // occupied by the key material the hardware mixes in, and the
            // padding math below includes it in the message length.
            // TODO confirm against the TRM.
            byte_written: 64,
            next_command: NextCommand::None,
            _guard: guard,
        }
    }

    /// Shorthand accessor for the HMAC register block.
    fn regs(&self) -> &pac::hmac::RegisterBlock {
        self.hmac.register_block()
    }

    /// Starts the peripheral and resets all driver-side bookkeeping,
    /// discarding any in-flight message state.
    pub fn init(&mut self) {
        self.regs().set_start().write(|w| w.set_start().set_bit());
        self.alignment_helper.reset();
        self.byte_written = 64;
        self.next_command = NextCommand::None;
    }

    /// Selects the key purpose `m` and hardware key slot `key_id`, then
    /// latches the configuration via `set_para_finish`.
    ///
    /// # Errors
    ///
    /// Returns [`Error::KeyPurposeMismatch`] if the hardware's error-query
    /// bit is set afterwards, i.e. the selected key's configured purpose
    /// does not match `m`. (Never returns `WouldBlock` despite the
    /// `nb::Result` signature.)
    pub fn configure(&mut self, m: HmacPurpose, key_id: KeyId) -> nb::Result<(), Error> {
        self.regs()
            .set_para_purpose()
            .write(|w| unsafe { w.purpose_set().bits(m as u8) });
        self.regs()
            .set_para_key()
            .write(|w| unsafe { w.key_set().bits(key_id as u8) });
        self.regs()
            .set_para_finish()
            .write(|w| w.set_para_end().set_bit());

        if self.regs().query_error().read().query_check().bit_is_set() {
            return Err(nb::Error::Other(Error::KeyPurposeMismatch));
        }

        Ok(())
    }

    /// Feeds `msg` into the peripheral.
    ///
    /// Returns the unconsumed tail of `msg` (empty when everything was
    /// written); call again with the remainder until it is empty.
    ///
    /// # Errors
    ///
    /// Returns `nb::Error::WouldBlock` while the hardware is still
    /// processing a previous block.
    pub fn update<'a>(&mut self, msg: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        // Issue any command deferred at the previous block boundary.
        self.next_command();

        // Infallible: write_data only ever returns Ok.
        let remaining = self.write_data(msg).unwrap();

        Ok(remaining)
    }

    /// Finalizes the computation and copies the MAC into `output`.
    ///
    /// Writes the SHA-style padding (a `0x80` terminator, zero fill, then
    /// the 64-bit big-endian bit length), waits for the hardware, copies at
    /// most 32 result bytes into `output`, and signals `set_result_finish`.
    /// Bookkeeping is reset so the driver can be reused.
    ///
    /// # Errors
    ///
    /// Returns `nb::Error::WouldBlock` while the hardware is busy.
    pub fn finalize(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        self.next_command();

        // Snapshot the total length (including the initial 64-byte key
        // block) before the padding bytes below inflate the cursor.
        let msg_len = self.byte_written as u64;

        // SHA padding starts with a single 0x80 terminator byte.
        nb::block!(self.write_data(&[0x80])).unwrap();
        nb::block!(self.flush_data()).unwrap();
        self.next_command();
        // After flushing the partial word the cursor is word-aligned.
        debug_assert!(self.byte_written % 4 == 0);

        self.padding(msg_len);

        // Short messages: key block plus data plus padding fit without an
        // extra block (56 = 64 minus the 8-byte length field), so tell the
        // hardware this was a one-block computation and wait for it.
        if msg_len < 64 + 56 {
            self.regs()
                .one_block()
                .write(|w| w.set_one_block().set_bit());

            while self.is_busy() {}
        }

        // Copy the digest out of the result registers; at most 32 bytes
        // (256 bits) are read, truncated to the caller's buffer.
        self.alignment_helper.volatile_read_regset(
            #[cfg(esp32s2)]
            self.regs().rd_result_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().rd_result_mem(0).as_ptr(),
            output,
            core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
        );

        self.regs()
            .set_result_finish()
            .write(|w| w.set_result_end().set_bit());
        // Reset driver state for the next computation.
        self.byte_written = 64;
        self.next_command = NextCommand::None;
        Ok(())
    }

    /// Returns `true` while the peripheral reports it is busy.
    fn is_busy(&mut self) -> bool {
        self.regs().query_busy().read().busy_state().bit_is_set()
    }

    /// Issues the command deferred at the last block boundary (continue
    /// ingesting vs. start padding), then clears the pending state.
    fn next_command(&mut self) {
        match self.next_command {
            NextCommand::MessageIng => {
                self.regs()
                    .set_message_ing()
                    .write(|w| w.set_text_ing().set_bit());
            }
            NextCommand::MessagePad => {
                self.regs()
                    .set_message_pad()
                    .write(|w| w.set_text_pad().set_bit());
            }
            NextCommand::None => {}
        }
        self.next_command = NextCommand::None;
    }

    /// Copies `incoming` into the hardware message buffer starting at the
    /// current offset within the 64-byte block, returning the unconsumed
    /// tail.
    ///
    /// When the copy fills the block, the block is submitted via
    /// `set_message_one` and the command for the next transfer is
    /// recorded: keep ingesting if at least 56 bytes remain, otherwise the
    /// next step is padding. (56 = block size minus the 8-byte length
    /// field — presumably so the final length fits; confirm against TRM.)
    fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        let mod_length = self.byte_written % 64;

        let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            incoming,
            64 / self.alignment_helper.align_size(),
            mod_length / self.alignment_helper.align_size(),
        );

        self.byte_written = self
            .byte_written
            .wrapping_add(incoming.len() - remaining.len());

        if bound_reached {
            // A full 64-byte block is buffered — start processing it.
            self.regs()
                .set_message_one()
                .write(|w| w.set_text_one().set_bit());

            if remaining.len() >= 56 {
                self.next_command = NextCommand::MessageIng;
            } else {
                self.next_command = NextCommand::MessagePad;
            }
        }

        Ok(remaining)
    }

    /// Flushes any partially-buffered word from the alignment helper into
    /// the message buffer.
    ///
    /// If the flush completes a 64-byte block, the block is submitted, the
    /// driver waits for it, and padding is scheduled as the next command.
    ///
    /// # Errors
    ///
    /// Returns `nb::Error::WouldBlock` while the hardware is busy.
    fn flush_data(&mut self) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        let flushed = self.alignment_helper.flush_to(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            (self.byte_written % 64) / self.alignment_helper.align_size(),
        );

        self.byte_written = self.byte_written.wrapping_add(flushed);
        if flushed > 0 && self.byte_written % 64 == 0 {
            self.regs()
                .set_message_one()
                .write(|w| w.set_text_one().set_bit());
            while self.is_busy() {}
            self.next_command = NextCommand::MessagePad;
        }

        Ok(())
    }

    /// Writes the SHA-style padding: zero fill up to the last 8 bytes of
    /// the final block, then the total message length in bits as a 64-bit
    /// big-endian integer, and submits the final block.
    ///
    /// `msg_len` is the total byte count (including the initial 64-byte
    /// key block) captured before the `0x80` terminator was written.
    fn padding(&mut self, msg_len: u64) {
        let mod_cursor = self.byte_written % 64;

        // Not enough room left in this block for the 8-byte length field:
        // zero-fill the rest of the block, process it, and continue the
        // padding in a fresh block.
        if mod_cursor > 56 {
            let pad_len = 64 - mod_cursor;
            self.alignment_helper.volatile_write(
                #[cfg(esp32s2)]
                self.regs().wr_message_(0).as_ptr(),
                #[cfg(not(esp32s2))]
                self.regs().wr_message_mem(0).as_ptr(),
                0_u8,
                pad_len / self.alignment_helper.align_size(),
                mod_cursor / self.alignment_helper.align_size(),
            );
            self.regs()
                .set_message_one()
                .write(|w| w.set_text_one().set_bit());
            self.byte_written = self.byte_written.wrapping_add(pad_len);
            debug_assert!(self.byte_written % 64 == 0);
            while self.is_busy() {}
            self.next_command = NextCommand::MessagePad;
            self.next_command();
        }

        // Zero-fill up to byte 56 of the (possibly fresh) final block,
        // leaving exactly room for the 8-byte length field.
        let mod_cursor = self.byte_written % 64;
        let pad_len = 64 - mod_cursor - core::mem::size_of::<u64>();

        self.alignment_helper.volatile_write(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            0_u8,
            pad_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );

        self.byte_written = self.byte_written.wrapping_add(pad_len);

        assert_eq!(self.byte_written % 64, 64 - core::mem::size_of::<u64>());

        // Message length in *bits*, big-endian, as required by SHA padding.
        let len_mem = (msg_len * 8).to_be_bytes();

        self.alignment_helper.aligned_volatile_copy(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            &len_mem,
            64 / self.alignment_helper.align_size(),
            (64 - core::mem::size_of::<u64>()) / self.alignment_helper.align_size(),
        );
        // Submit the final block and wait for the digest to be computed.
        self.regs()
            .set_message_one()
            .write(|w| w.set_text_one().set_bit());

        while self.is_busy() {}
    }
}