1use core::convert::Infallible;
36
37use crate::{
38 pac,
39 peripherals::HMAC,
40 reg_access::{AlignmentHelper, SocDependentEndianess},
41 system::{GenericPeripheralGuard, Peripheral as PeripheralEnable},
42};
43
/// HMAC peripheral driver.
pub struct Hmac<'d> {
    /// Owned HMAC peripheral instance; provides access to the register block.
    hmac: HMAC<'d>,
    /// Performs word-aligned, SoC-endianness-aware copies into the
    /// peripheral's message registers.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    /// Total number of bytes fed to the hardware so far. Initialized to 64:
    /// the first 64-byte block appears to be consumed by the hardware key
    /// processing step — TODO confirm against the TRM.
    byte_written: usize,
    /// Deferred hardware command to issue before the next data write.
    next_command: NextCommand,
    /// Keeps the HMAC peripheral enabled for the driver's lifetime.
    _guard: GenericPeripheralGuard<{ PeripheralEnable::Hmac as u8 }>,
}
54
/// Errors produced while configuring the HMAC peripheral.
// `Eq` derived in addition to `PartialEq`: the variant carries no data, so
// equality is total.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The peripheral reported an error for the selected key/purpose
    /// combination (read back from the `query_error` register after the
    /// parameters were committed).
    KeyPurposeMismatch,
}
63
/// The purpose for which the HMAC result will be used.
///
/// The discriminant values are written directly into the peripheral's
/// purpose register and must match the hardware encoding.
// `Eq` and `Hash` derived eagerly: field-less enum, total equality is free.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[allow(clippy::enum_variant_names, reason = "peripheral is unstable")]
pub enum HmacPurpose {
    /// The HMAC result is routed to JTAG (hardware encoding 6).
    ToJtag = 6,
    /// The HMAC result is routed to the digital-signature (DS) peripheral
    /// (hardware encoding 7).
    ToDs = 7,
    /// The HMAC result is readable by software (hardware encoding 8).
    ToUser = 8,
    /// The HMAC result may be used by either the DS peripheral or JTAG
    /// (hardware encoding 5).
    ToDsOrJtag = 5,
}
80
/// Selects which of the six hardware key slots the HMAC operation uses.
///
/// The discriminant is written directly into the peripheral's key register.
// `Eq` and `Hash` derived eagerly: field-less enum, total equality is free.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum KeyId {
    /// Key slot 0.
    Key0 = 0,
    /// Key slot 1.
    Key1 = 1,
    /// Key slot 2.
    Key2 = 2,
    /// Key slot 3.
    Key3 = 3,
    /// Key slot 4.
    Key4 = 4,
    /// Key slot 5.
    Key5 = 5,
}
98
/// Deferred command that must be issued to the peripheral before the next
/// chunk of message data is written (see `Hmac::next_command`).
enum NextCommand {
    /// Nothing pending.
    None,
    /// Continue an in-progress message (more message blocks will follow).
    MessageIng,
    /// The message is ending; the next block written is the padded final one.
    MessagePad,
}
104
impl<'d> Hmac<'d> {
    /// Creates a new HMAC driver, taking ownership of the peripheral.
    pub fn new(hmac: HMAC<'d>) -> Self {
        // Enables the peripheral; kept alive in `_guard` so it stays enabled
        // until the driver is dropped.
        let guard = GenericPeripheralGuard::new();

        Self {
            hmac,
            alignment_helper: AlignmentHelper::default(),
            // Starts at 64: the first 64-byte block appears to be consumed by
            // the hardware key processing — TODO confirm against the TRM.
            byte_written: 64,
            next_command: NextCommand::None,
            _guard: guard,
        }
    }

    /// Shorthand access to the HMAC register block.
    fn regs(&self) -> &pac::hmac::RegisterBlock {
        self.hmac.register_block()
    }

    /// Starts the peripheral and resets the driver's software state so a new
    /// operation can begin.
    pub fn init(&mut self) {
        self.regs().set_start().write(|w| w.set_start().set_bit());
        self.alignment_helper.reset();
        self.byte_written = 64;
        self.next_command = NextCommand::None;
    }

    /// Programs the key purpose and key slot for the upcoming operation.
    ///
    /// Returns `Error::KeyPurposeMismatch` if the peripheral's error register
    /// flags the key/purpose combination after the parameters are committed.
    pub fn configure(&mut self, m: HmacPurpose, key_id: KeyId) -> nb::Result<(), Error> {
        self.regs()
            .set_para_purpose()
            .write(|w| unsafe { w.purpose_set().bits(m as u8) });
        self.regs()
            .set_para_key()
            .write(|w| unsafe { w.key_set().bits(key_id as u8) });
        // Commit the parameter configuration.
        self.regs()
            .set_para_finish()
            .write(|w| w.set_para_end().set_bit());

        // Hardware sets this check bit when the selected key slot does not
        // match the requested purpose.
        if self.regs().query_error().read().query_check().bit_is_set() {
            return Err(nb::Error::Other(Error::KeyPurposeMismatch));
        }

        Ok(())
    }

    /// Feeds message bytes to the peripheral.
    ///
    /// Returns `WouldBlock` while the hardware is busy. On success, returns
    /// the tail of `msg` that was NOT consumed; the caller must resubmit it.
    pub fn update<'a>(&mut self, msg: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        // Issue any command deferred from the previous write (continue/pad).
        self.next_command();

        // write_data is infallible here, hence the unwrap.
        let remaining = self.write_data(msg).unwrap();

        Ok(remaining)
    }

    /// Finishes the operation: applies SHA-256 padding, reads the 32-byte
    /// digest into `output`, and resets the driver state.
    ///
    /// Returns `WouldBlock` while the hardware is busy.
    pub fn finalize(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        self.next_command();

        // Total message length (including the initial 64-byte key block) in
        // bytes, captured before the padding bytes are appended.
        let msg_len = self.byte_written as u64;

        // SHA-256 padding: a single 0x80 byte terminates the message...
        nb::block!(self.write_data(&[0x80])).unwrap();
        // ...then flush any bytes still buffered in the alignment helper.
        nb::block!(self.flush_data()).unwrap();
        self.next_command();
        debug_assert!(self.byte_written.is_multiple_of(4));

        // Zero-fill and append the 64-bit big-endian bit length.
        self.padding(msg_len);

        // Short-message path: if everything (key block + message + padding)
        // fit within the threshold, tell the hardware it was a single block —
        // presumably 64 (key) + 56 (max payload before padding spills) —
        // TODO confirm against the TRM.
        if msg_len < 64 + 56 {
            self.regs()
                .one_block()
                .write(|w| w.set_one_block().set_bit());

            while self.is_busy() {}
        }

        // Copy at most 32 bytes (SHA-256 digest size) out of the result
        // registers; register names differ on the ESP32-S2.
        self.alignment_helper.volatile_read_regset(
            #[cfg(esp32s2)]
            self.regs().rd_result_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().rd_result_mem(0).as_ptr(),
            output,
            core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
        );

        // Signal the hardware that the result has been consumed, then reset
        // software state for the next operation.
        self.regs()
            .set_result_finish()
            .write(|w| w.set_result_end().set_bit());
        self.byte_written = 64;
        self.next_command = NextCommand::None;
        Ok(())
    }

    /// Reads the peripheral's busy flag.
    // NOTE(review): only reads a register, so `&self` would suffice — kept
    // `&mut self` to preserve the existing signature.
    fn is_busy(&mut self) -> bool {
        self.regs().query_busy().read().busy_state().bit_is_set()
    }

    /// Issues the deferred command (if any) and clears it.
    fn next_command(&mut self) {
        match self.next_command {
            NextCommand::MessageIng => {
                // Tell the hardware more message blocks follow.
                self.regs()
                    .set_message_ing()
                    .write(|w| w.set_text_ing().set_bit());
            }
            NextCommand::MessagePad => {
                // Tell the hardware the next block is the final (padded) one.
                self.regs()
                    .set_message_pad()
                    .write(|w| w.set_text_pad().set_bit());
            }
            NextCommand::None => {}
        }
        self.next_command = NextCommand::None;
    }

    /// Copies `incoming` into the message buffer at the current cursor.
    ///
    /// Returns the slice of `incoming` that did not fit into the current
    /// 64-byte block. When a block boundary is reached, triggers processing
    /// and schedules the follow-up command.
    fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        // Byte offset within the current 64-byte SHA-256 block.
        let mod_length = self.byte_written % 64;

        let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            incoming,
            64 / self.alignment_helper.align_size(),
            mod_length / self.alignment_helper.align_size(),
        );

        self.byte_written = self
            .byte_written
            .wrapping_add(incoming.len() - remaining.len());

        if bound_reached {
            // A full 64-byte block is in the buffer: start hashing it.
            self.regs()
                .set_message_one()
                .write(|w| w.set_text_one().set_bit());

            // Schedule what to tell the hardware before the next write:
            // with >= 56 bytes still pending, another full block will follow
            // (continue); otherwise the padding fits in the next block —
            // presumably mirroring the SHA-256 55-byte payload limit per
            // final block — TODO confirm.
            if remaining.len() >= 56 {
                self.next_command = NextCommand::MessageIng;
            } else {
                self.next_command = NextCommand::MessagePad;
            }
        }

        Ok(remaining)
    }

    /// Flushes sub-word bytes buffered in the alignment helper out to the
    /// message registers. Returns `WouldBlock` while the hardware is busy.
    fn flush_data(&mut self) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        let flushed = self.alignment_helper.flush_to(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            (self.byte_written % 64) / self.alignment_helper.align_size(),
        );

        self.byte_written = self.byte_written.wrapping_add(flushed);
        // If the flush completed a 64-byte block, hash it now and mark the
        // next block as the padded final one.
        if flushed > 0 && self.byte_written.is_multiple_of(64) {
            self.regs()
                .set_message_one()
                .write(|w| w.set_text_one().set_bit());
            while self.is_busy() {}
            self.next_command = NextCommand::MessagePad;
        }

        Ok(())
    }

    /// Writes SHA-256 padding: zeros up to the length field, then the total
    /// message length in bits as a 64-bit big-endian integer.
    fn padding(&mut self, msg_len: u64) {
        let mod_cursor = self.byte_written % 64;

        // If fewer than 8 bytes remain in this block, the length field does
        // not fit: zero-fill to the block boundary, hash that block, and
        // continue padding in a fresh block.
        if mod_cursor > 56 {
            let pad_len = 64 - mod_cursor;
            self.alignment_helper.volatile_write(
                #[cfg(esp32s2)]
                self.regs().wr_message_(0).as_ptr(),
                #[cfg(not(esp32s2))]
                self.regs().wr_message_mem(0).as_ptr(),
                0_u8,
                pad_len / self.alignment_helper.align_size(),
                mod_cursor / self.alignment_helper.align_size(),
            );
            self.regs()
                .set_message_one()
                .write(|w| w.set_text_one().set_bit());
            self.byte_written = self.byte_written.wrapping_add(pad_len);
            debug_assert!(self.byte_written.is_multiple_of(64));
            while self.is_busy() {}
            self.next_command = NextCommand::MessagePad;
            self.next_command();
        }

        // Zero-fill up to the last 8 bytes of the (now final) block.
        let mod_cursor = self.byte_written % 64;
        let pad_len = 64 - mod_cursor - core::mem::size_of::<u64>();

        self.alignment_helper.volatile_write(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            0_u8,
            pad_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );

        self.byte_written = self.byte_written.wrapping_add(pad_len);

        // Cursor must now sit exactly 8 bytes before the block boundary.
        assert_eq!(self.byte_written % 64, 64 - core::mem::size_of::<u64>());

        // SHA-256 encodes the message length in BITS, big-endian.
        let len_mem = (msg_len * 8).to_be_bytes();

        self.alignment_helper.aligned_volatile_copy(
            #[cfg(esp32s2)]
            self.regs().wr_message_(0).as_ptr(),
            #[cfg(not(esp32s2))]
            self.regs().wr_message_mem(0).as_ptr(),
            &len_mem,
            64 / self.alignment_helper.align_size(),
            (64 - core::mem::size_of::<u64>()) / self.alignment_helper.align_size(),
        );
        // Hash the final block and wait for completion.
        self.regs()
            .set_message_one()
            .write(|w| w.set_text_one().set_bit());

        while self.is_busy() {}
    }
}