1#![doc = crate::before_snippet!()]
34use core::{borrow::Borrow, convert::Infallible, marker::PhantomData, mem::size_of};
61
62pub use digest::Digest;
64
65#[cfg(not(esp32))]
66use crate::peripherals::Interrupt;
67use crate::{
68 peripherals::SHA,
69 reg_access::{AlignmentHelper, SocDependentEndianess},
70 system::GenericPeripheralGuard,
71};
72
/// SHA hardware-accelerator driver.
///
/// Owns the SHA peripheral for lifetime `'d`; digests are started via
/// [`Sha::start`] (borrowing) or [`Sha::start_owned`] (consuming).
pub struct Sha<'d> {
    // The underlying SHA peripheral instance.
    sha: SHA<'d>,
    // Keeps the SHA peripheral enabled for the driver's lifetime
    // (presumably manages clock/reset via the system module — see
    // `GenericPeripheralGuard`).
    _guard: GenericPeripheralGuard<{ crate::system::Peripheral::Sha as u8 }>,
}
78
79impl<'d> Sha<'d> {
80 pub fn new(sha: SHA<'d>) -> Self {
82 let guard = GenericPeripheralGuard::new();
83
84 Self { sha, _guard: guard }
85 }
86
87 pub fn start<'a, A: ShaAlgorithm>(&'a mut self) -> ShaDigest<'d, A, &'a mut Self> {
89 ShaDigest::new(self)
90 }
91
92 pub fn start_owned<A: ShaAlgorithm>(self) -> ShaDigest<'d, A, Self> {
96 ShaDigest::new(self)
97 }
98
99 #[cfg(not(esp32))]
100 fn regs(&self) -> &crate::pac::sha::RegisterBlock {
101 self.sha.register_block()
102 }
103}
104
// Allows `Sha` to satisfy the crate's sealed-trait bounds.
impl crate::private::Sealed for Sha<'_> {}
106
#[cfg(not(esp32))]
#[instability::unstable]
impl crate::interrupt::InterruptConfigurable for Sha<'_> {
    /// Binds `handler` to the SHA interrupt and enables it at the handler's
    /// priority on the current core.
    fn set_interrupt_handler(&mut self, handler: crate::interrupt::InterruptHandler) {
        // Ensure no other core keeps servicing the SHA interrupt.
        for core in crate::system::Cpu::other() {
            crate::interrupt::disable(core, Interrupt::SHA);
        }
        // SAFETY: the raw handler pointer comes from a valid `InterruptHandler`.
        unsafe { crate::interrupt::bind_interrupt(Interrupt::SHA, handler.handler()) };
        unwrap!(crate::interrupt::enable(Interrupt::SHA, handler.priority()));
    }
}
118
/// An in-progress hash computation for algorithm `A`, driving the hardware
/// through an owned or borrowed [`Sha`] driver `S`.
pub struct ShaDigest<'d, A, S: Borrow<Sha<'d>>> {
    // Owned or mutably borrowed SHA driver.
    sha: S,
    // Accumulates unaligned input bytes until full words can be written to
    // the peripheral's message memory.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Total message bytes consumed so far; used for padding and for the
    // trailing bit-length field.
    cursor: usize,
    // True until the first block is processed; selects START vs CONTINUE.
    first_run: bool,
    // Cleared at the start of `update`.
    finished: bool,
    // Set when a full block was written while the hardware was still busy;
    // the block is processed on the next `update`/`finish`.
    message_buffer_is_full: bool,
    phantom: PhantomData<(&'d (), A)>,
}
144
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> ShaDigest<'d, A, S> {
    /// Creates a new digest over the given SHA driver.
    // `mut` is only needed on some chips; unused otherwise.
    #[allow(unused_mut)]
    pub fn new(mut sha: S) -> Self {
        // Non-ESP32 chips select the algorithm via the MODE register; ESP32
        // instead has per-algorithm START/CONTINUE/LOAD registers.
        #[cfg(not(esp32))]
        sha.borrow()
            .regs()
            .mode()
            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });

        Self {
            sha,
            alignment_helper: AlignmentHelper::default(),
            cursor: 0,
            first_run: true,
            finished: false,
            message_buffer_is_full: false,
            phantom: PhantomData,
        }
    }

    /// Resumes a previously [`save`](Self::save)d computation by writing the
    /// saved message buffer and intermediate digest back into the hardware.
    #[cfg(not(esp32))]
    pub fn restore(sha: S, ctx: &mut Context<A>) -> Self {
        // Re-select the algorithm; the MODE register may have been
        // overwritten by another digest in the meantime.
        sha.borrow()
            .regs()
            .mode()
            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });

        // SAFETY: `ctx.buffer` holds 32 u32 words (128 bytes), matching the
        // largest chunk size, and `m_mem` points at the message memory.
        unsafe {
            core::ptr::copy_nonoverlapping(ctx.buffer.as_ptr(), m_mem(&sha.borrow().sha, 0), 32);
        }

        let mut ah = ctx.alignment_helper.clone();

        // Write the 64-byte saved digest state back into H_MEM.
        // NOTE(review): other regset calls divide the count by
        // `align_size()`; confirm the bare `64` here is intentional.
        ah.volatile_write_regset(h_mem(&sha.borrow().sha, 0), &ctx.saved_digest, 64);

        Self {
            sha,
            alignment_helper: ah,
            cursor: ctx.cursor,
            first_run: ctx.first_run,
            finished: ctx.finished,
            message_buffer_is_full: ctx.message_buffer_is_full,
            phantom: PhantomData,
        }
    }

    /// Returns true while the hardware is processing a block.
    pub fn is_busy(&self) -> bool {
        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                // ESP32 exposes a per-algorithm BUSY register.
                A::is_busy(&self.sha.borrow().sha)
            } else {
                self.sha.borrow().regs().busy().read().state().bit_is_set()
            }
        }
    }

    /// Feeds `incoming` bytes to the hasher, returning the unconsumed
    /// remainder. Returns `WouldBlock` if the hardware cannot accept data
    /// yet; call again with the same slice.
    pub fn update<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        self.finished = false;

        self.write_data(incoming)
    }

    /// Applies SHA padding, processes the final block(s), and reads the
    /// digest into `output`. Resets internal state for reuse afterwards.
    pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
        // Message length in BITS, big-endian — captured before any padding
        // bytes are appended (FIPS 180-4 length field).
        let length = (self.cursor as u64 * 8).to_be_bytes();
        // Append the mandatory 0x80 terminator byte.
        nb::block!(self.update(&[0x80]))?; {
            while self.is_busy() {}
            // Drain a block that was deferred because the hardware was busy.
            if self.message_buffer_is_full {
                self.process_buffer();
                self.message_buffer_is_full = false;
                while self.is_busy() {}
            }

            // Flush any sub-word bytes still held by the alignment helper.
            let flushed = self.alignment_helper.flush_to(
                m_mem(&self.sha.borrow().sha, 0),
                (self.cursor % A::CHUNK_LENGTH) / self.alignment_helper.align_size(),
            );
            self.cursor = self.cursor.wrapping_add(flushed);

            // Flushing may have completed a block — process it.
            if flushed > 0 && self.cursor % A::CHUNK_LENGTH == 0 {
                self.process_buffer();
                while self.is_busy() {}
            }
        }
        // After flushing, the cursor is word-aligned.
        debug_assert!(self.cursor % 4 == 0);

        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
        // If the current block's tail is too small to hold the length field
        // (CHUNK_LENGTH/8 = 8 bytes for 64-byte chunks, 16 for 128-byte
        // chunks), zero-fill the rest of this block and process it, so the
        // length lands in a fresh block.
        if (A::CHUNK_LENGTH - mod_cursor) < A::CHUNK_LENGTH / 8 {
            let pad_len = A::CHUNK_LENGTH - mod_cursor;
            self.alignment_helper.volatile_write(
                m_mem(&self.sha.borrow().sha, 0),
                0_u8,
                pad_len / self.alignment_helper.align_size(),
                mod_cursor / self.alignment_helper.align_size(),
            );
            self.process_buffer();
            self.cursor = self.cursor.wrapping_add(pad_len);

            debug_assert_eq!(self.cursor % A::CHUNK_LENGTH, 0);

            while self.is_busy() {}
        }

        // Zero-pad up to the final 8 bytes of the block. For SHA-384/512 the
        // length field is nominally 128 bits; its upper half is covered by
        // this zero padding, which is correct for messages < 2^64 bits.
        let mod_cursor = self.cursor % A::CHUNK_LENGTH; let pad_len = A::CHUNK_LENGTH - mod_cursor - size_of::<u64>();

        self.alignment_helper.volatile_write(
            m_mem(&self.sha.borrow().sha, 0),
            0,
            pad_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );

        // Write the 64-bit big-endian bit length into the last 8 bytes.
        self.alignment_helper.aligned_volatile_copy(
            m_mem(&self.sha.borrow().sha, 0),
            &length,
            A::CHUNK_LENGTH / self.alignment_helper.align_size(),
            (A::CHUNK_LENGTH - size_of::<u64>()) / self.alignment_helper.align_size(),
        );

        // Process the final block.
        self.process_buffer();
        while self.is_busy() {}

        #[cfg(esp32)]
        {
            // ESP32 requires an explicit LOAD to latch the digest into the
            // TEXT registers before it can be read.
            A::load(&self.sha.borrow().sha);

            while self.is_busy() {}
        }

        // Copy the digest out of H_MEM.
        // NOTE(review): the count is capped at 32 bytes — for SHA-384/512
        // (48/64-byte digests) confirm this reads the full digest, given
        // `volatile_read_regset`'s count semantics are not visible here.
        self.alignment_helper.volatile_read_regset(
            h_mem(&self.sha.borrow().sha, 0),
            output,
            core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
        );

        // Reset so the digest object can be reused for a new message.
        self.first_run = true;
        self.cursor = 0;
        self.alignment_helper.reset();

        Ok(())
    }

    /// Snapshots the in-progress computation into `context` so it can later
    /// be resumed with [`restore`](Self::restore).
    /// Returns `WouldBlock` while the hardware is still processing.
    #[cfg(not(esp32))]
    pub fn save(&mut self, context: &mut Context<A>) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        context.alignment_helper = self.alignment_helper.clone();
        context.cursor = self.cursor;
        context.first_run = self.first_run;
        context.finished = self.finished;
        context.message_buffer_is_full = self.message_buffer_is_full;

        // Read the 64-byte intermediate digest state out of H_MEM.
        self.alignment_helper.volatile_read_regset(
            h_mem(&self.sha.borrow().sha, 0),
            &mut context.saved_digest,
            64 / self.alignment_helper.align_size(),
        );

        // SAFETY: `context.buffer` holds 32 u32 words (128 bytes), matching
        // the size of the hardware message memory snapshot taken here.
        unsafe {
            core::ptr::copy_nonoverlapping(
                m_mem(&self.sha.borrow().sha, 0),
                context.buffer.as_mut_ptr(),
                32,
            );
        }

        Ok(())
    }

    /// Abandons the computation and returns the underlying SHA driver.
    pub fn cancel(self) -> S {
        self.sha
    }

    /// Kicks the hardware to process the block currently in message memory:
    /// START for the first block, CONTINUE for subsequent ones.
    fn process_buffer(&mut self) {
        let sha = self.sha.borrow();

        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                if self.first_run {
                    A::start(&sha.sha);
                    self.first_run = false;
                } else {
                    A::r#continue(&sha.sha);
                }
            } else {
                if self.first_run {
                    sha.regs().start().write(|w| unsafe { w.bits(1) });
                    self.first_run = false;
                } else {
                    sha.regs().continue_().write(|w| unsafe { w.bits(1) });
                }
            }
        }
    }

    /// Copies as much of `incoming` as fits into the current block of the
    /// hardware message memory, returning the unconsumed remainder.
    fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        // A previously-filled block is still pending: process it now, or
        // ask the caller to retry if the hardware is busy.
        if self.message_buffer_is_full {
            if self.is_busy() {
                return Err(nb::Error::WouldBlock);
            } else {
                self.process_buffer();
                self.message_buffer_is_full = false;
            }
        }

        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
        let chunk_len = A::CHUNK_LENGTH;

        let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
            m_mem(&self.sha.borrow().sha, 0),
            incoming,
            chunk_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );

        self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());

        // A full block was written: process it immediately if the hardware
        // is idle, otherwise defer until the next call.
        if bound_reached {
            if self.is_busy() {
                self.message_buffer_is_full = true;
            } else {
                self.process_buffer();
            }
        }

        Ok(remaining)
    }
}
421
/// Saved state of an in-progress SHA computation, produced by
/// [`ShaDigest::save`] and consumed by [`ShaDigest::restore`].
/// Not available on ESP32.
#[cfg(not(esp32))]
#[derive(Debug, Clone)]
pub struct Context<A: ShaAlgorithm> {
    // Snapshot of the software-side alignment buffer.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Total message bytes consumed when the snapshot was taken.
    cursor: usize,
    first_run: bool,
    finished: bool,
    message_buffer_is_full: bool,
    // Snapshot of the hardware message memory: 32 u32 words = 128 bytes,
    // enough for the largest (128-byte) chunk size.
    buffer: [u32; 32],
    // Snapshot of the intermediate digest state read from H_MEM.
    saved_digest: [u8; 64],
    phantom: PhantomData<A>,
}
437
438#[cfg(not(esp32))]
439impl<A: ShaAlgorithm> Context<A> {
440 pub fn new() -> Self {
442 Self {
443 cursor: 0,
444 first_run: true,
445 finished: false,
446 message_buffer_is_full: false,
447 alignment_helper: AlignmentHelper::default(),
448 buffer: [0; 32],
449 saved_digest: [0; 64],
450 phantom: PhantomData,
451 }
452 }
453
454 pub fn first_run(&self) -> bool {
459 self.first_run
460 }
461
462 pub fn finished(&self) -> bool {
466 self.finished
467 }
468}
469
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Default for Context<A> {
    // Equivalent to `Context::new()`.
    fn default() -> Self {
        Self::new()
    }
}
476
/// Implemented by the marker type of each SHA variant; supplies the
/// per-algorithm constants and, on ESP32, the per-algorithm register
/// accessors used by the driver.
pub trait ShaAlgorithm: crate::private::Sealed {
    /// The algorithm's type name, e.g. `"Sha256"` (via `stringify!`).
    const ALGORITHM: &'static str;

    /// Message block size in bytes (64 for SHA-1/224/256 variants, 128 for
    /// the SHA-512 family).
    const CHUNK_LENGTH: usize;

    /// Digest size in bytes.
    const DIGEST_LENGTH: usize;

    // Type-level digest size used by the `digest` crate's OutputSizeUser.
    #[doc(hidden)]
    type DigestOutputSize: digest::generic_array::ArrayLength<u8> + 'static;

    // Value written to the MODE register to select this algorithm.
    #[cfg(not(esp32))]
    #[doc(hidden)]
    const MODE_AS_BITS: u8;

    // ESP32 has no MODE register; each algorithm has dedicated
    // START/CONTINUE/LOAD/BUSY registers, accessed through these hooks.
    #[cfg(esp32)]
    #[doc(hidden)]
    fn start(sha: &crate::peripherals::SHA<'_>);

    #[cfg(esp32)]
    #[doc(hidden)]
    fn r#continue(sha: &crate::peripherals::SHA<'_>);

    // Latches the digest into the readable TEXT registers.
    #[cfg(esp32)]
    #[doc(hidden)]
    fn load(sha: &crate::peripherals::SHA<'_>);

    #[cfg(esp32)]
    #[doc(hidden)]
    fn is_busy(sha: &crate::peripherals::SHA<'_>) -> bool;
}
519
// Marks `ShaDigest` as a hash function for the `digest` crate ecosystem.
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::HashMarker for ShaDigest<'d, A, S> {}

impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::OutputSizeUser for ShaDigest<'d, A, S> {
    // The output size is fixed by the chosen algorithm.
    type OutputSize = A::DigestOutputSize;
}
528
529impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::Update for ShaDigest<'d, A, S> {
530 fn update(&mut self, mut remaining: &[u8]) {
531 while !remaining.is_empty() {
532 remaining = nb::block!(Self::update(self, remaining)).unwrap();
533 }
534 }
535}
536
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::FixedOutput for ShaDigest<'d, A, S> {
    /// Blocks until the hardware completes, then writes the digest into `out`.
    fn finalize_into(mut self, out: &mut digest::Output<Self>) {
        nb::block!(self.finish(out)).unwrap();
    }
}
542
/// Generates a marker type for one SHA variant plus its [`ShaAlgorithm`]
/// implementation.
///
/// Arguments: type name, MODE-register bits (unused on ESP32), digest length
/// in bytes, chunk (message block) length in bytes.
macro_rules! impl_sha {
    ($name: ident, $mode_bits: tt, $digest_length: tt, $chunk_length: tt) => {
        #[non_exhaustive]
        pub struct $name;

        impl crate::private::Sealed for $name {}

        impl $crate::sha::ShaAlgorithm for $name {
            const ALGORITHM: &'static str = stringify!($name);

            const CHUNK_LENGTH: usize = $chunk_length;

            const DIGEST_LENGTH: usize = $digest_length;

            #[cfg(not(esp32))]
            const MODE_AS_BITS: u8 = $mode_bits;

            // Maps the byte count to the digest crate's typenum, e.g.
            // 32 -> digest::consts::U32.
            type DigestOutputSize = paste::paste!(digest::consts::[< U $digest_length >]);

            // ESP32 register names are derived by lower-casing the type name
            // (e.g. `sha256_start`) via `paste`.
            #[cfg(esp32)]
            fn start(sha: &crate::peripherals::SHA<'_>) {
                paste::paste! {
                    sha.register_block().[< $name:lower _start >]().write(|w| w.[< $name:lower _start >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn r#continue(sha: &crate::peripherals::SHA<'_>) {
                paste::paste! {
                    sha.register_block().[< $name:lower _continue >]().write(|w| w.[< $name:lower _continue >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn load(sha: &crate::peripherals::SHA<'_>) {
                paste::paste! {
                    sha.register_block().[< $name:lower _load >]().write(|w| w.[< $name:lower _load >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn is_busy(sha: &crate::peripherals::SHA<'_>) -> bool {
                paste::paste! {
                    sha.register_block().[< $name:lower _busy >]().read().[< $name:lower _busy >]().bit_is_set()
                }
            }
        }
    };
}
605
// impl_sha!(Name, MODE bits, digest bytes, chunk bytes); availability is
// gated per chip.
impl_sha!(Sha1, 0, 20, 64);
#[cfg(not(esp32))]
impl_sha!(Sha224, 1, 28, 64);
impl_sha!(Sha256, 2, 32, 64);
// The SHA-512 family uses 128-byte message blocks.
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha384, 3, 48, 128);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha512, 4, 64, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_224, 5, 28, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_256, 6, 32, 128);
632
633fn h_mem(sha: &crate::peripherals::SHA<'_>, index: usize) -> *mut u32 {
634 let sha = sha.register_block();
635 cfg_if::cfg_if! {
636 if #[cfg(esp32)] {
637 sha.text(index).as_ptr()
638 } else {
639 sha.h_mem(index).as_ptr()
640 }
641 }
642}
643
644fn m_mem(sha: &crate::peripherals::SHA<'_>, index: usize) -> *mut u32 {
645 let sha = sha.register_block();
646 cfg_if::cfg_if! {
647 if #[cfg(esp32)] {
648 sha.text(index).as_ptr()
649 } else {
650 sha.m_mem(index).as_ptr()
651 }
652 }
653}