#![doc = crate::before_snippet!()]
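//! # use esp_hal::sha::{Sha, Sha256};
//! # use nb::block;
//! // Minimal usage sketch (assumes the standard snippet prelude, which
//! // provides `peripherals` via `esp_hal::init`): hash a message with
//! // SHA-256 and read back the 32-byte digest.
//! let mut remaining = "HELLO, ESPRESSIF!".as_bytes();
//! let mut sha = Sha::new(peripherals.SHA);
//! let mut hasher = sha.start::<Sha256>();
//! let mut output = [0u8; 32];
//! while !remaining.is_empty() {
//!     // `update` returns the bytes it could not yet accept; the driver
//!     // itself is infallible, so `unwrap` is fine with `block!`.
//!     remaining = block!(hasher.update(remaining)).unwrap();
//! }
//! block!(hasher.finish(output.as_mut_slice())).unwrap();
//! # Ok(())
//! # }
//! ```
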
use core::{borrow::Borrow, convert::Infallible, marker::PhantomData, mem::size_of};

#[cfg(feature = "digest")]
pub use digest::Digest;

#[cfg(not(esp32))]
use crate::peripherals::Interrupt;
use crate::{
    peripheral::{Peripheral, PeripheralRef},
    peripherals::SHA,
    reg_access::{AlignmentHelper, SocDependentEndianess},
    system::GenericPeripheralGuard,
};

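/// SHA accelerator driver.
///
/// Owns the SHA peripheral and keeps it powered up for as long as the driver
/// is alive; digests are started through [`Sha::start`] or
/// [`Sha::start_owned`].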
pub struct Sha<'d> {
    sha: PeripheralRef<'d, SHA>,
    _guard: GenericPeripheralGuard<{ crate::system::Peripheral::Sha as u8 }>,
}

impl<'d> Sha<'d> {
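    /// Create a new instance of the SHA driver, taking the peripheral and
    /// enabling its clock.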
    pub fn new(sha: impl Peripheral<P = SHA> + 'd) -> Self {
        crate::into_ref!(sha);
        let guard = GenericPeripheralGuard::new();

        Self { sha, _guard: guard }
    }

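    /// Start a new digest for algorithm `A`, mutably borrowing the driver for
    /// the lifetime of the digest.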
    pub fn start<'a, A: ShaAlgorithm>(&'a mut self) -> ShaDigest<'d, A, &'a mut Self> {
        ShaDigest::new(self)
    }

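    /// Start a new digest for algorithm `A`, taking ownership of the driver.
    ///
    /// This is useful when the digest needs to outlive the scope the driver
    /// was created in.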
    pub fn start_owned<A: ShaAlgorithm>(self) -> ShaDigest<'d, A, Self> {
        ShaDigest::new(self)
    }

    // The ESP32 has dedicated per-algorithm register banks instead of this
    // shared register block, so this helper only exists on the other chips.
    #[cfg(not(esp32))]
    fn regs(&self) -> &crate::pac::sha::RegisterBlock {
        self.sha.register_block()
    }
}

impl crate::private::Sealed for Sha<'_> {}

#[cfg(not(esp32))]
#[instability::unstable]
impl crate::interrupt::InterruptConfigurable for Sha<'_> {
    fn set_interrupt_handler(&mut self, handler: crate::interrupt::InterruptHandler) {
        for core in crate::system::Cpu::other() {
            crate::interrupt::disable(core, Interrupt::SHA);
        }
        unsafe { crate::interrupt::bind_interrupt(Interrupt::SHA, handler.handler()) };
        unwrap!(crate::interrupt::enable(Interrupt::SHA, handler.priority()));
    }
}

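/// An in-progress digest over algorithm `A`, backed by a borrowed or owned
/// [`Sha`] driver (`S`).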
pub struct ShaDigest<'d, A, S: Borrow<Sha<'d>>> {
    sha: S,
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    cursor: usize,
    first_run: bool,
    finished: bool,
    message_buffer_is_full: bool,
    phantom: PhantomData<(&'d (), A)>,
}

impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> ShaDigest<'d, A, S> {
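    /// Creates a new digest and, on chips other than the ESP32, selects the
    /// algorithm by writing its mode bits to the peripheral.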
    #[allow(unused_mut)]
    pub fn new(mut sha: S) -> Self {
        #[cfg(not(esp32))]
        sha.borrow()
            .regs()
            .mode()
            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });

        Self {
            sha,
            alignment_helper: AlignmentHelper::default(),
            cursor: 0,
            first_run: true,
            finished: false,
            message_buffer_is_full: false,
            phantom: PhantomData,
        }
    }

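    /// Restores a previously [`save`](Self::save)d digest, reloading the
    /// message buffer and intermediate hash state into the peripheral.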
    #[cfg(not(esp32))]
    pub fn restore(sha: S, ctx: &mut Context<A>) -> Self {
        // Select the algorithm.
        sha.borrow()
            .regs()
            .mode()
            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });

        // Restore the saved message buffer (32 words).
        unsafe {
            core::ptr::copy_nonoverlapping(ctx.buffer.as_ptr(), m_mem(&sha.borrow().sha, 0), 32);
        }

        let mut ah = ctx.alignment_helper.clone();

        // Restore the previously saved intermediate hash state.
        ah.volatile_write_regset(h_mem(&sha.borrow().sha, 0), &ctx.saved_digest, 64);

        Self {
            sha,
            alignment_helper: ah,
            cursor: ctx.cursor,
            first_run: ctx.first_run,
            finished: ctx.finished,
            message_buffer_is_full: ctx.message_buffer_is_full,
            phantom: PhantomData,
        }
    }

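    /// Returns `true` if the peripheral is still processing a block.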
    pub fn is_busy(&self) -> bool {
        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                A::is_busy(&self.sha.borrow().sha)
            } else {
                self.sha.borrow().regs().busy().read().state().bit_is_set()
            }
        }
    }

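    /// Feeds `incoming` into the hasher, returning any bytes that were not
    /// yet consumed. Returns `nb::Error::WouldBlock` if the message buffer is
    /// full and the peripheral is still busy.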
    pub fn update<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        self.finished = false;

        self.write_data(incoming)
    }

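    /// Finalizes the digest and writes the hash into `output`.
    ///
    /// This appends the SHA padding (a single `0x80` byte, zero fill, then
    /// the message length in bits as a big-endian `u64` at the end of the
    /// final chunk), processes the remaining block(s), and reads back the
    /// result.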
    pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
        // Store the message length (in bits) for the padding.
        let length = (self.cursor as u64 * 8).to_be_bytes();
        // Append the mandatory "1" bit.
        nb::block!(self.update(&[0x80]))?;

        // Flush partial data, ensuring the cursor is word-aligned.
        {
            while self.is_busy() {}
            if self.message_buffer_is_full {
                self.process_buffer();
                self.message_buffer_is_full = false;
                while self.is_busy() {}
            }

            let flushed = self.alignment_helper.flush_to(
                m_mem(&self.sha.borrow().sha, 0),
                (self.cursor % A::CHUNK_LENGTH) / self.alignment_helper.align_size(),
            );
            self.cursor = self.cursor.wrapping_add(flushed);

            if flushed > 0 && self.cursor % A::CHUNK_LENGTH == 0 {
                self.process_buffer();
                while self.is_busy() {}
            }
        }
        debug_assert!(self.cursor % 4 == 0);

        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
        if (A::CHUNK_LENGTH - mod_cursor) < A::CHUNK_LENGTH / 8 {
            // The message length does not fit into the current chunk:
            // zero-pad to the end of this chunk and process it, so the length
            // can go into a fresh chunk.
            let pad_len = A::CHUNK_LENGTH - mod_cursor;
            self.alignment_helper.volatile_write(
                m_mem(&self.sha.borrow().sha, 0),
                0_u8,
                pad_len / self.alignment_helper.align_size(),
                mod_cursor / self.alignment_helper.align_size(),
            );
            self.process_buffer();
            self.cursor = self.cursor.wrapping_add(pad_len);

            debug_assert_eq!(self.cursor % A::CHUNK_LENGTH, 0);

            while self.is_busy() {}
        }

        // Zero-pad up to the length field, then write the message length into
        // the last eight bytes of the chunk.
        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
        let pad_len = A::CHUNK_LENGTH - mod_cursor - size_of::<u64>();

        self.alignment_helper.volatile_write(
            m_mem(&self.sha.borrow().sha, 0),
            0,
            pad_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );

        self.alignment_helper.aligned_volatile_copy(
            m_mem(&self.sha.borrow().sha, 0),
            &length,
            A::CHUNK_LENGTH / self.alignment_helper.align_size(),
            (A::CHUNK_LENGTH - size_of::<u64>()) / self.alignment_helper.align_size(),
        );

        self.process_buffer();
        while self.is_busy() {}

        #[cfg(esp32)]
        {
            // The ESP32 requires an explicit LOAD before the digest can be
            // read back.
            A::load(&self.sha.borrow().sha);

            while self.is_busy() {}
        }

        // Read back the digest (at most 32 bytes are copied).
        self.alignment_helper.volatile_read_regset(
            h_mem(&self.sha.borrow().sha, 0),
            output,
            core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
        );

        // Reset the digest state so the driver can be reused.
        self.first_run = true;
        self.cursor = 0;
        self.alignment_helper.reset();

        Ok(())
    }

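    /// Saves the current hashing state into `ctx` so it can later be resumed
    /// with [`restore`](Self::restore).
    ///
    /// Returns `nb::Error::WouldBlock` while the peripheral is busy.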
    #[cfg(not(esp32))]
    pub fn save(&mut self, context: &mut Context<A>) -> nb::Result<(), Infallible> {
        if self.is_busy() {
            return Err(nb::Error::WouldBlock);
        }

        context.alignment_helper = self.alignment_helper.clone();
        context.cursor = self.cursor;
        context.first_run = self.first_run;
        context.finished = self.finished;
        context.message_buffer_is_full = self.message_buffer_is_full;

        // Save the intermediate hash state.
        self.alignment_helper.volatile_read_regset(
            h_mem(&self.sha.borrow().sha, 0),
            &mut context.saved_digest,
            64 / self.alignment_helper.align_size(),
        );

        // Save the message buffer (32 words).
        unsafe {
            core::ptr::copy_nonoverlapping(
                m_mem(&self.sha.borrow().sha, 0),
                context.buffer.as_mut_ptr(),
                32,
            );
        }

        Ok(())
    }

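    /// Discards this digest and returns the underlying driver.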
    pub fn cancel(self) -> S {
        self.sha
    }

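    /// Processes the current message buffer: issues a START for the first
    /// block of a message and a CONTINUE for every subsequent block.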
    fn process_buffer(&mut self) {
        let sha = self.sha.borrow();

        cfg_if::cfg_if! {
            if #[cfg(esp32)] {
                if self.first_run {
                    A::start(&sha.sha);
                    self.first_run = false;
                } else {
                    A::r#continue(&sha.sha);
                }
            } else {
                if self.first_run {
                    // Set SHA_START_REG
                    sha.regs().start().write(|w| unsafe { w.bits(1) });
                    self.first_run = false;
                } else {
                    // Set SHA_CONTINUE_REG
                    sha.regs().continue_().write(|w| unsafe { w.bits(1) });
                }
            }
        }
    }

    // Writes data into the message buffer, returning the bytes that did not
    // fit into the current chunk.
    fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
        if self.message_buffer_is_full {
            if self.is_busy() {
                // The buffer is full and the hardware is still processing the
                // previous block; the caller has to wait.
                return Err(nb::Error::WouldBlock);
            } else {
                // Submit the pending block and mark the buffer as free again.
                self.process_buffer();
                self.message_buffer_is_full = false;
            }
        }

        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
        let chunk_len = A::CHUNK_LENGTH;

        let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
            m_mem(&self.sha.borrow().sha, 0),
            incoming,
            chunk_len / self.alignment_helper.align_size(),
            mod_cursor / self.alignment_helper.align_size(),
        );

        self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());

        if bound_reached {
            // A full chunk has been written into the message buffer.
            if self.is_busy() {
                // The hardware is still busy; defer submission of this block.
                self.message_buffer_is_full = true;
            } else {
                self.process_buffer();
            }
        }

        Ok(remaining)
    }
}

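/// Saved state of a digest, captured via [`ShaDigest::save`] and resumed via
/// [`ShaDigest::restore`].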
#[cfg(not(esp32))]
#[derive(Debug, Clone)]
pub struct Context<A: ShaAlgorithm> {
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    cursor: usize,
    first_run: bool,
    finished: bool,
    message_buffer_is_full: bool,
    /// Saved message buffer.
    buffer: [u32; 32],
    /// Saved intermediate hash state.
    saved_digest: [u8; 64],
    phantom: PhantomData<A>,
}

#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Context<A> {
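    /// Create a new, empty context.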
    pub fn new() -> Self {
        Self {
            cursor: 0,
            first_run: true,
            finished: false,
            message_buffer_is_full: false,
            alignment_helper: AlignmentHelper::default(),
            buffer: [0; 32],
            saved_digest: [0; 64],
            phantom: PhantomData,
        }
    }

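    /// Returns `true` if no message blocks have been processed yet, i.e. the
    /// next block starts a new message.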
    pub fn first_run(&self) -> bool {
        self.first_run
    }

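    /// Returns `true` if the digest has been marked as finished.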
    pub fn finished(&self) -> bool {
        self.finished
    }
}

#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Default for Context<A> {
    fn default() -> Self {
        Self::new()
    }
}

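/// Implemented by the supported SHA algorithm marker types, exposing the
/// parameters and (on ESP32) the per-algorithm register operations the driver
/// needs.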
pub trait ShaAlgorithm: crate::private::Sealed {
    /// The name of the algorithm as a string.
    const ALGORITHM: &'static str;

    /// The length, in bytes, of the chunk the algorithm processes at a time.
    ///
    /// For SHA-256 this is 64 bytes.
    const CHUNK_LENGTH: usize;

    /// The length, in bytes, of the digest the algorithm produces.
    ///
    /// For SHA-256 this is 32 bytes.
    const DIGEST_LENGTH: usize;

    #[cfg(feature = "digest")]
    #[doc(hidden)]
    type DigestOutputSize: digest::generic_array::ArrayLength<u8> + 'static;

    #[cfg(not(esp32))]
    #[doc(hidden)]
    const MODE_AS_BITS: u8;

    #[cfg(esp32)]
    #[doc(hidden)]
    fn start(sha: &crate::peripherals::SHA);

    #[cfg(esp32)]
    #[doc(hidden)]
    fn r#continue(sha: &crate::peripherals::SHA);

    #[cfg(esp32)]
    #[doc(hidden)]
    fn load(sha: &crate::peripherals::SHA);

    #[cfg(esp32)]
    #[doc(hidden)]
    fn is_busy(sha: &crate::peripherals::SHA) -> bool;
}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::HashMarker for ShaDigest<'d, A, S> {}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::OutputSizeUser for ShaDigest<'d, A, S> {
    type OutputSize = A::DigestOutputSize;
}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::Update for ShaDigest<'d, A, S> {
    fn update(&mut self, data: &[u8]) {
        let mut remaining = data.as_ref();
        while !remaining.is_empty() {
            remaining = nb::block!(Self::update(self, remaining)).unwrap();
        }
    }
}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::FixedOutput for ShaDigest<'d, A, S> {
    fn finalize_into(mut self, out: &mut digest::Output<Self>) {
        nb::block!(self.finish(out)).unwrap();
    }
}

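/// Implements `ShaAlgorithm` for a marker type: `$mode_bits` selects the
/// algorithm on chips with a shared SHA core, while the `paste`-generated
/// register accesses drive the dedicated per-algorithm registers on the
/// ESP32.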
macro_rules! impl_sha {
    ($name: ident, $mode_bits: tt, $digest_length: tt, $chunk_length: tt) => {
        /// A SHA algorithm marker type.
        #[non_exhaustive]
        pub struct $name;

        impl crate::private::Sealed for $name {}

        impl $crate::sha::ShaAlgorithm for $name {
            const ALGORITHM: &'static str = stringify!($name);

            const CHUNK_LENGTH: usize = $chunk_length;

            const DIGEST_LENGTH: usize = $digest_length;

            #[cfg(not(esp32))]
            const MODE_AS_BITS: u8 = $mode_bits;

            #[cfg(feature = "digest")]
            type DigestOutputSize = paste::paste!(digest::consts::[< U $digest_length >]);

            #[cfg(esp32)]
            fn start(sha: &crate::peripherals::SHA) {
                paste::paste! {
                    sha.register_block().[< $name:lower _start >]().write(|w| w.[< $name:lower _start >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn r#continue(sha: &crate::peripherals::SHA) {
                paste::paste! {
                    sha.register_block().[< $name:lower _continue >]().write(|w| w.[< $name:lower _continue >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn load(sha: &crate::peripherals::SHA) {
                paste::paste! {
                    sha.register_block().[< $name:lower _load >]().write(|w| w.[< $name:lower _load >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn is_busy(sha: &crate::peripherals::SHA) -> bool {
                paste::paste! {
                    sha.register_block().[< $name:lower _busy >]().read().[< $name:lower _busy >]().bit_is_set()
                }
            }
        }
    };
}

impl_sha!(Sha1, 0, 20, 64);
#[cfg(not(esp32))]
impl_sha!(Sha224, 1, 28, 64);
impl_sha!(Sha256, 2, 32, 64);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha384, 3, 48, 128);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha512, 4, 64, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_224, 5, 28, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_256, 6, 32, 128);

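// Pointers into the hash-state (`h_mem`) and message (`m_mem`) register
// banks. The ESP32 exposes a single `text` bank that serves both roles.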
fn h_mem(sha: &crate::peripherals::SHA, index: usize) -> *mut u32 {
    let sha = sha.register_block();
    cfg_if::cfg_if! {
        if #[cfg(esp32)] {
            sha.text(index).as_ptr()
        } else {
            sha.h_mem(index).as_ptr()
        }
    }
}

fn m_mem(sha: &crate::peripherals::SHA, index: usize) -> *mut u32 {
    let sha = sha.register_block();
    cfg_if::cfg_if! {
        if #[cfg(esp32)] {
            sha.text(index).as_ptr()
        } else {
            sha.m_mem(index).as_ptr()
        }
    }
}