esp_hal/
sha.rs

1//! # Secure Hash Algorithm (SHA) Accelerator
2//!
3//! ## Overview
4//! This SHA accelerator is a hardware device that speeds up the SHA algorithm
5//! significantly, compared to a SHA algorithm implemented solely in software
6//!
7//! ## Configuration
8//! This driver allows you to perform cryptographic hash operations using
9//! various hash algorithms supported by the SHA peripheral, such as:
10//!    * SHA-1
11//!    * SHA-224
12//!    * SHA-256
13//!    * SHA-384
14//!    * SHA-512
15//!
16//! The driver supports two working modes:
17//!    * Typical SHA
18//!    * DMA-SHA
19//!
20//! It provides functions to update the hash calculation with input data, finish
21//! the hash calculation and retrieve the resulting hash value. The SHA
22//! peripheral on ESP chips can handle large data streams efficiently, making it
23//! suitable for cryptographic applications that require secure hashing.
24//!
25//! To use the SHA Peripheral Driver, you need to initialize it with the desired
26//! SHA mode and the corresponding SHA peripheral. Once initialized, you can
27//! update the hash calculation by providing input data, finish the calculation
28//! to retrieve the hash value and repeat the process for a new hash calculation
29//! if needed.
30//!
31//! ## Examples
32//! ```rust, no_run
33#![doc = crate::before_snippet!()]
34//! # use esp_hal::sha::Sha;
35//! # use esp_hal::sha::Sha256;
36//! # use nb::block;
37//! let mut source_data = "HELLO, ESPRESSIF!".as_bytes();
38//! let mut sha = Sha::new(peripherals.SHA);
39//! let mut hasher = sha.start::<Sha256>();
40//! // Short hashes can be created by decreasing the output buffer to the
41//! // desired length
42//! let mut output = [0u8; 32];
43//!
44//! while !source_data.is_empty() {
45//!     // All the HW Sha functions are infallible so unwrap is fine to use if
46//!     // you use block!
47//!     source_data = block!(hasher.update(source_data))?;
48//! }
49//!
50//! // Finish can be called as many times as desired to get multiple copies of
51//! // the output.
52//! block!(hasher.finish(output.as_mut_slice()))?;
53//!
54//! # Ok(())
55//! # }
56//! ```
57//! ## Implementation State
58//! - DMA-SHA Mode is not supported.
59
60use core::{borrow::Borrow, convert::Infallible, marker::PhantomData, mem::size_of};
61
62/// Re-export digest for convenience
63#[cfg(feature = "digest")]
64pub use digest::Digest;
65
66#[cfg(not(esp32))]
67use crate::peripherals::Interrupt;
68use crate::{
69    peripheral::{Peripheral, PeripheralRef},
70    peripherals::SHA,
71    reg_access::{AlignmentHelper, SocDependentEndianess},
72    system::GenericPeripheralGuard,
73};
74
/// The SHA Accelerator driver instance
pub struct Sha<'d> {
    // Exclusive handle to the SHA peripheral hardware.
    sha: PeripheralRef<'d, SHA>,
    // Keeps the SHA peripheral powered/clocked for the driver's lifetime.
    _guard: GenericPeripheralGuard<{ crate::system::Peripheral::Sha as u8 }>,
}
80
81impl<'d> Sha<'d> {
82    /// Create a new instance of the SHA Accelerator driver.
83    pub fn new(sha: impl Peripheral<P = SHA> + 'd) -> Self {
84        crate::into_ref!(sha);
85        let guard = GenericPeripheralGuard::new();
86
87        Self { sha, _guard: guard }
88    }
89
90    /// Start a new digest.
91    pub fn start<'a, A: ShaAlgorithm>(&'a mut self) -> ShaDigest<'d, A, &'a mut Self> {
92        ShaDigest::new(self)
93    }
94
95    /// Start a new digest and take ownership of the driver.
96    /// This is useful for storage outside a function body. i.e. in static or
97    /// struct.
98    pub fn start_owned<A: ShaAlgorithm>(self) -> ShaDigest<'d, A, Self> {
99        ShaDigest::new(self)
100    }
101
102    #[cfg(not(esp32))]
103    fn regs(&self) -> &crate::pac::sha::RegisterBlock {
104        self.sha.register_block()
105    }
106}
107
108impl crate::private::Sealed for Sha<'_> {}
109
#[cfg(not(esp32))]
#[instability::unstable]
impl crate::interrupt::InterruptConfigurable for Sha<'_> {
    fn set_interrupt_handler(&mut self, handler: crate::interrupt::InterruptHandler) {
        // Keep other cores from servicing the SHA interrupt while the handler
        // is being re-bound on this core.
        for core in crate::system::Cpu::other() {
            crate::interrupt::disable(core, Interrupt::SHA);
        }
        // SAFETY: binding a new vector for SHA; the interrupt is disabled on
        // all other cores and re-enabled below with the requested priority.
        unsafe { crate::interrupt::bind_interrupt(Interrupt::SHA, handler.handler()) };
        unwrap!(crate::interrupt::enable(Interrupt::SHA, handler.priority()));
    }
}
121
// A few notes on this implementation with regards to 'memcpy':
// - The registers are *not* cleared after processing, so padding needs to be
//   written out explicitly.
// - This component uses core::intrinsics::volatile_* which is unstable, but it
//   is the only way to efficiently copy memory with volatile semantics.
// - For these particular registers (and probably others), a full u32 must be
//   written at a time; partial register writes (i.e. in u8 mode) do not work.
//   - This means that incoming bytes must be buffered up to 4 u8's in order
//     to form a full u32.
133
/// An active digest
///
/// This implementation might fail after u32::MAX/8 bytes, to increase please
/// see ::finish() length/self.cursor usage
pub struct ShaDigest<'d, A, S: Borrow<Sha<'d>>> {
    // Driver handle; either borrowed (`Sha::start`) or owned (`Sha::start_owned`).
    sha: S,
    // Buffers incoming bytes into full, SoC-endian u32 register writes.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Total message bytes consumed so far; used to derive the padded bit length.
    cursor: usize,
    // True until the first block is submitted; selects START vs CONTINUE.
    first_run: bool,
    // Set by `finish()` paths; cleared whenever new data arrives via `update()`.
    finished: bool,
    // True when a full message block is queued but the hardware was still busy.
    message_buffer_is_full: bool,
    phantom: PhantomData<(&'d (), A)>,
}
147
148impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> ShaDigest<'d, A, S> {
149    /// Creates a new digest
150    #[allow(unused_mut)]
151    pub fn new(mut sha: S) -> Self {
152        #[cfg(not(esp32))]
153        // Setup SHA Mode.
154        sha.borrow()
155            .regs()
156            .mode()
157            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });
158
159        Self {
160            sha,
161            alignment_helper: AlignmentHelper::default(),
162            cursor: 0,
163            first_run: true,
164            finished: false,
165            message_buffer_is_full: false,
166            phantom: PhantomData,
167        }
168    }
169
170    /// Restores a previously saved digest.
171    #[cfg(not(esp32))]
172    pub fn restore(sha: S, ctx: &mut Context<A>) -> Self {
173        // Setup SHA Mode.
174        sha.borrow()
175            .regs()
176            .mode()
177            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });
178
179        // Restore the message buffer
180        unsafe {
181            core::ptr::copy_nonoverlapping(ctx.buffer.as_ptr(), m_mem(&sha.borrow().sha, 0), 32);
182        }
183
184        let mut ah = ctx.alignment_helper.clone();
185
186        // Restore previously saved hash
187        ah.volatile_write_regset(h_mem(&sha.borrow().sha, 0), &ctx.saved_digest, 64);
188
189        Self {
190            sha,
191            alignment_helper: ah,
192            cursor: ctx.cursor,
193            first_run: ctx.first_run,
194            finished: ctx.finished,
195            message_buffer_is_full: ctx.message_buffer_is_full,
196            phantom: PhantomData,
197        }
198    }
199
200    /// Returns true if the hardware is processing the next message.
201    pub fn is_busy(&self) -> bool {
202        cfg_if::cfg_if! {
203            if #[cfg(esp32)] {
204                A::is_busy(&self.sha.borrow().sha)
205            } else {
206                self.sha.borrow().regs().busy().read().state().bit_is_set()
207            }
208        }
209    }
210
211    /// Updates the SHA digest with the provided data buffer.
212    pub fn update<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
213        self.finished = false;
214
215        self.write_data(incoming)
216    }
217
218    /// Finish of the calculation (if not already) and copy result to output
219    /// After `finish()` is called `update()`s will contribute to a new hash
220    /// which can be calculated again with `finish()`.
221    ///
222    /// Typically, output is expected to be the size of
223    /// [ShaAlgorithm::DIGEST_LENGTH], but smaller inputs can be given to
224    /// get a "short hash"
225    pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
226        // Store message length for padding
227        let length = (self.cursor as u64 * 8).to_be_bytes();
228        nb::block!(self.update(&[0x80]))?; // Append "1" bit
229
230        // Flush partial data, ensures aligned cursor
231        {
232            while self.is_busy() {}
233            if self.message_buffer_is_full {
234                self.process_buffer();
235                self.message_buffer_is_full = false;
236                while self.is_busy() {}
237            }
238
239            let flushed = self.alignment_helper.flush_to(
240                m_mem(&self.sha.borrow().sha, 0),
241                (self.cursor % A::CHUNK_LENGTH) / self.alignment_helper.align_size(),
242            );
243            self.cursor = self.cursor.wrapping_add(flushed);
244
245            if flushed > 0 && self.cursor % A::CHUNK_LENGTH == 0 {
246                self.process_buffer();
247                while self.is_busy() {}
248            }
249        }
250        debug_assert!(self.cursor % 4 == 0);
251
252        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
253        if (A::CHUNK_LENGTH - mod_cursor) < A::CHUNK_LENGTH / 8 {
254            // Zero out remaining data if buffer is almost full (>=448/896), and process
255            // buffer
256            let pad_len = A::CHUNK_LENGTH - mod_cursor;
257            self.alignment_helper.volatile_write(
258                m_mem(&self.sha.borrow().sha, 0),
259                0_u8,
260                pad_len / self.alignment_helper.align_size(),
261                mod_cursor / self.alignment_helper.align_size(),
262            );
263            self.process_buffer();
264            self.cursor = self.cursor.wrapping_add(pad_len);
265
266            debug_assert_eq!(self.cursor % A::CHUNK_LENGTH, 0);
267
268            // Spin-wait for finish
269            while self.is_busy() {}
270        }
271
272        let mod_cursor = self.cursor % A::CHUNK_LENGTH; // Should be zero if branched above
273        let pad_len = A::CHUNK_LENGTH - mod_cursor - size_of::<u64>();
274
275        self.alignment_helper.volatile_write(
276            m_mem(&self.sha.borrow().sha, 0),
277            0,
278            pad_len / self.alignment_helper.align_size(),
279            mod_cursor / self.alignment_helper.align_size(),
280        );
281
282        self.alignment_helper.aligned_volatile_copy(
283            m_mem(&self.sha.borrow().sha, 0),
284            &length,
285            A::CHUNK_LENGTH / self.alignment_helper.align_size(),
286            (A::CHUNK_LENGTH - size_of::<u64>()) / self.alignment_helper.align_size(),
287        );
288
289        self.process_buffer();
290        // Spin-wait for final buffer to be processed
291        while self.is_busy() {}
292
293        // ESP32 requires additional load to retrieve output
294        #[cfg(esp32)]
295        {
296            A::load(&self.sha.borrow().sha);
297
298            // Spin wait for result, 8-20 clock cycles according to manual
299            while self.is_busy() {}
300        }
301
302        self.alignment_helper.volatile_read_regset(
303            h_mem(&self.sha.borrow().sha, 0),
304            output,
305            core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
306        );
307
308        self.first_run = true;
309        self.cursor = 0;
310        self.alignment_helper.reset();
311
312        Ok(())
313    }
314
315    /// Save the current state of the digest for later continuation.
316    #[cfg(not(esp32))]
317    pub fn save(&mut self, context: &mut Context<A>) -> nb::Result<(), Infallible> {
318        if self.is_busy() {
319            return Err(nb::Error::WouldBlock);
320        }
321
322        context.alignment_helper = self.alignment_helper.clone();
323        context.cursor = self.cursor;
324        context.first_run = self.first_run;
325        context.finished = self.finished;
326        context.message_buffer_is_full = self.message_buffer_is_full;
327
328        // Save the content of the current hash.
329        self.alignment_helper.volatile_read_regset(
330            h_mem(&self.sha.borrow().sha, 0),
331            &mut context.saved_digest,
332            64 / self.alignment_helper.align_size(),
333        );
334
335        // Save the content of the current (probably partially written) message.
336        unsafe {
337            core::ptr::copy_nonoverlapping(
338                m_mem(&self.sha.borrow().sha, 0),
339                context.buffer.as_mut_ptr(),
340                32,
341            );
342        }
343
344        Ok(())
345    }
346
347    /// Discard the current digest and return the peripheral.
348    pub fn cancel(self) -> S {
349        self.sha
350    }
351
352    /// Processes the data buffer and updates the hash state.
353    ///
354    /// This method is platform-specific and differs for ESP32 and non-ESP32
355    /// platforms.
356    fn process_buffer(&mut self) {
357        let sha = self.sha.borrow();
358
359        cfg_if::cfg_if! {
360            if #[cfg(esp32)] {
361                if self.first_run {
362                    A::start(&sha.sha);
363                    self.first_run = false;
364                } else {
365                    A::r#continue(&sha.sha);
366                }
367            } else {
368                if self.first_run {
369                    // Set SHA_START_REG
370                    // FIXME: raw register access
371                    sha.regs().start().write(|w| unsafe { w.bits(1) });
372                    self.first_run = false;
373                } else {
374                    // SET SHA_CONTINUE_REG
375                    // FIXME: raw register access
376                    sha.regs().continue_().write(|w| unsafe { w.bits(1) });
377                }
378            }
379        }
380    }
381
382    fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
383        if self.message_buffer_is_full {
384            if self.is_busy() {
385                // The message buffer is full and the hardware is still processing the previous
386                // message. There's nothing to be done besides wait for the hardware.
387                return Err(nb::Error::WouldBlock);
388            } else {
389                // Submit the full buffer.
390                self.process_buffer();
391                // The buffer is now free for filling.
392                self.message_buffer_is_full = false;
393            }
394        }
395
396        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
397        let chunk_len = A::CHUNK_LENGTH;
398
399        let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
400            m_mem(&self.sha.borrow().sha, 0),
401            incoming,
402            chunk_len / self.alignment_helper.align_size(),
403            mod_cursor / self.alignment_helper.align_size(),
404        );
405
406        self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());
407
408        if bound_reached {
409            // Message is full now.
410
411            if self.is_busy() {
412                // The message buffer is full and the hardware is still processing the previous
413                // message. There's nothing to be done besides wait for the hardware.
414                self.message_buffer_is_full = true;
415            } else {
416                // Send the full buffer.
417                self.process_buffer();
418            }
419        }
420
421        Ok(remaining)
422    }
423}
424
#[cfg(not(esp32))]
/// Context for a SHA Accelerator driver instance
#[derive(Debug, Clone)]
pub struct Context<A: ShaAlgorithm> {
    // Snapshot of the digest's byte-buffering state.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Snapshot of the total message bytes consumed.
    cursor: usize,
    // Snapshot of the START-vs-CONTINUE flag.
    first_run: bool,
    // Snapshot of the finished flag.
    finished: bool,
    // Snapshot of the queued-but-not-submitted-block flag.
    message_buffer_is_full: bool,
    /// Buffered bytes (SHA_M_n_REG) to be processed.
    buffer: [u32; 32],
    /// Saved digest (SHA_H_n_REG) for interleaving operation
    saved_digest: [u8; 64],
    phantom: PhantomData<A>,
}
440
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Context<A> {
    /// Create a new empty context
    pub fn new() -> Self {
        Self {
            cursor: 0,
            first_run: true,
            finished: false,
            message_buffer_is_full: false,
            alignment_helper: AlignmentHelper::default(),
            buffer: [0; 32],
            saved_digest: [0; 64],
            phantom: PhantomData,
        }
    }

    /// Indicates if the SHA context is in the first run.
    ///
    /// Returns `true` if this is the first time processing data with the SHA
    /// instance, otherwise returns `false`.
    pub fn first_run(&self) -> bool {
        self.first_run
    }

    /// Indicates if the SHA context has finished processing the data.
    ///
    /// Returns `true` if the SHA calculation is complete, otherwise returns
    /// `false`.
    pub fn finished(&self) -> bool {
        self.finished
    }
}
472
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Default for Context<A> {
    /// Equivalent to [`Context::new`].
    fn default() -> Self {
        Self::new()
    }
}
479
/// This trait encapsulates the configuration for a specific SHA algorithm.
pub trait ShaAlgorithm: crate::private::Sealed {
    /// Constant containing the name of the algorithm as a string.
    const ALGORITHM: &'static str;

    /// The length of the chunk (message block) that the algorithm processes
    /// at a time, in bytes.
    ///
    /// For example, in SHA-256, this would typically be 64 bytes.
    const CHUNK_LENGTH: usize;

    /// The length of the resulting digest produced by the algorithm, in bytes.
    ///
    /// For example, in SHA-256, this would be 32 bytes.
    const DIGEST_LENGTH: usize;

    #[cfg(feature = "digest")]
    #[doc(hidden)]
    // typenum representation of DIGEST_LENGTH, required by the `digest` traits.
    type DigestOutputSize: digest::generic_array::ArrayLength<u8> + 'static;

    #[cfg(not(esp32))]
    #[doc(hidden)]
    // Value written to the SHA MODE register to select this algorithm.
    const MODE_AS_BITS: u8;

    #[cfg(esp32)]
    #[doc(hidden)]
    // Initiate the operation (first block of a new message)
    fn start(sha: &crate::peripherals::SHA);

    #[cfg(esp32)]
    #[doc(hidden)]
    // Continue the operation (subsequent blocks)
    fn r#continue(sha: &crate::peripherals::SHA);

    #[cfg(esp32)]
    #[doc(hidden)]
    // Calculate the final hash (latch the digest into the output registers)
    fn load(sha: &crate::peripherals::SHA);

    #[cfg(esp32)]
    #[doc(hidden)]
    // Check if peripheral is busy
    fn is_busy(sha: &crate::peripherals::SHA) -> bool;
}
523
/// implement digest traits if digest feature is present.
/// Note: digest has a blanket trait implementation for [digest::Digest] for any
/// element that implements FixedOutput + Default + Update + HashMarker
#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::HashMarker for ShaDigest<'d, A, S> {}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::OutputSizeUser for ShaDigest<'d, A, S> {
    type OutputSize = A::DigestOutputSize;
}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::Update for ShaDigest<'d, A, S> {
    fn update(&mut self, data: &[u8]) {
        // Feed all data through the non-blocking `update`, spinning until the
        // hardware has consumed every byte.
        let mut remaining = data.as_ref();
        while !remaining.is_empty() {
            remaining = nb::block!(Self::update(self, remaining)).unwrap();
        }
    }
}

#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::FixedOutput for ShaDigest<'d, A, S> {
    fn finalize_into(mut self, out: &mut digest::Output<Self>) {
        // `finish` only ever signals `WouldBlock`, which `nb::block!` spins
        // on, so the unwrap cannot fail.
        nb::block!(self.finish(out)).unwrap();
    }
}
551
/// This macro implements the Sha<'a, Dm> trait for a specified Sha algorithm
/// and a set of parameters
///
/// Arguments:
/// - `$name`: name of the generated marker struct (e.g. `Sha256`)
/// - `$mode_bits`: value for `MODE_AS_BITS` on non-ESP32 chips
/// - `$digest_length`: digest size in bytes
/// - `$chunk_length`: message block size in bytes
macro_rules! impl_sha {
    ($name: ident, $mode_bits: tt, $digest_length: tt, $chunk_length: tt) => {
        /// A SHA implementation struct.
        ///
        /// This struct is generated by the macro and represents a specific SHA hashing
        /// algorithm (e.g., SHA-256, SHA-1). It manages the context and state required
        /// for processing data using the selected hashing algorithm.
        ///
        /// The struct provides various functionalities such as initializing the hashing
        /// process, updating the internal state with new data, and finalizing the
        /// hashing operation to generate the final digest.
        #[non_exhaustive]
        pub struct $name;

        impl crate::private::Sealed for $name {}

        impl $crate::sha::ShaAlgorithm for $name {
            const ALGORITHM: &'static str = stringify!($name);

            const CHUNK_LENGTH: usize = $chunk_length;

            const DIGEST_LENGTH: usize = $digest_length;

            #[cfg(not(esp32))]
            const MODE_AS_BITS: u8 = $mode_bits;

            #[cfg(feature = "digest")]
            // We use paste to append `U` to the digest size to match a const defined in
            // digest
            type DigestOutputSize = paste::paste!(digest::consts::[< U $digest_length >]);

            // The ESP32 has no shared MODE register; instead each algorithm has its own
            // START/CONTINUE/LOAD/BUSY registers, derived here from the struct name.
            #[cfg(esp32)]
            fn start(sha: &crate::peripherals::SHA) {
                paste::paste! {
                    sha.register_block().[< $name:lower _start >]().write(|w| w.[< $name:lower _start >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn r#continue(sha: &crate::peripherals::SHA) {
                paste::paste! {
                    sha.register_block().[< $name:lower _continue >]().write(|w| w.[< $name:lower _continue >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn load(sha: &crate::peripherals::SHA) {
                paste::paste! {
                    sha.register_block().[< $name:lower _load >]().write(|w| w.[< $name:lower _load >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn is_busy(sha: &crate::peripherals::SHA) -> bool {
                paste::paste! {
                    sha.register_block().[< $name:lower _busy >]().read().[< $name:lower _busy >]().bit_is_set()
                }
            }
        }
    };
}
615
// All the hash algorithms introduced in FIPS PUB 180-4 Spec.
// – SHA-1
// – SHA-224
// – SHA-256
// – SHA-384
// – SHA-512
// – SHA-512/224
// – SHA-512/256
// – SHA-512/t (not implemented yet)
// Two working modes
// – Typical SHA
// – DMA-SHA (not implemented yet)
//
// TODO: Allow/Implement SHA512_(u16)
//
// Arguments: name, mode bits, digest length (bytes), chunk length (bytes).
impl_sha!(Sha1, 0, 20, 64);
#[cfg(not(esp32))]
impl_sha!(Sha224, 1, 28, 64);
impl_sha!(Sha256, 2, 32, 64);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha384, 3, 48, 128);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha512, 4, 64, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_224, 5, 28, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_256, 6, 32, 128);
642
643fn h_mem(sha: &crate::peripherals::SHA, index: usize) -> *mut u32 {
644    let sha = sha.register_block();
645    cfg_if::cfg_if! {
646        if #[cfg(esp32)] {
647            sha.text(index).as_ptr()
648        } else {
649            sha.h_mem(index).as_ptr()
650        }
651    }
652}
653
654fn m_mem(sha: &crate::peripherals::SHA, index: usize) -> *mut u32 {
655    let sha = sha.register_block();
656    cfg_if::cfg_if! {
657        if #[cfg(esp32)] {
658            sha.text(index).as_ptr()
659        } else {
660            sha.m_mem(index).as_ptr()
661        }
662    }
663}