esp_hal/
sha.rs

1//! # Secure Hash Algorithm (SHA) Accelerator
2//!
3//! ## Overview
4//! This SHA accelerator is a hardware device that speeds up the SHA algorithm
5//! significantly, compared to a SHA algorithm implemented solely in software
6//!
7//! ## Configuration
8//! This driver allows you to perform cryptographic hash operations using
9//! various hash algorithms supported by the SHA peripheral, such as:
10//!    * SHA-1
11//!    * SHA-224
12//!    * SHA-256
13//!    * SHA-384
14//!    * SHA-512
15//!
16//! The driver supports two working modes:
17//!    * Typical SHA
18//!    * DMA-SHA
19//!
20//! It provides functions to update the hash calculation with input data, finish
21//! the hash calculation and retrieve the resulting hash value. The SHA
22//! peripheral on ESP chips can handle large data streams efficiently, making it
23//! suitable for cryptographic applications that require secure hashing.
24//!
25//! To use the SHA Peripheral Driver, you need to initialize it with the desired
26//! SHA mode and the corresponding SHA peripheral. Once initialized, you can
27//! update the hash calculation by providing input data, finish the calculation
28//! to retrieve the hash value and repeat the process for a new hash calculation
29//! if needed.
30//!
31//! ## Examples
32//! ```rust, no_run
33#![doc = crate::before_snippet!()]
34//! # use esp_hal::sha::Sha;
35//! # use esp_hal::sha::Sha256;
36//! # use nb::block;
37//! let mut source_data = "HELLO, ESPRESSIF!".as_bytes();
38//! let mut sha = Sha::new(peripherals.SHA);
39//! let mut hasher = sha.start::<Sha256>();
40//! // Short hashes can be created by decreasing the output buffer to the
41//! // desired length
42//! let mut output = [0u8; 32];
43//!
44//! while !source_data.is_empty() {
45//!     // All the HW Sha functions are infallible so unwrap is fine to use if
46//!     // you use block!
47//!     source_data = block!(hasher.update(source_data))?;
48//! }
49//!
//! // `finish()` copies out the digest and then resets the internal state,
//! // so subsequent `update()`s will contribute to a new hash.
52//! block!(hasher.finish(output.as_mut_slice()))?;
53//!
54//! # Ok(())
55//! # }
56//! ```
57//! ## Implementation State
58//! - DMA-SHA Mode is not supported.
59
60use core::{borrow::Borrow, convert::Infallible, marker::PhantomData, mem::size_of};
61
62/// Re-export digest for convenience
63pub use digest::Digest;
64
65#[cfg(not(esp32))]
66use crate::peripherals::Interrupt;
67use crate::{
68    peripherals::SHA,
69    reg_access::{AlignmentHelper, SocDependentEndianess},
70    system::GenericPeripheralGuard,
71};
72
73/// The SHA Accelerator driver instance
/// The SHA Accelerator driver instance
pub struct Sha<'d> {
    // The SHA peripheral singleton this driver owns.
    sha: SHA<'d>,
    // System-level guard for the SHA peripheral; held for the driver's
    // lifetime (see `GenericPeripheralGuard`).
    _guard: GenericPeripheralGuard<{ crate::system::Peripheral::Sha as u8 }>,
}
78
79impl<'d> Sha<'d> {
80    /// Create a new instance of the SHA Accelerator driver.
81    pub fn new(sha: SHA<'d>) -> Self {
82        let guard = GenericPeripheralGuard::new();
83
84        Self { sha, _guard: guard }
85    }
86
87    /// Start a new digest.
88    pub fn start<'a, A: ShaAlgorithm>(&'a mut self) -> ShaDigest<'d, A, &'a mut Self> {
89        ShaDigest::new(self)
90    }
91
92    /// Start a new digest and take ownership of the driver.
93    /// This is useful for storage outside a function body. i.e. in static or
94    /// struct.
95    pub fn start_owned<A: ShaAlgorithm>(self) -> ShaDigest<'d, A, Self> {
96        ShaDigest::new(self)
97    }
98
99    #[cfg(not(esp32))]
100    fn regs(&self) -> &crate::pac::sha::RegisterBlock {
101        self.sha.register_block()
102    }
103}
104
105impl crate::private::Sealed for Sha<'_> {}
106
107#[cfg(not(esp32))]
108#[instability::unstable]
109impl crate::interrupt::InterruptConfigurable for Sha<'_> {
110    fn set_interrupt_handler(&mut self, handler: crate::interrupt::InterruptHandler) {
111        for core in crate::system::Cpu::other() {
112            crate::interrupt::disable(core, Interrupt::SHA);
113        }
114        unsafe { crate::interrupt::bind_interrupt(Interrupt::SHA, handler.handler()) };
115        unwrap!(crate::interrupt::enable(Interrupt::SHA, handler.priority()));
116    }
117}
118
// A few notes on this implementation with regards to 'memcpy',
// - The registers are *not* cleared after processing, so padding needs to be
//   written out
// - This component uses core::intrinsics::volatile_* which is unstable, but is
//   the only way to efficiently copy memory with volatile semantics
// - For these particular registers (and probably others), a full u32 needs to
//   be written; partial register writes (i.e. in u8 mode) do not work
//   - This means that we need to buffer incoming bytes up to 4 u8's in order
//     to create a full u32
/// An active digest
///
/// This implementation might fail after u32::MAX/8 bytes, to increase please
/// see ::finish() length/self.cursor usage
pub struct ShaDigest<'d, A, S: Borrow<Sha<'d>>> {
    // Driver handle (borrowed or owned) granting access to the peripheral.
    sha: S,
    // Buffers incoming bytes into full, SoC-endian u32 register writes.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Total message bytes written so far; used for padding and the final
    // bit-length field in `finish()`.
    cursor: usize,
    // True until the first chunk is submitted; selects START vs CONTINUE in
    // `process_buffer()`.
    first_run: bool,
    // Set once the digest has been finalized; cleared on `update()`.
    finished: bool,
    // A complete chunk sits in the hardware message buffer but has not been
    // submitted yet because the peripheral was busy.
    message_buffer_is_full: bool,
    phantom: PhantomData<(&'d (), A)>,
}
144
145impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> ShaDigest<'d, A, S> {
146    /// Creates a new digest
147    #[allow(unused_mut)]
148    pub fn new(mut sha: S) -> Self {
149        #[cfg(not(esp32))]
150        // Setup SHA Mode.
151        sha.borrow()
152            .regs()
153            .mode()
154            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });
155
156        Self {
157            sha,
158            alignment_helper: AlignmentHelper::default(),
159            cursor: 0,
160            first_run: true,
161            finished: false,
162            message_buffer_is_full: false,
163            phantom: PhantomData,
164        }
165    }
166
167    /// Restores a previously saved digest.
168    #[cfg(not(esp32))]
169    pub fn restore(sha: S, ctx: &mut Context<A>) -> Self {
170        // Setup SHA Mode.
171        sha.borrow()
172            .regs()
173            .mode()
174            .write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });
175
176        // Restore the message buffer
177        unsafe {
178            core::ptr::copy_nonoverlapping(ctx.buffer.as_ptr(), m_mem(&sha.borrow().sha, 0), 32);
179        }
180
181        let mut ah = ctx.alignment_helper.clone();
182
183        // Restore previously saved hash
184        ah.volatile_write_regset(h_mem(&sha.borrow().sha, 0), &ctx.saved_digest, 64);
185
186        Self {
187            sha,
188            alignment_helper: ah,
189            cursor: ctx.cursor,
190            first_run: ctx.first_run,
191            finished: ctx.finished,
192            message_buffer_is_full: ctx.message_buffer_is_full,
193            phantom: PhantomData,
194        }
195    }
196
197    /// Returns true if the hardware is processing the next message.
198    pub fn is_busy(&self) -> bool {
199        cfg_if::cfg_if! {
200            if #[cfg(esp32)] {
201                A::is_busy(&self.sha.borrow().sha)
202            } else {
203                self.sha.borrow().regs().busy().read().state().bit_is_set()
204            }
205        }
206    }
207
208    /// Updates the SHA digest with the provided data buffer.
209    pub fn update<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
210        self.finished = false;
211
212        self.write_data(incoming)
213    }
214
215    /// Finish of the calculation (if not already) and copy result to output
216    /// After `finish()` is called `update()`s will contribute to a new hash
217    /// which can be calculated again with `finish()`.
218    ///
219    /// Typically, output is expected to be the size of
220    /// [ShaAlgorithm::DIGEST_LENGTH], but smaller inputs can be given to
221    /// get a "short hash"
222    pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
223        // Store message length for padding
224        let length = (self.cursor as u64 * 8).to_be_bytes();
225        nb::block!(self.update(&[0x80]))?; // Append "1" bit
226
227        // Flush partial data, ensures aligned cursor
228        {
229            while self.is_busy() {}
230            if self.message_buffer_is_full {
231                self.process_buffer();
232                self.message_buffer_is_full = false;
233                while self.is_busy() {}
234            }
235
236            let flushed = self.alignment_helper.flush_to(
237                m_mem(&self.sha.borrow().sha, 0),
238                (self.cursor % A::CHUNK_LENGTH) / self.alignment_helper.align_size(),
239            );
240            self.cursor = self.cursor.wrapping_add(flushed);
241
242            if flushed > 0 && self.cursor % A::CHUNK_LENGTH == 0 {
243                self.process_buffer();
244                while self.is_busy() {}
245            }
246        }
247        debug_assert!(self.cursor % 4 == 0);
248
249        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
250        if (A::CHUNK_LENGTH - mod_cursor) < A::CHUNK_LENGTH / 8 {
251            // Zero out remaining data if buffer is almost full (>=448/896), and process
252            // buffer
253            let pad_len = A::CHUNK_LENGTH - mod_cursor;
254            self.alignment_helper.volatile_write(
255                m_mem(&self.sha.borrow().sha, 0),
256                0_u8,
257                pad_len / self.alignment_helper.align_size(),
258                mod_cursor / self.alignment_helper.align_size(),
259            );
260            self.process_buffer();
261            self.cursor = self.cursor.wrapping_add(pad_len);
262
263            debug_assert_eq!(self.cursor % A::CHUNK_LENGTH, 0);
264
265            // Spin-wait for finish
266            while self.is_busy() {}
267        }
268
269        let mod_cursor = self.cursor % A::CHUNK_LENGTH; // Should be zero if branched above
270        let pad_len = A::CHUNK_LENGTH - mod_cursor - size_of::<u64>();
271
272        self.alignment_helper.volatile_write(
273            m_mem(&self.sha.borrow().sha, 0),
274            0,
275            pad_len / self.alignment_helper.align_size(),
276            mod_cursor / self.alignment_helper.align_size(),
277        );
278
279        self.alignment_helper.aligned_volatile_copy(
280            m_mem(&self.sha.borrow().sha, 0),
281            &length,
282            A::CHUNK_LENGTH / self.alignment_helper.align_size(),
283            (A::CHUNK_LENGTH - size_of::<u64>()) / self.alignment_helper.align_size(),
284        );
285
286        self.process_buffer();
287        // Spin-wait for final buffer to be processed
288        while self.is_busy() {}
289
290        // ESP32 requires additional load to retrieve output
291        #[cfg(esp32)]
292        {
293            A::load(&self.sha.borrow().sha);
294
295            // Spin wait for result, 8-20 clock cycles according to manual
296            while self.is_busy() {}
297        }
298
299        self.alignment_helper.volatile_read_regset(
300            h_mem(&self.sha.borrow().sha, 0),
301            output,
302            core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
303        );
304
305        self.first_run = true;
306        self.cursor = 0;
307        self.alignment_helper.reset();
308
309        Ok(())
310    }
311
312    /// Save the current state of the digest for later continuation.
313    #[cfg(not(esp32))]
314    pub fn save(&mut self, context: &mut Context<A>) -> nb::Result<(), Infallible> {
315        if self.is_busy() {
316            return Err(nb::Error::WouldBlock);
317        }
318
319        context.alignment_helper = self.alignment_helper.clone();
320        context.cursor = self.cursor;
321        context.first_run = self.first_run;
322        context.finished = self.finished;
323        context.message_buffer_is_full = self.message_buffer_is_full;
324
325        // Save the content of the current hash.
326        self.alignment_helper.volatile_read_regset(
327            h_mem(&self.sha.borrow().sha, 0),
328            &mut context.saved_digest,
329            64 / self.alignment_helper.align_size(),
330        );
331
332        // Save the content of the current (probably partially written) message.
333        unsafe {
334            core::ptr::copy_nonoverlapping(
335                m_mem(&self.sha.borrow().sha, 0),
336                context.buffer.as_mut_ptr(),
337                32,
338            );
339        }
340
341        Ok(())
342    }
343
344    /// Discard the current digest and return the peripheral.
345    pub fn cancel(self) -> S {
346        self.sha
347    }
348
349    /// Processes the data buffer and updates the hash state.
350    ///
351    /// This method is platform-specific and differs for ESP32 and non-ESP32
352    /// platforms.
353    fn process_buffer(&mut self) {
354        let sha = self.sha.borrow();
355
356        cfg_if::cfg_if! {
357            if #[cfg(esp32)] {
358                if self.first_run {
359                    A::start(&sha.sha);
360                    self.first_run = false;
361                } else {
362                    A::r#continue(&sha.sha);
363                }
364            } else {
365                if self.first_run {
366                    // Set SHA_START_REG
367                    // FIXME: raw register access
368                    sha.regs().start().write(|w| unsafe { w.bits(1) });
369                    self.first_run = false;
370                } else {
371                    // SET SHA_CONTINUE_REG
372                    // FIXME: raw register access
373                    sha.regs().continue_().write(|w| unsafe { w.bits(1) });
374                }
375            }
376        }
377    }
378
379    fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
380        if self.message_buffer_is_full {
381            if self.is_busy() {
382                // The message buffer is full and the hardware is still processing the previous
383                // message. There's nothing to be done besides wait for the hardware.
384                return Err(nb::Error::WouldBlock);
385            } else {
386                // Submit the full buffer.
387                self.process_buffer();
388                // The buffer is now free for filling.
389                self.message_buffer_is_full = false;
390            }
391        }
392
393        let mod_cursor = self.cursor % A::CHUNK_LENGTH;
394        let chunk_len = A::CHUNK_LENGTH;
395
396        let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
397            m_mem(&self.sha.borrow().sha, 0),
398            incoming,
399            chunk_len / self.alignment_helper.align_size(),
400            mod_cursor / self.alignment_helper.align_size(),
401        );
402
403        self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());
404
405        if bound_reached {
406            // Message is full now.
407
408            if self.is_busy() {
409                // The message buffer is full and the hardware is still processing the previous
410                // message. There's nothing to be done besides wait for the hardware.
411                self.message_buffer_is_full = true;
412            } else {
413                // Send the full buffer.
414                self.process_buffer();
415            }
416        }
417
418        Ok(remaining)
419    }
420}
421
#[cfg(not(esp32))]
/// Context for a SHA Accelerator driver instance
#[derive(Debug, Clone)]
pub struct Context<A: ShaAlgorithm> {
    // Snapshot of the register-alignment state at save time.
    alignment_helper: AlignmentHelper<SocDependentEndianess>,
    // Number of message bytes processed when the digest was saved.
    cursor: usize,
    // Whether the saved digest had not yet submitted its first chunk.
    first_run: bool,
    // Whether the saved digest had already been finalized.
    finished: bool,
    // Whether a full, not-yet-submitted chunk was pending in the buffer.
    message_buffer_is_full: bool,
    /// Buffered bytes (SHA_M_n_REG) to be processed.
    buffer: [u32; 32],
    /// Saved digest (SHA_H_n_REG) for interleaving operation
    saved_digest: [u8; 64],
    phantom: PhantomData<A>,
}
437
438#[cfg(not(esp32))]
439impl<A: ShaAlgorithm> Context<A> {
440    /// Create a new empty context
441    pub fn new() -> Self {
442        Self {
443            cursor: 0,
444            first_run: true,
445            finished: false,
446            message_buffer_is_full: false,
447            alignment_helper: AlignmentHelper::default(),
448            buffer: [0; 32],
449            saved_digest: [0; 64],
450            phantom: PhantomData,
451        }
452    }
453
454    /// Indicates if the SHA context is in the first run.
455    ///
456    /// Returns `true` if this is the first time processing data with the SHA
457    /// instance, otherwise returns `false`.
458    pub fn first_run(&self) -> bool {
459        self.first_run
460    }
461
462    /// Indicates if the SHA context has finished processing the data.
463    ///
464    /// Returns `true` if the SHA calculation is complete, otherwise returns.
465    pub fn finished(&self) -> bool {
466        self.finished
467    }
468}
469
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Default for Context<A> {
    // Delegates to `new()` so `Context::default()` starts in a clean state.
    fn default() -> Self {
        Self::new()
    }
}
476
/// This trait encapsulates the configuration for a specific SHA algorithm.
pub trait ShaAlgorithm: crate::private::Sealed {
    /// Constant containing the name of the algorithm as a string.
    const ALGORITHM: &'static str;

    /// The length of the chunk that the algorithm processes at a time.
    ///
    /// For example, in SHA-256, this would typically be 64 bytes.
    const CHUNK_LENGTH: usize;

    /// The length of the resulting digest produced by the algorithm.
    ///
    /// For example, in SHA-256, this would be 32 bytes.
    const DIGEST_LENGTH: usize;

    #[doc(hidden)]
    // `typenum` constant matching DIGEST_LENGTH, used by the `digest` traits.
    type DigestOutputSize: digest::generic_array::ArrayLength<u8> + 'static;

    #[cfg(not(esp32))]
    #[doc(hidden)]
    // Value written to the SHA MODE register to select this algorithm.
    const MODE_AS_BITS: u8;

    #[cfg(esp32)]
    #[doc(hidden)]
    // Initiate the operation
    fn start(sha: &crate::peripherals::SHA<'_>);

    #[cfg(esp32)]
    #[doc(hidden)]
    // Continue the operation
    fn r#continue(sha: &crate::peripherals::SHA<'_>);

    #[cfg(esp32)]
    #[doc(hidden)]
    // Calculate the final hash
    fn load(sha: &crate::peripherals::SHA<'_>);

    #[cfg(esp32)]
    #[doc(hidden)]
    // Check if peripheral is busy
    fn is_busy(sha: &crate::peripherals::SHA<'_>) -> bool;
}
519
/// Implement digest traits if digest feature is present.
/// Note: digest has a blanket trait implementation for [digest::Digest] for any
/// element that implements FixedOutput + Default + Update + HashMarker
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::HashMarker for ShaDigest<'d, A, S> {}
524
impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::OutputSizeUser for ShaDigest<'d, A, S> {
    // Digest size comes from the algorithm's typenum constant (e.g. U32).
    type OutputSize = A::DigestOutputSize;
}
528
529impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::Update for ShaDigest<'d, A, S> {
530    fn update(&mut self, mut remaining: &[u8]) {
531        while !remaining.is_empty() {
532            remaining = nb::block!(Self::update(self, remaining)).unwrap();
533        }
534    }
535}
536
537impl<'d, A: ShaAlgorithm, S: Borrow<Sha<'d>>> digest::FixedOutput for ShaDigest<'d, A, S> {
538    fn finalize_into(mut self, out: &mut digest::Output<Self>) {
539        nb::block!(self.finish(out)).unwrap();
540    }
541}
542
/// This macro implements the Sha<'a, Dm> trait for a specified Sha algorithm
/// and a set of parameters
///
/// Arguments:
/// - `$name`: the algorithm struct to generate (e.g. `Sha256`)
/// - `$mode_bits`: value written to the SHA `MODE` register (non-ESP32 only)
/// - `$digest_length`: digest size in bytes
/// - `$chunk_length`: message block size in bytes
macro_rules! impl_sha {
    ($name: ident, $mode_bits: tt, $digest_length: tt, $chunk_length: tt) => {
        /// A SHA implementation struct.
        ///
        /// This struct is generated by the macro and represents a specific SHA hashing
        /// algorithm (e.g., SHA-256, SHA-1). It manages the context and state required
        /// for processing data using the selected hashing algorithm.
        ///
        /// The struct provides various functionalities such as initializing the hashing
        /// process, updating the internal state with new data, and finalizing the
        /// hashing operation to generate the final digest.
        #[non_exhaustive]
        pub struct $name;

        impl crate::private::Sealed for $name {}

        impl $crate::sha::ShaAlgorithm for $name {
            const ALGORITHM: &'static str = stringify!($name);

            const CHUNK_LENGTH: usize = $chunk_length;

            const DIGEST_LENGTH: usize = $digest_length;

            #[cfg(not(esp32))]
            const MODE_AS_BITS: u8 = $mode_bits;

            // We use paste to append `U` to the digest size to match a const defined in
            // digest
            type DigestOutputSize = paste::paste!(digest::consts::[< U $digest_length >]);

            // The ESP32 has per-algorithm start/continue/load/busy registers;
            // paste derives the register accessor names from the struct name.
            #[cfg(esp32)]
            fn start(sha: &crate::peripherals::SHA<'_>) {
                paste::paste! {
                    sha.register_block().[< $name:lower _start >]().write(|w| w.[< $name:lower _start >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn r#continue(sha: &crate::peripherals::SHA<'_>) {
                paste::paste! {
                    sha.register_block().[< $name:lower _continue >]().write(|w| w.[< $name:lower _continue >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn load(sha: &crate::peripherals::SHA<'_>) {
                paste::paste! {
                    sha.register_block().[< $name:lower _load >]().write(|w| w.[< $name:lower _load >]().set_bit());
                }
            }

            #[cfg(esp32)]
            fn is_busy(sha: &crate::peripherals::SHA<'_>) -> bool {
                paste::paste! {
                    sha.register_block().[< $name:lower _busy >]().read().[< $name:lower _busy >]().bit_is_set()
                }
            }
        }
    };
}
605
// All the hash algorithms introduced in FIPS PUB 180-4 Spec.
// – SHA-1
// – SHA-224
// – SHA-256
// – SHA-384
// – SHA-512
// – SHA-512/224
// – SHA-512/256
// – SHA-512/t (not implemented yet)
// Two working modes
// – Typical SHA
// – DMA-SHA (not implemented yet)
//
// TODO: Allow/Implement SHA512_(u16)
//
// Arguments: (name, MODE register bits, digest bytes, chunk bytes).
impl_sha!(Sha1, 0, 20, 64);
#[cfg(not(esp32))]
impl_sha!(Sha224, 1, 28, 64);
impl_sha!(Sha256, 2, 32, 64);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha384, 3, 48, 128);
#[cfg(any(esp32, esp32s2, esp32s3))]
impl_sha!(Sha512, 4, 64, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_224, 5, 28, 128);
#[cfg(any(esp32s2, esp32s3))]
impl_sha!(Sha512_256, 6, 32, 128);
632
633fn h_mem(sha: &crate::peripherals::SHA<'_>, index: usize) -> *mut u32 {
634    let sha = sha.register_block();
635    cfg_if::cfg_if! {
636        if #[cfg(esp32)] {
637            sha.text(index).as_ptr()
638        } else {
639            sha.h_mem(index).as_ptr()
640        }
641    }
642}
643
644fn m_mem(sha: &crate::peripherals::SHA<'_>, index: usize) -> *mut u32 {
645    let sha = sha.register_block();
646    cfg_if::cfg_if! {
647        if #[cfg(esp32)] {
648            sha.text(index).as_ptr()
649        } else {
650            sha.m_mem(index).as_ptr()
651        }
652    }
653}