// esp_hal/dma/buffers.rs

1#[cfg(psram_dma)]
2use core::ops::Range;
3use core::{
4    ops::{Deref, DerefMut},
5    ptr::{NonNull, null_mut},
6};
7
8use super::*;
9use crate::soc::is_slice_in_dram;
10#[cfg(psram_dma)]
11use crate::soc::{is_slice_in_psram, is_valid_psram_address, is_valid_ram_address};
12
13/// Error returned from Dma[Rx|Tx|RxTx]Buf operations.
14#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
15#[cfg_attr(feature = "defmt", derive(defmt::Format))]
16pub enum DmaBufError {
17    /// The buffer is smaller than the requested size.
18    BufferTooSmall,
19
20    /// More descriptors are needed for the buffer size.
21    InsufficientDescriptors,
22
23    /// Descriptors or buffers are not located in a supported memory region.
24    UnsupportedMemoryRegion,
25
26    /// Buffer address or size is not properly aligned.
27    InvalidAlignment(DmaAlignmentError),
28
29    /// Invalid chunk size: must be > 0 and <= 4095.
30    InvalidChunkSize,
31}
32
33/// DMA buffer alignment errors.
34#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
35#[cfg_attr(feature = "defmt", derive(defmt::Format))]
36pub enum DmaAlignmentError {
37    /// Buffer address is not properly aligned.
38    Address,
39
40    /// Buffer size is not properly aligned.
41    Size,
42}
43
44impl From<DmaAlignmentError> for DmaBufError {
45    fn from(err: DmaAlignmentError) -> Self {
46        DmaBufError::InvalidAlignment(err)
47    }
48}
49
50cfg_if::cfg_if! {
51    if #[cfg(psram_dma)] {
52        /// Burst size used when transferring to and from external memory.
53        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
54        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
55        pub enum ExternalBurstConfig {
56            /// 16 bytes
57            Size16 = 16,
58
59            /// 32 bytes
60            Size32 = 32,
61
62            /// 64 bytes
63            Size64 = 64,
64        }
65
66        impl ExternalBurstConfig {
67            /// The default external memory burst length.
68            pub const DEFAULT: Self = Self::Size16;
69        }
70
71        impl Default for ExternalBurstConfig {
72            fn default() -> Self {
73                Self::DEFAULT
74            }
75        }
76
77        /// Internal memory access burst mode.
78        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
79        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
80        pub enum InternalBurstConfig {
81            /// Burst mode is disabled.
82            Disabled,
83
84            /// Burst mode is enabled.
85            Enabled,
86        }
87
88        impl InternalBurstConfig {
89            /// The default internal burst mode configuration.
90            pub const DEFAULT: Self = Self::Disabled;
91        }
92
93        impl Default for InternalBurstConfig {
94            fn default() -> Self {
95                Self::DEFAULT
96            }
97        }
98
99        /// Burst transfer configuration.
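        ///
        /// A configuration can be built from either half via the `From` conversions
        /// below. A minimal sketch (PSRAM-enabled chips only):
        ///
        /// ```rust,ignore
        /// // 32-byte bursts for PSRAM, internal-memory burst mode left at its default.
        /// let config: BurstConfig = ExternalBurstConfig::Size32.into();
        /// ```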
100        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
101        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
102        pub struct BurstConfig {
103            /// Configures the burst size for PSRAM transfers.
104            ///
105            /// Burst mode is always enabled for PSRAM transfers.
106            pub external_memory: ExternalBurstConfig,
107
108            /// Enables or disables the burst mode for internal memory transfers.
109            ///
110            /// The burst size is not configurable.
111            pub internal_memory: InternalBurstConfig,
112        }
113
114        impl BurstConfig {
115            /// The default burst mode configuration.
116            pub const DEFAULT: Self = Self {
117                external_memory: ExternalBurstConfig::DEFAULT,
118                internal_memory: InternalBurstConfig::DEFAULT,
119            };
120        }
121
122        impl Default for BurstConfig {
123            fn default() -> Self {
124                Self::DEFAULT
125            }
126        }
127
128        impl From<InternalBurstConfig> for BurstConfig {
129            fn from(internal_memory: InternalBurstConfig) -> Self {
130                Self {
131                    external_memory: ExternalBurstConfig::DEFAULT,
132                    internal_memory,
133                }
134            }
135        }
136
137        impl From<ExternalBurstConfig> for BurstConfig {
138            fn from(external_memory: ExternalBurstConfig) -> Self {
139                Self {
140                    external_memory,
141                    internal_memory: InternalBurstConfig::DEFAULT,
142                }
143            }
144        }
145    } else {
146        /// Burst transfer configuration.
147        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
148        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
149        pub enum BurstConfig {
150            /// Burst mode is disabled.
151            Disabled,
152
153            /// Burst mode is enabled.
154            Enabled,
155        }
156
157        impl BurstConfig {
158            /// The default burst mode configuration.
159            pub const DEFAULT: Self = Self::Disabled;
160        }
161
162        impl Default for BurstConfig {
163            fn default() -> Self {
164                Self::DEFAULT
165            }
166        }
167
168        type InternalBurstConfig = BurstConfig;
169    }
170}
171
172#[cfg(psram_dma)]
173impl ExternalBurstConfig {
174    const fn min_psram_alignment(self, direction: TransferDirection) -> usize {
175        // S2 TRM: Specifically, size and buffer address pointer in receive descriptors
176        // should be 16-byte, 32-byte or 64-byte aligned. For data frame whose
177        // length is not a multiple of 16 bytes, 32 bytes, or 64 bytes, EDMA adds
178        // padding bytes to the end.
179
180        // S3 TRM: Size and Address for IN transfers must be block aligned. For receive
181        // descriptors, if the data length received are not aligned with block size,
182        // GDMA will pad the data received with 0 until they are aligned to
183        // initiate burst transfer. You can read the length field in receive descriptors
184        // to obtain the length of valid data received
185        if matches!(direction, TransferDirection::In) {
186            self as usize
187        } else {
188            // S2 TRM: Size, length and buffer address pointer in transmit descriptors are
189            // not necessarily aligned with block size.
190
191            // S3 TRM: Size, length, and buffer address pointer in transmit descriptors do
192            // not need to be aligned.
193            1
194        }
195    }
196}
197
198impl InternalBurstConfig {
199    pub(super) const fn is_burst_enabled(self) -> bool {
200        !matches!(self, Self::Disabled)
201    }
202
203    // Returns the combined size and address alignment, as those come in pairs on current hardware.
204    const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
205        if matches!(direction, TransferDirection::In) {
206            if cfg!(esp32) {
207                // NOTE: The size must be word-aligned.
208                // NOTE: The buffer address must be word-aligned
209                4
210            } else if self.is_burst_enabled() {
211                // As described in "Accessing Internal Memory" paragraphs in the various TRMs.
212                4
213            } else {
214                1
215            }
216        } else {
217            // OUT transfers have no alignment requirements, except for ESP32, which is
218            // described below.
219            if cfg!(esp32) {
220                // SPI DMA: Burst transmission is supported. The data size for
221                // a single transfer must be four bytes aligned.
222                // I2S DMA: Burst transfer is supported. However, unlike the
223                // SPI DMA channels, the data size for a single transfer is
224                // one word, or four bytes.
225                4
226            } else {
227                1
228            }
229        }
230    }
231}
232
233const fn max(a: usize, b: usize) -> usize {
234    if a > b { a } else { b }
235}
236
237impl BurstConfig {
238    delegate::delegate! {
239        #[cfg(psram_dma)]
240        to self.internal_memory {
241            pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize;
242            pub(super) fn is_burst_enabled(self) -> bool;
243        }
244    }
245
246    /// Calculates an alignment that is compatible with the current burst
247    /// configuration.
248    ///
249    /// This is an over-estimation so that Descriptors can be safely used with
250    /// any DMA channel in any direction.
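    ///
    /// For example, with burst mode disabled this is 1 byte on most chips and
    /// 4 bytes on the ESP32; where PSRAM transfers are possible, it is at least
    /// the configured external burst size (16 bytes by default).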
251    pub const fn min_compatible_alignment(self) -> usize {
252        let in_alignment = self.min_dram_alignment(TransferDirection::In);
253        let out_alignment = self.min_dram_alignment(TransferDirection::Out);
254        let alignment = max(in_alignment, out_alignment);
255
256        #[cfg(psram_dma)]
257        let alignment = max(alignment, self.external_memory as usize);
258
259        alignment
260    }
261
262    const fn chunk_size_for_alignment(alignment: usize) -> usize {
263        // DMA descriptors have a 12-bit field for the size/length of the buffer they
264        // point at. As there is no such thing as 0-byte alignment, this means the
265        // maximum size is 4095 bytes.
266        4096 - alignment
267    }
268
269    /// Calculates a chunk size that is compatible with the current burst
270    /// configuration's alignment requirements.
271    ///
272    /// This is an over-estimation so that Descriptors can be safely used with
273    /// any DMA channel in any direction.
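    ///
    /// For example, a 4-byte minimum alignment yields a maximum chunk size of
    /// 4092 bytes (4096 - 4), while a 1-byte alignment yields 4095 bytes.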
274    pub const fn max_compatible_chunk_size(self) -> usize {
275        Self::chunk_size_for_alignment(self.min_compatible_alignment())
276    }
277
278    fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize {
279        let alignment = self.min_dram_alignment(direction);
280
281        cfg_if::cfg_if! {
282            if #[cfg(psram_dma)] {
283                let mut alignment = alignment;
284                if is_valid_psram_address(_buffer.as_ptr() as usize) {
285                    alignment = max(alignment, self.external_memory.min_psram_alignment(direction));
286                }
287            }
288        }
289
290        alignment
291    }
292
293    // Note: this function ignores address alignment as we assume the buffers are
294    // aligned.
295    fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize {
296        Self::chunk_size_for_alignment(self.min_alignment(buffer, direction))
297    }
298
299    fn ensure_buffer_aligned(
300        self,
301        buffer: &[u8],
302        direction: TransferDirection,
303    ) -> Result<(), DmaAlignmentError> {
304        let alignment = self.min_alignment(buffer, direction);
305        if !(buffer.as_ptr() as usize).is_multiple_of(alignment) {
306            return Err(DmaAlignmentError::Address);
307        }
308
309        // NB: the TRMs suggest that buffer lengths don't need to be aligned, but
310        // for IN transfers, we configure the DMA descriptors' size field, which needs
311        // to be aligned.
312        if direction == TransferDirection::In && !buffer.len().is_multiple_of(alignment) {
313            return Err(DmaAlignmentError::Size);
314        }
315
316        Ok(())
317    }
318
319    fn ensure_buffer_compatible(
320        self,
321        buffer: &[u8],
322        direction: TransferDirection,
323    ) -> Result<(), DmaBufError> {
324        // buffer can be either DRAM or PSRAM (if supported)
325        let is_in_dram = is_slice_in_dram(buffer);
326        cfg_if::cfg_if! {
327            if #[cfg(psram_dma)]{
328                let is_in_psram = is_slice_in_psram(buffer);
329            } else {
330                let is_in_psram = false;
331            }
332        }
333
334        if !(is_in_dram || is_in_psram) {
335            return Err(DmaBufError::UnsupportedMemoryRegion);
336        }
337
338        self.ensure_buffer_aligned(buffer, direction)?;
339
340        Ok(())
341    }
342}
343
344/// The direction of the DMA transfer.
345#[derive(Clone, Copy, PartialEq, Eq, Debug)]
346#[cfg_attr(feature = "defmt", derive(defmt::Format))]
347pub enum TransferDirection {
348    /// DMA transfer from peripheral or external memory to memory.
349    In,
350    /// DMA transfer from memory to peripheral or external memory.
351    Out,
352}
353
354/// Holds all the information needed to configure a DMA channel for a transfer.
355#[derive(PartialEq, Eq, Debug)]
356#[cfg_attr(feature = "defmt", derive(defmt::Format))]
357pub struct Preparation {
358    /// The descriptor the DMA will start from.
359    pub start: *mut DmaDescriptor,
360
361    /// The direction of the DMA transfer.
362    pub direction: TransferDirection,
363
364    /// Must be `true` if any of the DMA descriptors contain data in PSRAM.
365    #[cfg(psram_dma)]
366    pub accesses_psram: bool,
367
368    /// Configures the DMA to transfer data in bursts.
369    ///
370    /// The implementation of the buffer must ensure that buffer size
371    /// and alignment in each descriptor are compatible with the burst
372    /// transfer configuration.
373    ///
374    /// For details on alignment requirements, refer to your chip's
375    #[doc = crate::trm_markdown_link!()]
376    pub burst_transfer: BurstConfig,
377
378    /// Configures the "check owner" feature of the DMA channel.
379    ///
380    /// Most DMA channels allow software to configure whether the hardware
381    /// checks that [DmaDescriptor::owner] is set to [Owner::Dma] before
382    /// consuming the descriptor. If this check fails, the channel stops
383    /// operating and fires
384    /// [DmaRxInterrupt::DescriptorError]/[DmaTxInterrupt::DescriptorError].
385    ///
386    /// This field allows the buffer implementation to configure this behaviour.
387    /// - `Some(true)`: DMA channel must check the owner bit.
388    /// - `Some(false)`: DMA channel must NOT check the owner bit.
389    /// - `None`: DMA channel should check the owner bit if it is supported.
390    ///
391    /// Some buffer implementations may require that the DMA channel performs
392    /// this check before consuming the descriptor to ensure correct
393    /// behaviour. e.g. To prevent wrap-around in a circular transfer.
394    ///
395    /// Some buffer implementations may require that the DMA channel does NOT
396    /// perform this check as the ownership bit will not be set before the
397    /// channel tries to consume the descriptor.
398    ///
399    /// Most implementations won't have any such requirements and will work
400    /// correctly regardless of whether the DMA channel checks or not.
401    ///
402    /// Note: If the DMA channel doesn't support the provided option,
403    /// preparation will fail.
404    pub check_owner: Option<bool>,
405
406    /// Configures whether the DMA channel automatically clears the
407    /// [DmaDescriptor::owner] bit after it is done with the buffer pointed
408    /// to by a descriptor.
409    ///
410    /// For RX transfers, this is always true and the value specified here is
411    /// ignored.
412    ///
413    /// Note: SPI_DMA on the ESP32 does not support this and will panic if set
414    /// to true.
415    pub auto_write_back: bool,
416}
417
418/// [DmaTxBuffer] is a DMA descriptor + memory combo that can be used for
419/// transmitting data from a DMA channel to a peripheral's FIFO.
420///
421/// # Safety
422///
423/// The implementing type must keep all its descriptors and the buffers they
424/// point to valid while the buffer is being transferred.
425pub unsafe trait DmaTxBuffer {
426    /// A type providing operations that are safe to perform on the buffer
427    /// whilst the DMA is actively using it.
428    type View;
429
430    /// The type returned to the user when a transfer finishes.
431    ///
432    /// Some buffers don't need to be reconstructed.
433    type Final;
434
435    /// Prepares the buffer for an imminent transfer and returns
436    /// information required to use this buffer.
437    ///
438    /// Note: This operation is idempotent.
439    fn prepare(&mut self) -> Preparation;
440
441    /// This is called before the DMA starts using the buffer.
442    fn into_view(self) -> Self::View;
443
444    /// This is called after the DMA is done using the buffer.
445    fn from_view(view: Self::View) -> Self::Final;
446}
447
448/// [DmaRxBuffer] is a DMA descriptor + memory combo that can be used for
449/// receiving data from a peripheral's FIFO to a DMA channel.
450///
451/// Note: Implementations of this trait may only support having a single EOF bit
452/// which resides in the last descriptor. There will be a separate trait in
453/// future to support multiple EOFs.
454///
455/// # Safety
456///
457/// The implementing type must keep all its descriptors and the buffers they
458/// point to valid while the buffer is being transferred.
459pub unsafe trait DmaRxBuffer {
460    /// A type providing operations that are safe to perform on the buffer
461    /// whilst the DMA is actively using it.
462    type View;
463
464    /// The type returned to the user when a transfer finishes.
465    ///
466    /// Some buffers don't need to be reconstructed.
467    type Final;
468
469    /// Prepares the buffer for an imminent transfer and returns
470    /// information required to use this buffer.
471    ///
472    /// Note: This operation is idempotent.
473    fn prepare(&mut self) -> Preparation;
474
475    /// This is called before the DMA starts using the buffer.
476    fn into_view(self) -> Self::View;
477
478    /// This is called after the DMA is done using the buffer.
479    fn from_view(view: Self::View) -> Self::Final;
480}
481
482/// An in-progress view into [DmaRxBuf]/[DmaTxBuf].
483///
484/// In the future, this could support peeking into the state of the
485/// descriptors/buffers.
486pub struct BufView<T>(T);
487
488/// DMA transmit buffer
489///
490/// This is a contiguous buffer linked together by DMA descriptors of length
491/// 4095 at most. It can only be used for transmitting data to a peripheral's
492/// FIFO. See [DmaRxBuf] for receiving data.
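///
/// # Example
///
/// A minimal sketch. The `static mut`s stand in for DMA-capable DRAM that real
/// firmware typically allocates with esp-hal's `dma_buffers!`/`dma_descriptors!`
/// macros; errors are simply unwrapped.
///
/// ```rust,ignore
/// use esp_hal::dma::{DmaDescriptor, DmaTxBuf};
///
/// static mut DESCRIPTORS: [DmaDescriptor; 2] = [DmaDescriptor::EMPTY; 2];
/// static mut BUFFER: [u8; 8000] = [0u8; 8000];
///
/// // Link the descriptors to the buffer, then fill it with the data to transmit.
/// let mut tx_buf = DmaTxBuf::new(unsafe { &mut DESCRIPTORS }, unsafe { &mut BUFFER }).unwrap();
/// tx_buf.fill(&[0xAA; 128]);
/// assert_eq!(tx_buf.len(), 128);
/// ```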
493#[derive(Debug)]
494#[cfg_attr(feature = "defmt", derive(defmt::Format))]
495pub struct DmaTxBuf {
496    descriptors: DescriptorSet<'static>,
497    buffer: &'static mut [u8],
498    burst: BurstConfig,
499}
500
501impl DmaTxBuf {
502    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
503    ///
504    /// There must be enough descriptors for the provided buffer.
505    /// Depending on alignment requirements, each descriptor can handle at most
506    /// 4095 bytes worth of buffer.
507    ///
508    /// Both the descriptors and buffer must be in DMA-capable memory.
509    /// Only DRAM is supported for descriptors.
510    pub fn new(
511        descriptors: &'static mut [DmaDescriptor],
512        buffer: &'static mut [u8],
513    ) -> Result<Self, DmaBufError> {
514        Self::new_with_config(descriptors, buffer, BurstConfig::default())
515    }
516
517    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
518    ///
519    /// There must be enough descriptors for the provided buffer.
520    /// Depending on alignment requirements, each descriptor can handle at most
521    /// 4095 bytes worth of buffer.
522    ///
523    /// Both the descriptors and buffer must be in DMA-capable memory.
524    /// Only DRAM is supported for descriptors.
525    pub fn new_with_config(
526        descriptors: &'static mut [DmaDescriptor],
527        buffer: &'static mut [u8],
528        config: impl Into<BurstConfig>,
529    ) -> Result<Self, DmaBufError> {
530        let mut buf = Self {
531            descriptors: DescriptorSet::new(descriptors)?,
532            buffer,
533            burst: BurstConfig::default(),
534        };
535
536        let capacity = buf.capacity();
537        buf.configure(config, capacity)?;
538
539        Ok(buf)
540    }
541
542    fn configure(
543        &mut self,
544        burst: impl Into<BurstConfig>,
545        length: usize,
546    ) -> Result<(), DmaBufError> {
547        let burst = burst.into();
548        self.set_length_fallible(length, burst)?;
549
550        self.descriptors.link_with_buffer(
551            self.buffer,
552            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
553        )?;
554
555        self.burst = burst;
556        Ok(())
557    }
558
559    /// Configures the DMA to use burst transfers to access this buffer.
560    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
561        let len = self.len();
562        self.configure(burst, len)
563    }
564
565    /// Consume the buf, returning the descriptors and buffer.
566    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
567        (self.descriptors.into_inner(), self.buffer)
568    }
569
570    /// Returns the size of the underlying buffer
571    pub fn capacity(&self) -> usize {
572        self.buffer.len()
573    }
574
575    /// Return the number of bytes that would be transmitted by this buf.
576    #[allow(clippy::len_without_is_empty)]
577    pub fn len(&self) -> usize {
578        self.descriptors
579            .linked_iter()
580            .map(|d| d.len())
581            .sum::<usize>()
582    }
583
584    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
585        if len > self.capacity() {
586            return Err(DmaBufError::BufferTooSmall);
587        }
588        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;
589
590        self.descriptors.set_tx_length(
591            len,
592            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
593        )?;
594
595        // This only needs to be done once (after every significant length change) as
596        // Self::prepare sets Preparation::auto_write_back to false.
597        for desc in self.descriptors.linked_iter_mut() {
598            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
599            // the end of the transfer.
600            desc.reset_for_tx(desc.next.is_null());
601        }
602
603        Ok(())
604    }
605
606    /// Reset the descriptors to only transmit `len` bytes from this
607    /// buf.
608    ///
609    /// `len` must be less than or equal to the buffer
610    /// size.
611    pub fn set_length(&mut self, len: usize) {
612        unwrap!(self.set_length_fallible(len, self.burst))
613    }
614
615    /// Fills the TX buffer with the bytes provided in `data` and resets the
616    /// descriptors to only cover the filled section.
617    ///
618    /// The number of bytes in data must be less than or equal to the buffer
619    /// size.
620    pub fn fill(&mut self, data: &[u8]) {
621        self.set_length(data.len());
622        self.as_mut_slice()[..data.len()].copy_from_slice(data);
623    }
624
625    /// Returns the buf as a mutable slice that can be written.
626    pub fn as_mut_slice(&mut self) -> &mut [u8] {
627        self.buffer
628    }
629
630    /// Returns the buf as a slice that can be read.
631    pub fn as_slice(&self) -> &[u8] {
632        self.buffer
633    }
634}
635
636unsafe impl DmaTxBuffer for DmaTxBuf {
637    type View = BufView<DmaTxBuf>;
638    type Final = DmaTxBuf;
639
640    fn prepare(&mut self) -> Preparation {
641        cfg_if::cfg_if! {
642            if #[cfg(psram_dma)] {
643                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
644                if is_data_in_psram {
645                    unsafe {
646                        crate::soc::cache_writeback_addr(
647                            self.buffer.as_ptr() as u32,
648                            self.buffer.len() as u32,
649                        )
650                    };
651                }
652            }
653        }
654
655        Preparation {
656            start: self.descriptors.head(),
657            direction: TransferDirection::Out,
658            #[cfg(psram_dma)]
659            accesses_psram: is_data_in_psram,
660            burst_transfer: self.burst,
661            check_owner: None,
662            auto_write_back: false,
663        }
664    }
665
666    fn into_view(self) -> BufView<DmaTxBuf> {
667        BufView(self)
668    }
669
670    fn from_view(view: Self::View) -> Self {
671        view.0
672    }
673}
674
675/// DMA receive buffer
676///
677/// This is a contiguous buffer linked together by DMA descriptors of length
678/// 4092. It can only be used for receiving data from a peripheral's FIFO.
679/// See [DmaTxBuf] for transmitting data.
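///
/// # Example
///
/// A minimal sketch. The `static mut`s stand in for DMA-capable DRAM (typically
/// allocated with esp-hal's `dma_buffers!`/`dma_descriptors!` macros); errors
/// are simply unwrapped and the actual transfer is left to a peripheral driver.
///
/// ```rust,ignore
/// use esp_hal::dma::{DmaDescriptor, DmaRxBuf};
///
/// static mut DESCRIPTORS: [DmaDescriptor; 3] = [DmaDescriptor::EMPTY; 3];
/// static mut BUFFER: [u8; 12000] = [0u8; 12000];
///
/// let rx_buf = DmaRxBuf::new(unsafe { &mut DESCRIPTORS }, unsafe { &mut BUFFER }).unwrap();
/// // ... hand `rx_buf` to a driver, wait for the transfer, get the buffer back ...
/// let mut data = [0u8; 128];
/// let received = rx_buf.read_received_data(&mut data);
/// ```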
680#[derive(Debug)]
681#[cfg_attr(feature = "defmt", derive(defmt::Format))]
682pub struct DmaRxBuf {
683    descriptors: DescriptorSet<'static>,
684    buffer: &'static mut [u8],
685    burst: BurstConfig,
686}
687
688impl DmaRxBuf {
689    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
690    ///
691    /// There must be enough descriptors for the provided buffer.
692    /// Each descriptor can handle 4092 bytes worth of buffer.
693    ///
694    /// Both the descriptors and buffer must be in DMA-capable memory.
695    /// Only DRAM is supported.
696    pub fn new(
697        descriptors: &'static mut [DmaDescriptor],
698        buffer: &'static mut [u8],
699    ) -> Result<Self, DmaBufError> {
700        Self::new_with_config(descriptors, buffer, BurstConfig::default())
701    }
702
703    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
704    ///
705    /// There must be enough descriptors for the provided buffer.
706    /// Depending on alignment requirements, each descriptor can handle at most
707    /// 4092 bytes worth of buffer.
708    ///
709    /// Both the descriptors and buffer must be in DMA-capable memory.
710    /// Only DRAM is supported for descriptors.
711    pub fn new_with_config(
712        descriptors: &'static mut [DmaDescriptor],
713        buffer: &'static mut [u8],
714        config: impl Into<BurstConfig>,
715    ) -> Result<Self, DmaBufError> {
716        let mut buf = Self {
717            descriptors: DescriptorSet::new(descriptors)?,
718            buffer,
719            burst: BurstConfig::default(),
720        };
721
722        buf.configure(config, buf.capacity())?;
723
724        Ok(buf)
725    }
726
727    fn configure(
728        &mut self,
729        burst: impl Into<BurstConfig>,
730        length: usize,
731    ) -> Result<(), DmaBufError> {
732        let burst = burst.into();
733        self.set_length_fallible(length, burst)?;
734
735        self.descriptors.link_with_buffer(
736            self.buffer,
737            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
738        )?;
739
740        self.burst = burst;
741        Ok(())
742    }
743
744    /// Configures the DMA to use burst transfers to access this buffer.
745    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
746        let len = self.len();
747        self.configure(burst, len)
748    }
749
750    /// Consume the buf, returning the descriptors and buffer.
751    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
752        (self.descriptors.into_inner(), self.buffer)
753    }
754
755    /// Returns the size of the underlying buffer
756    pub fn capacity(&self) -> usize {
757        self.buffer.len()
758    }
759
760    /// Returns the maximum number of bytes that this buf has been configured to
761    /// receive.
762    #[allow(clippy::len_without_is_empty)]
763    pub fn len(&self) -> usize {
764        self.descriptors
765            .linked_iter()
766            .map(|d| d.size())
767            .sum::<usize>()
768    }
769
770    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
771        if len > self.capacity() {
772            return Err(DmaBufError::BufferTooSmall);
773        }
774        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;
775
776        self.descriptors.set_rx_length(
777            len,
778            burst.max_chunk_size_for(&self.buffer[..len], TransferDirection::In),
779        )
780    }
781
782    /// Reset the descriptors to only receive `len` bytes into this
783    /// buf.
784    ///
785    /// `len` must be less than or equal to the buffer
786    /// size.
787    pub fn set_length(&mut self, len: usize) {
788        unwrap!(self.set_length_fallible(len, self.burst));
789    }
790
791    /// Returns the entire underlying buffer as a slice that can be read.
792    pub fn as_slice(&self) -> &[u8] {
793        self.buffer
794    }
795
796    /// Returns the entire underlying buffer as a slice that can be written.
797    pub fn as_mut_slice(&mut self) -> &mut [u8] {
798        self.buffer
799    }
800
801    /// Return the number of bytes that were received by this buf.
802    pub fn number_of_received_bytes(&self) -> usize {
803        self.descriptors
804            .linked_iter()
805            .map(|d| d.len())
806            .sum::<usize>()
807    }
808
809    /// Reads the received data into the provided `buf`.
810    ///
811    /// If `buf.len()` is less than the amount of received data then only the
812    /// first `buf.len()` bytes of received data are written into `buf`.
813    ///
814    /// Returns the number of bytes written to `buf`.
815    pub fn read_received_data(&self, mut buf: &mut [u8]) -> usize {
816        // Note that due to an ESP32 quirk, the last received descriptor may not get
817        // updated.
818        let capacity = buf.len();
819        for chunk in self.received_data() {
820            if buf.is_empty() {
821                break;
822            }
823            let to_fill;
824            (to_fill, buf) = buf.split_at_mut(chunk.len());
825            to_fill.copy_from_slice(chunk);
826        }
827
828        capacity - buf.len()
829    }
830
831    /// Returns the received data as an iterator of slices.
832    pub fn received_data(&self) -> impl Iterator<Item = &[u8]> {
833        self.descriptors.linked_iter().map(|desc| {
834            // SAFETY: We set up the descriptor to point to a subslice of the buffer, and
835            // here we are only recreating that slice with a perhaps shorter length.
836            // We are also not accessing `self.buffer` while this slice is alive, so we
837            // are not violating any aliasing rules.
838            unsafe { core::slice::from_raw_parts(desc.buffer.cast_const(), desc.len()) }
839        })
840    }
841}
842
843unsafe impl DmaRxBuffer for DmaRxBuf {
844    type View = BufView<DmaRxBuf>;
845    type Final = DmaRxBuf;
846
847    fn prepare(&mut self) -> Preparation {
848        for desc in self.descriptors.linked_iter_mut() {
849            desc.reset_for_rx();
850        }
851
852        cfg_if::cfg_if! {
853            if #[cfg(psram_dma)] {
854                // Optimization: avoid locking for PSRAM range.
855                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
856                if is_data_in_psram {
857                    unsafe {
858                        crate::soc::cache_invalidate_addr(
859                            self.buffer.as_ptr() as u32,
860                            self.buffer.len() as u32,
861                        )
862                    };
863                }
864            }
865        }
866
867        Preparation {
868            start: self.descriptors.head(),
869            direction: TransferDirection::In,
870            #[cfg(psram_dma)]
871            accesses_psram: is_data_in_psram,
872            burst_transfer: self.burst,
873            check_owner: None,
874            auto_write_back: true,
875        }
876    }
877
878    fn into_view(self) -> BufView<DmaRxBuf> {
879        BufView(self)
880    }
881
882    fn from_view(view: Self::View) -> Self {
883        view.0
884    }
885}
886
887/// DMA transmit and receive buffer.
888///
889/// This is a (single) contiguous buffer linked together by two sets of DMA
890/// descriptors of length 4092 each.
891/// It can be used for simultaneously transmitting to and receiving from a
892/// peripheral's FIFO. These are typically full-duplex transfers.
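///
/// # Example
///
/// A minimal sketch showing construction only; the `static mut`s stand in for
/// DMA-capable DRAM and errors are simply unwrapped.
///
/// ```rust,ignore
/// static mut RX_DESCRIPTORS: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY; 1];
/// static mut TX_DESCRIPTORS: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY; 1];
/// static mut BUFFER: [u8; 4092] = [0u8; 4092];
///
/// let buf = DmaRxTxBuf::new(
///     unsafe { &mut RX_DESCRIPTORS },
///     unsafe { &mut TX_DESCRIPTORS },
///     unsafe { &mut BUFFER },
/// )
/// .unwrap();
/// ```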
893#[derive(Debug)]
894#[cfg_attr(feature = "defmt", derive(defmt::Format))]
895pub struct DmaRxTxBuf {
896    rx_descriptors: DescriptorSet<'static>,
897    tx_descriptors: DescriptorSet<'static>,
898    buffer: &'static mut [u8],
899    burst: BurstConfig,
900}
901
902impl DmaRxTxBuf {
903    /// Creates a new [DmaRxTxBuf] from some descriptors and a buffer.
904    ///
905    /// There must be enough descriptors for the provided buffer.
906    /// Each descriptor can handle 4092 bytes worth of buffer.
907    ///
908    /// Both the descriptors and buffer must be in DMA-capable memory.
909    /// Only DRAM is supported.
910    pub fn new(
911        rx_descriptors: &'static mut [DmaDescriptor],
912        tx_descriptors: &'static mut [DmaDescriptor],
913        buffer: &'static mut [u8],
914    ) -> Result<Self, DmaBufError> {
915        let mut buf = Self {
916            rx_descriptors: DescriptorSet::new(rx_descriptors)?,
917            tx_descriptors: DescriptorSet::new(tx_descriptors)?,
918            buffer,
919            burst: BurstConfig::default(),
920        };
921
922        let capacity = buf.capacity();
923        buf.configure(buf.burst, capacity)?;
924
925        Ok(buf)
926    }
927
928    fn configure(
929        &mut self,
930        burst: impl Into<BurstConfig>,
931        length: usize,
932    ) -> Result<(), DmaBufError> {
933        let burst = burst.into();
934        self.set_length_fallible(length, burst)?;
935
936        self.rx_descriptors.link_with_buffer(
937            self.buffer,
938            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
939        )?;
940        self.tx_descriptors.link_with_buffer(
941            self.buffer,
942            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
943        )?;
944
945        self.burst = burst;
946
947        Ok(())
948    }
949
950    /// Configures the DMA to use burst transfers to access this buffer.
951    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
952        let len = self.len();
953        self.configure(burst, len)
954    }
955
956    /// Consume the buf, returning the rx descriptors, tx descriptors and
957    /// buffer.
958    pub fn split(
959        self,
960    ) -> (
961        &'static mut [DmaDescriptor],
962        &'static mut [DmaDescriptor],
963        &'static mut [u8],
964    ) {
965        (
966            self.rx_descriptors.into_inner(),
967            self.tx_descriptors.into_inner(),
968            self.buffer,
969        )
970    }
971
972    /// Return the size of the underlying buffer.
973    pub fn capacity(&self) -> usize {
974        self.buffer.len()
975    }
976
977    /// Return the number of bytes that would be transmitted by this buf.
978    #[allow(clippy::len_without_is_empty)]
979    pub fn len(&self) -> usize {
980        self.tx_descriptors
981            .linked_iter()
982            .map(|d| d.len())
983            .sum::<usize>()
984    }
985
986    /// Returns the entire buf as a slice that can be read.
987    pub fn as_slice(&self) -> &[u8] {
988        self.buffer
989    }
990
991    /// Returns the entire buf as a slice that can be written.
992    pub fn as_mut_slice(&mut self) -> &mut [u8] {
993        self.buffer
994    }
995
996    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
997        if len > self.capacity() {
998            return Err(DmaBufError::BufferTooSmall);
999        }
1000        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;
1001        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;
1002
1003        self.rx_descriptors.set_rx_length(
1004            len,
1005            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
1006        )?;
1007        self.tx_descriptors.set_tx_length(
1008            len,
1009            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
1010        )?;
1011
1012        Ok(())
1013    }
1014
1015    /// Reset the descriptors to only transmit/receive `len` amount of bytes
1016    /// with this buf.
1017    ///
1018    /// `len` must be less than or equal to the buffer size.
1019    pub fn set_length(&mut self, len: usize) {
1020        unwrap!(self.set_length_fallible(len, self.burst));
1021    }
1022}
1023
1024unsafe impl DmaTxBuffer for DmaRxTxBuf {
1025    type View = BufView<DmaRxTxBuf>;
1026    type Final = DmaRxTxBuf;
1027
1028    fn prepare(&mut self) -> Preparation {
1029        for desc in self.tx_descriptors.linked_iter_mut() {
1030            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
1031            // the end of the transfer.
1032            desc.reset_for_tx(desc.next.is_null());
1033        }
1034
1035        cfg_if::cfg_if! {
1036            if #[cfg(psram_dma)] {
1037                // Optimization: avoid locking for PSRAM range.
1038                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
1039                if is_data_in_psram {
1040                    unsafe {
1041                        crate::soc::cache_writeback_addr(
1042                            self.buffer.as_ptr() as u32,
1043                            self.buffer.len() as u32,
1044                        )
1045                    };
1046                }
1047            }
1048        }
1049
1050        Preparation {
1051            start: self.tx_descriptors.head(),
1052            direction: TransferDirection::Out,
1053            #[cfg(psram_dma)]
1054            accesses_psram: is_data_in_psram,
1055            burst_transfer: self.burst,
1056            check_owner: None,
1057            auto_write_back: false,
1058        }
1059    }
1060
1061    fn into_view(self) -> BufView<DmaRxTxBuf> {
1062        BufView(self)
1063    }
1064
1065    fn from_view(view: Self::View) -> Self {
1066        view.0
1067    }
1068}
1069
1070unsafe impl DmaRxBuffer for DmaRxTxBuf {
1071    type View = BufView<DmaRxTxBuf>;
1072    type Final = DmaRxTxBuf;
1073
1074    fn prepare(&mut self) -> Preparation {
1075        for desc in self.rx_descriptors.linked_iter_mut() {
1076            desc.reset_for_rx();
1077        }
1078
1079        cfg_if::cfg_if! {
1080            if #[cfg(psram_dma)] {
1081                // Optimization: avoid locking for PSRAM range.
1082                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
1083                if is_data_in_psram {
1084                    unsafe {
1085                        crate::soc::cache_invalidate_addr(
1086                            self.buffer.as_ptr() as u32,
1087                            self.buffer.len() as u32,
1088                        )
1089                    };
1090                }
1091            }
1092        }
1093
1094        Preparation {
1095            start: self.rx_descriptors.head(),
1096            direction: TransferDirection::In,
1097            #[cfg(psram_dma)]
1098            accesses_psram: is_data_in_psram,
1099            burst_transfer: self.burst,
1100            check_owner: None,
1101            auto_write_back: true,
1102        }
1103    }
1104
1105    fn into_view(self) -> BufView<DmaRxTxBuf> {
1106        BufView(self)
1107    }
1108
1109    fn from_view(view: Self::View) -> Self {
1110        view.0
1111    }
1112}
1113
1114/// DMA Streaming Receive Buffer.
1115///
1116/// This is a contiguous buffer linked together by DMA descriptors, and the
1117/// buffer is evenly distributed across the provided descriptors.
1118///
1119/// It is used for continuously streaming data from a peripheral's FIFO.
1120///
1121/// It does so by maintaining a sliding window of descriptors that progresses when
1122/// you call [DmaRxStreamBufView::consume].
1123///
1124/// The list starts out like so `A (empty) -> B (empty) -> C (empty) -> D
1125/// (empty) -> NULL`.
1126///
1127/// As the DMA writes to the buffers the list progresses like so:
1128/// - `A (empty) -> B (empty) -> C (empty) -> D (empty) -> NULL`
1129/// - `A (full)  -> B (empty) -> C (empty) -> D (empty) -> NULL`
1130/// - `A (full)  -> B (full)  -> C (empty) -> D (empty) -> NULL`
1131/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
1132///
1133/// As you call [DmaRxStreamBufView::consume] the list (approximately)
1134/// progresses like so:
1135/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
1136/// - `B (full)  -> C (full)  -> D (empty) -> A (empty) -> NULL`
1137/// - `C (full)  -> D (empty) -> A (empty) -> B (empty) -> NULL`
1138/// - `D (empty) -> A (empty) -> B (empty) -> C (empty) -> NULL`
1139///
1140/// If all the descriptors fill up, the [DmaRxInterrupt::DescriptorEmpty]
1141/// interrupt will fire and the DMA will stop writing, at which point it is up
1142/// to you to resume/restart the transfer.
1143///
1144/// Note: This buffer will not tell you when this condition occurs; you should
1145/// check with the driver to see if the DMA has stopped.
1146///
1147/// When constructing this buffer, it is important to tune the ratio between the
1148/// chunk size and buffer size appropriately. Smaller chunk sizes mean you
1149/// receive data more frequently, but this means the DMA interrupts
1150/// ([DmaRxInterrupt::Done]) also fire more frequently (if you use them).
1151///
1152/// See [DmaRxStreamBufView] for APIs available whilst a transfer is in
1153/// progress.
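///
/// # Example
///
/// A sketch of draining data while a transfer runs. How the
/// [DmaRxStreamBufView] (`view` below) is obtained depends on the peripheral
/// driver and is not shown; the `static mut`s stand in for DMA-capable DRAM.
///
/// ```rust,ignore
/// // 4 descriptors over a 4096-byte buffer -> 1024-byte chunks per descriptor.
/// static mut DESCRIPTORS: [DmaDescriptor; 4] = [DmaDescriptor::EMPTY; 4];
/// static mut BUFFER: [u8; 4096] = [0u8; 4096];
/// let buf = DmaRxStreamBuf::new(unsafe { &mut DESCRIPTORS }, unsafe { &mut BUFFER }).unwrap();
///
/// // ... start the transfer, then repeatedly, with `view: DmaRxStreamBufView` ...
/// let mut scratch = [0u8; 256];
/// let n = view.pop(&mut scratch);
/// // `scratch[..n]` now holds up to 256 received bytes.
/// ```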
1154pub struct DmaRxStreamBuf {
1155    descriptors: &'static mut [DmaDescriptor],
1156    buffer: &'static mut [u8],
1157    burst: BurstConfig,
1158}
1159
1160impl DmaRxStreamBuf {
1161    /// Creates a new [DmaRxStreamBuf] evenly distributing the buffer between
1162    /// the provided descriptors.
1163    pub fn new(
1164        descriptors: &'static mut [DmaDescriptor],
1165        buffer: &'static mut [u8],
1166    ) -> Result<Self, DmaBufError> {
1167        if !is_slice_in_dram(descriptors) {
1168            return Err(DmaBufError::UnsupportedMemoryRegion);
1169        }
1170        if !is_slice_in_dram(buffer) {
1171            return Err(DmaBufError::UnsupportedMemoryRegion);
1172        }
1173
1174        if descriptors.is_empty() {
1175            return Err(DmaBufError::InsufficientDescriptors);
1176        }
1177
1178        // Evenly distribute the buffer between the descriptors.
1179        let chunk_size = buffer.len() / descriptors.len();
1180
1181        if chunk_size > 4095 {
1182            return Err(DmaBufError::InsufficientDescriptors);
1183        }
1184
1185        // Check that the last descriptor can hold the excess
1186        let excess = buffer.len() % descriptors.len();
1187        if chunk_size + excess > 4095 {
1188            return Err(DmaBufError::InsufficientDescriptors);
1189        }
1190
1191        let mut chunks = buffer.chunks_exact_mut(chunk_size);
1192        for (desc, chunk) in descriptors.iter_mut().zip(chunks.by_ref()) {
1193            desc.buffer = chunk.as_mut_ptr();
1194            desc.set_size(chunk.len());
1195        }
1196
1197        let remainder = chunks.into_remainder();
1198        debug_assert_eq!(remainder.len(), excess);
1199
1200        if !remainder.is_empty() {
1201            // Append any excess to the last descriptor.
1202            let last_descriptor = descriptors.last_mut().unwrap();
1203            last_descriptor.set_size(last_descriptor.size() + remainder.len());
1204        }
1205
1206        Ok(Self {
1207            descriptors,
1208            buffer,
1209            burst: BurstConfig::default(),
1210        })
1211    }
1212
1213    /// Consume the buf, returning the descriptors and buffer.
1214    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
1215        (self.descriptors, self.buffer)
1216    }
1217}
1218
1219unsafe impl DmaRxBuffer for DmaRxStreamBuf {
1220    type View = DmaRxStreamBufView;
1221    type Final = DmaRxStreamBuf;
1222
1223    fn prepare(&mut self) -> Preparation {
1224        // Link up all the descriptors (but not in a circle).
1225        let mut next = null_mut();
1226        for desc in self.descriptors.iter_mut().rev() {
1227            desc.next = next;
1228            next = desc;
1229
1230            desc.reset_for_rx();
1231        }
1232        Preparation {
1233            start: self.descriptors.as_mut_ptr(),
1234            direction: TransferDirection::In,
1235            #[cfg(psram_dma)]
1236            accesses_psram: false,
1237            burst_transfer: self.burst,
1238
1239            // Whilst we give ownership of the descriptors to the DMA, the correctness of this buffer
1240            // implementation doesn't rely on the DMA checking for descriptor ownership.
1241            // No descriptor is added back to the end of the stream before it's ready for the DMA
1242            // to consume it.
1243            check_owner: None,
1244            auto_write_back: true,
1245        }
1246    }
1247
1248    fn into_view(self) -> DmaRxStreamBufView {
1249        DmaRxStreamBufView {
1250            buf: self,
1251            descriptor_idx: 0,
1252            descriptor_offset: 0,
1253        }
1254    }
1255
1256    fn from_view(view: Self::View) -> Self {
1257        view.buf
1258    }
1259}
1260
1261/// A view into a [DmaRxStreamBuf]
1262pub struct DmaRxStreamBufView {
1263    buf: DmaRxStreamBuf,
1264    descriptor_idx: usize,
1265    descriptor_offset: usize,
1266}
1267
1268impl DmaRxStreamBufView {
1269    /// Returns the number of bytes that are available to read from the buf.
1270    pub fn available_bytes(&self) -> usize {
1271        let (tail, head) = self.buf.descriptors.split_at(self.descriptor_idx);
1272        let mut result = 0;
1273        for desc in head.iter().chain(tail) {
1274            if desc.owner() == Owner::Dma {
1275                break;
1276            }
1277            result += desc.len();
1278        }
1279        result - self.descriptor_offset
1280    }
1281
1282    /// Reads as much as possible into the buf from the available data.
1283    pub fn pop(&mut self, buf: &mut [u8]) -> usize {
1284        if buf.is_empty() {
1285            return 0;
1286        }
1287        let total_bytes = buf.len();
1288
1289        let mut remaining = buf;
1290        loop {
1291            let available = self.peek();
1292            if available.len() >= remaining.len() {
1293                remaining.copy_from_slice(&available[0..remaining.len()]);
1294                self.consume(remaining.len());
1295                let consumed = remaining.len();
1296                remaining = &mut remaining[consumed..];
1297                break;
1298            } else {
1299                let to_consume = available.len();
1300                remaining[0..to_consume].copy_from_slice(available);
1301                self.consume(to_consume);
1302                remaining = &mut remaining[to_consume..];
1303            }
1304        }
1305
1306        total_bytes - remaining.len()
1307    }
1308
1309    /// Returns a slice into the buffer containing available data.
1310    /// This will be the longest possible contiguous slice into the buffer that
1311    /// contains data that is available to read.
1312    ///
1313    /// Note: This function ignores EOFs; see [Self::peek_until_eof] if you need
1314    /// EOF support.
1315    pub fn peek(&self) -> &[u8] {
1316        let (slice, _) = self.peek_internal(false);
1317        slice
1318    }
1319
1320    /// Same as [Self::peek] but will not skip over any EOFs.
1321    ///
1322    /// It also returns a boolean indicating whether this slice ends with an EOF
1323    /// or not.
1324    pub fn peek_until_eof(&self) -> (&[u8], bool) {
1325        self.peek_internal(true)
1326    }
1327
1328    /// Consumes the first `n` bytes from the available data, returning any
1329    /// fully consumed descriptors back to the DMA.
1330    /// This is typically called after [Self::peek]/[Self::peek_until_eof].
1331    ///
1332    /// Returns the number of bytes that were actually consumed.
1333    pub fn consume(&mut self, n: usize) -> usize {
1334        let mut remaining_bytes_to_consume = n;
1335
1336        loop {
1337            let desc = &mut self.buf.descriptors[self.descriptor_idx];
1338
1339            if desc.owner() == Owner::Dma {
1340                // Descriptor is still owned by DMA so it can't be read yet.
1341                // This should only happen when there is no more data available to read.
1342                break;
1343            }
1344
1345            let remaining_bytes_in_descriptor = desc.len() - self.descriptor_offset;
1346            if remaining_bytes_to_consume < remaining_bytes_in_descriptor {
1347                self.descriptor_offset += remaining_bytes_to_consume;
1348                remaining_bytes_to_consume = 0;
1349                break;
1350            }
1351
1352            // Reset the descriptor for reuse.
1353            desc.set_owner(Owner::Dma);
1354            desc.set_suc_eof(false);
1355            desc.set_length(0);
1356
1357            // Before connecting this descriptor to the end of the list, the next descriptor
1358            // must be disconnected from this one to prevent the DMA from
1359            // overtaking.
1360            desc.next = null_mut();
1361
1362            let desc_ptr: *mut _ = desc;
1363
1364            let prev_descriptor_index = self
1365                .descriptor_idx
1366                .checked_sub(1)
1367                .unwrap_or(self.buf.descriptors.len() - 1);
1368
1369            // Connect this consumed descriptor to the end of the chain.
1370            self.buf.descriptors[prev_descriptor_index].next = desc_ptr;
1371
1372            self.descriptor_idx += 1;
1373            if self.descriptor_idx >= self.buf.descriptors.len() {
1374                self.descriptor_idx = 0;
1375            }
1376            self.descriptor_offset = 0;
1377
1378            remaining_bytes_to_consume -= remaining_bytes_in_descriptor;
1379        }
1380
1381        n - remaining_bytes_to_consume
1382    }
1383
1384    fn peek_internal(&self, stop_at_eof: bool) -> (&[u8], bool) {
1385        let descriptors = &self.buf.descriptors[self.descriptor_idx..];
1386
1387        // There must be at least one descriptor.
1388        debug_assert!(!descriptors.is_empty());
1389
1390        if descriptors.len() == 1 {
1391            let last_descriptor = &descriptors[0];
1392            if last_descriptor.owner() == Owner::Dma {
1393                // No data available.
1394                (&[], false)
1395            } else {
1396                let length = last_descriptor.len() - self.descriptor_offset;
1397                (
1398                    &self.buf.buffer[self.buf.buffer.len() - length..],
1399                    last_descriptor.flags.suc_eof(),
1400                )
1401            }
1402        } else {
1403            let chunk_size = descriptors[0].size();
1404            let mut found_eof = false;
1405
1406            let mut number_of_contiguous_bytes = 0;
1407            for desc in descriptors {
1408                if desc.owner() == Owner::Dma {
1409                    break;
1410                }
1411                number_of_contiguous_bytes += desc.len();
1412
1413                if stop_at_eof && desc.flags.suc_eof() {
1414                    found_eof = true;
1415                    break;
1416                }
1417                // If the length is smaller than the size, the contiguous-ness ends here.
1418                if desc.len() < desc.size() {
1419                    break;
1420                }
1421            }
1422
1423            (
1424                &self.buf.buffer[chunk_size * self.descriptor_idx..][..number_of_contiguous_bytes]
1425                    [self.descriptor_offset..],
1426                found_eof,
1427            )
1428        }
1429    }
1430}
1431
1432static mut EMPTY: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY];
1433
1434/// An empty buffer that can be used when you don't need to transfer any data.
1435pub struct EmptyBuf;
1436
1437unsafe impl DmaTxBuffer for EmptyBuf {
1438    type View = EmptyBuf;
1439    type Final = EmptyBuf;
1440
1441    fn prepare(&mut self) -> Preparation {
1442        Preparation {
1443            start: core::ptr::addr_of_mut!(EMPTY).cast(),
1444            direction: TransferDirection::Out,
1445            #[cfg(psram_dma)]
1446            accesses_psram: false,
1447            burst_transfer: BurstConfig::default(),
1448
1449            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
1450            // channel does *NOT* check for ownership, otherwise the channel will return an error.
1451            check_owner: Some(false),
1452
1453            // The DMA should not write back to the descriptor as it is shared.
1454            auto_write_back: false,
1455        }
1456    }
1457
1458    fn into_view(self) -> EmptyBuf {
1459        self
1460    }
1461
1462    fn from_view(view: Self::View) -> Self {
1463        view
1464    }
1465}
1466
1467unsafe impl DmaRxBuffer for EmptyBuf {
1468    type View = EmptyBuf;
1469    type Final = EmptyBuf;
1470
1471    fn prepare(&mut self) -> Preparation {
1472        Preparation {
1473            start: core::ptr::addr_of_mut!(EMPTY).cast(),
1474            direction: TransferDirection::In,
1475            #[cfg(psram_dma)]
1476            accesses_psram: false,
1477            burst_transfer: BurstConfig::default(),
1478
1479            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
1480            // channel does *NOT* check for ownership, otherwise the channel will return an error.
1481            check_owner: Some(false),
1482            auto_write_back: true,
1483        }
1484    }
1485
1486    fn into_view(self) -> EmptyBuf {
1487        self
1488    }
1489
1490    fn from_view(view: Self::View) -> Self {
1491        view
1492    }
1493}
1494
1495/// DMA Loop Buffer
1496///
1497/// This consists of a single descriptor that points to itself and points to a
1498/// single buffer, resulting in the buffer being transmitted over and over
1499/// again, indefinitely.
1500///
1501/// Note: A DMA descriptor is 12 bytes. If your buffer is significantly shorter
1502/// than this, the DMA channel will spend more time reading the descriptor than
1503/// it does reading the buffer, which may leave it unable to keep up with the
1504/// bandwidth requirements of some peripherals at high frequencies.
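///
/// # Example
///
/// A minimal sketch; the `static mut`s stand in for DMA-capable DRAM and errors
/// are simply unwrapped.
///
/// ```rust,ignore
/// static mut DESCRIPTOR: DmaDescriptor = DmaDescriptor::EMPTY;
/// static mut BUFFER: [u8; 64] = [0u8; 64];
///
/// let mut loop_buf = DmaLoopBuf::new(unsafe { &mut DESCRIPTOR }, unsafe { &mut BUFFER }).unwrap();
/// // DmaLoopBuf derefs to `[u8]`, so the repeating pattern can be written directly.
/// loop_buf.copy_from_slice(&[0x55; 64]);
/// ```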
1505pub struct DmaLoopBuf {
1506    descriptor: &'static mut DmaDescriptor,
1507    buffer: &'static mut [u8],
1508}
1509
1510impl DmaLoopBuf {
1511    /// Create a new [DmaLoopBuf].
1512    pub fn new(
1513        descriptor: &'static mut DmaDescriptor,
1514        buffer: &'static mut [u8],
1515    ) -> Result<DmaLoopBuf, DmaBufError> {
1516        if !is_slice_in_dram(buffer) {
1517            return Err(DmaBufError::UnsupportedMemoryRegion);
1518        }
1519        if !is_slice_in_dram(core::slice::from_ref(descriptor)) {
1520            return Err(DmaBufError::UnsupportedMemoryRegion);
1521        }
1522
1523        if buffer.len() > BurstConfig::default().max_chunk_size_for(buffer, TransferDirection::Out)
1524        {
1525            return Err(DmaBufError::InsufficientDescriptors);
1526        }
1527
1528        descriptor.set_owner(Owner::Dma); // Doesn't matter
1529        descriptor.set_suc_eof(false);
1530        descriptor.set_length(buffer.len());
1531        descriptor.set_size(buffer.len());
1532        descriptor.buffer = buffer.as_mut_ptr();
1533        descriptor.next = descriptor;
1534
1535        Ok(Self { descriptor, buffer })
1536    }
1537
1538    /// Consume the buf, returning the descriptor and buffer.
1539    pub fn split(self) -> (&'static mut DmaDescriptor, &'static mut [u8]) {
1540        (self.descriptor, self.buffer)
1541    }
1542}

unsafe impl DmaTxBuffer for DmaLoopBuf {
    type View = DmaLoopBuf;
    type Final = DmaLoopBuf;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            start: self.descriptor,
            #[cfg(psram_dma)]
            accesses_psram: false,
            direction: TransferDirection::Out,
            burst_transfer: BurstConfig::default(),
            // The DMA must not check the owner bit, as it is never set.
            check_owner: Some(false),

            // Doesn't matter either way: the owner bit is never checked and the looping
            // descriptor is never handed back to the CPU.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> Self::View {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

impl Deref for DmaLoopBuf {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buffer
    }
}

impl DerefMut for DmaLoopBuf {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer
    }
}

/// A Preparation that masquerades as a DMA buffer.
///
/// For low-level use, where none of the pre-made buffers really fits.
///
/// This type should likely never be visible outside of esp-hal.
pub(crate) struct NoBuffer(Preparation);
impl NoBuffer {
    fn prep(&self) -> Preparation {
        Preparation {
            start: self.0.start,
            direction: self.0.direction,
            #[cfg(psram_dma)]
            accesses_psram: self.0.accesses_psram,
            burst_transfer: self.0.burst_transfer,
            check_owner: self.0.check_owner,
            auto_write_back: self.0.auto_write_back,
        }
    }
}
unsafe impl DmaTxBuffer for NoBuffer {
    type View = ();
    type Final = ();

    fn prepare(&mut self) -> Preparation {
        self.prep()
    }

    fn into_view(self) -> Self::View {}
    fn from_view(_view: Self::View) {}
}
unsafe impl DmaRxBuffer for NoBuffer {
    type View = ();
    type Final = ();

    fn prepare(&mut self) -> Preparation {
        self.prep()
    }

    fn into_view(self) -> Self::View {}
    fn from_view(_view: Self::View) {}
}

/// Prepares data unsafely to be transmitted via DMA.
///
/// `block_size` is the requirement imposed by the peripheral that receives the data. It
/// ensures that the DMA will not try to copy a partial block, which would cause the RX DMA (that
/// moves results back into RAM) to never complete.
///
/// The function returns the DMA buffer, and the number of bytes that will be transferred.
///
/// # Safety
///
/// The caller must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
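///
/// Call-site sketch (illustrative; `descriptors` and `input` are assumed to satisfy the
/// safety contract, and the 4-byte block size is just an example):
///
/// ```rust,ignore
/// let (tx_buf, len) = unsafe {
///     prepare_for_tx(
///         &mut descriptors,
///         NonNull::from(&mut input[..]),
///         4, // the receiving peripheral consumes data in 4-byte blocks
///     )?
/// };
/// // `tx_buf` can now be handed to the TX DMA channel; only the first `len` bytes are sent.
/// ```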
#[cfg_attr(not(aes_dma), expect(unused))]
pub(crate) unsafe fn prepare_for_tx(
    descriptors: &mut [DmaDescriptor],
    mut data: NonNull<[u8]>,
    block_size: usize,
) -> Result<(NoBuffer, usize), DmaError> {
    let alignment =
        BurstConfig::DEFAULT.min_alignment(unsafe { data.as_ref() }, TransferDirection::Out);

    if !data.addr().get().is_multiple_of(alignment) {
        // The ESP32 has a word alignment requirement on the TX descriptors, too.
        return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address));
    }

    // Whichever is stricter: data location or peripheral requirements.
    //
    // This ensures that the RX DMA, if used, can transfer the returned number of bytes using at
    // most N+2 descriptors. While the hardware doesn't require this on the TX DMA side (the TX
    // DMA can, except on the ESP32, transfer any amount of data), it makes usage MUCH simpler.
    let alignment = alignment.max(block_size);
    let chunk_size = 4096 - alignment;
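    // Worked example (illustrative): with `alignment == 4`, each descriptor may cover
    // 4096 - 4 = 4092 bytes (the largest multiple of 4 that still fits the 4095-byte descriptor
    // limit), so two descriptors cap `data_len` at 8184 bytes.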

    let data_len = data.len().min(chunk_size * descriptors.len());

    cfg_if::cfg_if! {
        if #[cfg(psram_dma)] {
            let data_addr = data.addr().get();
            let data_in_psram = crate::psram::psram_range().contains(&data_addr);

            // Make sure the input data has actually been written back to PSRAM and isn't only
            // sitting in the cache.
            if data_in_psram {
                unsafe { crate::soc::cache_writeback_addr(data_addr as u32, data_len as u32) };
            }
        }
    }

    let mut descriptors = unwrap!(DescriptorSet::new(descriptors));
    // TODO: it would be best if this function returned the amount of data that could be linked
    // up.
    unwrap!(descriptors.link_with_buffer(unsafe { data.as_mut() }, chunk_size));
    unwrap!(descriptors.set_tx_length(data_len, chunk_size));

    for desc in descriptors.linked_iter_mut() {
        desc.reset_for_tx(desc.next.is_null());
    }

    Ok((
        NoBuffer(Preparation {
            start: descriptors.head(),
            direction: TransferDirection::Out,
            burst_transfer: BurstConfig::DEFAULT,
            check_owner: None,
            auto_write_back: true,
            #[cfg(psram_dma)]
            accesses_psram: data_in_psram,
        }),
        data_len,
    ))
}

/// Prepare buffers to receive data from DMA.
///
/// The function returns the DMA buffer, and the number of bytes that will be transferred.
///
/// # Safety
///
/// The caller must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
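///
/// Call-site sketch (illustrative; shown for a chip with `psram_dma`, where the extra
/// `align_buffers` argument exists):
///
/// ```rust,ignore
/// let mut align_buffers: [Option<ManualWritebackBuffer>; 2] = [None, None];
/// let (rx_buf, len) = unsafe {
///     prepare_for_rx(&mut descriptors, &mut align_buffers, NonNull::from(&mut output[..]))
/// };
/// // After the transfer completes, the unaligned head/tail bytes still have to be copied
/// // out of the helper buffers:
/// for buf in align_buffers.iter().flatten() {
///     buf.write_back();
/// }
/// ```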
#[cfg_attr(not(aes_dma), expect(unused))]
pub(crate) unsafe fn prepare_for_rx(
    descriptors: &mut [DmaDescriptor],
    #[cfg(psram_dma)] align_buffers: &mut [Option<ManualWritebackBuffer>; 2],
    mut data: NonNull<[u8]>,
) -> (NoBuffer, usize) {
    let chunk_size =
        BurstConfig::DEFAULT.max_chunk_size_for(unsafe { data.as_ref() }, TransferDirection::In);

    // The data we have to process may not be appropriate for the DMA:
    // - it may be improperly aligned for PSRAM
    // - it may not have a length that is a multiple of the external memory block size

    cfg_if::cfg_if! {
        if #[cfg(psram_dma)] {
            let data_addr = data.addr().get();
            let data_in_psram = crate::psram::psram_range().contains(&data_addr);
        } else {
            let data_in_psram = false;
        }
    }

    let mut descriptors = unwrap!(DescriptorSet::new(descriptors));
    let data_len = if data_in_psram {
        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // This could use a better API, but right now we'll have to build the descriptor list by
                // hand.
                let consumed_bytes = build_descriptor_list_for_psram(
                    &mut descriptors,
                    align_buffers,
                    data,
                );

                // Invalidate the cache over the region the DMA is about to write. As the affected
                // cache lines likely cover more data than we touched, write back first.
                unsafe {
                    crate::soc::cache_writeback_addr(data_addr as u32, consumed_bytes as u32);
                    crate::soc::cache_invalidate_addr(data_addr as u32, consumed_bytes as u32);
                }

                consumed_bytes
            } else {
                unreachable!()
            }
        }
    } else {
        // Just set up descriptors as usual
        let data_len = data.len();
        unwrap!(descriptors.link_with_buffer(unsafe { data.as_mut() }, chunk_size));
        unwrap!(descriptors.set_tx_length(data_len, chunk_size));

        data_len
    };

    for desc in descriptors.linked_iter_mut() {
        desc.reset_for_rx();
    }

    (
        NoBuffer(Preparation {
            start: descriptors.head(),
            direction: TransferDirection::In,
            burst_transfer: BurstConfig::DEFAULT,
            check_owner: None,
            auto_write_back: true,
            #[cfg(psram_dma)]
            accesses_psram: data_in_psram,
        }),
        data_len,
    )
}

#[cfg(psram_dma)]
fn build_descriptor_list_for_psram(
    descriptors: &mut DescriptorSet<'_>,
    copy_buffers: &mut [Option<ManualWritebackBuffer>; 2],
    data: NonNull<[u8]>,
) -> usize {
    let data_len = data.len();
    let data_addr = data.addr().get();

    let min_alignment = ExternalBurstConfig::DEFAULT.min_psram_alignment(TransferDirection::In);
    let chunk_size = 4096 - min_alignment;

    let mut descriptor_iter = DescriptorChainingIter::new(descriptors.descriptors);
    let mut copy_buffer_iter = copy_buffers.iter_mut();

    // MIN_LAST_DMA_LEN could make this really annoying, so we allocate a slightly larger buffer
    // and shove the edge cases into a single one. If we have more than 24 bytes on the S2, the
    // 2-buffer alignment algorithm works fine, as one of them can steal 16 bytes and the other
    // will have at least MIN_LAST_DMA_LEN bytes to work with.
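    // (With the constants defined below, BUF_LEN is 16 + 2 * 4 = 24 bytes on the ESP32-S2 and
    // 16 bytes on other chips.)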
    let has_aligned_data = data_len > BUF_LEN;

    // Calculate the byte offset of the buffer start from the previous alignment boundary.
    let offset = data_addr % min_alignment;
    let head_to_copy = min_alignment - offset;
    let head_to_copy = if !has_aligned_data {
        BUF_LEN
    } else if head_to_copy > 0 && head_to_copy < MIN_LAST_DMA_LEN {
        head_to_copy + min_alignment
    } else {
        head_to_copy
    };
    let head_to_copy = head_to_copy.min(data_len);

    // Calculate the unaligned tail that must go through a copy buffer.
    let tail_to_copy = (data_len - head_to_copy) % min_alignment;
    let tail_to_copy = if tail_to_copy > 0 && tail_to_copy < MIN_LAST_DMA_LEN {
        tail_to_copy + min_alignment
    } else {
        tail_to_copy
    };
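    // Worked example (illustrative, assuming the default 16-byte external alignment and
    // MIN_LAST_DMA_LEN == 1): a 100-byte buffer starting 4 bytes past an alignment boundary
    // gives offset == 4, head_to_copy == 12 and tail_to_copy == (100 - 12) % 16 == 8, so
    // 12 + 8 bytes go through copy buffers while the 80-byte middle is written directly by
    // the DMA.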

    let mut consumed = 0;

    // Align beginning
    if head_to_copy > 0 {
        let copy_buffer = unwrap!(copy_buffer_iter.next());
        let buffer =
            copy_buffer.insert(ManualWritebackBuffer::new(get_range(data, 0..head_to_copy)));

        let Some(descriptor) = descriptor_iter.next() else {
            return consumed;
        };
        descriptor.set_size(head_to_copy);
        descriptor.buffer = buffer.buffer_ptr();
        consumed += head_to_copy;
    }

    // Chain up descriptors for the main aligned data part.
    let mut aligned_data = get_range(data, head_to_copy..data.len() - tail_to_copy);
    while !aligned_data.is_empty() {
        let Some(descriptor) = descriptor_iter.next() else {
            return consumed;
        };
        let chunk = aligned_data.len().min(chunk_size);

        descriptor.set_size(chunk);
        descriptor.buffer = aligned_data.cast::<u8>().as_ptr();
        consumed += chunk;
        aligned_data = get_range(aligned_data, chunk..aligned_data.len());
    }

    // Align end
    if tail_to_copy > 0 {
        let copy_buffer = unwrap!(copy_buffer_iter.next());
        let buffer = copy_buffer.insert(ManualWritebackBuffer::new(get_range(
            data,
            data.len() - tail_to_copy..data.len(),
        )));

        let Some(descriptor) = descriptor_iter.next() else {
            return consumed;
        };
        descriptor.set_size(tail_to_copy);
        descriptor.buffer = buffer.buffer_ptr();
        consumed += tail_to_copy;
    }

    consumed
}

#[cfg(psram_dma)]
fn get_range(ptr: NonNull<[u8]>, range: Range<usize>) -> NonNull<[u8]> {
    let len = range.end - range.start;
    NonNull::slice_from_raw_parts(unsafe { ptr.cast().byte_add(range.start) }, len)
}

#[cfg(psram_dma)]
struct DescriptorChainingIter<'a> {
    /// Index of the next element to emit.
    index: usize,
    descriptors: &'a mut [DmaDescriptor],
}
#[cfg(psram_dma)]
impl<'a> DescriptorChainingIter<'a> {
    fn new(descriptors: &'a mut [DmaDescriptor]) -> Self {
        Self {
            descriptors,
            index: 0,
        }
    }

    fn next(&mut self) -> Option<&'_ mut DmaDescriptor> {
        if self.index == 0 {
            self.index += 1;
            self.descriptors.get_mut(0)
        } else if self.index < self.descriptors.len() {
            let index = self.index;
            self.index += 1;

            // Grab a pointer to the current descriptor.
            let ptr = &raw mut self.descriptors[index];

            // Link the descriptor to the previous one.
            self.descriptors[index - 1].next = ptr;

            // Reborrow the pointer so that it doesn't get invalidated by our continued use of the
            // descriptor reference.
            Some(unsafe { &mut *ptr })
        } else {
            None
        }
    }
}

#[cfg(psram_dma)]
const MIN_LAST_DMA_LEN: usize = if cfg!(esp32s2) { 5 } else { 1 };
#[cfg(psram_dma)]
const BUF_LEN: usize = 16 + 2 * (MIN_LAST_DMA_LEN - 1); // 2x makes aligning short buffers simpler

/// PSRAM helper. DMA can write data of any alignment into this buffer, and it can be written by
/// the CPU back to PSRAM.
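///
/// Sketch of the intended flow (illustrative):
///
/// ```rust,ignore
/// let mut helper = ManualWritebackBuffer::new(unaligned_psram_chunk);
/// descriptor.buffer = helper.buffer_ptr(); // the DMA fills the internal buffer
/// // ... run the transfer ...
/// helper.write_back(); // the CPU copies the received bytes to their PSRAM destination
/// ```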
#[cfg(psram_dma)]
pub(crate) struct ManualWritebackBuffer {
    dst_address: NonNull<u8>,
    buffer: [u8; BUF_LEN],
    n_bytes: u8,
}

#[cfg(psram_dma)]
impl ManualWritebackBuffer {
    pub fn new(ptr: NonNull<[u8]>) -> Self {
        assert!(ptr.len() <= BUF_LEN);
        Self {
            dst_address: ptr.cast(),
            buffer: [0; BUF_LEN],
            n_bytes: ptr.len() as u8,
        }
    }

    pub fn write_back(&self) {
        unsafe {
            self.dst_address
                .as_ptr()
                .copy_from(self.buffer.as_ptr(), self.n_bytes as usize);
        }
    }

    pub fn buffer_ptr(&self) -> *mut u8 {
        self.buffer.as_ptr().cast_mut()
    }
}