//! DMA buffer implementations (`esp_hal/dma/buffers.rs`).
1#[cfg(dma_can_access_psram)]
2use core::ops::Range;
3use core::{
4    ops::{Deref, DerefMut},
5    ptr::{NonNull, null_mut},
6};
7
8use super::*;
9use crate::soc::is_slice_in_dram;
10#[cfg(dma_can_access_psram)]
11use crate::soc::{is_slice_in_psram, is_valid_psram_address, is_valid_ram_address};
12
/// Error returned from Dma[Rx|Tx|RxTx]Buf operations.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaBufError {
    /// The buffer is smaller than the requested size.
    BufferTooSmall,

    /// More descriptors are needed for the buffer size.
    InsufficientDescriptors,

    /// Descriptors or buffers are not located in a supported memory region.
    UnsupportedMemoryRegion,

    /// Buffer address or size is not properly aligned.
    ///
    /// Carries the specific [DmaAlignmentError]; constructed via the
    /// `From<DmaAlignmentError>` impl below.
    InvalidAlignment(DmaAlignmentError),

    /// Invalid chunk size: must be > 0 and <= 4095.
    // 4095 is the maximum value of the 12-bit size field in a DMA descriptor.
    InvalidChunkSize,
}
32
/// DMA buffer alignment errors.
///
/// Converted into [DmaBufError::InvalidAlignment] via `From` when returned
/// from buffer operations.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaAlignmentError {
    /// Buffer address is not properly aligned.
    Address,

    /// Buffer size is not properly aligned.
    Size,
}
43
44impl From<DmaAlignmentError> for DmaBufError {
45    fn from(err: DmaAlignmentError) -> Self {
46        DmaBufError::InvalidAlignment(err)
47    }
48}
49
// The shape of the burst configuration depends on whether the DMA can reach
// external memory (PSRAM) on this chip. When it can, the configuration is a
// struct with independent internal/external settings; otherwise it collapses
// to a simple on/off enum.
cfg_if::cfg_if! {
    if #[cfg(dma_can_access_psram)] {
        /// Burst size used when transferring to and from external memory.
        ///
        /// The discriminant of each variant is the burst size in bytes.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum ExternalBurstConfig {
            /// 16 bytes
            Size16 = 16,

            /// 32 bytes
            Size32 = 32,

            /// 64 bytes
            Size64 = 64,
        }

        impl ExternalBurstConfig {
            /// The default external memory burst length.
            pub const DEFAULT: Self = Self::Size16;
        }

        impl Default for ExternalBurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        /// Internal memory access burst mode.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum InternalBurstConfig {
            /// Burst mode is disabled.
            Disabled,

            /// Burst mode is enabled.
            Enabled,
        }

        impl InternalBurstConfig {
            /// The default internal burst mode configuration.
            pub const DEFAULT: Self = Self::Disabled;
        }

        impl Default for InternalBurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        /// Burst transfer configuration.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub struct BurstConfig {
            /// Configures the burst size for PSRAM transfers.
            ///
            /// Burst mode is always enabled for PSRAM transfers.
            pub external_memory: ExternalBurstConfig,

            /// Enables or disables the burst mode for internal memory transfers.
            ///
            /// The burst size is not configurable.
            pub internal_memory: InternalBurstConfig,
        }

        impl BurstConfig {
            /// The default burst mode configuration.
            pub const DEFAULT: Self = Self {
                external_memory: ExternalBurstConfig::DEFAULT,
                internal_memory: InternalBurstConfig::DEFAULT,
            };
        }

        impl Default for BurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        // Conversions below let APIs accept `impl Into<BurstConfig>` so users
        // may pass either half of the configuration and get defaults for the
        // other.
        impl From<InternalBurstConfig> for BurstConfig {
            fn from(internal_memory: InternalBurstConfig) -> Self {
                Self {
                    external_memory: ExternalBurstConfig::DEFAULT,
                    internal_memory,
                }
            }
        }

        impl From<ExternalBurstConfig> for BurstConfig {
            fn from(external_memory: ExternalBurstConfig) -> Self {
                Self {
                    external_memory,
                    internal_memory: InternalBurstConfig::DEFAULT,
                }
            }
        }
    } else {
        /// Burst transfer configuration.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum BurstConfig {
            /// Burst mode is disabled.
            Disabled,

            /// Burst mode is enabled.
            Enabled,
        }

        impl BurstConfig {
            /// The default burst mode configuration.
            pub const DEFAULT: Self = Self::Disabled;
        }

        impl Default for BurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        // Without PSRAM support there is no separate internal configuration;
        // this alias lets the shared code below use the same name in both
        // configurations.
        type InternalBurstConfig = BurstConfig;
    }
}
171
#[cfg(dma_can_access_psram)]
impl ExternalBurstConfig {
    /// Returns the minimum address/size alignment (in bytes) required for a
    /// PSRAM buffer in the given transfer direction.
    const fn min_psram_alignment(self, direction: TransferDirection) -> usize {
        match direction {
            // S2 TRM: size and buffer address pointer in receive descriptors
            // should be 16-byte, 32-byte or 64-byte aligned. For data frames
            // whose length is not a multiple of the block size, EDMA adds
            // padding bytes to the end.
            //
            // S3 TRM: Size and Address for IN transfers must be block
            // aligned. If the received data length is not aligned with the
            // block size, GDMA pads the data with 0 until aligned, to
            // initiate a burst transfer. The length field in the receive
            // descriptors holds the length of the valid data received.
            TransferDirection::In => self as usize,

            // S2 TRM: size, length and buffer address pointer in transmit
            // descriptors are not necessarily aligned with the block size.
            //
            // S3 TRM: size, length, and buffer address pointer in transmit
            // descriptors do not need to be aligned.
            TransferDirection::Out => 1,
        }
    }
}
197
198impl InternalBurstConfig {
199    pub(super) const fn is_burst_enabled(self) -> bool {
200        !matches!(self, Self::Disabled)
201    }
202
203    // Size and address alignment as those come in pairs on current hardware.
204    const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
205        if matches!(direction, TransferDirection::In) {
206            if cfg!(esp32) {
207                // NOTE: The size must be word-aligned.
208                // NOTE: The buffer address must be word-aligned
209                4
210            } else if self.is_burst_enabled() {
211                // As described in "Accessing Internal Memory" paragraphs in the various TRMs.
212                4
213            } else {
214                1
215            }
216        } else {
217            // OUT transfers have no alignment requirements, except for ESP32, which is
218            // described below.
219            if cfg!(esp32) {
220                // SPI DMA: Burst transmission is supported. The data size for
221                // a single transfer must be four bytes aligned.
222                // I2S DMA: Burst transfer is supported. However, unlike the
223                // SPI DMA channels, the data size for a single transfer is
224                // one word, or four bytes.
225                4
226            } else {
227                1
228            }
229        }
230    }
231}
232
/// Const-context maximum of two `usize` values.
///
/// Exists because `Ord::max` is not usable in `const fn`.
const fn max(a: usize, b: usize) -> usize {
    if b > a { b } else { a }
}
236
impl BurstConfig {
    // When PSRAM is accessible, BurstConfig is a struct and these methods are
    // forwarded to its `internal_memory` field. Otherwise, BurstConfig *is*
    // InternalBurstConfig (see the type alias above) and the methods exist on
    // it directly, so no delegation is needed.
    delegate::delegate! {
        #[cfg(dma_can_access_psram)]
        to self.internal_memory {
            pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize;
            pub(super) fn is_burst_enabled(self) -> bool;
        }
    }

    /// Calculates an alignment that is compatible with the current burst
    /// configuration.
    ///
    /// This is an over-estimation so that Descriptors can be safely used with
    /// any DMA channel in any direction.
    pub const fn min_compatible_alignment(self) -> usize {
        // Take the stricter of the IN and OUT internal-memory requirements.
        let in_alignment = self.min_dram_alignment(TransferDirection::In);
        let out_alignment = self.min_dram_alignment(TransferDirection::Out);
        let alignment = max(in_alignment, out_alignment);

        // PSRAM burst size (16/32/64) can only raise the requirement further.
        #[cfg(dma_can_access_psram)]
        let alignment = max(alignment, self.external_memory as usize);

        alignment
    }

    /// Returns the largest descriptor chunk size whose end stays aligned to
    /// `alignment`.
    const fn chunk_size_for_alignment(alignment: usize) -> usize {
        // DMA descriptors have a 12-bit field for the size/length of the buffer they
        // point at. As there is no such thing as 0-byte alignment, this means the
        // maximum size is 4095 bytes.
        4096 - alignment
    }

    /// Calculates a chunk size that is compatible with the current burst
    /// configuration's alignment requirements.
    ///
    /// This is an over-estimation so that Descriptors can be safely used with
    /// any DMA channel in any direction.
    pub const fn max_compatible_chunk_size(self) -> usize {
        Self::chunk_size_for_alignment(self.min_compatible_alignment())
    }

    // Returns the minimum alignment required for `_buffer` in the given
    // direction. The buffer itself is only inspected on chips where the DMA
    // can access PSRAM (to detect PSRAM-resident buffers), hence the
    // underscore-prefixed parameter.
    fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize {
        let alignment = self.min_dram_alignment(direction);

        cfg_if::cfg_if! {
            if #[cfg(dma_can_access_psram)] {
                let mut alignment = alignment;
                if is_valid_psram_address(_buffer.as_ptr() as usize) {
                    alignment = max(alignment, self.external_memory.min_psram_alignment(direction));
                }
            }
        }

        alignment
    }

    // Note: this function ignores address alignment as we assume the buffers are
    // aligned.
    fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize {
        Self::chunk_size_for_alignment(self.min_alignment(buffer, direction))
    }

    // Verifies that `buffer`'s address (and, for IN transfers, its length)
    // satisfies the alignment required by this configuration.
    fn ensure_buffer_aligned(
        self,
        buffer: &[u8],
        direction: TransferDirection,
    ) -> Result<(), DmaAlignmentError> {
        let alignment = self.min_alignment(buffer, direction);
        if !(buffer.as_ptr() as usize).is_multiple_of(alignment) {
            return Err(DmaAlignmentError::Address);
        }

        // NB: the TRMs suggest that buffer length don't need to be aligned, but
        // for IN transfers, we configure the DMA descriptors' size field, which needs
        // to be aligned.
        if direction == TransferDirection::In && !buffer.len().is_multiple_of(alignment) {
            return Err(DmaAlignmentError::Size);
        }

        Ok(())
    }

    // Verifies that `buffer` lives in a DMA-capable memory region and is
    // properly aligned for this configuration. Empty buffers are trivially
    // compatible.
    fn ensure_buffer_compatible(
        self,
        buffer: &[u8],
        direction: TransferDirection,
    ) -> Result<(), DmaBufError> {
        if buffer.is_empty() {
            return Ok(());
        }
        // buffer can be either DRAM or PSRAM (if supported)
        let is_in_dram = is_slice_in_dram(buffer);
        cfg_if::cfg_if! {
            if #[cfg(dma_can_access_psram)]{
                let is_in_psram = is_slice_in_psram(buffer);
            } else {
                let is_in_psram = false;
            }
        }

        if !(is_in_dram || is_in_psram) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        self.ensure_buffer_aligned(buffer, direction)?;

        Ok(())
    }
}
346
/// The direction of the DMA transfer.
///
/// Used to select the correct alignment and chunk-size rules, which differ
/// between receive and transmit on some chips.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferDirection {
    /// DMA transfer from peripheral or external memory to memory.
    In,
    /// DMA transfer from memory to peripheral or external memory.
    Out,
}
356
/// Holds all the information needed to configure a DMA channel for a transfer.
#[derive(PartialEq, Eq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Preparation {
    /// The descriptor the DMA will start from.
    ///
    /// The descriptor chain starting here must remain valid for the duration
    /// of the transfer (see the safety requirements on [DmaTxBuffer] and
    /// [DmaRxBuffer]).
    pub start: *mut DmaDescriptor,

    /// The direction of the DMA transfer.
    pub direction: TransferDirection,

    /// Must be `true` if any of the DMA descriptors contain data in PSRAM.
    #[cfg(dma_can_access_psram)]
    pub accesses_psram: bool,

    /// Configures the DMA to transfer data in bursts.
    ///
    /// The implementation of the buffer must ensure that buffer size
    /// and alignment in each descriptor is compatible with the burst
    /// transfer configuration.
    ///
    /// For details on alignment requirements, refer to your chip's
    #[doc = crate::trm_markdown_link!()]
    pub burst_transfer: BurstConfig,

    /// Configures the "check owner" feature of the DMA channel.
    ///
    /// Most DMA channels allow software to configure whether the hardware
    /// checks that [DmaDescriptor::owner] is set to [Owner::Dma] before
    /// consuming the descriptor. If this check fails, the channel stops
    /// operating and fires
    /// [DmaRxInterrupt::DescriptorError]/[DmaTxInterrupt::DescriptorError].
    ///
    /// This field allows buffer implementation to configure this behaviour.
    /// - `Some(true)`: DMA channel must check the owner bit.
    /// - `Some(false)`: DMA channel must NOT check the owner bit.
    /// - `None`: DMA channel should check the owner bit if it is supported.
    ///
    /// Some buffer implementations may require that the DMA channel performs
    /// this check before consuming the descriptor to ensure correct
    /// behaviour. e.g. To prevent wrap-around in a circular transfer.
    ///
    /// Some buffer implementations may require that the DMA channel does NOT
    /// perform this check as the ownership bit will not be set before the
    /// channel tries to consume the descriptor.
    ///
    /// Most implementations won't have any such requirements and will work
    /// correctly regardless of whether the DMA channel checks or not.
    ///
    /// Note: If the DMA channel doesn't support the provided option,
    /// preparation will fail.
    pub check_owner: Option<bool>,

    /// Configures whether the DMA channel automatically clears the
    /// [DmaDescriptor::owner] bit after it is done with the buffer pointed
    /// to by a descriptor.
    ///
    /// For RX transfers, this is always true and the value specified here is
    /// ignored.
    ///
    /// Note: SPI_DMA on the ESP32 does not support this and will panic if set
    /// to true.
    pub auto_write_back: bool,
}
420
/// [DmaTxBuffer] is a DMA descriptor + memory combo that can be used for
/// transmitting data from a DMA channel to a peripheral's FIFO.
///
/// # Safety
///
/// The implementing type must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
pub unsafe trait DmaTxBuffer {
    /// A type providing operations that are safe to perform on the buffer
    /// whilst the DMA is actively using it.
    type View;

    /// The type returned to the user when a transfer finishes.
    ///
    /// Some buffers don't need to be reconstructed.
    type Final;

    /// Prepares the buffer for an imminent transfer and returns
    /// information required to use this buffer.
    ///
    /// Note: This operation is idempotent.
    fn prepare(&mut self) -> Preparation;

    /// This is called before the DMA starts using the buffer.
    ///
    /// Consumes the buffer, yielding the restricted [Self::View].
    fn into_view(self) -> Self::View;

    /// This is called after the DMA is done using the buffer.
    ///
    /// Consumes the view and returns the [Self::Final] result to the user.
    fn from_view(view: Self::View) -> Self::Final;
}
450
/// [DmaRxBuffer] is a DMA descriptor + memory combo that can be used for
/// receiving data from a peripheral's FIFO to a DMA channel.
///
/// Note: Implementations of this trait may only support having a single EOF bit
/// which resides in the last descriptor. There will be a separate trait in
/// future to support multiple EOFs.
///
/// # Safety
///
/// The implementing type must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
pub unsafe trait DmaRxBuffer {
    /// A type providing operations that are safe to perform on the buffer
    /// whilst the DMA is actively using it.
    type View;

    /// The type returned to the user when a transfer finishes.
    ///
    /// Some buffers don't need to be reconstructed.
    type Final;

    /// Prepares the buffer for an imminent transfer and returns
    /// information required to use this buffer.
    ///
    /// Note: This operation is idempotent.
    fn prepare(&mut self) -> Preparation;

    /// This is called before the DMA starts using the buffer.
    ///
    /// Consumes the buffer, yielding the restricted [Self::View].
    fn into_view(self) -> Self::View;

    /// This is called after the DMA is done using the buffer.
    ///
    /// Consumes the view and returns the [Self::Final] result to the user.
    fn from_view(view: Self::View) -> Self::Final;
}
484
/// An in-progress view into [DmaRxBuf]/[DmaTxBuf].
///
/// Wraps the buffer while the DMA owns it; the inner value is recovered via
/// the corresponding trait's `from_view`.
///
/// In the future, this could support peeking into state of the
/// descriptors/buffers.
pub struct BufView<T>(T);
490
/// DMA transmit buffer
///
/// This is a contiguous buffer linked together by DMA descriptors of length
/// 4095 at most. It can only be used for transmitting data to a peripheral's
/// FIFO. See [DmaRxBuf] for receiving data.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaTxBuf {
    // Descriptor chain describing `buffer` to the DMA engine.
    descriptors: DescriptorSet<'static>,
    // Backing memory; `'static` so it outlives any in-flight transfer.
    buffer: &'static mut [u8],
    // Burst configuration the descriptors were last linked with.
    burst: BurstConfig,
}
503
impl DmaTxBuf {
    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4095 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        Self::new_with_config(descriptors, buffer, BurstConfig::default())
    }

    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4095 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
    pub fn new_with_config(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
        config: impl Into<BurstConfig>,
    ) -> Result<Self, DmaBufError> {
        // Start with the default burst config; `configure` overwrites it once
        // the requested config has been validated against the buffer.
        let mut buf = Self {
            descriptors: DescriptorSet::new(descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        let capacity = buf.capacity();
        buf.configure(config, capacity)?;

        Ok(buf)
    }

    // Validates `length` against `burst`, re-links the descriptors to the
    // buffer with a chunk size matching the burst requirements, and only then
    // stores the new burst config. Order matters: the config is committed only
    // after both fallible steps succeed.
    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        self.burst = burst;
        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        // Preserve the currently configured transfer length.
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors.into_inner(), self.buffer)
    }

    /// Returns the size of the underlying buffer
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Return the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        // Sum of the `len` fields of the currently linked descriptors.
        self.descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    // Sets the transmit length, failing if it exceeds capacity or violates
    // the burst config's alignment/region requirements.
    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;

        // NOTE(review): chunk size is derived from the whole buffer here,
        // while DmaRxBuf::set_length_fallible uses `&self.buffer[..len]` —
        // confirm this asymmetry is intentional.
        self.descriptors.set_tx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        // This only needs to be done once (after every significant length change) as
        // Self::prepare sets Preparation::auto_write_back to false.
        for desc in self.descriptors.linked_iter_mut() {
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null());
        }

        Ok(())
    }

    /// Reset the descriptors to only transmit `len` amount of bytes from this
    /// buf.
    ///
    /// The number of bytes in data must be less than or equal to the buffer
    /// size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst))
    }

    /// Fills the TX buffer with the bytes provided in `data` and reset the
    /// descriptors to only cover the filled section.
    ///
    /// The number of bytes in data must be less than or equal to the buffer
    /// size.
    pub fn fill(&mut self, data: &[u8]) {
        self.set_length(data.len());
        self.as_mut_slice()[..data.len()].copy_from_slice(data);
    }

    /// Returns the buf as a mutable slice than can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    /// Returns the buf as a slice than can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }
}
638
// SAFETY: descriptors and buffer are both `'static`, so they remain valid
// for as long as any transfer may run.
unsafe impl DmaTxBuffer for DmaTxBuf {
    type View = BufView<DmaTxBuf>;
    type Final = DmaTxBuf;

    fn prepare(&mut self) -> Preparation {
        cfg_if::cfg_if! {
            if #[cfg(dma_can_access_psram)] {
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    // Write back the CPU cache so the DMA sees the data most
                    // recently written to the PSRAM-backed buffer.
                    unsafe {
                        crate::soc::cache_writeback_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.descriptors.head(),
            direction: TransferDirection::Out,
            #[cfg(dma_can_access_psram)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            // The descriptors' EOF flags are set up-front in
            // `set_length_fallible`, which relies on write-back being off.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> BufView<DmaTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}
677
/// DMA receive buffer
///
/// This is a contiguous buffer linked together by DMA descriptors of length
/// 4092. It can only be used for receiving data from a peripheral's FIFO.
/// See [DmaTxBuf] for transmitting data.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaRxBuf {
    // Descriptor chain describing `buffer` to the DMA engine.
    descriptors: DescriptorSet<'static>,
    // Backing memory; `'static` so it outlives any in-flight transfer.
    buffer: &'static mut [u8],
    // Burst configuration the descriptors were last linked with.
    burst: BurstConfig,
}
690
impl DmaRxBuf {
    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        Self::new_with_config(descriptors, buffer, BurstConfig::default())
    }

    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
    pub fn new_with_config(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
        config: impl Into<BurstConfig>,
    ) -> Result<Self, DmaBufError> {
        // Start with the default burst config; `configure` overwrites it once
        // the requested config has been validated against the buffer.
        let mut buf = Self {
            descriptors: DescriptorSet::new(descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        buf.configure(config, buf.capacity())?;

        Ok(buf)
    }

    // Validates `length` against `burst`, re-links the descriptors to the
    // buffer with a chunk size matching the burst requirements, and only then
    // stores the new burst config. Order matters: the config is committed only
    // after both fallible steps succeed.
    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;

        self.burst = burst;
        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        // Preserve the currently configured receive length.
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors.into_inner(), self.buffer)
    }

    /// Returns the size of the underlying buffer
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the maximum number of bytes that this buf has been configured to
    /// receive.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        // Sum of the `size` fields (configured capacity, not received length).
        self.descriptors
            .linked_iter()
            .map(|d| d.size())
            .sum::<usize>()
    }

    // Sets the receive length, failing if it exceeds capacity or violates the
    // burst config's alignment/region requirements.
    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;

        self.descriptors.set_rx_length(
            len,
            burst.max_chunk_size_for(&self.buffer[..len], TransferDirection::In),
        )
    }

    /// Reset the descriptors to only receive `len` amount of bytes into this
    /// buf.
    ///
    /// The number of bytes in data must be less than or equal to the buffer
    /// size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst));
    }

    /// Returns the entire underlying buffer as a slice than can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire underlying buffer as a slice than can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    /// Return the number of bytes that was received by this buf.
    pub fn number_of_received_bytes(&self) -> usize {
        // Sum of the `len` fields (actual received length per descriptor).
        self.descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    /// Reads the received data into the provided `buf`.
    ///
    /// If `buf.len()` is less than the amount of received data then only the
    /// first `buf.len()` bytes of received data is written into `buf`.
    ///
    /// Returns the number of bytes in written to `buf`.
    pub fn read_received_data(&self, mut buf: &mut [u8]) -> usize {
        // Note that due to an ESP32 quirk, the last received descriptor may not get
        // updated.
        let capacity = buf.len();
        for chunk in self.received_data() {
            if buf.is_empty() {
                break;
            }
            // Carve off the next chunk's worth of the output and fill it.
            let to_fill;
            (to_fill, buf) = buf.split_at_mut(chunk.len());
            to_fill.copy_from_slice(chunk);
        }

        // Bytes written = original capacity minus what remains unfilled.
        capacity - buf.len()
    }

    /// Returns the received data as an iterator of slices.
    pub fn received_data(&self) -> impl Iterator<Item = &[u8]> {
        self.descriptors.linked_iter().map(|desc| {
            // SAFETY: We set up the descriptor to point to a subslice of the buffer, and
            // here we are only recreating that slice with a perhaps shorter length.
            // We are also not accessing `self.buffer` while this slice is alive, so we
            // are not violating any aliasing rules.
            unsafe { core::slice::from_raw_parts(desc.buffer.cast_const(), desc.len()) }
        })
    }
}
845
// SAFETY: descriptors and buffer are both `'static`, so they remain valid
// for as long as any transfer may run.
unsafe impl DmaRxBuffer for DmaRxBuf {
    type View = BufView<DmaRxBuf>;
    type Final = DmaRxBuf;

    fn prepare(&mut self) -> Preparation {
        // Clear per-descriptor receive state so the buffer can be reused;
        // prepare must stay idempotent per the trait contract.
        for desc in self.descriptors.linked_iter_mut() {
            desc.reset_for_rx();
        }

        cfg_if::cfg_if! {
            if #[cfg(dma_can_access_psram)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    // Invalidate the CPU cache so reads after the transfer see
                    // the DMA-written data, not stale cached contents.
                    unsafe {
                        crate::soc::cache_invalidate_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.descriptors.head(),
            direction: TransferDirection::In,
            #[cfg(dma_can_access_psram)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            // Always true for RX transfers (see Preparation::auto_write_back).
            auto_write_back: true,
        }
    }

    fn into_view(self) -> BufView<DmaRxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}
889
890/// DMA transmit and receive buffer.
891///
/// This is a (single) contiguous buffer linked together by two sets of DMA
/// descriptors, each descriptor covering up to 4092 bytes of the buffer.
894/// It can be used for simultaneously transmitting to and receiving from a
895/// peripheral's FIFO. These are typically full-duplex transfers.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaRxTxBuf {
    // Descriptor chain used when receiving into `buffer`.
    rx_descriptors: DescriptorSet<'static>,
    // Descriptor chain used when transmitting from `buffer`.
    tx_descriptors: DescriptorSet<'static>,
    // The single backing buffer shared by both chains.
    buffer: &'static mut [u8],
    // Burst configuration applied when preparing transfers.
    burst: BurstConfig,
}
904
impl DmaRxTxBuf {
    /// Creates a new [DmaRxTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
    pub fn new(
        rx_descriptors: &'static mut [DmaDescriptor],
        tx_descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            rx_descriptors: DescriptorSet::new(rx_descriptors)?,
            tx_descriptors: DescriptorSet::new(tx_descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        // Link both chains to cover the entire buffer by default.
        let capacity = buf.capacity();
        buf.configure(buf.burst, capacity)?;

        Ok(buf)
    }

    // Applies `burst` and (re)links both descriptor chains so that `length`
    // bytes of the buffer are covered.
    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        // Check compatibility and set the chain lengths first; only then
        // commit the new burst configuration.
        self.set_length_fallible(length, burst)?;

        self.rx_descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;
        self.tx_descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        self.burst = burst;

        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the rx descriptors, tx descriptors and
    /// buffer.
    pub fn split(
        self,
    ) -> (
        &'static mut [DmaDescriptor],
        &'static mut [DmaDescriptor],
        &'static mut [u8],
    ) {
        (
            self.rx_descriptors.into_inner(),
            self.tx_descriptors.into_inner(),
            self.buffer,
        )
    }

    /// Return the size of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Return the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        // The TX chain defines the transfer length: sum the length of every
        // linked descriptor.
        self.tx_descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    /// Returns the entire buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire buf as a slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    // Validates `len`/`burst` for both directions and resizes both chains.
    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        // The same buffer is used for both directions, so it must satisfy the
        // requirements of each.
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;

        self.rx_descriptors.set_rx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;
        self.tx_descriptors.set_tx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        Ok(())
    }

    /// Reset the descriptors to only transmit/receive `len` amount of bytes
    /// with this buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst));
    }
}
1026
unsafe impl DmaTxBuffer for DmaRxTxBuf {
    type View = BufView<DmaRxTxBuf>;
    type Final = DmaRxTxBuf;

    fn prepare(&mut self) -> Preparation {
        for desc in self.tx_descriptors.linked_iter_mut() {
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null());
        }

        cfg_if::cfg_if! {
            if #[cfg(dma_can_access_psram)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    // Flush cached writes to PSRAM so the DMA reads the
                    // current buffer contents.
                    unsafe {
                        crate::soc::cache_writeback_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.tx_descriptors.head(),
            direction: TransferDirection::Out,
            #[cfg(dma_can_access_psram)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            // Transmit-only chain: no lengths need to be written back.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> BufView<DmaRxTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}
1072
unsafe impl DmaRxBuffer for DmaRxTxBuf {
    type View = BufView<DmaRxTxBuf>;
    type Final = DmaRxTxBuf;

    fn prepare(&mut self) -> Preparation {
        // Reset the receive chain so every descriptor is ready for the DMA.
        for desc in self.rx_descriptors.linked_iter_mut() {
            desc.reset_for_rx();
        }

        cfg_if::cfg_if! {
            if #[cfg(dma_can_access_psram)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    // Drop stale cached copies before the DMA writes to PSRAM.
                    unsafe {
                        crate::soc::cache_invalidate_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.rx_descriptors.head(),
            direction: TransferDirection::In,
            #[cfg(dma_can_access_psram)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            // Let the DMA record received lengths in the descriptors.
            auto_write_back: true,
        }
    }

    fn into_view(self) -> BufView<DmaRxTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}
1116
1117/// DMA Streaming Receive Buffer.
1118///
1119/// This is a contiguous buffer linked together by DMA descriptors, and the
1120/// buffer is evenly distributed between each descriptor provided.
1121///
1122/// It is used for continuously streaming data from a peripheral's FIFO.
1123///
1124/// It does so by maintaining sliding window of descriptors that progresses when
1125/// you call [DmaRxStreamBufView::consume].
1126///
1127/// The list starts out like so `A (empty) -> B (empty) -> C (empty) -> D
1128/// (empty) -> NULL`.
1129///
1130/// As the DMA writes to the buffers the list progresses like so:
1131/// - `A (empty) -> B (empty) -> C (empty) -> D (empty) -> NULL`
1132/// - `A (full)  -> B (empty) -> C (empty) -> D (empty) -> NULL`
1133/// - `A (full)  -> B (full)  -> C (empty) -> D (empty) -> NULL`
1134/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
1135///
1136/// As you call [DmaRxStreamBufView::consume] the list (approximately)
1137/// progresses like so:
1138/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
1139/// - `B (full)  -> C (full)  -> D (empty) -> A (empty) -> NULL`
1140/// - `C (full)  -> D (empty) -> A (empty) -> B (empty) -> NULL`
1141/// - `D (empty) -> A (empty) -> B (empty) -> C (empty) -> NULL`
1142///
1143/// If all the descriptors fill up, the [DmaRxInterrupt::DescriptorEmpty]
1144/// interrupt will fire and the DMA will stop writing, at which point it is up
1145/// to you to resume/restart the transfer.
1146///
1147/// Note: This buffer will not tell you when this condition occurs, you should
1148/// check with the driver to see if the DMA has stopped.
1149///
1150/// When constructing this buffer, it is important to tune the ratio between the
/// chunk size and buffer size appropriately. Smaller chunk sizes mean you
/// receive data more frequently, but also that the DMA interrupts
/// ([DmaRxInterrupt::Done]) fire more frequently (if you use them).
1154///
1155/// See [DmaRxStreamBufView] for APIs available whilst a transfer is in
1156/// progress.
pub struct DmaRxStreamBuf {
    // Descriptors, each assigned an equal share of `buffer` (the last one
    // additionally holds any remainder; see `new`).
    descriptors: &'static mut [DmaDescriptor],
    // Backing storage the descriptors point into.
    buffer: &'static mut [u8],
    // Burst configuration used when preparing a transfer.
    burst: BurstConfig,
}
1162
1163impl DmaRxStreamBuf {
1164    /// Creates a new [DmaRxStreamBuf] evenly distributing the buffer between
1165    /// the provided descriptors.
1166    pub fn new(
1167        descriptors: &'static mut [DmaDescriptor],
1168        buffer: &'static mut [u8],
1169    ) -> Result<Self, DmaBufError> {
1170        if !is_slice_in_dram(descriptors) {
1171            return Err(DmaBufError::UnsupportedMemoryRegion);
1172        }
1173        if !is_slice_in_dram(buffer) {
1174            return Err(DmaBufError::UnsupportedMemoryRegion);
1175        }
1176
1177        if descriptors.is_empty() {
1178            return Err(DmaBufError::InsufficientDescriptors);
1179        }
1180
1181        // Evenly distribute the buffer between the descriptors.
1182        let chunk_size = buffer.len() / descriptors.len();
1183
1184        if chunk_size > 4095 {
1185            return Err(DmaBufError::InsufficientDescriptors);
1186        }
1187
1188        // Check that the last descriptor can hold the excess
1189        let excess = buffer.len() % descriptors.len();
1190        if chunk_size + excess > 4095 {
1191            return Err(DmaBufError::InsufficientDescriptors);
1192        }
1193
1194        let mut chunks = buffer.chunks_exact_mut(chunk_size);
1195        for (desc, chunk) in descriptors.iter_mut().zip(chunks.by_ref()) {
1196            desc.buffer = chunk.as_mut_ptr();
1197            desc.set_size(chunk.len());
1198        }
1199
1200        let remainder = chunks.into_remainder();
1201        debug_assert_eq!(remainder.len(), excess);
1202
1203        if !remainder.is_empty() {
1204            // Append any excess to the last descriptor.
1205            let last_descriptor = descriptors.last_mut().unwrap();
1206            last_descriptor.set_size(last_descriptor.size() + remainder.len());
1207        }
1208
1209        Ok(Self {
1210            descriptors,
1211            buffer,
1212            burst: BurstConfig::default(),
1213        })
1214    }
1215
1216    /// Consume the buf, returning the descriptors and buffer.
1217    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
1218        (self.descriptors, self.buffer)
1219    }
1220}
1221
unsafe impl DmaRxBuffer for DmaRxStreamBuf {
    type View = DmaRxStreamBufView;
    type Final = DmaRxStreamBuf;

    fn prepare(&mut self) -> Preparation {
        // Link up all the descriptors (but not in a circle).
        // Iterating in reverse lets each descriptor point at the one that
        // follows it in the slice; the final descriptor terminates the chain
        // with a null `next`.
        let mut next = null_mut();
        for desc in self.descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;

            desc.reset_for_rx();
        }
        Preparation {
            start: self.descriptors.as_mut_ptr(),
            direction: TransferDirection::In,
            #[cfg(dma_can_access_psram)]
            accesses_psram: false,
            burst_transfer: self.burst,

            // Whilst we give ownership of the descriptors the DMA, the correctness of this buffer
            // implementation doesn't rely on the DMA checking for descriptor ownership.
            // No descriptor is added back to the end of the stream before it's ready for the DMA
            // to consume it.
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> DmaRxStreamBufView {
        DmaRxStreamBufView {
            buf: self,
            // Start consuming from the first descriptor, at offset 0.
            descriptor_idx: 0,
            descriptor_offset: 0,
        }
    }

    fn from_view(view: Self::View) -> Self {
        view.buf
    }
}
1263
/// A view into a [DmaRxStreamBuf]
pub struct DmaRxStreamBufView {
    buf: DmaRxStreamBuf,
    // Index of the descriptor currently being consumed.
    descriptor_idx: usize,
    // Number of bytes already consumed from that descriptor.
    descriptor_offset: usize,
}
1270
1271impl DmaRxStreamBufView {
1272    /// Returns the number of bytes that are available to read from the buf.
1273    pub fn available_bytes(&self) -> usize {
1274        let (tail, head) = self.buf.descriptors.split_at(self.descriptor_idx);
1275        let mut result = 0;
1276        for desc in head.iter().chain(tail) {
1277            if desc.owner() == Owner::Dma {
1278                break;
1279            }
1280            result += desc.len();
1281        }
1282        result - self.descriptor_offset
1283    }
1284
1285    /// Reads as much as possible into the buf from the available data.
1286    pub fn pop(&mut self, buf: &mut [u8]) -> usize {
1287        if buf.is_empty() {
1288            return 0;
1289        }
1290        let total_bytes = buf.len();
1291
1292        let mut remaining = buf;
1293        loop {
1294            let available = self.peek();
1295            if available.len() >= remaining.len() {
1296                remaining.copy_from_slice(&available[0..remaining.len()]);
1297                self.consume(remaining.len());
1298                let consumed = remaining.len();
1299                remaining = &mut remaining[consumed..];
1300                break;
1301            } else {
1302                let to_consume = available.len();
1303                remaining[0..to_consume].copy_from_slice(available);
1304                self.consume(to_consume);
1305                remaining = &mut remaining[to_consume..];
1306            }
1307        }
1308
1309        total_bytes - remaining.len()
1310    }
1311
1312    /// Returns a slice into the buffer containing available data.
1313    /// This will be the longest possible contiguous slice into the buffer that
1314    /// contains data that is available to read.
1315    ///
1316    /// Note: This function ignores EOFs, see [Self::peek_until_eof] if you need
1317    /// EOF support.
1318    pub fn peek(&self) -> &[u8] {
1319        let (slice, _) = self.peek_internal(false);
1320        slice
1321    }
1322
1323    /// Same as [Self::peek] but will not skip over any EOFs.
1324    ///
1325    /// It also returns a boolean indicating whether this slice ends with an EOF
1326    /// or not.
1327    pub fn peek_until_eof(&self) -> (&[u8], bool) {
1328        self.peek_internal(true)
1329    }
1330
1331    /// Consumes the first `n` bytes from the available data, returning any
1332    /// fully consumed descriptors back to the DMA.
1333    /// This is typically called after [Self::peek]/[Self::peek_until_eof].
1334    ///
1335    /// Returns the number of bytes that were actually consumed.
1336    pub fn consume(&mut self, n: usize) -> usize {
1337        let mut remaining_bytes_to_consume = n;
1338
1339        loop {
1340            let desc = &mut self.buf.descriptors[self.descriptor_idx];
1341
1342            if desc.owner() == Owner::Dma {
1343                // Descriptor is still owned by DMA so it can't be read yet.
1344                // This should only happen when there is no more data available to read.
1345                break;
1346            }
1347
1348            let remaining_bytes_in_descriptor = desc.len() - self.descriptor_offset;
1349            if remaining_bytes_to_consume < remaining_bytes_in_descriptor {
1350                self.descriptor_offset += remaining_bytes_to_consume;
1351                remaining_bytes_to_consume = 0;
1352                break;
1353            }
1354
1355            // Reset the descriptor for reuse.
1356            desc.set_owner(Owner::Dma);
1357            desc.set_suc_eof(false);
1358            desc.set_length(0);
1359
1360            // Before connecting this descriptor to the end of the list, the next descriptor
1361            // must be disconnected from this one to prevent the DMA from
1362            // overtaking.
1363            desc.next = null_mut();
1364
1365            let desc_ptr: *mut _ = desc;
1366
1367            let prev_descriptor_index = self
1368                .descriptor_idx
1369                .checked_sub(1)
1370                .unwrap_or(self.buf.descriptors.len() - 1);
1371
1372            // Connect this consumed descriptor to the end of the chain.
1373            self.buf.descriptors[prev_descriptor_index].next = desc_ptr;
1374
1375            self.descriptor_idx += 1;
1376            if self.descriptor_idx >= self.buf.descriptors.len() {
1377                self.descriptor_idx = 0;
1378            }
1379            self.descriptor_offset = 0;
1380
1381            remaining_bytes_to_consume -= remaining_bytes_in_descriptor;
1382        }
1383
1384        n - remaining_bytes_to_consume
1385    }
1386
1387    fn peek_internal(&self, stop_at_eof: bool) -> (&[u8], bool) {
1388        let descriptors = &self.buf.descriptors[self.descriptor_idx..];
1389
1390        // There must be at least one descriptor.
1391        debug_assert!(!descriptors.is_empty());
1392
1393        if descriptors.len() == 1 {
1394            let last_descriptor = &descriptors[0];
1395            if last_descriptor.owner() == Owner::Dma {
1396                // No data available.
1397                (&[], false)
1398            } else {
1399                let length = last_descriptor.len() - self.descriptor_offset;
1400                (
1401                    &self.buf.buffer[self.buf.buffer.len() - length..],
1402                    last_descriptor.flags.suc_eof(),
1403                )
1404            }
1405        } else {
1406            let chunk_size = descriptors[0].size();
1407            let mut found_eof = false;
1408
1409            let mut number_of_contiguous_bytes = 0;
1410            for desc in descriptors {
1411                if desc.owner() == Owner::Dma {
1412                    break;
1413                }
1414                number_of_contiguous_bytes += desc.len();
1415
1416                if stop_at_eof && desc.flags.suc_eof() {
1417                    found_eof = true;
1418                    break;
1419                }
1420                // If the length is smaller than the size, the contiguous-ness ends here.
1421                if desc.len() < desc.size() {
1422                    break;
1423                }
1424            }
1425
1426            (
1427                &self.buf.buffer[chunk_size * self.descriptor_idx..][..number_of_contiguous_bytes]
1428                    [self.descriptor_offset..],
1429                found_eof,
1430            )
1431        }
1432    }
1433}
1434
// Shared placeholder descriptor that `EmptyBuf` points the DMA at; the
// `EmptyBuf` preparations disable ownership checks so it can be shared.
static mut EMPTY: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY];

/// An empty buffer that can be used when you don't need to transfer any data.
pub struct EmptyBuf;
1439
unsafe impl DmaTxBuffer for EmptyBuf {
    type View = EmptyBuf;
    type Final = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            // Point the DMA at the shared placeholder descriptor.
            start: core::ptr::addr_of_mut!(EMPTY).cast(),
            direction: TransferDirection::Out,
            #[cfg(dma_can_access_psram)]
            accesses_psram: false,
            burst_transfer: BurstConfig::default(),

            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
            // channel does *NOT* check for ownership, otherwise the channel will return an error.
            check_owner: Some(false),

            // The DMA should not write back to the descriptor as it is shared.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}
1469
unsafe impl DmaRxBuffer for EmptyBuf {
    type View = EmptyBuf;
    type Final = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            // Point the DMA at the shared placeholder descriptor.
            start: core::ptr::addr_of_mut!(EMPTY).cast(),
            direction: TransferDirection::In,
            #[cfg(dma_can_access_psram)]
            accesses_psram: false,
            burst_transfer: BurstConfig::default(),

            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
            // channel does *NOT* check for ownership, otherwise the channel will return an error.
            check_owner: Some(false),
            // NOTE(review): unlike the TX impl above, write-back is left
            // enabled here even though the descriptor is shared — confirm this
            // is intentional.
            auto_write_back: true,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}
1497
1498/// DMA Loop Buffer
1499///
1500/// This consists of a single descriptor that points to itself and points to a
1501/// single buffer, resulting in the buffer being transmitted over and over
1502/// again, indefinitely.
1503///
1504/// Note: A DMA descriptor is 12 bytes. If your buffer is significantly shorter
1505/// than this, the DMA channel will spend more time reading the descriptor than
1506/// it does reading the buffer, which may leave it unable to keep up with the
1507/// bandwidth requirements of some peripherals at high frequencies.
pub struct DmaLoopBuf {
    // Single descriptor whose `next` points back at itself (see `new`).
    descriptor: &'static mut DmaDescriptor,
    // The data that is transmitted over and over.
    buffer: &'static mut [u8],
}
1512
1513impl DmaLoopBuf {
1514    /// Create a new [DmaLoopBuf].
1515    pub fn new(
1516        descriptor: &'static mut DmaDescriptor,
1517        buffer: &'static mut [u8],
1518    ) -> Result<DmaLoopBuf, DmaBufError> {
1519        if !is_slice_in_dram(buffer) {
1520            return Err(DmaBufError::UnsupportedMemoryRegion);
1521        }
1522        if !is_slice_in_dram(core::slice::from_ref(descriptor)) {
1523            return Err(DmaBufError::UnsupportedMemoryRegion);
1524        }
1525
1526        if buffer.len() > BurstConfig::default().max_chunk_size_for(buffer, TransferDirection::Out)
1527        {
1528            return Err(DmaBufError::InsufficientDescriptors);
1529        }
1530
1531        descriptor.set_owner(Owner::Dma); // Doesn't matter
1532        descriptor.set_suc_eof(false);
1533        descriptor.set_length(buffer.len());
1534        descriptor.set_size(buffer.len());
1535        descriptor.buffer = buffer.as_mut_ptr();
1536        descriptor.next = descriptor;
1537
1538        Ok(Self { descriptor, buffer })
1539    }
1540
1541    /// Consume the buf, returning the descriptor and buffer.
1542    pub fn split(self) -> (&'static mut DmaDescriptor, &'static mut [u8]) {
1543        (self.descriptor, self.buffer)
1544    }
1545}
1546
unsafe impl DmaTxBuffer for DmaLoopBuf {
    type View = DmaLoopBuf;
    type Final = DmaLoopBuf;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            // The single, self-referential descriptor set up in `new`.
            start: self.descriptor,
            #[cfg(dma_can_access_psram)]
            accesses_psram: false,
            direction: TransferDirection::Out,
            burst_transfer: BurstConfig::default(),
            // The DMA must not check the owner bit, as it is never set.
            check_owner: Some(false),

            // Doesn't matter either way but it is set to `false` for ESP32 SPI_DMA
            // compatibility. (The comment previously said "true", contradicting
            // the value below.)
            auto_write_back: false,
        }
    }

    fn into_view(self) -> Self::View {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}
1574
1575impl Deref for DmaLoopBuf {
1576    type Target = [u8];
1577
1578    fn deref(&self) -> &Self::Target {
1579        self.buffer
1580    }
1581}
1582
1583impl DerefMut for DmaLoopBuf {
1584    fn deref_mut(&mut self) -> &mut Self::Target {
1585        self.buffer
1586    }
1587}
1588
/// A Preparation that masks itself as a DMA buffer.
///
/// For low level use, where none of the pre-made buffers really fit.
///
/// This type likely never should be visible outside of esp-hal.
pub(crate) struct NoBuffer(Preparation);
impl NoBuffer {
    // Returns a field-by-field copy of the wrapped `Preparation`.
    // Copied manually — presumably `Preparation` does not implement `Clone`;
    // TODO confirm and derive it instead if possible.
    fn prep(&self) -> Preparation {
        Preparation {
            start: self.0.start,
            direction: self.0.direction,
            #[cfg(dma_can_access_psram)]
            accesses_psram: self.0.accesses_psram,
            burst_transfer: self.0.burst_transfer,
            check_owner: self.0.check_owner,
            auto_write_back: self.0.auto_write_back,
        }
    }
}
unsafe impl DmaTxBuffer for NoBuffer {
    type View = ();
    type Final = ();

    fn prepare(&mut self) -> Preparation {
        // Hand out a copy of the pre-built Preparation.
        self.prep()
    }

    fn into_view(self) -> Self::View {}
    fn from_view(_view: Self::View) {}
}
unsafe impl DmaRxBuffer for NoBuffer {
    type View = ();
    type Final = ();

    fn prepare(&mut self) -> Preparation {
        // Hand out a copy of the pre-built Preparation.
        self.prep()
    }

    fn into_view(self) -> Self::View {}
    fn from_view(_view: Self::View) {}
}
1630
/// Prepares data unsafely to be transmitted via DMA.
///
/// `block_size` is the requirement imposed by the peripheral that receives the data. It
/// ensures that the DMA will not try to copy a partial block, which would cause the RX DMA (that
/// moves results back into RAM) to never complete.
///
/// The function returns the DMA buffer, and the number of bytes that will be transferred.
///
/// # Safety
///
/// The caller must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
#[cfg_attr(not(aes_dma), expect(unused))]
pub(crate) unsafe fn prepare_for_tx(
    descriptors: &mut [DmaDescriptor],
    mut data: NonNull<[u8]>,
    block_size: usize,
) -> Result<(NoBuffer, usize), DmaError> {
    let alignment =
        BurstConfig::DEFAULT.min_alignment(unsafe { data.as_ref() }, TransferDirection::Out);

    if !data.addr().get().is_multiple_of(alignment) {
        // ESP32 has word alignment requirement on the TX descriptors, too.
        return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address));
    }

    // Whichever is stricter, data location or peripheral requirements.
    //
    // This ensures that the RX DMA, if used, can transfer the returned number of bytes using at
    // most N+2 descriptors. While the hardware doesn't require this on the TX DMA side, (the TX DMA
    // can, except on the ESP32, transfer any amount of data), it makes usage MUCH simpler.
    let alignment = alignment.max(block_size);
    let chunk_size = 4096 - alignment;

    // Cap the transfer at what the available descriptors can link up.
    let data_len = data.len().min(chunk_size * descriptors.len());

    cfg_if::cfg_if! {
        if #[cfg(dma_can_access_psram)] {
            let data_addr = data.addr().get();
            let data_in_psram = crate::psram::psram_range().contains(&data_addr);

            // Make sure input data is in PSRAM instead of cache
            if data_in_psram {
                unsafe { crate::soc::cache_writeback_addr(data_addr as u32, data_len as u32) };
            }
        }
    }

    let mut descriptors = unwrap!(DescriptorSet::new(descriptors));
    // TODO: it would be best if this function returned the amount of data that could be linked
    // up.
    unwrap!(descriptors.link_with_buffer(unsafe { data.as_mut() }, chunk_size));
    unwrap!(descriptors.set_tx_length(data_len, chunk_size));

    for desc in descriptors.linked_iter_mut() {
        // Only the final descriptor carries `suc_eof`.
        desc.reset_for_tx(desc.next.is_null());
    }

    Ok((
        NoBuffer(Preparation {
            start: descriptors.head(),
            direction: TransferDirection::Out,
            burst_transfer: BurstConfig::DEFAULT,
            check_owner: None,
            auto_write_back: true,
            #[cfg(dma_can_access_psram)]
            accesses_psram: data_in_psram,
        }),
        data_len,
    ))
}
1702
1703/// Prepare buffers to receive data from DMA.
1704///
1705/// The function returns the DMA buffer, and the number of bytes that will be transferred.
1706///
1707/// # Safety
1708///
1709/// The caller must keep all its descriptors and the buffers they
1710/// point to valid while the buffer is being transferred.
1711#[cfg_attr(not(aes_dma), expect(unused))]
1712pub(crate) unsafe fn prepare_for_rx(
1713    descriptors: &mut [DmaDescriptor],
1714    #[cfg(dma_can_access_psram)] align_buffers: &mut [Option<ManualWritebackBuffer>; 2],
1715    mut data: NonNull<[u8]>,
1716) -> (NoBuffer, usize) {
1717    let chunk_size =
1718        BurstConfig::DEFAULT.max_chunk_size_for(unsafe { data.as_ref() }, TransferDirection::In);
1719
1720    // The data we have to process may not be appropriate for the DMA:
1721    // - it may be improperly aligned for PSRAM
1722    // - it may not have a length that is a multiple of the external memory block size
1723
1724    cfg_if::cfg_if! {
1725        if #[cfg(dma_can_access_psram)] {
1726            let data_addr = data.addr().get();
1727            let data_in_psram = crate::psram::psram_range().contains(&data_addr);
1728        } else {
1729            let data_in_psram = false;
1730        }
1731    }
1732
1733    let mut descriptors = unwrap!(DescriptorSet::new(descriptors));
1734    let data_len = if data_in_psram {
1735        cfg_if::cfg_if! {
1736            if #[cfg(dma_can_access_psram)] {
1737                // This could use a better API, but right now we'll have to build the descriptor list by
1738                // hand.
1739                let consumed_bytes = build_descriptor_list_for_psram(
1740                    &mut descriptors,
1741                    align_buffers,
1742                    data,
1743                );
1744
1745                // Invalidate data written by the DMA. As this likely affects more data than we touched, write back first.
1746                unsafe {
1747                    crate::soc::cache_writeback_addr(data_addr as u32, consumed_bytes as u32);
1748                    crate::soc::cache_invalidate_addr(data_addr as u32, consumed_bytes as u32);
1749                }
1750
1751                consumed_bytes
1752            } else {
1753                unreachable!()
1754            }
1755        }
1756    } else {
1757        // Just set up descriptors as usual
1758        let data_len = data.len();
1759        unwrap!(descriptors.link_with_buffer(unsafe { data.as_mut() }, chunk_size));
1760        unwrap!(descriptors.set_tx_length(data_len, chunk_size));
1761
1762        data_len
1763    };
1764
1765    for desc in descriptors.linked_iter_mut() {
1766        desc.reset_for_rx();
1767    }
1768
1769    (
1770        NoBuffer(Preparation {
1771            start: descriptors.head(),
1772            direction: TransferDirection::In,
1773            burst_transfer: BurstConfig::DEFAULT,
1774            check_owner: None,
1775            auto_write_back: true,
1776            #[cfg(dma_can_access_psram)]
1777            accesses_psram: data_in_psram,
1778        }),
1779        data_len,
1780    )
1781}
1782
/// Builds the RX descriptor chain for a (possibly unaligned) PSRAM buffer.
///
/// The unaligned head and tail of `data` are redirected into internal
/// [`ManualWritebackBuffer`]s stored in `copy_buffers` (presumably the caller
/// writes these back to PSRAM after the transfer — confirm against callers),
/// while the cache-aligned middle section is chained up to point directly at
/// the data.
///
/// Returns the number of bytes covered by the descriptor list; this may be
/// less than `data.len()` if `descriptors` runs out of entries.
#[cfg(dma_can_access_psram)]
fn build_descriptor_list_for_psram(
    descriptors: &mut DescriptorSet<'_>,
    copy_buffers: &mut [Option<ManualWritebackBuffer>; 2],
    data: NonNull<[u8]>,
) -> usize {
    let data_len = data.len();
    let data_addr = data.addr().get();

    let min_alignment = ExternalBurstConfig::DEFAULT.min_psram_alignment(TransferDirection::In);
    let chunk_size = 4096 - min_alignment;

    let mut descriptor_iter = DescriptorChainingIter::new(descriptors.descriptors);
    let mut copy_buffer_iter = copy_buffers.iter_mut();

    // MIN_LAST_DMA_LEN could make this really annoying, so we're just allocating a bit larger
    // buffer and shove edge cases into a single one. If we have >24 bytes on the S2, the 2-buffer
    // alignment algo works fine as one of them can steal 16 bytes, the other will have
    // MIN_LAST_DMA_LEN data to work with.
    let has_aligned_data = data_len > BUF_LEN;

    // Calculate byte offset to the start of the buffer
    // NOTE(review): when `data_addr` is already aligned (`offset == 0`) this
    // evaluates to `min_alignment`, so the first block is staged even though it
    // wouldn't strictly need to be — appears intentional (conservative), verify.
    let offset = data_addr % min_alignment;
    let head_to_copy = min_alignment - offset;
    let head_to_copy = if !has_aligned_data {
        // Short buffer: shove everything into a single staging buffer.
        BUF_LEN
    } else if head_to_copy > 0 && head_to_copy < MIN_LAST_DMA_LEN {
        // Grow the head so no descriptor ends up shorter than MIN_LAST_DMA_LEN.
        head_to_copy + min_alignment
    } else {
        head_to_copy
    };
    let head_to_copy = head_to_copy.min(data_len);

    // Calculate last unaligned part
    let tail_to_copy = (data_len - head_to_copy) % min_alignment;
    let tail_to_copy = if tail_to_copy > 0 && tail_to_copy < MIN_LAST_DMA_LEN {
        tail_to_copy + min_alignment
    } else {
        tail_to_copy
    };

    let mut consumed = 0;

    // Align beginning
    if head_to_copy > 0 {
        let copy_buffer = unwrap!(copy_buffer_iter.next());
        let buffer =
            copy_buffer.insert(ManualWritebackBuffer::new(get_range(data, 0..head_to_copy)));

        let Some(descriptor) = descriptor_iter.next() else {
            return consumed;
        };
        descriptor.set_size(head_to_copy);
        descriptor.buffer = buffer.buffer_ptr();
        consumed += head_to_copy;
    };

    // Chain up descriptors for the main aligned data part.
    let mut aligned_data = get_range(data, head_to_copy..data.len() - tail_to_copy);
    while !aligned_data.is_empty() {
        let Some(descriptor) = descriptor_iter.next() else {
            return consumed;
        };
        let chunk = aligned_data.len().min(chunk_size);

        descriptor.set_size(chunk);
        descriptor.buffer = aligned_data.cast::<u8>().as_ptr();
        consumed += chunk;
        aligned_data = get_range(aligned_data, chunk..aligned_data.len());
    }

    // Align end
    if tail_to_copy > 0 {
        let copy_buffer = unwrap!(copy_buffer_iter.next());
        let buffer = copy_buffer.insert(ManualWritebackBuffer::new(get_range(
            data,
            data.len() - tail_to_copy..data.len(),
        )));

        let Some(descriptor) = descriptor_iter.next() else {
            return consumed;
        };
        descriptor.set_size(tail_to_copy);
        descriptor.buffer = buffer.buffer_ptr();
        consumed += tail_to_copy;
    }

    consumed
}
1872
1873#[cfg(dma_can_access_psram)]
/// Returns a raw subslice of `ptr` covering `range`.
///
/// Performs no bounds checking: the caller must ensure `range` lies within the
/// slice that `ptr` describes.
fn get_range(ptr: NonNull<[u8]>, range: Range<usize>) -> NonNull<[u8]> {
    let len = range.end - range.start;
    // SAFETY: the offset stays within (or one past) the slice the caller
    // guarantees `ptr` points to.
    let base = unsafe { ptr.cast::<u8>().byte_add(range.start) };
    NonNull::slice_from_raw_parts(base, len)
}
1878
#[cfg(dma_can_access_psram)]
/// Cursor over a descriptor slice that links each descriptor to the previously
/// yielded one as it goes, building a singly-linked DMA descriptor chain.
struct DescriptorChainingIter<'a> {
    /// index of the next element to emit
    index: usize,
    /// Backing storage of the descriptors being chained together.
    descriptors: &'a mut [DmaDescriptor],
}
#[cfg(dma_can_access_psram)]
impl<'a> DescriptorChainingIter<'a> {
    /// Creates a cursor over `descriptors`. No linking happens until `next` is
    /// called.
    fn new(descriptors: &'a mut [DmaDescriptor]) -> Self {
        Self {
            index: 0,
            descriptors,
        }
    }

    /// Yields the next descriptor, hooking it into the chain behind the
    /// previously yielded one. Returns `None` once the slice is exhausted.
    fn next(&mut self) -> Option<&'_ mut DmaDescriptor> {
        let current = self.index;
        match current {
            // The first descriptor has no predecessor to link from.
            0 => {
                self.index = 1;
                self.descriptors.first_mut()
            }
            i if i < self.descriptors.len() => {
                self.index = current + 1;

                // Take a raw pointer to the descriptor we are about to return.
                let ptr = &raw mut self.descriptors[current];

                // Link the previous descriptor to this one.
                self.descriptors[current - 1].next = ptr;

                // Reborrow through the raw pointer so the returned mutable
                // reference is not invalidated by the slice access above.
                Some(unsafe { &mut *ptr })
            }
            _ => None,
        }
    }
}
1916
#[cfg(dma_can_access_psram)]
// Minimum number of bytes the final DMA descriptor must cover.
// NOTE(review): the ESP32-S2 appears to need at least 5 bytes in the last
// descriptor while other chips have no such restriction — confirm against TRM.
const MIN_LAST_DMA_LEN: usize = if cfg!(esp32s2) { 5 } else { 1 };
#[cfg(dma_can_access_psram)]
// Size of a ManualWritebackBuffer's staging storage: one 16-byte external
// memory block plus slack for the MIN_LAST_DMA_LEN edge case on both ends.
const BUF_LEN: usize = 16 + 2 * (MIN_LAST_DMA_LEN - 1); // 2x makes aligning short buffers simpler
1921
/// PSRAM helper. DMA can write data of any alignment into this buffer, and it can be written by
/// the CPU back to PSRAM.
#[cfg(dma_can_access_psram)]
pub(crate) struct ManualWritebackBuffer {
    /// Final destination the staged bytes belong to.
    dst_address: NonNull<u8>,
    /// Internal, DMA-writable staging storage.
    buffer: [u8; BUF_LEN],
    /// Number of valid bytes in `buffer`; at most `BUF_LEN`, so it fits a u8.
    n_bytes: u8,
}
1930
#[cfg(dma_can_access_psram)]
impl ManualWritebackBuffer {
    /// Creates a zero-filled staging buffer that shadows the destination slice
    /// described by `ptr`.
    ///
    /// Panics if the slice is longer than [`BUF_LEN`].
    pub fn new(ptr: NonNull<[u8]>) -> Self {
        assert!(ptr.len() <= BUF_LEN);
        let n_bytes = ptr.len() as u8;
        Self {
            buffer: [0; BUF_LEN],
            n_bytes,
            dst_address: ptr.cast(),
        }
    }

    /// Copies the staged bytes back to their final destination.
    pub fn write_back(&self) {
        // SAFETY: `dst_address` points at the `n_bytes`-long slice this buffer
        // was constructed from; the caller keeps that memory valid.
        unsafe {
            core::ptr::copy(
                self.buffer.as_ptr(),
                self.dst_address.as_ptr(),
                usize::from(self.n_bytes),
            );
        }
    }

    /// Pointer to the internal staging storage, for use in DMA descriptors.
    pub fn buffer_ptr(&self) -> *mut u8 {
        self.buffer.as_ptr() as *mut u8
    }
}