esp_hal/dma/buffers.rs

use core::{
    ops::{Deref, DerefMut},
    ptr::null_mut,
};

use super::*;
use crate::soc::{is_slice_in_dram, is_slice_in_psram};
#[cfg(psram_dma)]
use crate::soc::{is_valid_psram_address, is_valid_ram_address};

/// Error returned from Dma[Rx|Tx|RxTx]Buf operations.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaBufError {
    /// The buffer is smaller than the requested size.
    BufferTooSmall,

    /// More descriptors are needed for the buffer size.
    InsufficientDescriptors,

    /// Descriptors or buffers are not located in a supported memory region.
    UnsupportedMemoryRegion,

    /// Buffer address or size is not properly aligned.
    InvalidAlignment(DmaAlignmentError),

    /// Invalid chunk size: must be > 0 and <= 4095.
    InvalidChunkSize,
}

/// DMA buffer alignment errors.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaAlignmentError {
    /// Buffer address is not properly aligned.
    Address,

    /// Buffer size is not properly aligned.
    Size,
}

impl From<DmaAlignmentError> for DmaBufError {
    fn from(err: DmaAlignmentError) -> Self {
        DmaBufError::InvalidAlignment(err)
    }
}

cfg_if::cfg_if! {
    if #[cfg(psram_dma)] {
        /// Burst size used when transferring to and from external memory.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum ExternalBurstConfig {
            /// 16 bytes
            Size16 = 16,

            /// 32 bytes
            Size32 = 32,

            /// 64 bytes
            Size64 = 64,
        }

        impl ExternalBurstConfig {
            /// The default external memory burst length.
            pub const DEFAULT: Self = Self::Size16;
        }

        impl Default for ExternalBurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        /// Internal memory access burst mode.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum InternalBurstConfig {
            /// Burst mode is disabled.
            Disabled,

            /// Burst mode is enabled.
            Enabled,
        }

        impl InternalBurstConfig {
            /// The default internal burst mode configuration.
            pub const DEFAULT: Self = Self::Disabled;
        }

        impl Default for InternalBurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        /// Burst transfer configuration.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub struct BurstConfig {
            /// Configures the burst size for PSRAM transfers.
            ///
            /// Burst mode is always enabled for PSRAM transfers.
            pub external_memory: ExternalBurstConfig,

            /// Enables or disables the burst mode for internal memory transfers.
            ///
            /// The burst size is not configurable.
            pub internal_memory: InternalBurstConfig,
        }

        impl BurstConfig {
            /// The default burst mode configuration.
            pub const DEFAULT: Self = Self {
                external_memory: ExternalBurstConfig::DEFAULT,
                internal_memory: InternalBurstConfig::DEFAULT,
            };
        }

        impl Default for BurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        impl From<InternalBurstConfig> for BurstConfig {
            fn from(internal_memory: InternalBurstConfig) -> Self {
                Self {
                    external_memory: ExternalBurstConfig::DEFAULT,
                    internal_memory,
                }
            }
        }

        impl From<ExternalBurstConfig> for BurstConfig {
            fn from(external_memory: ExternalBurstConfig) -> Self {
                Self {
                    external_memory,
                    internal_memory: InternalBurstConfig::DEFAULT,
                }
            }
        }
    } else {
        /// Burst transfer configuration.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum BurstConfig {
            /// Burst mode is disabled.
            Disabled,

            /// Burst mode is enabled.
            Enabled,
        }

        impl BurstConfig {
            /// The default burst mode configuration.
            pub const DEFAULT: Self = Self::Disabled;
        }

        impl Default for BurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        type InternalBurstConfig = BurstConfig;
    }
}

#[cfg(psram_dma)]
impl ExternalBurstConfig {
    const fn min_psram_alignment(self, direction: TransferDirection) -> usize {
        // S2 TRM: Specifically, size and buffer address pointer in receive descriptors
        // should be 16-byte, 32-byte or 64-byte aligned. For data frame whose
        // length is not a multiple of 16 bytes, 32 bytes, or 64 bytes, EDMA adds
        // padding bytes to the end.

        // S3 TRM: Size and Address for IN transfers must be block aligned. For receive
        // descriptors, if the data length received are not aligned with block size,
        // GDMA will pad the data received with 0 until they are aligned to
        // initiate burst transfer. You can read the length field in receive descriptors
        // to obtain the length of valid data received
        if matches!(direction, TransferDirection::In) {
            self as usize
        } else {
            // S2 TRM: Size, length and buffer address pointer in transmit descriptors are
            // not necessarily aligned with block size.

            // S3 TRM: Size, length, and buffer address pointer in transmit descriptors do
            // not need to be aligned.
            1
        }
    }
}

impl InternalBurstConfig {
    pub(super) const fn is_burst_enabled(self) -> bool {
        !matches!(self, Self::Disabled)
    }

    // Size and address alignment as those come in pairs on current hardware.
    const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
        if matches!(direction, TransferDirection::In) {
            if cfg!(esp32) {
                // NOTE: The size must be word-aligned.
                // NOTE: The buffer address must be word-aligned
                4
            } else if self.is_burst_enabled() {
                // As described in "Accessing Internal Memory" paragraphs in the various TRMs.
                4
            } else {
                1
            }
        } else {
            // OUT transfers have no alignment requirements, except for ESP32, which is
            // described below.
            if cfg!(esp32) {
                // SPI DMA: Burst transmission is supported. The data size for
                // a single transfer must be four bytes aligned.
                // I2S DMA: Burst transfer is supported. However, unlike the
                // SPI DMA channels, the data size for a single transfer is
                // one word, or four bytes.
                4
            } else {
                1
            }
        }
    }
}

const fn max(a: usize, b: usize) -> usize {
    if a > b { a } else { b }
}

impl BurstConfig {
    delegate::delegate! {
        #[cfg(psram_dma)]
        to self.internal_memory {
            pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize;
            pub(super) fn is_burst_enabled(self) -> bool;
        }
    }

    /// Calculates an alignment that is compatible with the current burst
    /// configuration.
    ///
    /// This is an over-estimation so that Descriptors can be safely used with
    /// any DMA channel in any direction.
    pub const fn min_compatible_alignment(self) -> usize {
        let in_alignment = self.min_dram_alignment(TransferDirection::In);
        let out_alignment = self.min_dram_alignment(TransferDirection::Out);
        let alignment = max(in_alignment, out_alignment);

        #[cfg(psram_dma)]
        let alignment = max(alignment, self.external_memory as usize);

        alignment
    }

    const fn chunk_size_for_alignment(alignment: usize) -> usize {
        // DMA descriptors have a 12-bit field for the size/length of the buffer they
        // point at. As there is no such thing as 0-byte alignment, this means the
        // maximum size is 4095 bytes.
        4096 - alignment
    }

    /// Calculates a chunk size that is compatible with the current burst
    /// configuration's alignment requirements.
    ///
    /// This is an over-estimation so that Descriptors can be safely used with
    /// any DMA channel in any direction.
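    ///
    /// As a rough sketch of how this value can be used when sizing a
    /// descriptor array (the exact chunk size depends on the chip and the
    /// selected burst configuration; `buffer_len` below is just a hypothetical
    /// buffer length):
    ///
    /// ```rust,ignore
    /// let burst = BurstConfig::default();
    /// let chunk_size = burst.max_compatible_chunk_size();
    /// // Each descriptor covers at most `chunk_size` bytes of the buffer.
    /// let descriptor_count = buffer_len.div_ceil(chunk_size);
    /// ```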
    pub const fn max_compatible_chunk_size(self) -> usize {
        Self::chunk_size_for_alignment(self.min_compatible_alignment())
    }

    fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize {
        let alignment = self.min_dram_alignment(direction);

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                let mut alignment = alignment;
                if is_valid_psram_address(_buffer.as_ptr() as usize) {
                    alignment = max(alignment, self.external_memory.min_psram_alignment(direction));
                }
            }
        }

        alignment
    }

    // Note: this function ignores address alignment as we assume the buffers are
    // aligned.
    fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize {
        Self::chunk_size_for_alignment(self.min_alignment(buffer, direction))
    }

    fn ensure_buffer_aligned(
        self,
        buffer: &[u8],
        direction: TransferDirection,
    ) -> Result<(), DmaAlignmentError> {
        let alignment = self.min_alignment(buffer, direction);
        if buffer.as_ptr() as usize % alignment != 0 {
            return Err(DmaAlignmentError::Address);
        }

        // NB: the TRMs suggest that buffer lengths don't need to be aligned, but
        // for IN transfers, we configure the DMA descriptors' size field, which needs
        // to be aligned.
        if direction == TransferDirection::In && buffer.len() % alignment != 0 {
            return Err(DmaAlignmentError::Size);
        }

        Ok(())
    }

    fn ensure_buffer_compatible(
        self,
        buffer: &[u8],
        direction: TransferDirection,
    ) -> Result<(), DmaBufError> {
        // buffer can be either DRAM or PSRAM (if supported)
        let is_in_dram = is_slice_in_dram(buffer);
        let is_in_psram = cfg!(psram_dma) && is_slice_in_psram(buffer);
        if !(is_in_dram || is_in_psram) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        self.ensure_buffer_aligned(buffer, direction)?;

        Ok(())
    }
}

/// The direction of the DMA transfer.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferDirection {
    /// DMA transfer from peripheral or external memory to memory.
    In,
    /// DMA transfer from memory to peripheral or external memory.
    Out,
}

/// Holds all the information needed to configure a DMA channel for a transfer.
#[derive(PartialEq, Eq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Preparation {
    /// The descriptor the DMA will start from.
    pub start: *mut DmaDescriptor,

    /// The direction of the DMA transfer.
    pub direction: TransferDirection,

    /// Must be `true` if any of the DMA descriptors contain data in PSRAM.
    #[cfg(psram_dma)]
    pub accesses_psram: bool,

    /// Configures the DMA to transfer data in bursts.
    ///
    /// The implementation of the buffer must ensure that buffer size
    /// and alignment in each descriptor is compatible with the burst
    /// transfer configuration.
    ///
    /// For details on alignment requirements, refer to your chip's
    #[doc = crate::trm_markdown_link!()]
    pub burst_transfer: BurstConfig,

    /// Configures the "check owner" feature of the DMA channel.
    ///
    /// Most DMA channels allow software to configure whether the hardware
    /// checks that [DmaDescriptor::owner] is set to [Owner::Dma] before
    /// consuming the descriptor. If this check fails, the channel stops
    /// operating and fires
    /// [DmaRxInterrupt::DescriptorError]/[DmaTxInterrupt::DescriptorError].
    ///
    /// This field allows the buffer implementation to configure this behaviour.
    /// - `Some(true)`: DMA channel must check the owner bit.
    /// - `Some(false)`: DMA channel must NOT check the owner bit.
    /// - `None`: DMA channel should check the owner bit if it is supported.
    ///
    /// Some buffer implementations may require that the DMA channel performs
    /// this check before consuming the descriptor to ensure correct
    /// behaviour, e.g. to prevent wrap-around in a circular transfer.
    ///
    /// Some buffer implementations may require that the DMA channel does NOT
    /// perform this check as the ownership bit will not be set before the
    /// channel tries to consume the descriptor.
    ///
    /// Most implementations won't have any such requirements and will work
    /// correctly regardless of whether the DMA channel checks or not.
    ///
    /// Note: If the DMA channel doesn't support the provided option,
    /// preparation will fail.
    pub check_owner: Option<bool>,

    /// Configures whether the DMA channel automatically clears the
    /// [DmaDescriptor::owner] bit after it is done with the buffer pointed
    /// to by a descriptor.
    ///
    /// For RX transfers, this is always true and the value specified here is
    /// ignored.
    ///
    /// Note: SPI_DMA on the ESP32 does not support this and will panic if set
    /// to true.
    pub auto_write_back: bool,
}

/// [DmaTxBuffer] is a DMA descriptor + memory combo that can be used for
/// transmitting data from a DMA channel to a peripheral's FIFO.
///
/// # Safety
///
/// The implementing type must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
pub unsafe trait DmaTxBuffer {
    /// A type providing operations that are safe to perform on the buffer
    /// whilst the DMA is actively using it.
    type View;

    /// Prepares the buffer for an imminent transfer and returns
    /// information required to use this buffer.
    ///
    /// Note: This operation is idempotent.
    fn prepare(&mut self) -> Preparation;

    /// This is called before the DMA starts using the buffer.
    fn into_view(self) -> Self::View;

    /// This is called after the DMA is done using the buffer.
    fn from_view(view: Self::View) -> Self;
}

/// [DmaRxBuffer] is a DMA descriptor + memory combo that can be used for
/// receiving data from a peripheral's FIFO to a DMA channel.
///
/// Note: Implementations of this trait may only support having a single EOF bit
/// which resides in the last descriptor. There will be a separate trait in the
/// future to support multiple EOFs.
440///
441/// # Safety
442///
443/// The implementing type must keep all its descriptors and the buffers they
444/// point to valid while the buffer is being transferred.
445pub unsafe trait DmaRxBuffer {
446    /// A type providing operations that are safe to perform on the buffer
447    /// whilst the DMA is actively using it.
448    type View;
449
450    /// Prepares the buffer for an imminent transfer and returns
451    /// information required to use this buffer.
452    ///
453    /// Note: This operation is idempotent.
454    fn prepare(&mut self) -> Preparation;
455
456    /// This is called before the DMA starts using the buffer.
457    fn into_view(self) -> Self::View;
458
459    /// This is called after the DMA is done using the buffer.
460    fn from_view(view: Self::View) -> Self;
461}
462
463/// An in-progress view into [DmaRxBuf]/[DmaTxBuf].
464///
/// In the future, this could support peeking into the state of the
/// descriptors/buffers.
pub struct BufView<T>(T);

/// DMA transmit buffer
///
/// This is a contiguous buffer linked together by DMA descriptors of length
/// 4095 at most. It can only be used for transmitting data to a peripheral's
/// FIFO. See [DmaRxBuf] for receiving data.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaTxBuf {
    descriptors: DescriptorSet<'static>,
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaTxBuf {
    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4095 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
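    ///
    /// A minimal usage sketch, assuming `descriptors` and `buffer` are
    /// `'static` mutable slices placed in DMA-capable DRAM (for example via
    /// the crate's DMA buffer helpers):
    ///
    /// ```rust,ignore
    /// let mut tx_buf = DmaTxBuf::new(descriptors, buffer)?;
    /// // Copy the payload into the buffer and shrink the transfer to its length.
    /// tx_buf.fill(&[0xAA; 32]);
    /// assert_eq!(tx_buf.len(), 32);
    /// ```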
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        Self::new_with_config(descriptors, buffer, BurstConfig::default())
    }

    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4095 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
    pub fn new_with_config(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
        config: impl Into<BurstConfig>,
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            descriptors: DescriptorSet::new(descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        let capacity = buf.capacity();
        buf.configure(config, capacity)?;

        Ok(buf)
    }

    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        self.burst = burst;
        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors.into_inner(), self.buffer)
    }

    /// Returns the size of the underlying buffer
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Return the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;

        self.descriptors.set_tx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        // This only needs to be done once (after every significant length change) as
        // Self::prepare sets Preparation::auto_write_back to false.
        for desc in self.descriptors.linked_iter_mut() {
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null());
        }

        Ok(())
    }

    /// Reset the descriptors to only transmit `len` amount of bytes from this
    /// buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst))
    }

    /// Fills the TX buffer with the bytes provided in `data` and resets the
    /// descriptors to only cover the filled section.
    ///
    /// The number of bytes in `data` must be less than or equal to the buffer
    /// size.
    pub fn fill(&mut self, data: &[u8]) {
        self.set_length(data.len());
        self.as_mut_slice()[..data.len()].copy_from_slice(data);
    }

    /// Returns the buf as a mutable slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    /// Returns the buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }
}

unsafe impl DmaTxBuffer for DmaTxBuf {
    type View = BufView<DmaTxBuf>;

    fn prepare(&mut self) -> Preparation {
        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_writeback_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.descriptors.head(),
            direction: TransferDirection::Out,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: false,
        }
    }

    fn into_view(self) -> BufView<DmaTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

/// DMA receive buffer
///
/// This is a contiguous buffer linked together by DMA descriptors of length
/// 4092. It can only be used for receiving data from a peripheral's FIFO.
/// See [DmaTxBuf] for transmitting data.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaRxBuf {
    descriptors: DescriptorSet<'static>,
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaRxBuf {
    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
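    ///
    /// A minimal usage sketch, assuming `descriptors` and `buffer` are
    /// `'static` mutable slices placed in DMA-capable DRAM:
    ///
    /// ```rust,ignore
    /// let mut rx_buf = DmaRxBuf::new(descriptors, buffer)?;
    /// // Optionally receive fewer bytes than the buffer could hold.
    /// rx_buf.set_length(128);
    /// ```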
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        Self::new_with_config(descriptors, buffer, BurstConfig::default())
    }

    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
    pub fn new_with_config(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
        config: impl Into<BurstConfig>,
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            descriptors: DescriptorSet::new(descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        buf.configure(config, buf.capacity())?;

        Ok(buf)
    }

    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;

        self.burst = burst;
        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors.into_inner(), self.buffer)
    }

    /// Returns the size of the underlying buffer
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the maximum number of bytes that this buf has been configured to
    /// receive.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.descriptors
            .linked_iter()
            .map(|d| d.size())
            .sum::<usize>()
    }

    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;

        self.descriptors.set_rx_length(
            len,
            burst.max_chunk_size_for(&self.buffer[..len], TransferDirection::In),
        )
    }

    /// Reset the descriptors to only receive `len` amount of bytes into this
    /// buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst));
    }

    /// Returns the entire underlying buffer as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire underlying buffer as a slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    /// Return the number of bytes that were received by this buf.
    pub fn number_of_received_bytes(&self) -> usize {
        self.descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    /// Reads the received data into the provided `buf`.
    ///
    /// If `buf.len()` is less than the amount of received data then only the
    /// first `buf.len()` bytes of received data are written into `buf`.
    ///
    /// Returns the number of bytes written to `buf`.
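    ///
    /// A small sketch of draining the buffer once the driver has handed it
    /// back after a transfer (`rx_buf` is assumed to be a [DmaRxBuf]):
    ///
    /// ```rust,ignore
    /// let mut data = [0u8; 64];
    /// let received = rx_buf.read_received_data(&mut data);
    /// // Only the first `received` bytes of `data` are valid.
    /// let payload = &data[..received];
    /// ```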
    pub fn read_received_data(&self, mut buf: &mut [u8]) -> usize {
        // Note that due to an ESP32 quirk, the last received descriptor may not get
        // updated.
        let capacity = buf.len();
        for chunk in self.received_data() {
            if buf.is_empty() {
                break;
            }
            // Copy only as much of this chunk as still fits into `buf`.
            let to_copy = chunk.len().min(buf.len());
            let to_fill;
            (to_fill, buf) = buf.split_at_mut(to_copy);
            to_fill.copy_from_slice(&chunk[..to_copy]);
        }

        capacity - buf.len()
    }

    /// Returns the received data as an iterator of slices.
    pub fn received_data(&self) -> impl Iterator<Item = &[u8]> {
        self.descriptors.linked_iter().map(|desc| {
            // SAFETY: We set up the descriptor to point to a subslice of the buffer, and
            // here we are only recreating that slice with a perhaps shorter length.
            // We are also not accessing `self.buffer` while this slice is alive, so we
            // are not violating any aliasing rules.
            unsafe { core::slice::from_raw_parts(desc.buffer.cast_const(), desc.len()) }
        })
    }
}

unsafe impl DmaRxBuffer for DmaRxBuf {
    type View = BufView<DmaRxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.descriptors.linked_iter_mut() {
            desc.reset_for_rx();
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_invalidate_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.descriptors.head(),
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> BufView<DmaRxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

/// DMA transmit and receive buffer.
///
/// This is a (single) contiguous buffer linked together by two sets of DMA
/// descriptors of length 4092 each.
/// It can be used for simultaneously transmitting to and receiving from a
/// peripheral's FIFO. These are typically full-duplex transfers.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaRxTxBuf {
    rx_descriptors: DescriptorSet<'static>,
    tx_descriptors: DescriptorSet<'static>,
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaRxTxBuf {
    /// Creates a new [DmaRxTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
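    ///
    /// A minimal usage sketch, assuming the descriptor and buffer slices are
    /// `'static` and placed in DMA-capable DRAM:
    ///
    /// ```rust,ignore
    /// // The same buffer is linked by two descriptor chains, one per direction,
    /// // which is what full-duplex transfers need.
    /// let mut buf = DmaRxTxBuf::new(rx_descriptors, tx_descriptors, buffer)?;
    /// buf.set_length(256);
    /// ```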
    pub fn new(
        rx_descriptors: &'static mut [DmaDescriptor],
        tx_descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            rx_descriptors: DescriptorSet::new(rx_descriptors)?,
            tx_descriptors: DescriptorSet::new(tx_descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        let capacity = buf.capacity();
        buf.configure(buf.burst, capacity)?;

        Ok(buf)
    }

    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.rx_descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;
        self.tx_descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        self.burst = burst;

        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the rx descriptors, tx descriptors and
    /// buffer.
    pub fn split(
        self,
    ) -> (
        &'static mut [DmaDescriptor],
        &'static mut [DmaDescriptor],
        &'static mut [u8],
    ) {
        (
            self.rx_descriptors.into_inner(),
            self.tx_descriptors.into_inner(),
            self.buffer,
        )
    }

    /// Return the size of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Return the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.tx_descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    /// Returns the entire buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire buf as a slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;

        self.rx_descriptors.set_rx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;
        self.tx_descriptors.set_tx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        Ok(())
    }

    /// Reset the descriptors to only transmit/receive `len` amount of bytes
    /// with this buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst));
    }
}

unsafe impl DmaTxBuffer for DmaRxTxBuf {
    type View = BufView<DmaRxTxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.tx_descriptors.linked_iter_mut() {
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null());
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_writeback_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.tx_descriptors.head(),
            direction: TransferDirection::Out,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: false,
        }
    }

    fn into_view(self) -> BufView<DmaRxTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

unsafe impl DmaRxBuffer for DmaRxTxBuf {
    type View = BufView<DmaRxTxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.rx_descriptors.linked_iter_mut() {
            desc.reset_for_rx();
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_invalidate_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.rx_descriptors.head(),
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> BufView<DmaRxTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

/// DMA Streaming Receive Buffer.
///
/// This is a contiguous buffer linked together by DMA descriptors, and the
/// buffer is evenly distributed between each descriptor provided.
///
/// It is used for continuously streaming data from a peripheral's FIFO.
///
/// It does so by maintaining a sliding window of descriptors that progresses
/// when you call [DmaRxStreamBufView::consume].
///
/// The list starts out like so `A (empty) -> B (empty) -> C (empty) -> D
/// (empty) -> NULL`.
///
/// As the DMA writes to the buffers the list progresses like so:
/// - `A (empty) -> B (empty) -> C (empty) -> D (empty) -> NULL`
/// - `A (full)  -> B (empty) -> C (empty) -> D (empty) -> NULL`
/// - `A (full)  -> B (full)  -> C (empty) -> D (empty) -> NULL`
/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
///
/// As you call [DmaRxStreamBufView::consume] the list (approximately)
/// progresses like so:
/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
/// - `B (full)  -> C (full)  -> D (empty) -> A (empty) -> NULL`
/// - `C (full)  -> D (empty) -> A (empty) -> B (empty) -> NULL`
/// - `D (empty) -> A (empty) -> B (empty) -> C (empty) -> NULL`
///
/// If all the descriptors fill up, the [DmaRxInterrupt::DescriptorEmpty]
/// interrupt will fire and the DMA will stop writing, at which point it is up
/// to you to resume/restart the transfer.
///
/// Note: This buffer will not tell you when this condition occurs, you should
/// check with the driver to see if the DMA has stopped.
///
/// When constructing this buffer, it is important to tune the ratio between the
/// chunk size and buffer size appropriately. Smaller chunk sizes mean you
/// receive data more frequently, but the DMA interrupts
/// ([DmaRxInterrupt::Done]) also fire more frequently (if you use them).
///
/// See [DmaRxStreamBufView] for APIs available whilst a transfer is in
/// progress.
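///
/// A rough sketch of a polling read loop while a transfer is in progress
/// (`view` stands in for the [DmaRxStreamBufView] owned by the in-flight
/// transfer, and `process` is a placeholder for your own handling of the
/// bytes):
///
/// ```rust,ignore
/// loop {
///     let chunk = view.peek();
///     if chunk.is_empty() {
///         // Nothing to read yet. Also check the driver here in case the DMA
///         // ran out of descriptors and stopped.
///         continue;
///     }
///     process(chunk);
///     let n = chunk.len();
///     // Return the fully consumed descriptors to the DMA.
///     view.consume(n);
/// }
/// ```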
pub struct DmaRxStreamBuf {
    descriptors: &'static mut [DmaDescriptor],
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaRxStreamBuf {
    /// Creates a new [DmaRxStreamBuf] evenly distributing the buffer between
    /// the provided descriptors.
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        if !is_slice_in_dram(descriptors) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        if !is_slice_in_dram(buffer) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        if descriptors.is_empty() {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        // Evenly distribute the buffer between the descriptors.
        let chunk_size = buffer.len() / descriptors.len();

        if chunk_size > 4095 {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        // Check that the last descriptor can hold the excess
        let excess = buffer.len() % descriptors.len();
        if chunk_size + excess > 4095 {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        let mut chunks = buffer.chunks_exact_mut(chunk_size);
        for (desc, chunk) in descriptors.iter_mut().zip(chunks.by_ref()) {
            desc.buffer = chunk.as_mut_ptr();
            desc.set_size(chunk.len());
        }

        let remainder = chunks.into_remainder();
        debug_assert_eq!(remainder.len(), excess);

        if !remainder.is_empty() {
            // Append any excess to the last descriptor.
            let last_descriptor = descriptors.last_mut().unwrap();
            last_descriptor.set_size(last_descriptor.size() + remainder.len());
        }

        Ok(Self {
            descriptors,
            buffer,
            burst: BurstConfig::default(),
        })
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors, self.buffer)
    }
}

unsafe impl DmaRxBuffer for DmaRxStreamBuf {
    type View = DmaRxStreamBufView;

    fn prepare(&mut self) -> Preparation {
        // Link up all the descriptors (but not in a circle).
        let mut next = null_mut();
        for desc in self.descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;

            desc.reset_for_rx();
        }
        Preparation {
            start: self.descriptors.as_mut_ptr(),
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: false,
            burst_transfer: self.burst,

            // Whilst we give ownership of the descriptors to the DMA, the correctness of this buffer
            // implementation doesn't rely on the DMA checking for descriptor ownership.
            // No descriptor is added back to the end of the stream before it's ready for the DMA
            // to consume it.
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> DmaRxStreamBufView {
        DmaRxStreamBufView {
            buf: self,
            descriptor_idx: 0,
            descriptor_offset: 0,
        }
    }

    fn from_view(view: Self::View) -> Self {
        view.buf
    }
}

/// A view into a [DmaRxStreamBuf]
pub struct DmaRxStreamBufView {
    buf: DmaRxStreamBuf,
    descriptor_idx: usize,
    descriptor_offset: usize,
}

impl DmaRxStreamBufView {
    /// Returns the number of bytes that are available to read from the buf.
    pub fn available_bytes(&self) -> usize {
        let (tail, head) = self.buf.descriptors.split_at(self.descriptor_idx);
        let mut result = 0;
        for desc in head.iter().chain(tail) {
            if desc.owner() == Owner::Dma {
                break;
            }
            result += desc.len();
        }
        result - self.descriptor_offset
    }

    /// Reads as much as possible into the buf from the available data.
    pub fn pop(&mut self, buf: &mut [u8]) -> usize {
        if buf.is_empty() {
            return 0;
        }
        let total_bytes = buf.len();

        let mut remaining = buf;
        loop {
            let available = self.peek();
            if available.is_empty() {
                // No more data is available right now; return a partial read
                // instead of spinning until more data arrives.
                break;
            }
            if available.len() >= remaining.len() {
                remaining.copy_from_slice(&available[0..remaining.len()]);
                self.consume(remaining.len());
                let consumed = remaining.len();
                remaining = &mut remaining[consumed..];
                break;
            } else {
                let to_consume = available.len();
                remaining[0..to_consume].copy_from_slice(available);
                self.consume(to_consume);
                remaining = &mut remaining[to_consume..];
            }
        }

        total_bytes - remaining.len()
    }

    /// Returns a slice into the buffer containing available data.
    /// This will be the longest possible contiguous slice into the buffer that
    /// contains data that is available to read.
    ///
    /// Note: This function ignores EOFs; see [Self::peek_until_eof] if you need
    /// EOF support.
    pub fn peek(&self) -> &[u8] {
        let (slice, _) = self.peek_internal(false);
        slice
    }

    /// Same as [Self::peek] but will not skip over any EOFs.
    ///
    /// It also returns a boolean indicating whether this slice ends with an EOF
    /// or not.
    pub fn peek_until_eof(&self) -> (&[u8], bool) {
        self.peek_internal(true)
    }

    /// Consumes the first `n` bytes from the available data, returning any
    /// fully consumed descriptors back to the DMA.
    /// This is typically called after [Self::peek]/[Self::peek_until_eof].
    ///
    /// Returns the number of bytes that were actually consumed.
    pub fn consume(&mut self, n: usize) -> usize {
        let mut remaining_bytes_to_consume = n;

        loop {
            let desc = &mut self.buf.descriptors[self.descriptor_idx];

            if desc.owner() == Owner::Dma {
                // Descriptor is still owned by DMA so it can't be read yet.
                // This should only happen when there is no more data available to read.
                break;
            }

            let remaining_bytes_in_descriptor = desc.len() - self.descriptor_offset;
            if remaining_bytes_to_consume < remaining_bytes_in_descriptor {
                self.descriptor_offset += remaining_bytes_to_consume;
                remaining_bytes_to_consume = 0;
                break;
            }

            // Reset the descriptor for reuse.
            desc.set_owner(Owner::Dma);
            desc.set_suc_eof(false);
            desc.set_length(0);

            // Before connecting this descriptor to the end of the list, the next descriptor
            // must be disconnected from this one to prevent the DMA from
            // overtaking.
            desc.next = null_mut();

            let desc_ptr: *mut _ = desc;

            let prev_descriptor_index = self
                .descriptor_idx
                .checked_sub(1)
                .unwrap_or(self.buf.descriptors.len() - 1);

            // Connect this consumed descriptor to the end of the chain.
            self.buf.descriptors[prev_descriptor_index].next = desc_ptr;

            self.descriptor_idx += 1;
            if self.descriptor_idx >= self.buf.descriptors.len() {
                self.descriptor_idx = 0;
            }
            self.descriptor_offset = 0;

            remaining_bytes_to_consume -= remaining_bytes_in_descriptor;
        }

        n - remaining_bytes_to_consume
    }

    fn peek_internal(&self, stop_at_eof: bool) -> (&[u8], bool) {
        let descriptors = &self.buf.descriptors[self.descriptor_idx..];

        // There must be at least one descriptor.
        debug_assert!(!descriptors.is_empty());

        if descriptors.len() == 1 {
            let last_descriptor = &descriptors[0];
            if last_descriptor.owner() == Owner::Dma {
                // No data available.
                (&[], false)
            } else {
                let length = last_descriptor.len() - self.descriptor_offset;
                (
                    &self.buf.buffer[self.buf.buffer.len() - length..],
                    last_descriptor.flags.suc_eof(),
                )
            }
        } else {
            let chunk_size = descriptors[0].size();
            let mut found_eof = false;

            let mut number_of_contiguous_bytes = 0;
            for desc in descriptors {
                if desc.owner() == Owner::Dma {
                    break;
                }
                number_of_contiguous_bytes += desc.len();

                if stop_at_eof && desc.flags.suc_eof() {
                    found_eof = true;
                    break;
                }
                // If the length is smaller than the size, the contiguous-ness ends here.
                if desc.len() < desc.size() {
                    break;
                }
            }

            (
                &self.buf.buffer[chunk_size * self.descriptor_idx..][..number_of_contiguous_bytes]
                    [self.descriptor_offset..],
                found_eof,
            )
        }
    }
}

static mut EMPTY: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY];

/// An empty buffer that can be used when you don't need to transfer any data.
pub struct EmptyBuf;

unsafe impl DmaTxBuffer for EmptyBuf {
    type View = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        #[allow(unused_unsafe)] // stable requires unsafe, nightly complains about it
        Preparation {
            start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() },
            direction: TransferDirection::Out,
            #[cfg(psram_dma)]
            accesses_psram: false,
            burst_transfer: BurstConfig::default(),

            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
            // channel does *NOT* check for ownership, otherwise the channel will return an error.
            check_owner: Some(false),

            // The DMA should not write back to the descriptor as it is shared.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

unsafe impl DmaRxBuffer for EmptyBuf {
    type View = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        #[allow(unused_unsafe)] // stable requires unsafe, nightly complains about it
        Preparation {
            start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() },
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: false,
            burst_transfer: BurstConfig::default(),

            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
            // channel does *NOT* check for ownership, otherwise the channel will return an error.
            check_owner: Some(false),
            auto_write_back: true,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

/// DMA Loop Buffer
///
/// This consists of a single descriptor that points to itself and points to a
/// single buffer, resulting in the buffer being transmitted over and over
/// again, indefinitely.
///
/// Note: A DMA descriptor is 12 bytes. If your buffer is significantly shorter
/// than this, the DMA channel will spend more time reading the descriptor than
/// it does reading the buffer, which may leave it unable to keep up with the
/// bandwidth requirements of some peripherals at high frequencies.
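///
/// A minimal usage sketch, assuming `descriptor` and `buffer` are `'static`,
/// placed in DMA-capable DRAM, and that `pattern` has the same length as the
/// buffer:
///
/// ```rust,ignore
/// // The buffer is retransmitted until the driver stops the transfer, which
/// // suits periodic waveforms.
/// let mut loop_buf = DmaLoopBuf::new(descriptor, buffer)?;
/// loop_buf.copy_from_slice(&pattern);
/// ```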
pub struct DmaLoopBuf {
    descriptor: &'static mut DmaDescriptor,
    buffer: &'static mut [u8],
}

impl DmaLoopBuf {
    /// Create a new [DmaLoopBuf].
    pub fn new(
        descriptor: &'static mut DmaDescriptor,
        buffer: &'static mut [u8],
    ) -> Result<DmaLoopBuf, DmaBufError> {
        if !is_slice_in_dram(buffer) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        if !is_slice_in_dram(core::slice::from_ref(descriptor)) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        if buffer.len() > BurstConfig::default().max_chunk_size_for(buffer, TransferDirection::Out)
        {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        descriptor.set_owner(Owner::Dma); // Doesn't matter
        descriptor.set_suc_eof(false);
        descriptor.set_length(buffer.len());
        descriptor.set_size(buffer.len());
        descriptor.buffer = buffer.as_mut_ptr();
        descriptor.next = descriptor;

        Ok(Self { descriptor, buffer })
    }

    /// Consume the buf, returning the descriptor and buffer.
    pub fn split(self) -> (&'static mut DmaDescriptor, &'static mut [u8]) {
        (self.descriptor, self.buffer)
    }
}

unsafe impl DmaTxBuffer for DmaLoopBuf {
    type View = Self;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            start: self.descriptor,
            #[cfg(psram_dma)]
            accesses_psram: false,
            direction: TransferDirection::Out,
            burst_transfer: BurstConfig::default(),
            // The DMA must not check the owner bit, as it is never set.
            check_owner: Some(false),

            // Doesn't matter either way, but it is set to false for ESP32 SPI_DMA compatibility.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> Self::View {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

impl Deref for DmaLoopBuf {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buffer
    }
}

impl DerefMut for DmaLoopBuf {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer
    }
}