// esp_hal/dma/buffers.rs

use core::{
    ops::{Deref, DerefMut},
    ptr::null_mut,
};

use super::*;
use crate::soc::{is_slice_in_dram, is_slice_in_psram};
#[cfg(psram_dma)]
use crate::soc::{is_valid_psram_address, is_valid_ram_address};

/// Error returned from Dma[Rx|Tx|RxTx]Buf operations.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaBufError {
    /// The buffer is smaller than the requested size.
    BufferTooSmall,

    /// More descriptors are needed for the buffer size.
    InsufficientDescriptors,

    /// Descriptors or buffers are not located in a supported memory region.
    UnsupportedMemoryRegion,

    /// Buffer address or size is not properly aligned.
    InvalidAlignment(DmaAlignmentError),

    /// Invalid chunk size: must be > 0 and <= 4095.
    InvalidChunkSize,
}

/// DMA buffer alignment errors.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaAlignmentError {
    /// Buffer address is not properly aligned.
    Address,

    /// Buffer size is not properly aligned.
    Size,
}

impl From<DmaAlignmentError> for DmaBufError {
    fn from(err: DmaAlignmentError) -> Self {
        DmaBufError::InvalidAlignment(err)
    }
}

cfg_if::cfg_if! {
    if #[cfg(psram_dma)] {
        /// Burst size used when transferring to and from external memory.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum ExternalBurstConfig {
            /// 16 bytes
            Size16 = 16,

            /// 32 bytes
            Size32 = 32,

            /// 64 bytes
            Size64 = 64,
        }

        impl ExternalBurstConfig {
            /// The default external memory burst length.
            pub const DEFAULT: Self = Self::Size16;
        }

        impl Default for ExternalBurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        /// Internal memory access burst mode.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum InternalBurstConfig {
            /// Burst mode is disabled.
            Disabled,

            /// Burst mode is enabled.
            Enabled,
        }

        impl InternalBurstConfig {
            /// The default internal burst mode configuration.
            pub const DEFAULT: Self = Self::Disabled;
        }

        impl Default for InternalBurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        /// Burst transfer configuration.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub struct BurstConfig {
            /// Configures the burst size for PSRAM transfers.
            ///
            /// Burst mode is always enabled for PSRAM transfers.
            pub external_memory: ExternalBurstConfig,

            /// Enables or disables the burst mode for internal memory transfers.
            ///
            /// The burst size is not configurable.
            pub internal_memory: InternalBurstConfig,
        }

        impl BurstConfig {
            /// The default burst mode configuration.
            pub const DEFAULT: Self = Self {
                external_memory: ExternalBurstConfig::DEFAULT,
                internal_memory: InternalBurstConfig::DEFAULT,
            };
        }

        impl Default for BurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        impl From<InternalBurstConfig> for BurstConfig {
            fn from(internal_memory: InternalBurstConfig) -> Self {
                Self {
                    external_memory: ExternalBurstConfig::DEFAULT,
                    internal_memory,
                }
            }
        }

        impl From<ExternalBurstConfig> for BurstConfig {
            fn from(external_memory: ExternalBurstConfig) -> Self {
                Self {
                    external_memory,
                    internal_memory: InternalBurstConfig::DEFAULT,
                }
            }
        }
    } else {
        /// Burst transfer configuration.
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        #[cfg_attr(feature = "defmt", derive(defmt::Format))]
        pub enum BurstConfig {
            /// Burst mode is disabled.
            Disabled,

            /// Burst mode is enabled.
            Enabled,
        }

        impl BurstConfig {
            /// The default burst mode configuration.
            pub const DEFAULT: Self = Self::Disabled;
        }

        impl Default for BurstConfig {
            fn default() -> Self {
                Self::DEFAULT
            }
        }

        type InternalBurstConfig = BurstConfig;
    }
}

#[cfg(psram_dma)]
impl ExternalBurstConfig {
    const fn min_psram_alignment(self, direction: TransferDirection) -> usize {
        // S2 TRM: Specifically, size and buffer address pointer in receive descriptors
        // should be 16-byte, 32-byte or 64-byte aligned. For data frame whose
        // length is not a multiple of 16 bytes, 32 bytes, or 64 bytes, EDMA adds
        // padding bytes to the end.

        // S3 TRM: Size and Address for IN transfers must be block aligned. For receive
        // descriptors, if the data length received are not aligned with block size,
        // GDMA will pad the data received with 0 until they are aligned to
        // initiate burst transfer. You can read the length field in receive descriptors
        // to obtain the length of valid data received
        if matches!(direction, TransferDirection::In) {
            self as usize
        } else {
            // S2 TRM: Size, length and buffer address pointer in transmit descriptors are
            // not necessarily aligned with block size.

            // S3 TRM: Size, length, and buffer address pointer in transmit descriptors do
            // not need to be aligned.
            1
        }
    }
}

impl InternalBurstConfig {
    pub(super) const fn is_burst_enabled(self) -> bool {
        !matches!(self, Self::Disabled)
    }

    // Size and address alignment requirements come in pairs on current hardware,
    // so a single value covers both.
    const fn min_dram_alignment(self, direction: TransferDirection) -> usize {
        if matches!(direction, TransferDirection::In) {
            // NOTE(danielb): commenting this check out is incorrect per the TRM, but
            //                works. We'll need to restore it once peripherals can read
            //                a different amount of data than what is configured in the
            //                buffer.
            // if cfg!(esp32) {
            //     // NOTE: The size must be word-aligned.
            //     // NOTE: The buffer address must be word-aligned
            //     4
            // }
            if self.is_burst_enabled() {
                // As described in "Accessing Internal Memory" paragraphs in the various TRMs.
                4
            } else {
                1
            }
        } else {
            // OUT transfers have no alignment requirements, except on the ESP32, as
            // described below.
            if cfg!(esp32) {
                // SPI DMA: Burst transmission is supported. The data size for
                // a single transfer must be four bytes aligned.
                // I2S DMA: Burst transfer is supported. However, unlike the
                // SPI DMA channels, the data size for a single transfer is
                // one word, or four bytes.
                4
            } else {
                1
            }
        }
    }
}

const fn max(a: usize, b: usize) -> usize {
    if a > b {
        a
    } else {
        b
    }
}

impl BurstConfig {
    delegate::delegate! {
        #[cfg(psram_dma)]
        to self.internal_memory {
            pub(super) const fn min_dram_alignment(self, direction: TransferDirection) -> usize;
            pub(super) fn is_burst_enabled(self) -> bool;
        }
    }

    /// Calculates an alignment that is compatible with the current burst
    /// configuration.
    ///
    /// This is an over-estimation so that descriptors can be safely used with
    /// any DMA channel in any direction.
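    ///
    /// # Example
    ///
    /// A minimal sketch of rounding a buffer length up to a compatible
    /// multiple (`BUFFER_LEN` is an assumed constant, not part of this API):
    ///
    /// ```rust,ignore
    /// let alignment = BurstConfig::default().min_compatible_alignment();
    /// let aligned_len = BUFFER_LEN.div_ceil(alignment) * alignment;
    /// ```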
    pub const fn min_compatible_alignment(self) -> usize {
        let in_alignment = self.min_dram_alignment(TransferDirection::In);
        let out_alignment = self.min_dram_alignment(TransferDirection::Out);
        let alignment = max(in_alignment, out_alignment);

        #[cfg(psram_dma)]
        let alignment = max(alignment, self.external_memory as usize);

        alignment
    }

    const fn chunk_size_for_alignment(alignment: usize) -> usize {
        // DMA descriptors have a 12-bit field for the size/length of the buffer they
        // point at. As there is no such thing as 0-byte alignment, this means the
        // maximum size is 4095 bytes.
        4096 - alignment
    }

    /// Calculates a chunk size that is compatible with the current burst
    /// configuration's alignment requirements.
    ///
    /// This is an over-estimation so that descriptors can be safely used with
    /// any DMA channel in any direction.
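    ///
    /// # Example
    ///
    /// A sketch of sizing a descriptor array from the chunk size
    /// (`BUFFER_LEN` is an assumed constant, not part of this API):
    ///
    /// ```rust,ignore
    /// let chunk_size = BurstConfig::default().max_compatible_chunk_size();
    /// let descriptor_count = BUFFER_LEN.div_ceil(chunk_size);
    /// ```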
    pub const fn max_compatible_chunk_size(self) -> usize {
        Self::chunk_size_for_alignment(self.min_compatible_alignment())
    }

    fn min_alignment(self, _buffer: &[u8], direction: TransferDirection) -> usize {
        let alignment = self.min_dram_alignment(direction);

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                let mut alignment = alignment;
                if is_valid_psram_address(_buffer.as_ptr() as usize) {
                    alignment = max(alignment, self.external_memory.min_psram_alignment(direction));
                }
            }
        }

        alignment
    }

    // Note: this function ignores address alignment as we assume the buffers are
    // aligned.
    fn max_chunk_size_for(self, buffer: &[u8], direction: TransferDirection) -> usize {
        Self::chunk_size_for_alignment(self.min_alignment(buffer, direction))
    }

    fn ensure_buffer_aligned(
        self,
        buffer: &[u8],
        direction: TransferDirection,
    ) -> Result<(), DmaAlignmentError> {
        let alignment = self.min_alignment(buffer, direction);
        if buffer.as_ptr() as usize % alignment != 0 {
            return Err(DmaAlignmentError::Address);
        }

        // NB: the TRMs suggest that buffer lengths don't need to be aligned, but
        // for IN transfers, we configure the DMA descriptors' size field, which needs
        // to be aligned.
        if direction == TransferDirection::In && buffer.len() % alignment != 0 {
            return Err(DmaAlignmentError::Size);
        }

        Ok(())
    }

    fn ensure_buffer_compatible(
        self,
        buffer: &[u8],
        direction: TransferDirection,
    ) -> Result<(), DmaBufError> {
        // buffer can be either DRAM or PSRAM (if supported)
        let is_in_dram = is_slice_in_dram(buffer);
        let is_in_psram = cfg!(psram_dma) && is_slice_in_psram(buffer);
        if !(is_in_dram || is_in_psram) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        self.ensure_buffer_aligned(buffer, direction)?;

        Ok(())
    }
}

/// The direction of the DMA transfer.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferDirection {
    /// DMA transfer from peripheral or external memory to memory.
    In,
    /// DMA transfer from memory to peripheral or external memory.
    Out,
}

/// Holds all the information needed to configure a DMA channel for a transfer.
#[derive(PartialEq, Eq, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Preparation {
    /// The descriptor the DMA will start from.
    pub start: *mut DmaDescriptor,

    /// The direction of the DMA transfer.
    pub direction: TransferDirection,

    /// Must be `true` if any of the DMA descriptors contain data in PSRAM.
    #[cfg(psram_dma)]
    pub accesses_psram: bool,

    /// Configures the DMA to transfer data in bursts.
    ///
    /// The implementation of the buffer must ensure that the buffer size
    /// and alignment in each descriptor are compatible with the burst
    /// transfer configuration.
    ///
    /// For details on alignment requirements, refer to your chip's
    #[doc = crate::trm_markdown_link!()]
    pub burst_transfer: BurstConfig,

    /// Configures the "check owner" feature of the DMA channel.
    ///
    /// Most DMA channels allow software to configure whether the hardware
    /// checks that [DmaDescriptor::owner] is set to [Owner::Dma] before
    /// consuming the descriptor. If this check fails, the channel stops
    /// operating and fires
    /// [DmaRxInterrupt::DescriptorError]/[DmaTxInterrupt::DescriptorError].
    ///
    /// This field allows the buffer implementation to configure this
    /// behaviour.
    /// - `Some(true)`: DMA channel must check the owner bit.
    /// - `Some(false)`: DMA channel must NOT check the owner bit.
    /// - `None`: DMA channel should check the owner bit if it is supported.
    ///
    /// Some buffer implementations may require that the DMA channel performs
    /// this check before consuming the descriptor to ensure correct
    /// behaviour, e.g. to prevent wrap-around in a circular transfer.
    ///
    /// Some buffer implementations may require that the DMA channel does NOT
    /// perform this check, as the ownership bit will not be set before the
    /// channel tries to consume the descriptor.
    ///
    /// Most implementations won't have any such requirements and will work
    /// correctly regardless of whether the DMA channel checks or not.
    ///
    /// Note: If the DMA channel doesn't support the provided option,
    /// preparation will fail.
    pub check_owner: Option<bool>,

    /// Configures whether the DMA channel automatically clears the
    /// [DmaDescriptor::owner] bit after it is done with the buffer pointed
    /// to by a descriptor.
    ///
    /// For RX transfers, this is always true and the value specified here is
    /// ignored.
    ///
    /// Note: SPI_DMA on the ESP32 does not support this and will panic if set
    /// to true.
    pub auto_write_back: bool,
}

/// [DmaTxBuffer] is a DMA descriptor + memory combo that can be used for
/// transmitting data from a DMA channel to a peripheral's FIFO.
///
/// # Safety
///
/// The implementing type must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
pub unsafe trait DmaTxBuffer {
    /// A type providing operations that are safe to perform on the buffer
    /// whilst the DMA is actively using it.
    type View;

    /// Prepares the buffer for an imminent transfer and returns
    /// information required to use this buffer.
    ///
    /// Note: This operation is idempotent.
    fn prepare(&mut self) -> Preparation;

    /// This is called before the DMA starts using the buffer.
    fn into_view(self) -> Self::View;

    /// This is called after the DMA is done using the buffer.
    fn from_view(view: Self::View) -> Self;
}

/// [DmaRxBuffer] is a DMA descriptor + memory combo that can be used for
/// receiving data from a peripheral's FIFO via a DMA channel.
///
/// Note: Implementations of this trait may only support having a single EOF
/// bit, which resides in the last descriptor. There will be a separate trait
/// in the future to support multiple EOFs.
///
/// # Safety
///
/// The implementing type must keep all its descriptors and the buffers they
/// point to valid while the buffer is being transferred.
pub unsafe trait DmaRxBuffer {
    /// A type providing operations that are safe to perform on the buffer
    /// whilst the DMA is actively using it.
    type View;

    /// Prepares the buffer for an imminent transfer and returns
    /// information required to use this buffer.
    ///
    /// Note: This operation is idempotent.
    fn prepare(&mut self) -> Preparation;

    /// This is called before the DMA starts using the buffer.
    fn into_view(self) -> Self::View;

    /// This is called after the DMA is done using the buffer.
    fn from_view(view: Self::View) -> Self;
}

/// An in-progress view into [DmaRxBuf]/[DmaTxBuf].
///
/// In the future, this could support peeking into the state of the
/// descriptors/buffers.
pub struct BufView<T>(T);

/// DMA transmit buffer
///
/// This is a contiguous buffer linked together by DMA descriptors, each
/// covering at most 4095 bytes. It can only be used for transmitting data
/// to a peripheral's FIFO. See [DmaRxBuf] for receiving data.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaTxBuf {
    descriptors: DescriptorSet<'static>,
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaTxBuf {
    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4095 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
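    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the crate's `dma_buffers!` helper macro is
    /// used to reserve the statics:
    ///
    /// ```rust,ignore
    /// let (_rx_buffer, _rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4096);
    /// let mut tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer)?;
    /// ```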
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        Self::new_with_config(descriptors, buffer, BurstConfig::default())
    }

    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Depending on alignment requirements, each descriptor can handle at most
    /// 4095 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported for descriptors.
    pub fn new_with_config(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
        config: impl Into<BurstConfig>,
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            descriptors: DescriptorSet::new(descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        let capacity = buf.capacity();
        buf.configure(config, capacity)?;

        Ok(buf)
    }

    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        self.burst = burst;
        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors.into_inner(), self.buffer)
    }

    /// Returns the size of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }
    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;

        self.descriptors.set_tx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )
    }

    /// Resets the descriptors to only transmit `len` amount of bytes from this
    /// buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst))
    }

    /// Fills the TX buffer with the bytes provided in `data` and resets the
    /// descriptors to only cover the filled section.
    ///
    /// The number of bytes in `data` must be less than or equal to the buffer
    /// size.
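    ///
    /// # Example
    ///
    /// A sketch of staging a payload for transmission (`tx_buf` created as
    /// shown in [Self::new]):
    ///
    /// ```rust,ignore
    /// tx_buf.fill(&[0xAA; 128]);
    /// assert_eq!(tx_buf.len(), 128);
    /// ```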
    pub fn fill(&mut self, data: &[u8]) {
        self.set_length(data.len());
        self.as_mut_slice()[..data.len()].copy_from_slice(data);
    }

    /// Returns the buf as a mutable slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    /// Returns the buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }
}

unsafe impl DmaTxBuffer for DmaTxBuf {
    type View = BufView<DmaTxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.descriptors.linked_iter_mut() {
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null());
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_writeback_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.descriptors.head(),
            direction: TransferDirection::Out,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: false,
        }
    }

    fn into_view(self) -> BufView<DmaTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

/// DMA receive buffer
///
/// This is a contiguous buffer linked together by DMA descriptors, each
/// covering at most 4092 bytes. It can only be used for receiving data from
/// a peripheral's FIFO. See [DmaTxBuf] for transmitting data.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaRxBuf {
    descriptors: DescriptorSet<'static>,
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaRxBuf {
    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle at most 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
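    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the crate's `dma_buffers!` helper macro is
    /// used to reserve the statics:
    ///
    /// ```rust,ignore
    /// let (rx_buffer, rx_descriptors, _tx_buffer, _tx_descriptors) = dma_buffers!(4096);
    /// let mut rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer)?;
    /// ```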
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            descriptors: DescriptorSet::new(descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        buf.configure(buf.burst, buf.capacity())?;

        Ok(buf)
    }

    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;

        self.burst = burst;
        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors.into_inner(), self.buffer)
    }

    /// Returns the size of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the maximum number of bytes that this buf has been configured
    /// to receive.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.descriptors
            .linked_iter()
            .map(|d| d.size())
            .sum::<usize>()
    }

    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;

        self.descriptors.set_rx_length(
            len,
            burst.max_chunk_size_for(&self.buffer[..len], TransferDirection::In),
        )
    }

    /// Resets the descriptors to only receive `len` amount of bytes into this
    /// buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst));
    }

    /// Returns the entire underlying buffer as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire underlying buffer as a slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    /// Returns the number of bytes that were received by this buf.
    pub fn number_of_received_bytes(&self) -> usize {
        self.descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    /// Reads the received data into the provided `buf`.
    ///
    /// If `buf.len()` is less than the amount of received data, then only the
    /// first `buf.len()` bytes of received data are written into `buf`.
    ///
    /// Returns the number of bytes written to `buf`.
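    ///
    /// # Example
    ///
    /// A sketch of draining the buffer once the transfer has completed:
    ///
    /// ```rust,ignore
    /// let mut data = [0u8; 128];
    /// let received = rx_buf.read_received_data(&mut data);
    /// // `received` may be less than 128 if the DMA wrote fewer bytes.
    /// let data = &data[..received];
    /// ```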
    pub fn read_received_data(&self, mut buf: &mut [u8]) -> usize {
        let capacity = buf.len();
        for chunk in self.received_data() {
            if buf.is_empty() {
                break;
            }
            // Copy only as much of this chunk as the remaining space allows, so that
            // a chunk larger than the remaining space doesn't cause a panic.
            let to_copy = chunk.len().min(buf.len());
            let to_fill;
            (to_fill, buf) = buf.split_at_mut(to_copy);
            to_fill.copy_from_slice(&chunk[..to_copy]);
        }

        capacity - buf.len()
    }

    /// Returns the received data as an iterator of slices.
    pub fn received_data(&self) -> impl Iterator<Item = &[u8]> {
        self.descriptors.linked_iter().map(|desc| {
            // SAFETY: We set up the descriptor to point to a subslice of the buffer, and
            // here we are only recreating that slice with a perhaps shorter length.
            // We are also not accessing `self.buffer` while this slice is alive, so we
            // are not violating any aliasing rules.
            unsafe { core::slice::from_raw_parts(desc.buffer.cast_const(), desc.len()) }
        })
    }
}

unsafe impl DmaRxBuffer for DmaRxBuf {
    type View = BufView<DmaRxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.descriptors.linked_iter_mut() {
            desc.reset_for_rx();
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_invalidate_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.descriptors.head(),
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> BufView<DmaRxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

/// DMA transmit and receive buffer.
///
/// This is a (single) contiguous buffer linked together by two sets of DMA
/// descriptors, each descriptor covering at most 4092 bytes.
/// It can be used for simultaneously transmitting to and receiving from a
/// peripheral's FIFO. These are typically full-duplex transfers.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct DmaRxTxBuf {
    rx_descriptors: DescriptorSet<'static>,
    tx_descriptors: DescriptorSet<'static>,
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaRxTxBuf {
    /// Creates a new [DmaRxTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle at most 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
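    ///
    /// # Example
    ///
    /// A minimal full-duplex sketch (the descriptor and buffer statics are
    /// assumed to come from the crate's DMA helper macros):
    ///
    /// ```rust,ignore
    /// let mut buf = DmaRxTxBuf::new(rx_descriptors, tx_descriptors, buffer)?;
    /// buf.as_mut_slice()[..4].copy_from_slice(&[1, 2, 3, 4]);
    /// buf.set_length(4);
    /// ```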
    pub fn new(
        rx_descriptors: &'static mut [DmaDescriptor],
        tx_descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        let mut buf = Self {
            rx_descriptors: DescriptorSet::new(rx_descriptors)?,
            tx_descriptors: DescriptorSet::new(tx_descriptors)?,
            buffer,
            burst: BurstConfig::default(),
        };

        let capacity = buf.capacity();
        buf.configure(buf.burst, capacity)?;

        Ok(buf)
    }

    fn configure(
        &mut self,
        burst: impl Into<BurstConfig>,
        length: usize,
    ) -> Result<(), DmaBufError> {
        let burst = burst.into();
        self.set_length_fallible(length, burst)?;

        self.rx_descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;
        self.tx_descriptors.link_with_buffer(
            self.buffer,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        self.burst = burst;

        Ok(())
    }

    /// Configures the DMA to use burst transfers to access this buffer.
    pub fn set_burst_config(&mut self, burst: BurstConfig) -> Result<(), DmaBufError> {
        let len = self.len();
        self.configure(burst, len)
    }

    /// Consume the buf, returning the rx descriptors, tx descriptors and
    /// buffer.
    pub fn split(
        self,
    ) -> (
        &'static mut [DmaDescriptor],
        &'static mut [DmaDescriptor],
        &'static mut [u8],
    ) {
        (
            self.rx_descriptors.into_inner(),
            self.tx_descriptors.into_inner(),
            self.buffer,
        )
    }

    /// Returns the size of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.tx_descriptors
            .linked_iter()
            .map(|d| d.len())
            .sum::<usize>()
    }

    /// Returns the entire buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire buf as a slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        self.buffer
    }

    fn set_length_fallible(&mut self, len: usize, burst: BurstConfig) -> Result<(), DmaBufError> {
        if len > self.capacity() {
            return Err(DmaBufError::BufferTooSmall);
        }
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::In)?;
        burst.ensure_buffer_compatible(&self.buffer[..len], TransferDirection::Out)?;

        self.rx_descriptors.set_rx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::In),
        )?;
        self.tx_descriptors.set_tx_length(
            len,
            burst.max_chunk_size_for(self.buffer, TransferDirection::Out),
        )?;

        Ok(())
    }

    /// Resets the descriptors to only transmit/receive `len` amount of bytes
    /// with this buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        unwrap!(self.set_length_fallible(len, self.burst));
    }
}

unsafe impl DmaTxBuffer for DmaRxTxBuf {
    type View = BufView<DmaRxTxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.tx_descriptors.linked_iter_mut() {
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null());
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_writeback_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.tx_descriptors.head(),
            direction: TransferDirection::Out,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: false,
        }
    }

    fn into_view(self) -> BufView<DmaRxTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

unsafe impl DmaRxBuffer for DmaRxTxBuf {
    type View = BufView<DmaRxTxBuf>;

    fn prepare(&mut self) -> Preparation {
        for desc in self.rx_descriptors.linked_iter_mut() {
            desc.reset_for_rx();
        }

        cfg_if::cfg_if! {
            if #[cfg(psram_dma)] {
                // Optimization: avoid locking for PSRAM range.
                let is_data_in_psram = !is_valid_ram_address(self.buffer.as_ptr() as usize);
                if is_data_in_psram {
                    unsafe {
                        crate::soc::cache_invalidate_addr(
                            self.buffer.as_ptr() as u32,
                            self.buffer.len() as u32,
                        )
                    };
                }
            }
        }

        Preparation {
            start: self.rx_descriptors.head(),
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: is_data_in_psram,
            burst_transfer: self.burst,
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> BufView<DmaRxTxBuf> {
        BufView(self)
    }

    fn from_view(view: Self::View) -> Self {
        view.0
    }
}

/// DMA Streaming Receive Buffer.
///
/// This is a contiguous buffer linked together by DMA descriptors, and the
/// buffer is evenly distributed between each descriptor provided.
///
/// It is used for continuously streaming data from a peripheral's FIFO.
///
/// It does so by maintaining a sliding window of descriptors that progresses
/// when you call [DmaRxStreamBufView::consume].
///
/// The list starts out like so `A (empty) -> B (empty) -> C (empty) -> D
/// (empty) -> NULL`.
///
/// As the DMA writes to the buffers, the list progresses like so:
/// - `A (empty) -> B (empty) -> C (empty) -> D (empty) -> NULL`
/// - `A (full)  -> B (empty) -> C (empty) -> D (empty) -> NULL`
/// - `A (full)  -> B (full)  -> C (empty) -> D (empty) -> NULL`
/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
///
/// As you call [DmaRxStreamBufView::consume], the list (approximately)
/// progresses like so:
/// - `A (full)  -> B (full)  -> C (full)  -> D (empty) -> NULL`
/// - `B (full)  -> C (full)  -> D (empty) -> A (empty) -> NULL`
/// - `C (full)  -> D (empty) -> A (empty) -> B (empty) -> NULL`
/// - `D (empty) -> A (empty) -> B (empty) -> C (empty) -> NULL`
///
/// If all the descriptors fill up, the [DmaRxInterrupt::DescriptorEmpty]
/// interrupt will fire and the DMA will stop writing, at which point it is up
/// to you to resume/restart the transfer.
///
/// Note: This buffer will not tell you when this condition occurs; you should
/// check with the driver to see if the DMA has stopped.
///
/// When constructing this buffer, it is important to tune the ratio between
/// the chunk size and buffer size appropriately. Smaller chunk sizes mean you
/// receive data more frequently, but the DMA interrupts
/// ([DmaRxInterrupt::Done]) also fire more frequently (if you use them).
///
/// See [DmaRxStreamBufView] for APIs available whilst a transfer is in
/// progress.
pub struct DmaRxStreamBuf {
    descriptors: &'static mut [DmaDescriptor],
    buffer: &'static mut [u8],
    burst: BurstConfig,
}

impl DmaRxStreamBuf {
    /// Creates a new [DmaRxStreamBuf] evenly distributing the buffer between
    /// the provided descriptors.
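    ///
    /// # Example
    ///
    /// A sketch of the chunk-size tuning described above: four descriptors
    /// over a 4 KiB buffer yield 1 KiB chunks, so received data becomes
    /// visible in 1 KiB steps (the statics are assumed to come from the
    /// crate's DMA helper macros):
    ///
    /// ```rust,ignore
    /// let stream_buf = DmaRxStreamBuf::new(descriptors, buffer)?;
    /// ```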
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        if !is_slice_in_dram(descriptors) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        if !is_slice_in_dram(buffer) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        if descriptors.is_empty() {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        // Evenly distribute the buffer between the descriptors.
        let chunk_size = buffer.len() / descriptors.len();

        if chunk_size > 4095 {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        // Check that the last descriptor can hold the excess.
        let excess = buffer.len() % descriptors.len();
        if chunk_size + excess > 4095 {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        let mut chunks = buffer.chunks_exact_mut(chunk_size);
        for (desc, chunk) in descriptors.iter_mut().zip(chunks.by_ref()) {
            desc.buffer = chunk.as_mut_ptr();
            desc.set_size(chunk.len());
        }

        let remainder = chunks.into_remainder();
        debug_assert_eq!(remainder.len(), excess);

        if !remainder.is_empty() {
            // Append any excess to the last descriptor.
            let last_descriptor = descriptors.last_mut().unwrap();
            last_descriptor.set_size(last_descriptor.size() + remainder.len());
        }

        Ok(Self {
            descriptors,
            buffer,
            burst: BurstConfig::default(),
        })
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors, self.buffer)
    }
}

unsafe impl DmaRxBuffer for DmaRxStreamBuf {
    type View = DmaRxStreamBufView;

    fn prepare(&mut self) -> Preparation {
        // Link up all the descriptors (but not in a circle).
        let mut next = null_mut();
        for desc in self.descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;

            desc.reset_for_rx();
        }
        Preparation {
            start: self.descriptors.as_mut_ptr(),
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: false,
            burst_transfer: self.burst,

            // Whilst we give ownership of the descriptors to the DMA, the correctness of this
            // buffer implementation doesn't rely on the DMA checking for descriptor ownership.
            // No descriptor is added back to the end of the stream before it's ready for the
            // DMA to consume it.
            check_owner: None,
            auto_write_back: true,
        }
    }

    fn into_view(self) -> DmaRxStreamBufView {
        DmaRxStreamBufView {
            buf: self,
            descriptor_idx: 0,
            descriptor_offset: 0,
        }
    }

    fn from_view(view: Self::View) -> Self {
        view.buf
    }
}

/// A view into a [DmaRxStreamBuf]
pub struct DmaRxStreamBufView {
    buf: DmaRxStreamBuf,
    descriptor_idx: usize,
    descriptor_offset: usize,
}

impl DmaRxStreamBufView {
    /// Returns the number of bytes that are available to read from the buf.
    pub fn available_bytes(&self) -> usize {
        let (tail, head) = self.buf.descriptors.split_at(self.descriptor_idx);
        let mut result = 0;
        for desc in head.iter().chain(tail) {
            if desc.owner() == Owner::Dma {
                break;
            }
            result += desc.len();
        }
        result - self.descriptor_offset
    }

    /// Reads as much as possible into the buf from the available data.
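    ///
    /// # Example
    ///
    /// A polling sketch (`view` is the [DmaRxStreamBufView] handed out by the
    /// driver while the transfer is in flight; `process` is a placeholder):
    ///
    /// ```rust,ignore
    /// let mut chunk = [0u8; 64];
    /// let n = view.pop(&mut chunk);
    /// process(&chunk[..n]);
    /// ```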
    pub fn pop(&mut self, buf: &mut [u8]) -> usize {
        if buf.is_empty() {
            return 0;
        }
        let total_bytes = buf.len();

        let mut remaining = buf;
        loop {
            let available = self.peek();
            if available.is_empty() {
                // No more data is available right now; return what we have so far
                // instead of spinning until the DMA produces more.
                break;
            }
            if available.len() >= remaining.len() {
                remaining.copy_from_slice(&available[0..remaining.len()]);
                self.consume(remaining.len());
                let consumed = remaining.len();
                remaining = &mut remaining[consumed..];
                break;
            } else {
                let to_consume = available.len();
                remaining[0..to_consume].copy_from_slice(available);
                self.consume(to_consume);
                remaining = &mut remaining[to_consume..];
            }
        }

        total_bytes - remaining.len()
    }

    /// Returns a slice into the buffer containing available data.
    /// This will be the longest possible contiguous slice into the buffer that
    /// contains data that is available to read.
    ///
    /// Note: This function ignores EOFs; see [Self::peek_until_eof] if you
    /// need EOF support.
    pub fn peek(&self) -> &[u8] {
        let (slice, _) = self.peek_internal(false);
        slice
    }

    /// Same as [Self::peek] but will not skip over any EOFs.
    ///
    /// It also returns a boolean indicating whether this slice ends with an
    /// EOF or not.
    pub fn peek_until_eof(&self) -> (&[u8], bool) {
        self.peek_internal(true)
    }

    /// Consumes the first `n` bytes from the available data, returning any
    /// fully consumed descriptors back to the DMA.
    /// This is typically called after [Self::peek]/[Self::peek_until_eof].
    ///
    /// Returns the number of bytes that were actually consumed.
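    ///
    /// # Example
    ///
    /// A zero-copy sketch pairing [Self::peek] with [Self::consume]
    /// (`handle` is a placeholder that returns how many bytes it used):
    ///
    /// ```rust,ignore
    /// let available = view.peek();
    /// let used = handle(available);
    /// view.consume(used);
    /// ```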
    pub fn consume(&mut self, n: usize) -> usize {
        let mut remaining_bytes_to_consume = n;

        loop {
            let desc = &mut self.buf.descriptors[self.descriptor_idx];

            if desc.owner() == Owner::Dma {
                // Descriptor is still owned by DMA so it can't be read yet.
                // This should only happen when there is no more data available to read.
                break;
            }

            let remaining_bytes_in_descriptor = desc.len() - self.descriptor_offset;
            if remaining_bytes_to_consume < remaining_bytes_in_descriptor {
                self.descriptor_offset += remaining_bytes_to_consume;
                remaining_bytes_to_consume = 0;
                break;
            }

            // Reset the descriptor for reuse.
            desc.set_owner(Owner::Dma);
            desc.set_suc_eof(false);
            desc.set_length(0);

            // Before connecting this descriptor to the end of the list, the next descriptor
            // must be disconnected from this one to prevent the DMA from
            // overtaking.
            desc.next = null_mut();

            let desc_ptr: *mut _ = desc;

            let prev_descriptor_index = self
                .descriptor_idx
                .checked_sub(1)
                .unwrap_or(self.buf.descriptors.len() - 1);

            // Connect this consumed descriptor to the end of the chain.
            self.buf.descriptors[prev_descriptor_index].next = desc_ptr;

            self.descriptor_idx += 1;
            if self.descriptor_idx >= self.buf.descriptors.len() {
                self.descriptor_idx = 0;
            }
            self.descriptor_offset = 0;

            remaining_bytes_to_consume -= remaining_bytes_in_descriptor;
        }

        n - remaining_bytes_to_consume
    }

    fn peek_internal(&self, stop_at_eof: bool) -> (&[u8], bool) {
        let descriptors = &self.buf.descriptors[self.descriptor_idx..];

        // There must be at least one descriptor.
        debug_assert!(!descriptors.is_empty());

        if descriptors.len() == 1 {
            let last_descriptor = &descriptors[0];
            if last_descriptor.owner() == Owner::Dma {
                // No data available.
                (&[], false)
            } else {
                let length = last_descriptor.len() - self.descriptor_offset;
                (
                    &self.buf.buffer[self.buf.buffer.len() - length..],
                    last_descriptor.flags.suc_eof(),
                )
            }
        } else {
            let chunk_size = descriptors[0].size();
            let mut found_eof = false;

            let mut number_of_contiguous_bytes = 0;
            for desc in descriptors {
                if desc.owner() == Owner::Dma {
                    break;
                }
                number_of_contiguous_bytes += desc.len();

                if stop_at_eof && desc.flags.suc_eof() {
                    found_eof = true;
                    break;
                }
                // If the length is smaller than the size, the contiguous-ness ends here.
                if desc.len() < desc.size() {
                    break;
                }
            }

            (
                &self.buf.buffer[chunk_size * self.descriptor_idx..][..number_of_contiguous_bytes]
                    [self.descriptor_offset..],
                found_eof,
            )
        }
    }
}

static mut EMPTY: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY];

/// An empty buffer that can be used when you don't need to transfer any data.
pub struct EmptyBuf;

unsafe impl DmaTxBuffer for EmptyBuf {
    type View = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        #[allow(unused_unsafe)] // stable requires unsafe, nightly complains about it
        Preparation {
            start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() },
            direction: TransferDirection::Out,
            #[cfg(psram_dma)]
            accesses_psram: false,
            burst_transfer: BurstConfig::default(),

            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
            // channel does *NOT* check for ownership, otherwise the channel will return an error.
            check_owner: Some(false),

            // The DMA should not write back to the descriptor as it is shared.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

unsafe impl DmaRxBuffer for EmptyBuf {
    type View = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        #[allow(unused_unsafe)] // stable requires unsafe, nightly complains about it
        Preparation {
            start: unsafe { core::ptr::addr_of_mut!(EMPTY).cast() },
            direction: TransferDirection::In,
            #[cfg(psram_dma)]
            accesses_psram: false,
            burst_transfer: BurstConfig::default(),

            // As we don't give ownership of the descriptor to the DMA, it's important that the DMA
            // channel does *NOT* check for ownership, otherwise the channel will return an error.
            check_owner: Some(false),
            auto_write_back: true,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

/// DMA Loop Buffer
///
/// This consists of a single descriptor that points to itself and points to a
/// single buffer, resulting in the buffer being transmitted over and over
/// again, indefinitely.
///
/// Note: A DMA descriptor is 12 bytes. If your buffer is significantly shorter
/// than this, the DMA channel will spend more time reading the descriptor than
/// it does reading the buffer, which may leave it unable to keep up with the
/// bandwidth requirements of some peripherals at high frequencies.
pub struct DmaLoopBuf {
    descriptor: &'static mut DmaDescriptor,
    buffer: &'static mut [u8],
}

impl DmaLoopBuf {
    /// Creates a new [DmaLoopBuf].
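    ///
    /// # Example
    ///
    /// A sketch of a repeating waveform (`descriptor`, `buffer` and `pattern`
    /// are assumptions; `pattern` must match the buffer length):
    ///
    /// ```rust,ignore
    /// let mut loop_buf = DmaLoopBuf::new(descriptor, buffer)?;
    /// loop_buf.copy_from_slice(&pattern); // slice access via DerefMut
    /// ```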
    pub fn new(
        descriptor: &'static mut DmaDescriptor,
        buffer: &'static mut [u8],
    ) -> Result<DmaLoopBuf, DmaBufError> {
        if !is_slice_in_dram(buffer) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        if !is_slice_in_dram(core::slice::from_ref(descriptor)) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        if buffer.len() > BurstConfig::default().max_chunk_size_for(buffer, TransferDirection::Out)
        {
            return Err(DmaBufError::InsufficientDescriptors);
        }

        descriptor.set_owner(Owner::Dma); // Doesn't matter
        descriptor.set_suc_eof(false);
        descriptor.set_length(buffer.len());
        descriptor.set_size(buffer.len());
        descriptor.buffer = buffer.as_mut_ptr();
        descriptor.next = descriptor;

        Ok(Self { descriptor, buffer })
    }

    /// Consume the buf, returning the descriptor and buffer.
    pub fn split(self) -> (&'static mut DmaDescriptor, &'static mut [u8]) {
        (self.descriptor, self.buffer)
    }
}

unsafe impl DmaTxBuffer for DmaLoopBuf {
    type View = Self;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            start: self.descriptor,
            #[cfg(psram_dma)]
            accesses_psram: false,
            direction: TransferDirection::Out,
            burst_transfer: BurstConfig::default(),
            // The DMA must not check the owner bit, as it is never set.
            check_owner: Some(false),

            // Doesn't matter either way, but it is set to false for ESP32 SPI_DMA
            // compatibility.
            auto_write_back: false,
        }
    }

    fn into_view(self) -> Self::View {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }
}

impl Deref for DmaLoopBuf {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buffer
    }
}

impl DerefMut for DmaLoopBuf {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer
    }
}