esp_hal/dma/mod.rs

1//! # Direct Memory Access (DMA)
2//!
3//! ## Overview
4//!
5//! The DMA driver provides an interface to efficiently transfer data between
6//! different memory regions and peripherals within the ESP microcontroller
7//! without involving the CPU. The DMA controller is responsible for managing
8//! these data transfers.
9//!
//! Note that this module is the common version of the DMA driver: the `ESP32` and
//! `ESP32-S2` use the older `PDMA` controller, while the other chips use the
//! newer `GDMA` controller.
13//!
14//! ## Examples
15//!
16//! ### Initialize and utilize DMA controller in `SPI`
17//!
//! ```rust, no_run
#![doc = crate::before_snippet!()]
//! # use esp_hal::dma_buffers;
//! # use esp_hal::spi::{master::{Config, Spi}, Mode};
#![cfg_attr(pdma, doc = "let dma_channel = peripherals.DMA_SPI2;")]
#![cfg_attr(gdma, doc = "let dma_channel = peripherals.DMA_CH0;")]
//! let sclk = peripherals.GPIO0;
//! let miso = peripherals.GPIO2;
//! let mosi = peripherals.GPIO4;
//! let cs = peripherals.GPIO5;
//!
//! let mut spi = Spi::new(
//!     peripherals.SPI2,
//!     Config::default()
//!         .with_frequency(Rate::from_khz(100))
//!         .with_mode(Mode::_0),
//! )?
//! .with_sck(sclk)
//! .with_mosi(mosi)
//! .with_miso(miso)
//! .with_cs(cs)
//! .with_dma(dma_channel);
//! # Ok(())
//! # }
//! ```
//!
42//! ⚠️ Note: Descriptors should be sized as `(max_transfer_size + CHUNK_SIZE - 1) / CHUNK_SIZE`.
43//! I.e., to transfer buffers of size `1..=CHUNK_SIZE`, you need 1 descriptor.
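//!
//! For example, a quick sketch of that calculation (plain arithmetic, using the
//! default [CHUNK_SIZE] of 4092 bytes; the transfer size is illustrative only):
//!
//! ```rust, no_run
//! const CHUNK_SIZE: usize = 4092;
//! let max_transfer_size = 10_000;
//! // ceil(10_000 / 4092) = 3 descriptors are needed.
//! let descriptors_needed = (max_transfer_size + CHUNK_SIZE - 1) / CHUNK_SIZE;
//! assert_eq!(descriptors_needed, 3);
//! ```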
44//!
//! ⚠️ Note: On chips that support DMA to/from PSRAM (ESP32-S3), transfers to/from PSRAM
//! have extra alignment requirements. The address and size of the buffer pointed to by
//! each descriptor must be a multiple of the cache line (block) size, which is 32 bytes
//! on the ESP32-S3.
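//!
//! A quick sketch of that constraint (plain arithmetic; the address and length
//! below are illustrative only):
//!
//! ```rust, no_run
//! const CACHE_LINE: usize = 32; // ESP32-S3 cache line size
//! let psram_buffer_addr: usize = 0x3C00_0040;
//! let psram_buffer_len: usize = 256;
//! assert!(psram_buffer_addr % CACHE_LINE == 0);
//! assert!(psram_buffer_len % CACHE_LINE == 0);
//! ```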
49//!
50//! For convenience you can use the [crate::dma_buffers] macro.
51
52use core::{cmp::min, fmt::Debug, marker::PhantomData, sync::atomic::compiler_fence};
53
54use enumset::{EnumSet, EnumSetType};
55
56pub use self::buffers::*;
57#[cfg(gdma)]
58pub use self::gdma::*;
59#[cfg(any(gdma, esp32s2))]
60pub use self::m2m::*;
61#[cfg(pdma)]
62pub use self::pdma::*;
63use crate::{
64    Async,
65    Blocking,
66    DriverMode,
67    interrupt::InterruptHandler,
68    peripherals::Interrupt,
69    soc::{is_slice_in_dram, is_valid_memory_address, is_valid_ram_address},
70    system,
71    system::Cpu,
72};
73
74trait Word: crate::private::Sealed {}
75
76macro_rules! impl_word {
77    ($w:ty) => {
78        impl $crate::private::Sealed for $w {}
79        impl Word for $w {}
80    };
81}
82
83impl_word!(u8);
84impl_word!(u16);
85impl_word!(u32);
86impl_word!(i8);
87impl_word!(i16);
88impl_word!(i32);
89
90impl<W, const S: usize> crate::private::Sealed for [W; S] where W: Word {}
91
92impl<W, const S: usize> crate::private::Sealed for &[W; S] where W: Word {}
93
94impl<W> crate::private::Sealed for &[W] where W: Word {}
95
96impl<W> crate::private::Sealed for &mut [W] where W: Word {}
97
98/// Trait for buffers that can be given to DMA for reading.
99///
100/// # Safety
101///
102/// Once the `read_buffer` method has been called, it is unsafe to call any
103/// `&mut self` methods on this object as long as the returned value is in use
104/// (by DMA).
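///
/// # Example
///
/// A minimal sketch of an implementation for a hypothetical wrapper around a
/// static byte slice (`StaticBytes` is illustrative and not part of this crate):
///
/// ```rust,ignore
/// struct StaticBytes(&'static [u8]);
///
/// unsafe impl ReadBuffer for StaticBytes {
///     unsafe fn read_buffer(&self) -> (*const u8, usize) {
///         // The slice lives for `'static`, so the pointer remains valid for as
///         // long as the DMA reads from it.
///         (self.0.as_ptr(), self.0.len())
///     }
/// }
/// ```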
105pub unsafe trait ReadBuffer {
106    /// Provide a buffer usable for DMA reads.
107    ///
108    /// The return value is:
109    ///
110    /// - pointer to the start of the buffer
111    /// - buffer size in bytes
112    ///
113    /// # Safety
114    ///
115    /// Once this method has been called, it is unsafe to call any `&mut self`
116    /// methods on this object as long as the returned value is in use (by DMA).
117    unsafe fn read_buffer(&self) -> (*const u8, usize);
118}
119
120unsafe impl<W, const S: usize> ReadBuffer for [W; S]
121where
122    W: Word,
123{
124    unsafe fn read_buffer(&self) -> (*const u8, usize) {
125        (self.as_ptr() as *const u8, core::mem::size_of_val(self))
126    }
127}
128
129unsafe impl<W, const S: usize> ReadBuffer for &[W; S]
130where
131    W: Word,
132{
133    unsafe fn read_buffer(&self) -> (*const u8, usize) {
134        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
135    }
136}
137
138unsafe impl<W, const S: usize> ReadBuffer for &mut [W; S]
139where
140    W: Word,
141{
142    unsafe fn read_buffer(&self) -> (*const u8, usize) {
143        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
144    }
145}
146
147unsafe impl<W> ReadBuffer for &[W]
148where
149    W: Word,
150{
151    unsafe fn read_buffer(&self) -> (*const u8, usize) {
152        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
153    }
154}
155
156unsafe impl<W> ReadBuffer for &mut [W]
157where
158    W: Word,
159{
160    unsafe fn read_buffer(&self) -> (*const u8, usize) {
161        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
162    }
163}
164
165/// Trait for buffers that can be given to DMA for writing.
166///
167/// # Safety
168///
169/// Once the `write_buffer` method has been called, it is unsafe to call any
170/// `&mut self` methods, except for `write_buffer`, on this object as long as
171/// the returned value is in use (by DMA).
172pub unsafe trait WriteBuffer {
173    /// Provide a buffer usable for DMA writes.
174    ///
175    /// The return value is:
176    ///
177    /// - pointer to the start of the buffer
178    /// - buffer size in bytes
179    ///
180    /// # Safety
181    ///
182    /// Once this method has been called, it is unsafe to call any `&mut self`
183    /// methods, except for `write_buffer`, on this object as long as the
184    /// returned value is in use (by DMA).
185    unsafe fn write_buffer(&mut self) -> (*mut u8, usize);
186}
187
188unsafe impl<W, const S: usize> WriteBuffer for [W; S]
189where
190    W: Word,
191{
192    unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
193        (self.as_mut_ptr() as *mut u8, core::mem::size_of_val(self))
194    }
195}
196
197unsafe impl<W, const S: usize> WriteBuffer for &mut [W; S]
198where
199    W: Word,
200{
201    unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
202        (self.as_mut_ptr() as *mut u8, core::mem::size_of_val(*self))
203    }
204}
205
206unsafe impl<W> WriteBuffer for &mut [W]
207where
208    W: Word,
209{
210    unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
211        (self.as_mut_ptr() as *mut u8, core::mem::size_of_val(*self))
212    }
213}
214
215bitfield::bitfield! {
216    /// DMA descriptor flags.
217    #[derive(Clone, Copy, PartialEq, Eq)]
218    pub struct DmaDescriptorFlags(u32);
219
220    u16;
221
222    /// Specifies the size of the buffer that this descriptor points to.
223    pub size, set_size: 11, 0;
224
225    /// Specifies the number of valid bytes in the buffer that this descriptor points to.
226    ///
227    /// This field in a transmit descriptor is written by software and indicates how many bytes can
228    /// be read from the buffer.
229    ///
230    /// This field in a receive descriptor is written by hardware automatically and indicates how
231    /// many valid bytes have been stored into the buffer.
232    pub length, set_length: 23, 12;
233
234    /// For receive descriptors, software needs to clear this bit to 0, and hardware will set it to 1 after receiving
235    /// data containing the EOF flag.
236    /// For transmit descriptors, software needs to set this bit to 1 as needed.
237    /// If software configures this bit to 1 in a descriptor, the DMA will include the EOF flag in the data sent to
238    /// the corresponding peripheral, indicating to the peripheral that this data segment marks the end of one
239    /// transfer phase.
240    pub suc_eof, set_suc_eof: 30;
241
242    /// Specifies who is allowed to access the buffer that this descriptor points to.
243    /// - 0: CPU can access the buffer;
244    /// - 1: The GDMA controller can access the buffer.
245    pub owner, set_owner: 31;
246}
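
// Rough layout of the 32-bit flags word defined above (bits 29..=24 are not
// mapped by this struct):
//
//   bit 31 | bit 30  | bits 29..=24 | bits 23..=12 | bits 11..=0
//   owner  | suc_eof | (unmapped)   | length       | size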
247
248impl Debug for DmaDescriptorFlags {
249    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
250        f.debug_struct("DmaDescriptorFlags")
251            .field("size", &self.size())
252            .field("length", &self.length())
253            .field("suc_eof", &self.suc_eof())
254            .field("owner", &(if self.owner() { "DMA" } else { "CPU" }))
255            .finish()
256    }
257}
258
259#[cfg(feature = "defmt")]
260impl defmt::Format for DmaDescriptorFlags {
261    fn format(&self, fmt: defmt::Formatter<'_>) {
262        defmt::write!(
263            fmt,
264            "DmaDescriptorFlags {{ size: {}, length: {}, suc_eof: {}, owner: {} }}",
265            self.size(),
266            self.length(),
267            self.suc_eof(),
268            if self.owner() { "DMA" } else { "CPU" }
269        );
270    }
271}
272
273/// A DMA transfer descriptor.
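///
/// A minimal sketch of manually preparing a single descriptor for a transmit
/// transfer (illustrative only; the [crate::dma_buffers] and related macros and
/// the `DmaTxBuf`/`DmaRxBuf` types normally take care of this):
///
/// ```rust,ignore
/// let mut desc = DmaDescriptor::EMPTY;
/// desc.buffer = buf.as_mut_ptr(); // `buf` is some DMA-capable byte buffer
/// desc.set_size(buf.len());       // capacity of the chunk
/// desc.set_length(buf.len());     // number of valid bytes to transmit
/// desc.reset_for_tx(true);        // last descriptor in the chain: set EOF
/// // `desc.next` stays null because this is the only descriptor in the chain.
/// ```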
274#[derive(Clone, Copy, Debug, PartialEq, Eq)]
275#[cfg_attr(feature = "defmt", derive(defmt::Format))]
276#[repr(C)]
277pub struct DmaDescriptor {
278    /// Descriptor flags.
279    pub flags: DmaDescriptorFlags,
280
281    /// Address of the buffer.
282    pub buffer: *mut u8,
283
284    /// Address of the next descriptor.
285    /// If the current descriptor is the last one, this value is 0.
286    /// This field can only point to internal RAM.
287    pub next: *mut DmaDescriptor,
288}
289
290impl DmaDescriptor {
291    /// An empty DMA descriptor used to initialize the descriptor list.
292    pub const EMPTY: Self = Self {
293        flags: DmaDescriptorFlags(0),
294        buffer: core::ptr::null_mut(),
295        next: core::ptr::null_mut(),
296    };
297
298    /// Resets the descriptor for a new receive transfer.
299    pub fn reset_for_rx(&mut self) {
300        // Give ownership to the DMA
301        self.set_owner(Owner::Dma);
302
303        // Clear this to allow hardware to set it when the peripheral returns an EOF
304        // bit.
305        self.set_suc_eof(false);
306
307        // Clear this to allow hardware to set it when it's
308        // done receiving data for this descriptor.
309        self.set_length(0);
310    }
311
312    /// Resets the descriptor for a new transmit transfer. See
313    /// [DmaDescriptorFlags::suc_eof] for more details on the `set_eof`
314    /// parameter.
315    pub fn reset_for_tx(&mut self, set_eof: bool) {
316        // Give ownership to the DMA
317        self.set_owner(Owner::Dma);
318
319        // The `suc_eof` bit doesn't affect the transfer itself, but signals when the
320        // hardware should trigger an interrupt request.
321        self.set_suc_eof(set_eof);
322    }
323
324    /// Set the size of the buffer. See [DmaDescriptorFlags::size].
325    pub fn set_size(&mut self, len: usize) {
326        self.flags.set_size(len as u16)
327    }
328
329    /// Set the length of the descriptor. See [DmaDescriptorFlags::length].
330    pub fn set_length(&mut self, len: usize) {
331        self.flags.set_length(len as u16)
332    }
333
334    /// Returns the size of the buffer. See [DmaDescriptorFlags::size].
335    pub fn size(&self) -> usize {
336        self.flags.size() as usize
337    }
338
339    /// Returns the length of the descriptor. See [DmaDescriptorFlags::length].
340    #[allow(clippy::len_without_is_empty)]
341    pub fn len(&self) -> usize {
342        self.flags.length() as usize
343    }
344
345    /// Set the suc_eof bit. See [DmaDescriptorFlags::suc_eof].
346    pub fn set_suc_eof(&mut self, suc_eof: bool) {
347        self.flags.set_suc_eof(suc_eof)
348    }
349
350    /// Set the owner. See [DmaDescriptorFlags::owner].
351    pub fn set_owner(&mut self, owner: Owner) {
352        let owner = match owner {
353            Owner::Cpu => false,
354            Owner::Dma => true,
355        };
356        self.flags.set_owner(owner)
357    }
358
359    /// Returns the owner. See [DmaDescriptorFlags::owner].
360    pub fn owner(&self) -> Owner {
361        match self.flags.owner() {
362            false => Owner::Cpu,
363            true => Owner::Dma,
364        }
365    }
366}
367
368// The pointers in the descriptor can be Sent.
369// Marking this Send also allows DmaBuffer implementations to automatically be
370// Send (where the compiler sees fit).
371unsafe impl Send for DmaDescriptor {}
372
373mod buffers;
374#[cfg(gdma)]
375mod gdma;
376#[cfg(any(gdma, esp32s2))]
377mod m2m;
378#[cfg(pdma)]
379mod pdma;
380
381/// Kinds of interrupt to listen to.
382#[derive(Debug, EnumSetType)]
383#[cfg_attr(feature = "defmt", derive(defmt::Format))]
384pub enum DmaInterrupt {
385    /// RX is done
386    RxDone,
387    /// TX is done
388    TxDone,
389}
390
391/// Types of interrupts emitted by the TX channel.
392#[derive(Debug, EnumSetType)]
393#[cfg_attr(feature = "defmt", derive(defmt::Format))]
394pub enum DmaTxInterrupt {
395    /// Triggered when all data corresponding to a linked list (including
396    /// multiple descriptors) have been sent via transmit channel.
397    TotalEof,
398
399    /// Triggered when an error is detected in a transmit descriptor on transmit
400    /// channel.
401    DescriptorError,
402
403    /// Triggered when EOF in a transmit descriptor is true and data
404    /// corresponding to this descriptor have been sent via transmit
405    /// channel.
406    Eof,
407
408    /// Triggered when all data corresponding to a transmit descriptor have been
409    /// sent via transmit channel.
410    Done,
411}
412
413/// Types of interrupts emitted by the RX channel.
414#[derive(Debug, EnumSetType)]
415#[cfg_attr(feature = "defmt", derive(defmt::Format))]
416pub enum DmaRxInterrupt {
    /// Triggered when the size of the buffer pointed to by the receive descriptors
    /// is smaller than the length of data to be received via the receive channel.
419    DescriptorEmpty,
420
421    /// Triggered when an error is detected in a receive descriptor on receive
422    /// channel.
423    DescriptorError,
424
    /// Triggered when an error is detected in the data segment corresponding to
    /// a descriptor received via the receive channel.
    /// This interrupt is used only for the UHCI0 peripheral (UART0 or UART1).
428    ErrorEof,
429
430    /// Triggered when the suc_eof bit in a receive descriptor is 1 and the data
431    /// corresponding to this receive descriptor has been received via receive
432    /// channel.
433    SuccessfulEof,
434
435    /// Triggered when all data corresponding to a receive descriptor have been
436    /// received via receive channel.
437    Done,
438}
439
440/// The default chunk size used for DMA transfers.
441pub const CHUNK_SIZE: usize = 4092;
442
443/// Convenience macro to create DMA buffers and descriptors.
444///
445/// ## Usage
446/// ```rust,no_run
447#[doc = crate::before_snippet!()]
448/// use esp_hal::dma_buffers;
449///
450/// // RX and TX buffers are 32000 bytes - passing only one parameter makes RX
451/// // and TX the same size.
452/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
453///     dma_buffers!(32000, 32000);
454/// # Ok(())
455/// # }
456/// ```
457#[macro_export]
458macro_rules! dma_buffers {
459    ($rx_size:expr, $tx_size:expr) => {
460        $crate::dma_buffers_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
461    };
462    ($size:expr) => {
463        $crate::dma_buffers_chunk_size!($size, $crate::dma::CHUNK_SIZE)
464    };
465}
466
467/// Convenience macro to create circular DMA buffers and descriptors.
468///
469/// ## Usage
470/// ```rust,no_run
471#[doc = crate::before_snippet!()]
472/// use esp_hal::dma_circular_buffers;
473///
474/// // RX and TX buffers are 32000 bytes - passing only one parameter makes RX
475/// // and TX the same size.
476/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
477///     dma_circular_buffers!(32000, 32000);
478/// # Ok(())
479/// # }
480/// ```
481#[macro_export]
482macro_rules! dma_circular_buffers {
483    ($rx_size:expr, $tx_size:expr) => {
484        $crate::dma_circular_buffers_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
485    };
486
487    ($size:expr) => {
488        $crate::dma_circular_buffers_chunk_size!($size, $size, $crate::dma::CHUNK_SIZE)
489    };
490}
491
492/// Convenience macro to create DMA descriptors.
493///
494/// ## Usage
495/// ```rust,no_run
496#[doc = crate::before_snippet!()]
497/// use esp_hal::dma_descriptors;
498///
499/// // Create RX and TX descriptors for transactions up to 32000 bytes - passing
500/// // only one parameter assumes RX and TX are the same size.
501/// let (rx_descriptors, tx_descriptors) = dma_descriptors!(32000, 32000);
502/// # Ok(())
503/// # }
504/// ```
505#[macro_export]
506macro_rules! dma_descriptors {
507    ($rx_size:expr, $tx_size:expr) => {
508        $crate::dma_descriptors_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
509    };
510
511    ($size:expr) => {
512        $crate::dma_descriptors_chunk_size!($size, $size, $crate::dma::CHUNK_SIZE)
513    };
514}
515
516/// Convenience macro to create circular DMA descriptors.
517///
518/// ## Usage
519/// ```rust,no_run
520#[doc = crate::before_snippet!()]
521/// use esp_hal::dma_circular_descriptors;
522///
523/// // Create RX and TX descriptors for transactions up to 32000
524/// // bytes - passing only one parameter assumes RX and TX are the same size.
525/// let (rx_descriptors, tx_descriptors) =
526///     dma_circular_descriptors!(32000, 32000);
527/// # Ok(())
528/// # }
529/// ```
530#[macro_export]
531macro_rules! dma_circular_descriptors {
532    ($rx_size:expr, $tx_size:expr) => {
533        $crate::dma_circular_descriptors_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
534    };
535
536    ($size:expr) => {
537        $crate::dma_circular_descriptors_chunk_size!($size, $size, $crate::dma::CHUNK_SIZE)
538    };
539}
540
541/// Declares a DMA buffer with a specific size, aligned to 4 bytes
542#[doc(hidden)]
543#[macro_export]
544macro_rules! declare_aligned_dma_buffer {
545    ($name:ident, $size:expr) => {
546        // ESP32 requires word alignment for DMA buffers.
547        // ESP32-S2 technically supports byte-aligned DMA buffers, but the
        // transfer ends up writing out of bounds if the buffer's
        // length is 2 or 3 (mod 4).
550        static mut $name: [u32; ($size + 3) / 4] = [0; ($size + 3) / 4];
551    };
552}
553
/// Turns the potentially oversized static `u32` array reference into a
/// correctly sized `u8` one.
556#[doc(hidden)]
557#[macro_export]
558macro_rules! as_mut_byte_array {
559    ($name:expr, $size:expr) => {
560        unsafe { &mut *($name.as_mut_ptr() as *mut [u8; $size]) }
561    };
562}
563pub use as_mut_byte_array; // TODO: can be removed as soon as DMA is stabilized
564
565/// Convenience macro to create DMA buffers and descriptors with specific chunk
566/// size.
567///
568/// ## Usage
569/// ```rust,no_run
570#[doc = crate::before_snippet!()]
571/// use esp_hal::dma_buffers_chunk_size;
572///
573/// // TX and RX buffers are 32000 bytes - passing only one parameter makes TX
574/// // and RX the same size.
575/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
576///     dma_buffers_chunk_size!(32000, 32000, 4032);
577/// # Ok(())
578/// # }
579/// ```
580#[macro_export]
581macro_rules! dma_buffers_chunk_size {
582    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{ $crate::dma_buffers_impl!($rx_size, $tx_size, $chunk_size, is_circular = false) }};
583
584    ($size:expr, $chunk_size:expr) => {
585        $crate::dma_buffers_chunk_size!($size, $size, $chunk_size)
586    };
587}
588
589/// Convenience macro to create circular DMA buffers and descriptors with
590/// specific chunk size.
591///
592/// ## Usage
593/// ```rust,no_run
594#[doc = crate::before_snippet!()]
595/// use esp_hal::dma_circular_buffers_chunk_size;
596///
597/// // RX and TX buffers are 32000 bytes - passing only one parameter makes RX
598/// // and TX the same size.
599/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
600///     dma_circular_buffers_chunk_size!(32000, 32000, 4032);
601/// # Ok(())
602/// # }
603/// ```
604#[macro_export]
605macro_rules! dma_circular_buffers_chunk_size {
606    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{ $crate::dma_buffers_impl!($rx_size, $tx_size, $chunk_size, is_circular = true) }};
607
608    ($size:expr, $chunk_size:expr) => {{ $crate::dma_circular_buffers_chunk_size!($size, $size, $chunk_size) }};
609}
610
611/// Convenience macro to create DMA descriptors with specific chunk size
612///
613/// ## Usage
614/// ```rust,no_run
615#[doc = crate::before_snippet!()]
616/// use esp_hal::dma_descriptors_chunk_size;
617///
618/// // Create RX and TX descriptors for transactions up to 32000 bytes - passing
619/// // only one parameter assumes RX and TX are the same size.
620/// let (rx_descriptors, tx_descriptors) =
621///     dma_descriptors_chunk_size!(32000, 32000, 4032);
622/// # Ok(())
623/// # }
624/// ```
625#[macro_export]
626macro_rules! dma_descriptors_chunk_size {
627    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{ $crate::dma_descriptors_impl!($rx_size, $tx_size, $chunk_size, is_circular = false) }};
628
629    ($size:expr, $chunk_size:expr) => {
630        $crate::dma_descriptors_chunk_size!($size, $size, $chunk_size)
631    };
632}
633
634/// Convenience macro to create circular DMA descriptors with specific chunk
635/// size
636///
637/// ## Usage
638/// ```rust,no_run
639#[doc = crate::before_snippet!()]
640/// use esp_hal::dma_circular_descriptors_chunk_size;
641///
642/// // Create RX and TX descriptors for transactions up to 32000 bytes - passing
643/// // only one parameter assumes RX and TX are the same size.
644/// let (rx_descriptors, tx_descriptors) =
645///     dma_circular_descriptors_chunk_size!(32000, 32000, 4032);
646/// # Ok(())
647/// # }
648/// ```
649#[macro_export]
650macro_rules! dma_circular_descriptors_chunk_size {
651    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{ $crate::dma_descriptors_impl!($rx_size, $tx_size, $chunk_size, is_circular = true) }};
652
653    ($size:expr, $chunk_size:expr) => {
654        $crate::dma_circular_descriptors_chunk_size!($size, $size, $chunk_size)
655    };
656}
657
658#[doc(hidden)]
659#[macro_export]
660macro_rules! dma_buffers_impl {
661    ($rx_size:expr, $tx_size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
662        let rx = $crate::dma_buffers_impl!($rx_size, $chunk_size, is_circular = $circular);
663        let tx = $crate::dma_buffers_impl!($tx_size, $chunk_size, is_circular = $circular);
664        (rx.0, rx.1, tx.0, tx.1)
665    }};
666
667    ($size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
668        $crate::declare_aligned_dma_buffer!(BUFFER, $size);
669
670        unsafe {
671            (
672                $crate::dma::as_mut_byte_array!(BUFFER, $size),
673                $crate::dma_descriptors_impl!($size, $chunk_size, is_circular = $circular),
674            )
675        }
676    }};
677
678    ($size:expr, is_circular = $circular:tt) => {
679        $crate::dma_buffers_impl!(
680            $size,
681            $crate::dma::BurstConfig::DEFAULT.max_compatible_chunk_size(),
682            is_circular = $circular
683        );
684    };
685}
686
687#[doc(hidden)]
688#[macro_export]
689macro_rules! dma_descriptors_impl {
690    ($rx_size:expr, $tx_size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
691        let rx = $crate::dma_descriptors_impl!($rx_size, $chunk_size, is_circular = $circular);
692        let tx = $crate::dma_descriptors_impl!($tx_size, $chunk_size, is_circular = $circular);
693        (rx, tx)
694    }};
695
696    ($size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
697        const COUNT: usize =
698            $crate::dma_descriptor_count!($size, $chunk_size, is_circular = $circular);
699
700        static mut DESCRIPTORS: [$crate::dma::DmaDescriptor; COUNT] =
701            [$crate::dma::DmaDescriptor::EMPTY; COUNT];
702
703        unsafe { &mut DESCRIPTORS }
704    }};
705}
706
707#[doc(hidden)]
708#[macro_export]
709macro_rules! dma_descriptor_count {
710    ($size:expr, $chunk_size:expr, is_circular = $is_circular:tt) => {{
711        const {
712            ::core::assert!($chunk_size <= 4095, "chunk size must be <= 4095");
713            ::core::assert!($chunk_size > 0, "chunk size must be > 0");
714        }
715
716        // We allow 0 in the macros as a "not needed" case.
717        if $size == 0 {
718            0
719        } else {
720            $crate::dma::descriptor_count($size, $chunk_size, $is_circular)
721        }
722    }};
723}
724
725/// Convenience macro to create a DmaTxBuf from buffer size. The buffer and
726/// descriptors are statically allocated and used to create the `DmaTxBuf`.
727///
728/// ## Usage
729/// ```rust,no_run
730#[doc = crate::before_snippet!()]
731/// use esp_hal::dma_tx_buffer;
732///
733/// let tx_buf = dma_tx_buffer!(32000);
734/// # Ok(())
735/// # }
736/// ```
737#[macro_export]
738macro_rules! dma_tx_buffer {
739    ($tx_size:expr) => {{
740        let (tx_buffer, tx_descriptors) = $crate::dma_buffers_impl!($tx_size, is_circular = false);
741
742        $crate::dma::DmaTxBuf::new(tx_descriptors, tx_buffer)
743    }};
744}
745
746/// Convenience macro to create a [DmaRxStreamBuf] from buffer size and
747/// optional chunk size (uses max if unspecified).
748/// The buffer and descriptors are statically allocated and
749/// used to create the [DmaRxStreamBuf].
750///
751/// Smaller chunk sizes are recommended for lower latency.
752///
753/// ## Usage
754/// ```rust,no_run
755#[doc = crate::before_snippet!()]
756/// use esp_hal::dma_rx_stream_buffer;
757///
758/// let buf = dma_rx_stream_buffer!(32000);
759/// let buf = dma_rx_stream_buffer!(32000, 1000);
760/// # Ok(())
761/// # }
762/// ```
763#[macro_export]
764macro_rules! dma_rx_stream_buffer {
765    ($rx_size:expr) => {
766        $crate::dma_rx_stream_buffer!($rx_size, 4095)
767    };
768    ($rx_size:expr, $chunk_size:expr) => {{
769        let (buffer, descriptors) =
770            $crate::dma_buffers_impl!($rx_size, $chunk_size, is_circular = false);
771
772        $crate::dma::DmaRxStreamBuf::new(descriptors, buffer).unwrap()
773    }};
774}
775
776/// Convenience macro to create a [DmaLoopBuf] from a buffer size.
777///
778/// ## Usage
779/// ```rust,no_run
780#[doc = crate::before_snippet!()]
781/// use esp_hal::dma_loop_buffer;
782///
783/// let buf = dma_loop_buffer!(2000);
784/// # Ok(())
785/// # }
786/// ```
787#[macro_export]
788macro_rules! dma_loop_buffer {
789    ($size:expr) => {{
790        const {
791            ::core::assert!($size <= 4095, "size must be <= 4095");
792            ::core::assert!($size > 0, "size must be > 0");
793        }
794
795        let (buffer, descriptors) = $crate::dma_buffers_impl!($size, $size, is_circular = false);
796
797        $crate::dma::DmaLoopBuf::new(&mut descriptors[0], buffer).unwrap()
798    }};
799}
800
801/// DMA Errors
802#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
803#[cfg_attr(feature = "defmt", derive(defmt::Format))]
804pub enum DmaError {
805    /// The alignment of data is invalid
806    InvalidAlignment(DmaAlignmentError),
807    /// More descriptors are needed for the buffer size
808    OutOfDescriptors,
    /// The DMA rejected the descriptor configuration. This could be because
    /// the source address of the data is not in RAM. Ensure your source data
    /// is in a valid address space.
812    DescriptorError,
813    /// The available free buffer is less than the amount of data to push
814    Overflow,
815    /// The given buffer is too small
816    BufferTooSmall,
817    /// Descriptors or buffers are not located in a supported memory region
818    UnsupportedMemoryRegion,
819    /// Invalid DMA chunk size
820    InvalidChunkSize,
    /// Indicates that a circular DMA transaction was written to or read from
    /// too late, and the DMA buffers have already overrun / underrun.
823    Late,
824}
825
826impl From<DmaBufError> for DmaError {
827    fn from(error: DmaBufError) -> Self {
828        // FIXME: use nested errors
829        match error {
830            DmaBufError::InsufficientDescriptors => DmaError::OutOfDescriptors,
831            DmaBufError::UnsupportedMemoryRegion => DmaError::UnsupportedMemoryRegion,
832            DmaBufError::InvalidAlignment(err) => DmaError::InvalidAlignment(err),
833            DmaBufError::InvalidChunkSize => DmaError::InvalidChunkSize,
834            DmaBufError::BufferTooSmall => DmaError::BufferTooSmall,
835        }
836    }
837}
838
839/// DMA Priorities
840#[cfg(gdma)]
841#[derive(Debug, Clone, Copy, PartialEq)]
842#[cfg_attr(feature = "defmt", derive(defmt::Format))]
843pub enum DmaPriority {
844    /// The lowest priority level (Priority 0).
845    Priority0 = 0,
846    /// Priority level 1.
847    Priority1 = 1,
848    /// Priority level 2.
849    Priority2 = 2,
850    /// Priority level 3.
851    Priority3 = 3,
852    /// Priority level 4.
853    Priority4 = 4,
854    /// Priority level 5.
855    Priority5 = 5,
856    /// Priority level 6.
857    Priority6 = 6,
858    /// Priority level 7.
859    Priority7 = 7,
860    /// Priority level 8.
861    Priority8 = 8,
862    /// The highest priority level (Priority 9).
863    Priority9 = 9,
864}
865
866/// DMA Priorities
867/// The values need to match the TRM
868#[cfg(pdma)]
869#[derive(Debug, Clone, Copy, PartialEq)]
870#[cfg_attr(feature = "defmt", derive(defmt::Format))]
871pub enum DmaPriority {
872    /// The lowest priority level (Priority 0).
873    Priority0 = 0,
874}
875
876/// DMA capable peripherals
877/// The values need to match the TRM
878#[derive(Debug, Clone, Copy, PartialEq)]
879#[cfg_attr(feature = "defmt", derive(defmt::Format))]
880#[doc(hidden)]
881pub enum DmaPeripheral {
882    Spi2      = 0,
883    #[cfg(any(pdma, esp32s3))]
884    Spi3      = 1,
885    #[cfg(any(esp32c2, esp32c6, esp32h2))]
886    Mem2Mem1  = 1,
887    #[cfg(any(esp32c3, esp32c6, esp32h2, esp32s3))]
888    Uhci0     = 2,
889    #[cfg(any(esp32, esp32s2, esp32c3, esp32c6, esp32h2, esp32s3))]
890    I2s0      = 3,
891    #[cfg(any(esp32, esp32s3))]
892    I2s1      = 4,
893    #[cfg(any(esp32c6, esp32h2))]
894    Mem2Mem4  = 4,
895    #[cfg(esp32s3)]
896    LcdCam    = 5,
897    #[cfg(any(esp32c6, esp32h2))]
898    Mem2Mem5  = 5,
899    #[cfg(not(esp32c2))]
900    Aes       = 6,
901    #[cfg(any(esp32s2, gdma))]
902    Sha       = 7,
903    #[cfg(any(esp32c3, esp32c6, esp32h2, esp32s3))]
904    Adc       = 8,
905    #[cfg(esp32s3)]
906    Rmt       = 9,
907    #[cfg(parl_io)]
908    ParlIo    = 9,
909    #[cfg(any(esp32c6, esp32h2))]
910    Mem2Mem10 = 10,
911    #[cfg(any(esp32c6, esp32h2))]
912    Mem2Mem11 = 11,
913    #[cfg(any(esp32c6, esp32h2))]
914    Mem2Mem12 = 12,
915    #[cfg(any(esp32c6, esp32h2))]
916    Mem2Mem13 = 13,
917    #[cfg(any(esp32c6, esp32h2))]
918    Mem2Mem14 = 14,
919    #[cfg(any(esp32c6, esp32h2))]
920    Mem2Mem15 = 15,
921}
922
923/// The owner bit of a DMA descriptor.
924#[derive(PartialEq, PartialOrd)]
925pub enum Owner {
926    /// Owned by CPU
927    Cpu = 0,
928    /// Owned by DMA
929    Dma = 1,
930}
931
932impl From<u32> for Owner {
933    fn from(value: u32) -> Self {
934        match value {
935            0 => Owner::Cpu,
936            _ => Owner::Dma,
937        }
938    }
939}
940
941#[doc(hidden)]
942#[instability::unstable]
943pub trait DmaEligible {
944    /// The most specific DMA channel type usable by this peripheral.
945    type Dma: DmaChannel;
946
947    fn dma_peripheral(&self) -> DmaPeripheral;
948}
949
950#[doc(hidden)]
951#[derive(Debug)]
952pub struct DescriptorChain {
953    pub(crate) descriptors: &'static mut [DmaDescriptor],
954    chunk_size: usize,
955}
956
957impl DescriptorChain {
958    pub fn new(descriptors: &'static mut [DmaDescriptor]) -> Self {
959        Self::new_with_chunk_size(descriptors, CHUNK_SIZE)
960    }
961
962    pub fn new_with_chunk_size(
963        descriptors: &'static mut [DmaDescriptor],
964        chunk_size: usize,
965    ) -> Self {
966        Self {
967            descriptors,
968            chunk_size,
969        }
970    }
971
972    pub fn first_mut(&mut self) -> *mut DmaDescriptor {
973        self.descriptors.as_mut_ptr()
974    }
975
976    pub fn first(&self) -> *const DmaDescriptor {
977        self.descriptors.as_ptr()
978    }
979
980    pub fn last_mut(&mut self) -> *mut DmaDescriptor {
981        self.descriptors.last_mut().unwrap()
982    }
983
984    pub fn last(&self) -> *const DmaDescriptor {
985        self.descriptors.last().unwrap()
986    }
987
988    #[allow(clippy::not_unsafe_ptr_arg_deref)]
989    pub fn fill_for_rx(
990        &mut self,
991        circular: bool,
992        data: *mut u8,
993        len: usize,
994    ) -> Result<(), DmaError> {
995        self.fill(circular, data, len, |desc, _| {
996            desc.reset_for_rx();
997            // Descriptor::size has been set up by `fill`
998        })
999    }
1000
1001    #[allow(clippy::not_unsafe_ptr_arg_deref)]
1002    pub fn fill_for_tx(
1003        &mut self,
1004        is_circular: bool,
1005        data: *const u8,
1006        len: usize,
1007    ) -> Result<(), DmaError> {
1008        self.fill(is_circular, data.cast_mut(), len, |desc, chunk_size| {
1009            // In circular mode, we set the `suc_eof` bit for every buffer we send. We use
1010            // this for I2S to track progress of a transfer by checking OUTLINK_DSCR_ADDR.
1011            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
1012            // the end of the transfer.
1013            desc.reset_for_tx(desc.next.is_null() || is_circular);
1014            desc.set_length(chunk_size); // align to 32 bits?
1015        })
1016    }
1017
1018    #[allow(clippy::not_unsafe_ptr_arg_deref)]
1019    pub fn fill(
1020        &mut self,
1021        circular: bool,
1022        data: *mut u8,
1023        len: usize,
1024        prepare_descriptor: impl Fn(&mut DmaDescriptor, usize),
1025    ) -> Result<(), DmaError> {
1026        if !is_valid_ram_address(self.first() as usize)
1027            || !is_valid_ram_address(self.last() as usize)
1028            || !is_valid_memory_address(data as usize)
1029            || !is_valid_memory_address(unsafe { data.add(len) } as usize)
1030        {
1031            return Err(DmaError::UnsupportedMemoryRegion);
1032        }
1033
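        // Circular transfers are set up with at least 3 descriptors (see
        // `descriptor_count`). For small circular buffers we shrink the chunk size
        // so that the buffer is split across three descriptors instead of one.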
1034        let max_chunk_size = if circular && len <= self.chunk_size * 2 {
1035            if len <= 3 {
1036                return Err(DmaError::BufferTooSmall);
1037            }
1038            len / 3 + len % 3
1039        } else {
1040            self.chunk_size
1041        };
1042
1043        DescriptorSet::set_up_buffer_ptrs(
1044            unsafe { core::slice::from_raw_parts_mut(data, len) },
1045            self.descriptors,
1046            max_chunk_size,
1047            circular,
1048        )?;
1049        DescriptorSet::set_up_descriptors(
1050            self.descriptors,
1051            len,
1052            max_chunk_size,
1053            circular,
1054            prepare_descriptor,
1055        )?;
1056
1057        Ok(())
1058    }
1059}
1060
1061/// Computes the number of descriptors required for a given buffer size with
1062/// a given chunk size.
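///
/// For example (illustrative sizes; note that circular transfers always use at
/// least 3 descriptors):
///
/// ```rust, no_run
/// use esp_hal::dma::descriptor_count;
///
/// // A 10000 byte linear buffer split into 4092 byte chunks needs 3 descriptors.
/// assert_eq!(descriptor_count(10_000, 4092, false), 3);
/// // A small circular buffer still gets 3 descriptors.
/// assert_eq!(descriptor_count(4092, 4092, true), 3);
/// ```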
1063pub const fn descriptor_count(buffer_size: usize, chunk_size: usize, is_circular: bool) -> usize {
1064    if is_circular && buffer_size <= chunk_size * 2 {
1065        return 3;
1066    }
1067
1068    if buffer_size < chunk_size {
1069        // At least one descriptor is always required.
1070        return 1;
1071    }
1072
1073    buffer_size.div_ceil(chunk_size)
1074}
1075
1076#[derive(Debug)]
1077#[cfg_attr(feature = "defmt", derive(defmt::Format))]
1078struct DescriptorSet<'a> {
1079    descriptors: &'a mut [DmaDescriptor],
1080}
1081
1082impl<'a> DescriptorSet<'a> {
    /// Creates a new `DescriptorSet` from a slice of descriptors, verifying that
    /// the descriptors are located in internal RAM.
1085    fn new(descriptors: &'a mut [DmaDescriptor]) -> Result<Self, DmaBufError> {
1086        if !is_slice_in_dram(descriptors) {
1087            return Err(DmaBufError::UnsupportedMemoryRegion);
1088        }
1089
1090        descriptors.fill(DmaDescriptor::EMPTY);
1091
1092        Ok(unsafe { Self::new_unchecked(descriptors) })
1093    }
1094
    /// Creates a new `DescriptorSet` from a slice of descriptors without
    /// checking where the descriptors are located.
1097    ///
1098    /// # Safety
1099    ///
1100    /// The caller must ensure that the descriptors are located in a supported
1101    /// memory region.
1102    unsafe fn new_unchecked(descriptors: &'a mut [DmaDescriptor]) -> Self {
1103        Self { descriptors }
1104    }
1105
1106    /// Consumes the `DescriptorSet` and returns the inner slice of descriptors.
1107    fn into_inner(self) -> &'a mut [DmaDescriptor] {
1108        self.descriptors
1109    }
1110
1111    /// Returns a pointer to the first descriptor in the chain.
1112    fn head(&mut self) -> *mut DmaDescriptor {
1113        self.descriptors.as_mut_ptr()
1114    }
1115
1116    /// Returns an iterator over the linked descriptors.
1117    fn linked_iter(&self) -> impl Iterator<Item = &DmaDescriptor> {
1118        let mut was_last = false;
1119        self.descriptors.iter().take_while(move |d| {
1120            if was_last {
1121                false
1122            } else {
1123                was_last = d.next.is_null();
1124                true
1125            }
1126        })
1127    }
1128
1129    /// Returns an iterator over the linked descriptors.
1130    fn linked_iter_mut(&mut self) -> impl Iterator<Item = &mut DmaDescriptor> + use<'_> {
1131        let mut was_last = false;
1132        self.descriptors.iter_mut().take_while(move |d| {
1133            if was_last {
1134                false
1135            } else {
1136                was_last = d.next.is_null();
1137                true
1138            }
1139        })
1140    }
1141
1142    /// Associate each descriptor with a chunk of the buffer.
1143    ///
1144    /// This function checks the alignment and location of the buffer.
1145    ///
1146    /// See [`Self::set_up_buffer_ptrs`] for more details.
1147    fn link_with_buffer(
1148        &mut self,
1149        buffer: &mut [u8],
1150        chunk_size: usize,
1151    ) -> Result<(), DmaBufError> {
1152        Self::set_up_buffer_ptrs(buffer, self.descriptors, chunk_size, false)
1153    }
1154
1155    /// Prepares descriptors for transferring `len` bytes of data.
1156    ///
1157    /// See [`Self::set_up_descriptors`] for more details.
1158    fn set_length(
1159        &mut self,
1160        len: usize,
1161        chunk_size: usize,
1162        prepare: fn(&mut DmaDescriptor, usize),
1163    ) -> Result<(), DmaBufError> {
1164        Self::set_up_descriptors(self.descriptors, len, chunk_size, false, prepare)
1165    }
1166
1167    /// Prepares descriptors for reading `len` bytes of data.
1168    ///
1169    /// See [`Self::set_up_descriptors`] for more details.
1170    fn set_rx_length(&mut self, len: usize, chunk_size: usize) -> Result<(), DmaBufError> {
1171        self.set_length(len, chunk_size, |desc, chunk_size| {
1172            desc.set_size(chunk_size);
1173        })
1174    }
1175
1176    /// Prepares descriptors for writing `len` bytes of data.
1177    ///
1178    /// See [`Self::set_up_descriptors`] for more details.
1179    fn set_tx_length(&mut self, len: usize, chunk_size: usize) -> Result<(), DmaBufError> {
1180        self.set_length(len, chunk_size, |desc, chunk_size| {
1181            desc.set_length(chunk_size);
1182        })
1183    }
1184
1185    /// Returns a slice of descriptors that can cover a buffer of length `len`.
1186    fn descriptors_for_buffer_len(
1187        descriptors: &mut [DmaDescriptor],
1188        len: usize,
1189        chunk_size: usize,
1190        is_circular: bool,
1191    ) -> Result<&mut [DmaDescriptor], DmaBufError> {
1192        // First, pick enough descriptors to cover the buffer.
1193        let required_descriptors = descriptor_count(len, chunk_size, is_circular);
1194        if descriptors.len() < required_descriptors {
1195            return Err(DmaBufError::InsufficientDescriptors);
1196        }
1197        Ok(&mut descriptors[..required_descriptors])
1198    }
1199
1200    /// Prepares descriptors for transferring `len` bytes of data.
1201    ///
1202    /// `Prepare` means setting up the descriptor lengths and flags, as well as
1203    /// linking the descriptors into a linked list.
1204    ///
1205    /// The actual descriptor setup is done in a callback, because different
1206    /// transfer directions require different descriptor setup.
1207    fn set_up_descriptors(
1208        descriptors: &mut [DmaDescriptor],
1209        len: usize,
1210        chunk_size: usize,
1211        is_circular: bool,
1212        prepare: impl Fn(&mut DmaDescriptor, usize),
1213    ) -> Result<(), DmaBufError> {
1214        let descriptors =
1215            Self::descriptors_for_buffer_len(descriptors, len, chunk_size, is_circular)?;
1216
1217        // Link up the descriptors.
1218        let mut next = if is_circular {
1219            descriptors.as_mut_ptr()
1220        } else {
1221            core::ptr::null_mut()
1222        };
1223        for desc in descriptors.iter_mut().rev() {
1224            desc.next = next;
1225            next = desc;
1226        }
1227
1228        // Prepare each descriptor.
1229        let mut remaining_length = len;
1230        for desc in descriptors.iter_mut() {
1231            let chunk_size = min(chunk_size, remaining_length);
1232            prepare(desc, chunk_size);
1233            remaining_length -= chunk_size;
1234        }
1235        debug_assert_eq!(remaining_length, 0);
1236
1237        Ok(())
1238    }
1239
1240    /// Associate each descriptor with a chunk of the buffer.
1241    ///
1242    /// This function does not check the alignment and location of the buffer,
1243    /// because some callers may not have enough information currently.
1244    ///
1245    /// This function does not set up descriptor lengths or states.
1246    ///
1247    /// This function also does not link descriptors into a linked list. This is
1248    /// intentional, because it is done in `set_up_descriptors` to support
1249    /// changing length without requiring buffer pointers to be set
1250    /// repeatedly.
1251    fn set_up_buffer_ptrs(
1252        buffer: &mut [u8],
1253        descriptors: &mut [DmaDescriptor],
1254        chunk_size: usize,
1255        is_circular: bool,
1256    ) -> Result<(), DmaBufError> {
1257        let descriptors =
1258            Self::descriptors_for_buffer_len(descriptors, buffer.len(), chunk_size, is_circular)?;
1259
1260        let chunks = buffer.chunks_mut(chunk_size);
1261        for (desc, chunk) in descriptors.iter_mut().zip(chunks) {
1262            desc.set_size(chunk.len());
1263            desc.buffer = chunk.as_mut_ptr();
1264        }
1265
1266        Ok(())
1267    }
1268}
1269
1270/// Block size for transfers to/from PSRAM
1271#[cfg(psram_dma)]
1272#[derive(Copy, Clone, Debug, PartialEq)]
1273pub enum DmaExtMemBKSize {
1274    /// External memory block size of 16 bytes.
1275    Size16 = 0,
1276    /// External memory block size of 32 bytes.
1277    Size32 = 1,
1278    /// External memory block size of 64 bytes.
1279    Size64 = 2,
1280}
1281
1282#[cfg(psram_dma)]
1283impl From<ExternalBurstConfig> for DmaExtMemBKSize {
1284    fn from(size: ExternalBurstConfig) -> Self {
1285        match size {
1286            ExternalBurstConfig::Size16 => DmaExtMemBKSize::Size16,
1287            ExternalBurstConfig::Size32 => DmaExtMemBKSize::Size32,
1288            ExternalBurstConfig::Size64 => DmaExtMemBKSize::Size64,
1289        }
1290    }
1291}
1292
1293pub(crate) struct TxCircularState {
1294    write_offset: usize,
1295    write_descr_ptr: *mut DmaDescriptor,
1296    pub(crate) available: usize,
1297    last_seen_handled_descriptor_ptr: *mut DmaDescriptor,
1298    buffer_start: *const u8,
1299    buffer_len: usize,
1300
1301    first_desc_ptr: *mut DmaDescriptor,
1302}
1303
1304impl TxCircularState {
1305    pub(crate) fn new(chain: &mut DescriptorChain) -> Self {
1306        Self {
1307            write_offset: 0,
1308            write_descr_ptr: chain.first_mut(),
1309            available: 0,
1310            last_seen_handled_descriptor_ptr: chain.first_mut(),
1311            buffer_start: chain.descriptors[0].buffer as _,
1312            buffer_len: chain.descriptors.iter().map(|d| d.len()).sum(),
1313
1314            first_desc_ptr: chain.first_mut(),
1315        }
1316    }
1317
1318    pub(crate) fn update<Dm, CH>(&mut self, channel: &ChannelTx<Dm, CH>) -> Result<(), DmaError>
1319    where
1320        Dm: DriverMode,
1321        CH: DmaTxChannel,
1322    {
1323        if channel
1324            .pending_out_interrupts()
1325            .contains(DmaTxInterrupt::Eof)
1326        {
1327            channel.clear_out(DmaTxInterrupt::Eof);
1328
1329            // check if all descriptors are owned by CPU - this indicates we failed to push
1330            // data fast enough in future we can enable `check_owner` and check
1331            // the interrupt instead
1332            let mut current = self.last_seen_handled_descriptor_ptr;
1333            loop {
1334                let descr = unsafe { current.read_volatile() };
1335                if descr.owner() == Owner::Cpu {
1336                    current = descr.next;
1337                } else {
1338                    break;
1339                }
1340
1341                if core::ptr::eq(current, self.last_seen_handled_descriptor_ptr) {
1342                    return Err(DmaError::Late);
1343                }
1344            }
1345
1346            let descr_address = channel.last_out_dscr_address() as *mut DmaDescriptor;
1347
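            // Every descriptor between the one we last accounted for and the one
            // the hardware last reported has been sent out, so its bytes count as
            // available for new data again.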
1348            let mut ptr = self.last_seen_handled_descriptor_ptr;
1349            if descr_address >= self.last_seen_handled_descriptor_ptr {
1350                unsafe {
1351                    while ptr < descr_address {
1352                        let dw0 = ptr.read_volatile();
1353                        self.available += dw0.len();
1354                        ptr = ptr.offset(1);
1355                    }
1356                }
1357            } else {
1358                unsafe {
1359                    while !((*ptr).next.is_null()
1360                        || core::ptr::eq((*ptr).next, self.first_desc_ptr))
1361                    {
1362                        let dw0 = ptr.read_volatile();
1363                        self.available += dw0.len();
1364                        ptr = ptr.offset(1);
1365                    }
1366
1367                    // add bytes pointed to by the last descriptor
1368                    let dw0 = ptr.read_volatile();
1369                    self.available += dw0.len();
1370
1371                    // in circular mode we need to honor the now available bytes at start
1372                    if core::ptr::eq((*ptr).next, self.first_desc_ptr) {
1373                        ptr = self.first_desc_ptr;
1374                        while ptr < descr_address {
1375                            let dw0 = ptr.read_volatile();
1376                            self.available += dw0.len();
1377                            ptr = ptr.offset(1);
1378                        }
1379                    }
1380                }
1381            }
1382
1383            if self.available >= self.buffer_len {
1384                unsafe {
1385                    let dw0 = self.write_descr_ptr.read_volatile();
1386                    let segment_len = dw0.len();
1387                    let next_descriptor = dw0.next;
1388                    self.available -= segment_len;
1389                    self.write_offset = (self.write_offset + segment_len) % self.buffer_len;
1390
1391                    self.write_descr_ptr = if next_descriptor.is_null() {
1392                        self.first_desc_ptr
1393                    } else {
1394                        next_descriptor
1395                    }
1396                }
1397            }
1398
1399            self.last_seen_handled_descriptor_ptr = descr_address;
1400        }
1401
1402        Ok(())
1403    }
1404
1405    pub(crate) fn push(&mut self, data: &[u8]) -> Result<usize, DmaError> {
1406        let avail = self.available;
1407
1408        if avail < data.len() {
1409            return Err(DmaError::Overflow);
1410        }
1411
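        // `push_with` may write less than requested when the write wraps around
        // the end of the buffer, so keep pushing until all of `data` is queued.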
1412        let mut remaining = data.len();
1413        let mut offset = 0;
1414        while self.available >= remaining && remaining > 0 {
1415            let written = self.push_with(|buffer| {
1416                let len = usize::min(buffer.len(), data.len() - offset);
1417                buffer[..len].copy_from_slice(&data[offset..][..len]);
1418                len
1419            })?;
1420            offset += written;
1421            remaining -= written;
1422        }
1423
1424        Ok(data.len())
1425    }
1426
1427    pub(crate) fn push_with(
1428        &mut self,
1429        f: impl FnOnce(&mut [u8]) -> usize,
1430    ) -> Result<usize, DmaError> {
1431        // this might write less than available in case of a wrap around
1432        // caller needs to check and write the remaining part
1433        let written = unsafe {
1434            let dst = self.buffer_start.add(self.write_offset).cast_mut();
1435            let block_size = usize::min(self.available, self.buffer_len - self.write_offset);
1436            let buffer = core::slice::from_raw_parts_mut(dst, block_size);
1437            f(buffer)
1438        };
1439
1440        let mut forward = written;
1441        loop {
1442            unsafe {
1443                let mut descr = self.write_descr_ptr.read_volatile();
1444                descr.set_owner(Owner::Dma);
1445                self.write_descr_ptr.write_volatile(descr);
1446
1447                let segment_len = descr.len();
1448                self.write_descr_ptr = if descr.next.is_null() {
1449                    self.first_desc_ptr
1450                } else {
1451                    descr.next
1452                };
1453
1454                if forward <= segment_len {
1455                    break;
1456                }
1457
1458                forward -= segment_len;
1459            }
1460        }
1461
1462        self.write_offset = (self.write_offset + written) % self.buffer_len;
1463        self.available -= written;
1464
1465        Ok(written)
1466    }
1467}
1468
1469pub(crate) struct RxCircularState {
1470    read_descr_ptr: *mut DmaDescriptor,
1471    pub(crate) available: usize,
1472    last_seen_handled_descriptor_ptr: *mut DmaDescriptor,
1473    last_descr_ptr: *mut DmaDescriptor,
1474}
1475
1476impl RxCircularState {
1477    pub(crate) fn new(chain: &mut DescriptorChain) -> Self {
1478        Self {
1479            read_descr_ptr: chain.first_mut(),
1480            available: 0,
1481            last_seen_handled_descriptor_ptr: core::ptr::null_mut(),
1482            last_descr_ptr: chain.last_mut(),
1483        }
1484    }
1485
1486    pub(crate) fn update(&mut self) -> Result<(), DmaError> {
1487        if self.last_seen_handled_descriptor_ptr.is_null() {
1488            // initially start at last descriptor (so that next will be the first
1489            // descriptor)
1490            self.last_seen_handled_descriptor_ptr = self.last_descr_ptr;
1491        }
1492
1493        let mut current_in_descr_ptr =
1494            unsafe { self.last_seen_handled_descriptor_ptr.read_volatile() }.next;
1495        let mut current_in_descr = unsafe { current_in_descr_ptr.read_volatile() };
1496
1497        let last_seen_ptr = self.last_seen_handled_descriptor_ptr;
1498        while current_in_descr.owner() == Owner::Cpu {
1499            self.available += current_in_descr.len();
1500            self.last_seen_handled_descriptor_ptr = current_in_descr_ptr;
1501
1502            current_in_descr_ptr =
1503                unsafe { self.last_seen_handled_descriptor_ptr.read_volatile() }.next;
1504            current_in_descr = unsafe { current_in_descr_ptr.read_volatile() };
1505
1506            if core::ptr::eq(current_in_descr_ptr, last_seen_ptr) {
1507                return Err(DmaError::Late);
1508            }
1509        }
1510
1511        Ok(())
1512    }
1513
1514    pub(crate) fn pop(&mut self, data: &mut [u8]) -> Result<usize, DmaError> {
1515        let len = data.len();
1516        let mut avail = self.available;
1517
1518        if avail > len {
1519            return Err(DmaError::BufferTooSmall);
1520        }
1521
1522        let mut remaining_buffer = data;
1523        let mut descr_ptr = self.read_descr_ptr;
1524
1525        if descr_ptr.is_null() {
1526            return Ok(0);
1527        }
1528
1529        let mut descr = unsafe { descr_ptr.read_volatile() };
1530
1531        while avail > 0 && !remaining_buffer.is_empty() && remaining_buffer.len() >= descr.len() {
1532            unsafe {
1533                let dst = remaining_buffer.as_mut_ptr();
1534                let src = descr.buffer;
1535                let count = descr.len();
1536                core::ptr::copy_nonoverlapping(src, dst, count);
1537
1538                descr.set_owner(Owner::Dma);
1539                descr.set_suc_eof(false);
1540                descr.set_length(0);
1541                descr_ptr.write_volatile(descr);
1542
1543                remaining_buffer = &mut remaining_buffer[count..];
1544                avail -= count;
1545                descr_ptr = descr.next;
1546            }
1547
1548            if descr_ptr.is_null() {
1549                break;
1550            }
1551
1552            descr = unsafe { descr_ptr.read_volatile() };
1553        }
1554
1555        self.read_descr_ptr = descr_ptr;
1556        self.available = avail;
1557        Ok(len - remaining_buffer.len())
1558    }
1559}
1560
1561#[doc(hidden)]
1562macro_rules! impl_dma_eligible {
1563    ([$dma_ch:ident] $name:ident => $dma:ident) => {
1564        impl<'d> $crate::dma::DmaEligible for $crate::peripherals::$name<'d> {
1565            type Dma = $dma_ch<'d>;
1566
1567            fn dma_peripheral(&self) -> $crate::dma::DmaPeripheral {
1568                $crate::dma::DmaPeripheral::$dma
1569            }
1570        }
1571    };
1572
1573    (
1574        $dma_ch:ident {
1575            $($(#[$cfg:meta])? $name:ident => $dma:ident,)*
1576        }
1577    ) => {
1578        $(
1579            $(#[$cfg])?
1580            $crate::dma::impl_dma_eligible!([$dma_ch] $name => $dma);
1581        )*
1582    };
1583}
1584
1585pub(crate) use impl_dma_eligible; // TODO: can be removed as soon as DMA is stabilized
1586
1587/// Helper type to get the DMA (Rx and Tx) channel for a peripheral.
1588pub type PeripheralDmaChannel<T> = <T as DmaEligible>::Dma;
1589/// Helper type to get the DMA Rx channel for a peripheral.
1590pub type PeripheralRxChannel<T> = <PeripheralDmaChannel<T> as DmaChannel>::Rx;
1591/// Helper type to get the DMA Tx channel for a peripheral.
1592pub type PeripheralTxChannel<T> = <PeripheralDmaChannel<T> as DmaChannel>::Tx;
1593
1594#[instability::unstable]
1595pub trait DmaRxChannel: RxRegisterAccess + InterruptAccess<DmaRxInterrupt> {}
1596
1597#[instability::unstable]
1598pub trait DmaTxChannel: TxRegisterAccess + InterruptAccess<DmaTxInterrupt> {}
1599
1600/// A description of a DMA Channel.
1601pub trait DmaChannel: Sized {
1602    /// A description of the RX half of a DMA Channel.
1603    type Rx: DmaRxChannel;
1604
1605    /// A description of the TX half of a DMA Channel.
1606    type Tx: DmaTxChannel;
1607
1608    /// Splits the DMA channel into its RX and TX halves.
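    ///
    /// A minimal usage sketch (`DMA_CH0` is an illustrative GDMA channel):
    ///
    /// ```rust,ignore
    /// let (rx, tx) = peripherals.DMA_CH0.split();
    /// ```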
1609    #[cfg(any(esp32c6, esp32h2, esp32s3))] // TODO relax this to allow splitting on all chips
1610    fn split(self) -> (Self::Rx, Self::Tx) {
1611        // This function is exposed safely on chips that have separate IN and OUT
1612        // interrupt handlers.
1613        // TODO: this includes the P4 as well.
1614        unsafe { self.split_internal(crate::private::Internal) }
1615    }
1616
1617    /// Splits the DMA channel into its RX and TX halves.
1618    ///
1619    /// # Safety
1620    ///
1621    /// This function must only be used if the separate halves are used by the
1622    /// same peripheral.
1623    unsafe fn split_internal(self, _: crate::private::Internal) -> (Self::Rx, Self::Tx);
1624}
1625
1626#[doc(hidden)]
1627pub trait DmaChannelExt: DmaChannel {
1628    fn rx_interrupts() -> impl InterruptAccess<DmaRxInterrupt>;
1629    fn tx_interrupts() -> impl InterruptAccess<DmaTxInterrupt>;
1630}
1631
1632#[diagnostic::on_unimplemented(
1633    message = "The DMA channel isn't suitable for this peripheral",
1634    label = "This DMA channel",
1635    note = "Not all channels are usable with all peripherals"
1636)]
1637#[doc(hidden)]
1638pub trait DmaChannelConvert<DEG> {
1639    fn degrade(self) -> DEG;
1640}
1641
1642impl<DEG: DmaChannel> DmaChannelConvert<DEG> for DEG {
1643    fn degrade(self) -> DEG {
1644        self
1645    }
1646}
1647
1648/// Trait implemented for DMA channels that are compatible with a particular
1649/// peripheral.
1650///
1651/// You can use this in places where a peripheral driver would expect a
1652/// `DmaChannel` implementation.
1653#[cfg_attr(pdma, doc = "")]
1654#[cfg_attr(
1655    pdma,
1656    doc = "Note that using mismatched channels (e.g. trying to use `DMA_SPI2` with SPI3) may compile, but will panic at runtime."
1657)]
1658#[cfg_attr(pdma, doc = "")]
1659/// ## Example
1660///
1661/// The following example demonstrates how this trait can be used to only accept
1662/// types compatible with a specific peripheral.
1663///
1664/// ```rust,no_run
1665#[doc = crate::before_snippet!()]
1666/// use esp_hal::spi::AnySpi;
1667/// use esp_hal::spi::master::{Spi, SpiDma, Config, Instance as SpiInstance};
1668/// use esp_hal::dma::DmaChannelFor;
1669/// use esp_hal::Blocking;
1670///
1671/// fn configures_spi_dma<'d>(
1672///     spi: Spi<'d, Blocking>,
1673///     channel: impl DmaChannelFor<AnySpi<'d>>,
1674/// ) -> SpiDma<'d, Blocking> {
1675///     spi.with_dma(channel)
1676/// }
1677#[cfg_attr(pdma, doc = "let dma_channel = peripherals.DMA_SPI2;")]
1678#[cfg_attr(gdma, doc = "let dma_channel = peripherals.DMA_CH0;")]
1679#[doc = ""]
1680/// let spi = Spi::new(
1681///     peripherals.SPI2,
1682///     Config::default(),
1683/// )?;
1684///
1685/// let spi_dma = configures_spi_dma(spi, dma_channel);
1686/// # Ok(())
1687/// # }
1688/// ```
1689#[allow(private_bounds)]
1690pub trait DmaChannelFor<P: DmaEligible>:
1691    DmaChannel + DmaChannelConvert<PeripheralDmaChannel<P>>
1692{
1693}
1694impl<P, CH> DmaChannelFor<P> for CH
1695where
1696    P: DmaEligible,
1697    CH: DmaChannel + DmaChannelConvert<PeripheralDmaChannel<P>>,
1698{
1699}
1700
1701/// Trait implemented for the RX half of split DMA channels that are compatible
1702/// with a particular peripheral. Accepts complete DMA channels or split halves.
1703///
1704/// This trait is similar in use to [`DmaChannelFor`].
1705///
1706/// You can use this in places where a peripheral driver would expect a
1707/// `DmaRxChannel` implementation.
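///
/// A sketch in the spirit of the [`DmaChannelFor`] example; `accepts_rx_half` is
/// a hypothetical driver function, not an esp-hal API:
///
/// ```rust,ignore
/// fn accepts_rx_half<PER: DmaEligible>(_rx: impl RxChannelFor<PER>) {
///     // A driver would convert `_rx` into the peripheral's concrete RX channel here.
/// }
/// ```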
1708#[allow(private_bounds)]
1709pub trait RxChannelFor<P: DmaEligible>: DmaChannelConvert<PeripheralRxChannel<P>> {}
1710impl<P, RX> RxChannelFor<P> for RX
1711where
1712    P: DmaEligible,
1713    RX: DmaChannelConvert<PeripheralRxChannel<P>>,
1714{
1715}
1716
1717/// Trait implemented for the TX half of split DMA channels that are compatible
1718/// with a particular peripheral. Accepts complete DMA channels or split halves.
1719///
1720/// This trait is similar in use to [`DmaChannelFor`].
1721///
1722/// You can use this in places where a peripheral driver would expect a
1723/// `DmaTxChannel` implementation.
1724#[allow(private_bounds)]
1725pub trait TxChannelFor<PER: DmaEligible>: DmaChannelConvert<PeripheralTxChannel<PER>> {}
1726impl<P, TX> TxChannelFor<P> for TX
1727where
1728    P: DmaEligible,
1729    TX: DmaChannelConvert<PeripheralTxChannel<P>>,
1730{
1731}
1732
1733// NOTE(p4): because the P4 has two different GDMAs, we won't be able to use
1734// `GenericPeripheralGuard`.
1735cfg_if::cfg_if! {
1736    if #[cfg(pdma)] {
1737        type PeripheralGuard = system::GenericPeripheralGuard<{ system::Peripheral::Dma as u8}>;
1738    } else {
1739        type PeripheralGuard = system::GenericPeripheralGuard<{ system::Peripheral::Gdma as u8}>;
1740    }
1741}
1742
1743fn create_guard(_ch: &impl RegisterAccess) -> PeripheralGuard {
1744    // NOTE(p4): this function will read the channel's DMA peripheral from `_ch`
1745    system::GenericPeripheralGuard::new_with(init_dma)
1746}
1747
1748/// DMA receive channel
1749#[non_exhaustive]
1750#[doc(hidden)]
1751pub struct ChannelRx<Dm, CH>
1752where
1753    Dm: DriverMode,
1754    CH: DmaRxChannel,
1755{
1756    pub(crate) rx_impl: CH,
1757    pub(crate) _phantom: PhantomData<Dm>,
1758    pub(crate) _guard: PeripheralGuard,
1759}
1760
1761impl<CH> ChannelRx<Blocking, CH>
1762where
1763    CH: DmaRxChannel,
1764{
1765    /// Creates a new RX channel half.
1766    pub fn new(rx_impl: CH) -> Self {
1767        let _guard = create_guard(&rx_impl);
1768
1769        #[cfg(gdma)]
1770        // clear the mem2mem mode to avoid failed DMA if this
1771        // channel was previously used for a mem2mem transfer.
1772        rx_impl.set_mem2mem_mode(false);
1773
1774        if let Some(interrupt) = rx_impl.peripheral_interrupt() {
1775            for cpu in Cpu::all() {
1776                crate::interrupt::disable(cpu, interrupt);
1777            }
1778        }
1779        rx_impl.set_async(false);
1780
1781        Self {
1782            rx_impl,
1783            _phantom: PhantomData,
1784            _guard,
1785        }
1786    }
1787
1788    /// Converts a blocking channel to an async channel.
1789    pub(crate) fn into_async(mut self) -> ChannelRx<Async, CH> {
1790        if let Some(handler) = self.rx_impl.async_handler() {
1791            self.set_interrupt_handler(handler);
1792        }
1793        self.rx_impl.set_async(true);
1794        ChannelRx {
1795            rx_impl: self.rx_impl,
1796            _phantom: PhantomData,
1797            _guard: self._guard,
1798        }
1799    }
1800
1801    fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
1802        self.unlisten_in(EnumSet::all());
1803        self.clear_in(EnumSet::all());
1804
1805        if let Some(interrupt) = self.rx_impl.peripheral_interrupt() {
1806            for core in crate::system::Cpu::other() {
1807                crate::interrupt::disable(core, interrupt);
1808            }
1809            unsafe { crate::interrupt::bind_interrupt(interrupt, handler.handler()) };
1810            unwrap!(crate::interrupt::enable(interrupt, handler.priority()));
1811        }
1812    }
1813}
1814
1815impl<CH> ChannelRx<Async, CH>
1816where
1817    CH: DmaRxChannel,
1818{
1819    /// Converts an async channel into a blocking channel.
1820    pub(crate) fn into_blocking(self) -> ChannelRx<Blocking, CH> {
1821        if let Some(interrupt) = self.rx_impl.peripheral_interrupt() {
1822            crate::interrupt::disable(Cpu::current(), interrupt);
1823        }
1824        self.rx_impl.set_async(false);
1825        ChannelRx {
1826            rx_impl: self.rx_impl,
1827            _phantom: PhantomData,
1828            _guard: self._guard,
1829        }
1830    }
1831}
1832
1833impl<Dm, CH> ChannelRx<Dm, CH>
1834where
1835    Dm: DriverMode,
1836    CH: DmaRxChannel,
1837{
1838    /// Configure the channel priority.
1839    #[cfg(gdma)]
1840    pub fn set_priority(&mut self, priority: DmaPriority) {
1841        self.rx_impl.set_priority(priority);
1842    }
1843
1844    fn do_prepare(
1845        &mut self,
1846        preparation: Preparation,
1847        peri: DmaPeripheral,
1848    ) -> Result<(), DmaError> {
1849        debug_assert_eq!(preparation.direction, TransferDirection::In);
1850
1851        debug!("Preparing RX transfer {:?}", preparation);
1852        trace!("First descriptor {:?}", unsafe { &*preparation.start });
1853
1854        #[cfg(psram_dma)]
1855        if preparation.accesses_psram && !self.rx_impl.can_access_psram() {
1856            return Err(DmaError::UnsupportedMemoryRegion);
1857        }
1858
1859        #[cfg(psram_dma)]
1860        self.rx_impl
1861            .set_ext_mem_block_size(preparation.burst_transfer.external_memory.into());
1862        self.rx_impl.set_burst_mode(preparation.burst_transfer);
1863        self.rx_impl.set_descr_burst_mode(true);
1864        self.rx_impl.set_check_owner(preparation.check_owner);
1865
1866        compiler_fence(core::sync::atomic::Ordering::SeqCst);
1867
1868        self.rx_impl.clear_all();
1869        self.rx_impl.reset();
1870        self.rx_impl.set_link_addr(preparation.start as u32);
1871        self.rx_impl.set_peripheral(peri as u8);
1872
1873        Ok(())
1874    }
1875}
1876
1877impl<Dm, CH> crate::private::Sealed for ChannelRx<Dm, CH>
1878where
1879    Dm: DriverMode,
1880    CH: DmaRxChannel,
1881{
1882}
1883
1884#[allow(unused)]
1885impl<Dm, CH> ChannelRx<Dm, CH>
1886where
1887    Dm: DriverMode,
1888    CH: DmaRxChannel,
1889{
1890    // TODO: used by I2S, which should be rewritten to use the Preparation-based
1891    // API.
1892    pub(crate) unsafe fn prepare_transfer_without_start(
1893        &mut self,
1894        peri: DmaPeripheral,
1895        chain: &DescriptorChain,
1896    ) -> Result<(), DmaError> {
1897        // We check each descriptor buffer that points to PSRAM for
1898        // alignment and invalidate the cache for that buffer.
1899        // NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does
1900        // not. TRM section 3.4.9
1901        // Note that DmaBuffer implementations are required to do this for us.
1902        cfg_if::cfg_if! {
1903            if #[cfg(psram_dma)] {
1904                let mut uses_psram = false;
1905                let psram_range = crate::soc::psram_range();
1906                for des in chain.descriptors.iter() {
1907                    // we are forcing the DMA alignment to the cache line size
1908                    // required when we are using dcache
1909                    let alignment = unsafe { crate::soc::cache_get_dcache_line_size() } as usize;
1910                    if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) {
1911                        uses_psram = true;
1912                        // both the size and address of the buffer must be aligned
1913                        if des.buffer as usize % alignment != 0 {
1914                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address));
1915                        }
1916                        if des.size() % alignment != 0 {
1917                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Size));
1918                        }
1919                        unsafe {crate::soc::cache_invalidate_addr(des.buffer as u32, des.size() as u32); }
1920                    }
1921                }
1922            }
1923        }
1924
1925        let preparation = Preparation {
1926            start: chain.first().cast_mut(),
1927            direction: TransferDirection::In,
1928            #[cfg(psram_dma)]
1929            accesses_psram: uses_psram,
1930            burst_transfer: BurstConfig::default(),
1931            check_owner: Some(false),
1932            auto_write_back: true,
1933        };
1934        self.do_prepare(preparation, peri)
1935    }
1936
1937    pub(crate) unsafe fn prepare_transfer<BUF: DmaRxBuffer>(
1938        &mut self,
1939        peri: DmaPeripheral,
1940        buffer: &mut BUF,
1941    ) -> Result<(), DmaError> {
1942        let preparation = buffer.prepare();
1943
1944        self.do_prepare(preparation, peri)
1945    }
1946
1947    pub(crate) fn start_transfer(&mut self) -> Result<(), DmaError> {
1948        self.rx_impl.start();
1949
1950        if self
1951            .pending_in_interrupts()
1952            .contains(DmaRxInterrupt::DescriptorError)
1953        {
1954            Err(DmaError::DescriptorError)
1955        } else {
1956            Ok(())
1957        }
1958    }
1959
1960    pub(crate) fn stop_transfer(&mut self) {
1961        self.rx_impl.stop()
1962    }
1963
1964    #[cfg(gdma)]
1965    pub(crate) fn set_mem2mem_mode(&mut self, value: bool) {
1966        self.rx_impl.set_mem2mem_mode(value);
1967    }
1968
1969    pub(crate) fn listen_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>) {
1970        self.rx_impl.listen(interrupts);
1971    }
1972
1973    pub(crate) fn unlisten_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>) {
1974        self.rx_impl.unlisten(interrupts);
1975    }
1976
1977    pub(crate) fn is_listening_in(&self) -> EnumSet<DmaRxInterrupt> {
1978        self.rx_impl.is_listening()
1979    }
1980
1981    pub(crate) fn clear_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>) {
1982        self.rx_impl.clear(interrupts);
1983    }
1984
1985    pub(crate) fn pending_in_interrupts(&self) -> EnumSet<DmaRxInterrupt> {
1986        self.rx_impl.pending_interrupts()
1987    }
1988
1989    pub(crate) fn is_done(&self) -> bool {
1990        self.pending_in_interrupts()
1991            .contains(DmaRxInterrupt::SuccessfulEof)
1992    }
1993
1994    pub(crate) fn clear_interrupts(&self) {
1995        self.rx_impl.clear_all();
1996    }
1997
1998    pub(crate) fn waker(&self) -> &'static crate::asynch::AtomicWaker {
1999        self.rx_impl.waker()
2000    }
2001
2002    pub(crate) fn has_error(&self) -> bool {
2003        self.pending_in_interrupts()
2004            .contains(DmaRxInterrupt::DescriptorError)
2005    }
2006
2007    pub(crate) fn has_dscr_empty_error(&self) -> bool {
2008        self.pending_in_interrupts()
2009            .contains(DmaRxInterrupt::DescriptorEmpty)
2010    }
2011
2012    pub(crate) fn has_eof_error(&self) -> bool {
2013        self.pending_in_interrupts()
2014            .contains(DmaRxInterrupt::ErrorEof)
2015    }
2016}
2017
2018/// DMA transmit channel
2019#[doc(hidden)]
2020pub struct ChannelTx<Dm, CH>
2021where
2022    Dm: DriverMode,
2023    CH: DmaTxChannel,
2024{
2025    pub(crate) tx_impl: CH,
2026    pub(crate) _phantom: PhantomData<Dm>,
2027    pub(crate) _guard: PeripheralGuard,
2028}
2029
2030impl<CH> ChannelTx<Blocking, CH>
2031where
2032    CH: DmaTxChannel,
2033{
2034    /// Creates a new TX channel half.
2035    pub fn new(tx_impl: CH) -> Self {
2036        let _guard = create_guard(&tx_impl);
2037
2038        if let Some(interrupt) = tx_impl.peripheral_interrupt() {
2039            for cpu in Cpu::all() {
2040                crate::interrupt::disable(cpu, interrupt);
2041            }
2042        }
2043        tx_impl.set_async(false);
2044        Self {
2045            tx_impl,
2046            _phantom: PhantomData,
2047            _guard,
2048        }
2049    }
2050
2051    /// Converts a blocking channel to an async channel.
2052    pub(crate) fn into_async(mut self) -> ChannelTx<Async, CH> {
2053        if let Some(handler) = self.tx_impl.async_handler() {
2054            self.set_interrupt_handler(handler);
2055        }
2056        self.tx_impl.set_async(true);
2057        ChannelTx {
2058            tx_impl: self.tx_impl,
2059            _phantom: PhantomData,
2060            _guard: self._guard,
2061        }
2062    }
2063
2064    fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
2065        self.unlisten_out(EnumSet::all());
2066        self.clear_out(EnumSet::all());
2067
2068        if let Some(interrupt) = self.tx_impl.peripheral_interrupt() {
2069            for core in crate::system::Cpu::other() {
2070                crate::interrupt::disable(core, interrupt);
2071            }
2072            unsafe { crate::interrupt::bind_interrupt(interrupt, handler.handler()) };
2073            unwrap!(crate::interrupt::enable(interrupt, handler.priority()));
2074        }
2075    }
2076}
2077
2078impl<CH> ChannelTx<Async, CH>
2079where
2080    CH: DmaTxChannel,
2081{
2082    /// Converts an async channel into a blocking channel.
2083    pub(crate) fn into_blocking(self) -> ChannelTx<Blocking, CH> {
2084        if let Some(interrupt) = self.tx_impl.peripheral_interrupt() {
2085            crate::interrupt::disable(Cpu::current(), interrupt);
2086        }
2087        self.tx_impl.set_async(false);
2088        ChannelTx {
2089            tx_impl: self.tx_impl,
2090            _phantom: PhantomData,
2091            _guard: self._guard,
2092        }
2093    }
2094}
2095
2096impl<Dm, CH> ChannelTx<Dm, CH>
2097where
2098    Dm: DriverMode,
2099    CH: DmaTxChannel,
2100{
2101    /// Configure the channel priority.
2102    #[cfg(gdma)]
2103    pub fn set_priority(&mut self, priority: DmaPriority) {
2104        self.tx_impl.set_priority(priority);
2105    }
2106
2107    fn do_prepare(
2108        &mut self,
2109        preparation: Preparation,
2110        peri: DmaPeripheral,
2111    ) -> Result<(), DmaError> {
2112        debug_assert_eq!(preparation.direction, TransferDirection::Out);
2113
2114        debug!("Preparing TX transfer {:?}", preparation);
2115        trace!("First descriptor {:?}", unsafe { &*preparation.start });
2116
2117        #[cfg(psram_dma)]
2118        if preparation.accesses_psram && !self.tx_impl.can_access_psram() {
2119            return Err(DmaError::UnsupportedMemoryRegion);
2120        }
2121
2122        #[cfg(psram_dma)]
2123        self.tx_impl
2124            .set_ext_mem_block_size(preparation.burst_transfer.external_memory.into());
2125        self.tx_impl.set_burst_mode(preparation.burst_transfer);
2126        self.tx_impl.set_descr_burst_mode(true);
2127        self.tx_impl.set_check_owner(preparation.check_owner);
2128        self.tx_impl
2129            .set_auto_write_back(preparation.auto_write_back);
2130
2131        compiler_fence(core::sync::atomic::Ordering::SeqCst);
2132
2133        self.tx_impl.clear_all();
2134        self.tx_impl.reset();
2135        self.tx_impl.set_link_addr(preparation.start as u32);
2136        self.tx_impl.set_peripheral(peri as u8);
2137
2138        Ok(())
2139    }
2140}
2141
2142impl<Dm, CH> crate::private::Sealed for ChannelTx<Dm, CH>
2143where
2144    Dm: DriverMode,
2145    CH: DmaTxChannel,
2146{
2147}
2148
2149#[allow(unused)]
2150impl<Dm, CH> ChannelTx<Dm, CH>
2151where
2152    Dm: DriverMode,
2153    CH: DmaTxChannel,
2154{
2155    // TODO: used by I2S, which should be rewritten to use the Preparation-based
2156    // API.
2157    pub(crate) unsafe fn prepare_transfer_without_start(
2158        &mut self,
2159        peri: DmaPeripheral,
2160        chain: &DescriptorChain,
2161    ) -> Result<(), DmaError> {
2162        // Based on the ESP32-S3 TRM the alignment check is not needed for TX
2163
2164        // We check each descriptor buffer that points to PSRAM for
2165        // alignment and writeback the cache for that buffer.
2166        // Note that DmaBuffer implementations are required to do this for us.
2167        #[cfg(psram_dma)]
2168        cfg_if::cfg_if! {
2169            if #[cfg(psram_dma)] {
2170                let mut uses_psram = false;
2171                let psram_range = crate::soc::psram_range();
2172                for des in chain.descriptors.iter() {
2173                    // we are forcing the DMA alignment to the cache line size
2174                    // required when we are using dcache
2175                    let alignment = unsafe { crate::soc::cache_get_dcache_line_size()} as usize;
2176                    if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) {
2177                        uses_psram = true;
2178                        // both the size and address of the buffer must be aligned
2179                        if des.buffer as usize % alignment != 0 {
2180                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address));
2181                        }
2182                        if des.size() % alignment != 0 {
2183                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Size));
2184                        }
2185                        unsafe { crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32); }
2186                    }
2187                }
2188            }
2189        }
2190
2191        let preparation = Preparation {
2192            start: chain.first().cast_mut(),
2193            direction: TransferDirection::Out,
2194            #[cfg(psram_dma)]
2195            accesses_psram: uses_psram,
2196            burst_transfer: BurstConfig::default(),
2197            check_owner: Some(false),
2198            // enable descriptor write back in circular mode
2199            auto_write_back: !(unsafe { *chain.last() }).next.is_null(),
2200        };
2201        self.do_prepare(preparation, peri)?;
2202
2203        Ok(())
2204    }
2205
2206    pub(crate) unsafe fn prepare_transfer<BUF: DmaTxBuffer>(
2207        &mut self,
2208        peri: DmaPeripheral,
2209        buffer: &mut BUF,
2210    ) -> Result<(), DmaError> {
2211        let preparation = buffer.prepare();
2212
2213        self.do_prepare(preparation, peri)
2214    }
2215
2216    pub(crate) fn start_transfer(&mut self) -> Result<(), DmaError> {
2217        self.tx_impl.start();
2218        while self.tx_impl.is_fifo_empty() && self.pending_out_interrupts().is_empty() {}
2219
2220        if self
2221            .pending_out_interrupts()
2222            .contains(DmaTxInterrupt::DescriptorError)
2223        {
2224            Err(DmaError::DescriptorError)
2225        } else {
2226            Ok(())
2227        }
2228    }
2229
2230    pub(crate) fn stop_transfer(&mut self) {
2231        self.tx_impl.stop()
2232    }
2233
2234    pub(crate) fn listen_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>) {
2235        self.tx_impl.listen(interrupts);
2236    }
2237
2238    pub(crate) fn unlisten_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>) {
2239        self.tx_impl.unlisten(interrupts);
2240    }
2241
2242    pub(crate) fn is_listening_out(&self) -> EnumSet<DmaTxInterrupt> {
2243        self.tx_impl.is_listening()
2244    }
2245
2246    pub(crate) fn clear_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>) {
2247        self.tx_impl.clear(interrupts);
2248    }
2249
2250    pub(crate) fn pending_out_interrupts(&self) -> EnumSet<DmaTxInterrupt> {
2251        self.tx_impl.pending_interrupts()
2252    }
2253
2254    pub(crate) fn waker(&self) -> &'static crate::asynch::AtomicWaker {
2255        self.tx_impl.waker()
2256    }
2257
2258    pub(crate) fn clear_interrupts(&self) {
2259        self.tx_impl.clear_all();
2260    }
2261
2262    pub(crate) fn last_out_dscr_address(&self) -> usize {
2263        self.tx_impl.last_dscr_address()
2264    }
2265
2266    pub(crate) fn is_done(&self) -> bool {
2267        self.pending_out_interrupts()
2268            .contains(DmaTxInterrupt::TotalEof)
2269    }
2270
2271    pub(crate) fn has_error(&self) -> bool {
2272        self.pending_out_interrupts()
2273            .contains(DmaTxInterrupt::DescriptorError)
2274    }
2275}
2276
2277#[doc(hidden)]
2278pub trait RegisterAccess: crate::private::Sealed {
2279    /// Reset the state machine of the channel and FIFO pointer.
2280    fn reset(&self);
2281
2282    /// Enable/Disable INCR burst transfer for the channel when accessing
2283    /// data in internal RAM.
2284    fn set_burst_mode(&self, burst_mode: BurstConfig);
2285
2286    /// Enable/Disable burst transfer for the channel when reading
2287    /// descriptors in internal RAM.
2288    fn set_descr_burst_mode(&self, burst_mode: bool);
2289
2290    /// Set the priority of the channel. The larger the value, the higher
2291    /// the priority.
2292    #[cfg(gdma)]
2293    fn set_priority(&self, priority: DmaPriority);
2294
2295    /// Select a peripheral for the channel.
2296    fn set_peripheral(&self, peripheral: u8);
2297
2298    /// Set the address of the first descriptor.
2299    fn set_link_addr(&self, address: u32);
2300
2301    /// Enable the channel for data transfer.
2302    fn start(&self);
2303
2304    /// Stop the channel from transferring data.
2305    fn stop(&self);
2306
2307    /// Mount a new descriptor.
2308    fn restart(&self);
2309
2310    /// Configure the bit to enable checking the owner attribute of the
2311    /// descriptor.
2312    fn set_check_owner(&self, check_owner: Option<bool>);
2313
2314    #[cfg(psram_dma)]
2315    fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize);
2316
2317    #[cfg(pdma)]
2318    fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool;
2319
2320    #[cfg(psram_dma)]
2321    fn can_access_psram(&self) -> bool;
2322}
2323
2324#[doc(hidden)]
2325pub trait RxRegisterAccess: RegisterAccess {
2326    #[cfg(gdma)]
2327    fn set_mem2mem_mode(&self, value: bool);
2328
2329    fn peripheral_interrupt(&self) -> Option<Interrupt>;
2330    fn async_handler(&self) -> Option<InterruptHandler>;
2331}
2332
2333#[doc(hidden)]
2334pub trait TxRegisterAccess: RegisterAccess {
2335    /// Returns whether the DMA's FIFO is empty.
2336    fn is_fifo_empty(&self) -> bool;
2337
2338    /// Enable/disable outlink-writeback
2339    fn set_auto_write_back(&self, enable: bool);
2340
2341    /// Outlink descriptor address when EOF occurs on the TX channel.
2342    fn last_dscr_address(&self) -> usize;
2343
2344    fn peripheral_interrupt(&self) -> Option<Interrupt>;
2345    fn async_handler(&self) -> Option<InterruptHandler>;
2346}
2347
2348#[doc(hidden)]
2349pub trait InterruptAccess<T: EnumSetType>: crate::private::Sealed {
2350    fn listen(&self, interrupts: impl Into<EnumSet<T>>) {
2351        self.enable_listen(interrupts.into(), true)
2352    }
2353    fn unlisten(&self, interrupts: impl Into<EnumSet<T>>) {
2354        self.enable_listen(interrupts.into(), false)
2355    }
2356
2357    fn clear_all(&self) {
2358        self.clear(EnumSet::all());
2359    }
2360
2361    fn enable_listen(&self, interrupts: EnumSet<T>, enable: bool);
2362    fn is_listening(&self) -> EnumSet<T>;
2363    fn clear(&self, interrupts: impl Into<EnumSet<T>>);
2364    fn pending_interrupts(&self) -> EnumSet<T>;
2365    fn waker(&self) -> &'static crate::asynch::AtomicWaker;
2366
2367    fn is_async(&self) -> bool;
2368    fn set_async(&self, is_async: bool);
2369}
2370
2371/// DMA Channel
2372#[non_exhaustive]
2373pub struct Channel<Dm, CH>
2374where
2375    Dm: DriverMode,
2376    CH: DmaChannel,
2377{
2378    /// RX half of the channel
2379    pub rx: ChannelRx<Dm, CH::Rx>,
2380    /// TX half of the channel
2381    pub tx: ChannelTx<Dm, CH::Tx>,
2382}
2383
2384impl<CH> Channel<Blocking, CH>
2385where
2386    CH: DmaChannel,
2387{
2388    /// Creates a new DMA channel driver.
2389    #[instability::unstable]
2390    pub fn new(channel: CH) -> Self {
2391        let (rx, tx) = unsafe { channel.split_internal(crate::private::Internal) };
2392        Self {
2393            rx: ChannelRx::new(rx),
2394            tx: ChannelTx::new(tx),
2395        }
2396    }
2397
2398    /// Sets the interrupt handler for RX and TX interrupts.
2399    ///
2400    /// Interrupts are not enabled at the peripheral level here.
2401    #[instability::unstable]
2402    pub fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
2403        self.rx.set_interrupt_handler(handler);
2404        self.tx.set_interrupt_handler(handler);
2405    }
2406
2407    /// Listen for the given interrupts
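    ///
    /// A minimal sketch (assuming a GDMA chip where `DMA_CH0` exists):
    ///
    /// ```rust,ignore
    /// let mut channel = Channel::new(peripherals.DMA_CH0);
    /// channel.listen(DmaInterrupt::RxDone | DmaInterrupt::TxDone);
    /// ```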
2408    pub fn listen(&mut self, interrupts: impl Into<EnumSet<DmaInterrupt>>) {
2409        for interrupt in interrupts.into() {
2410            match interrupt {
2411                DmaInterrupt::RxDone => self.rx.listen_in(DmaRxInterrupt::Done),
2412                DmaInterrupt::TxDone => self.tx.listen_out(DmaTxInterrupt::Done),
2413            }
2414        }
2415    }
2416
2417    /// Unlisten the given interrupts
2418    pub fn unlisten(&mut self, interrupts: impl Into<EnumSet<DmaInterrupt>>) {
2419        for interrupt in interrupts.into() {
2420            match interrupt {
2421                DmaInterrupt::RxDone => self.rx.unlisten_in(DmaRxInterrupt::Done),
2422                DmaInterrupt::TxDone => self.tx.unlisten_out(DmaTxInterrupt::Done),
2423            }
2424        }
2425    }
2426
2427    /// Gets asserted interrupts
2428    pub fn interrupts(&mut self) -> EnumSet<DmaInterrupt> {
2429        let mut res = EnumSet::new();
2430        if self.rx.is_done() {
2431            res.insert(DmaInterrupt::RxDone);
2432        }
2433        if self.tx.is_done() {
2434            res.insert(DmaInterrupt::TxDone);
2435        }
2436        res
2437    }
2438
2439    /// Resets asserted interrupts
2440    pub fn clear_interrupts(&mut self, interrupts: impl Into<EnumSet<DmaInterrupt>>) {
2441        for interrupt in interrupts.into() {
2442            match interrupt {
2443                DmaInterrupt::RxDone => self.rx.clear_in(DmaRxInterrupt::Done),
2444                DmaInterrupt::TxDone => self.tx.clear_out(DmaTxInterrupt::Done),
2445            }
2446        }
2447    }
2448
2449    /// Configure the channel priorities.
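    ///
    /// A sketch (GDMA chips only; assumes a `DmaPriority::Priority1` variant):
    ///
    /// ```rust,ignore
    /// channel.set_priority(DmaPriority::Priority1);
    /// ```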
2450    #[cfg(gdma)]
2451    pub fn set_priority(&mut self, priority: DmaPriority) {
2452        self.tx.set_priority(priority);
2453        self.rx.set_priority(priority);
2454    }
2455
2456    /// Converts a blocking channel to an async channel.
2457    pub fn into_async(self) -> Channel<Async, CH> {
2458        Channel {
2459            rx: self.rx.into_async(),
2460            tx: self.tx.into_async(),
2461        }
2462    }
2463}
2464
2465impl<CH> Channel<Async, CH>
2466where
2467    CH: DmaChannel,
2468{
2469    /// Converts an async channel to a blocking channel.
2470    pub fn into_blocking(self) -> Channel<Blocking, CH> {
2471        Channel {
2472            rx: self.rx.into_blocking(),
2473            tx: self.tx.into_blocking(),
2474        }
2475    }
2476}
2477
2478impl<CH: DmaChannel> From<Channel<Blocking, CH>> for Channel<Async, CH> {
2479    fn from(channel: Channel<Blocking, CH>) -> Self {
2480        channel.into_async()
2481    }
2482}
2483
2484impl<CH: DmaChannel> From<Channel<Async, CH>> for Channel<Blocking, CH> {
2485    fn from(channel: Channel<Async, CH>) -> Self {
2486        channel.into_blocking()
2487    }
2488}
2489
2490pub(crate) mod dma_private {
2491    use super::*;
2492
2493    pub trait DmaSupport {
2494        type DriverMode: DriverMode;
2495
2496        /// Wait until the transfer is done.
2497        ///
2498        /// Depending on the peripheral this might include checking the DMA
2499        /// channel and/or the peripheral.
2500        ///
2501        /// After this, all data should be processed by the peripheral - i.e. the
2502        /// peripheral should have processed its FIFO(s).
2503        ///
2504        /// Please note: This is called in the transfer's `wait` function _and_
2505        /// by its [Drop] implementation.
2506        fn peripheral_wait_dma(&mut self, is_rx: bool, is_tx: bool);
2507
2508        /// Only used by circular DMA transfers, in both the `stop` function
2509        /// _and_ its [Drop] implementation.
2510        fn peripheral_dma_stop(&mut self);
2511    }
2512
2513    #[instability::unstable]
2514    pub trait DmaSupportTx: DmaSupport {
2515        type Channel: DmaTxChannel;
2516
2517        fn tx(&mut self) -> &mut ChannelTx<Self::DriverMode, Self::Channel>;
2518
2519        fn chain(&mut self) -> &mut DescriptorChain;
2520    }
2521
2522    #[instability::unstable]
2523    pub trait DmaSupportRx: DmaSupport {
2524        type Channel: DmaRxChannel;
2525
2526        fn rx(&mut self) -> &mut ChannelRx<Self::DriverMode, Self::Channel>;
2527
2528        fn chain(&mut self) -> &mut DescriptorChain;
2529    }
2530}
2531
2532/// DMA transaction for TX only transfers
2533///
2534/// # Safety
2535///
2536/// Never use [core::mem::forget] on an in-progress transfer
2537#[non_exhaustive]
2538#[must_use]
2539pub struct DmaTransferTx<'a, I>
2540where
2541    I: dma_private::DmaSupportTx,
2542{
2543    instance: &'a mut I,
2544}
2545
2546impl<'a, I> DmaTransferTx<'a, I>
2547where
2548    I: dma_private::DmaSupportTx,
2549{
2550    #[cfg_attr(esp32c2, allow(dead_code))]
2551    pub(crate) fn new(instance: &'a mut I) -> Self {
2552        Self { instance }
2553    }
2554
2555    /// Wait for the transfer to finish.
2556    pub fn wait(self) -> Result<(), DmaError> {
2557        self.instance.peripheral_wait_dma(false, true);
2558
2559        if self
2560            .instance
2561            .tx()
2562            .pending_out_interrupts()
2563            .contains(DmaTxInterrupt::DescriptorError)
2564        {
2565            Err(DmaError::DescriptorError)
2566        } else {
2567            Ok(())
2568        }
2569    }
2570
2571    /// Check if the transfer is finished.
2572    pub fn is_done(&mut self) -> bool {
2573        self.instance.tx().is_done()
2574    }
2575}
2576
2577impl<I> Drop for DmaTransferTx<'_, I>
2578where
2579    I: dma_private::DmaSupportTx,
2580{
2581    fn drop(&mut self) {
2582        self.instance.peripheral_wait_dma(true, false);
2583    }
2584}
2585
2586/// DMA transaction for RX only transfers
2587///
2588/// # Safety
2589///
2590/// Never use [core::mem::forget] on an in-progress transfer
2591#[non_exhaustive]
2592#[must_use]
2593pub struct DmaTransferRx<'a, I>
2594where
2595    I: dma_private::DmaSupportRx,
2596{
2597    instance: &'a mut I,
2598}
2599
2600impl<'a, I> DmaTransferRx<'a, I>
2601where
2602    I: dma_private::DmaSupportRx,
2603{
2604    #[cfg_attr(esp32c2, allow(dead_code))]
2605    pub(crate) fn new(instance: &'a mut I) -> Self {
2606        Self { instance }
2607    }
2608
2609    /// Wait for the transfer to finish.
2610    pub fn wait(self) -> Result<(), DmaError> {
2611        self.instance.peripheral_wait_dma(true, false);
2612
2613        if self
2614            .instance
2615            .rx()
2616            .pending_in_interrupts()
2617            .contains(DmaRxInterrupt::DescriptorError)
2618        {
2619            Err(DmaError::DescriptorError)
2620        } else {
2621            Ok(())
2622        }
2623    }
2624
2625    /// Check if the transfer is finished.
2626    pub fn is_done(&mut self) -> bool {
2627        self.instance.rx().is_done()
2628    }
2629}
2630
2631impl<I> Drop for DmaTransferRx<'_, I>
2632where
2633    I: dma_private::DmaSupportRx,
2634{
2635    fn drop(&mut self) {
2636        self.instance.peripheral_wait_dma(true, false);
2637    }
2638}
2639
2640/// DMA transaction for TX+RX transfers
2641///
2642/// # Safety
2643///
2644/// Never use [core::mem::forget] on an in-progress transfer
2645#[non_exhaustive]
2646#[must_use]
2647pub struct DmaTransferRxTx<'a, I>
2648where
2649    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
2650{
2651    instance: &'a mut I,
2652}
2653
2654impl<'a, I> DmaTransferRxTx<'a, I>
2655where
2656    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
2657{
2658    #[allow(dead_code)]
2659    pub(crate) fn new(instance: &'a mut I) -> Self {
2660        Self { instance }
2661    }
2662
2663    /// Wait for the transfer to finish.
2664    pub fn wait(self) -> Result<(), DmaError> {
2665        self.instance.peripheral_wait_dma(true, true);
2666
2667        if self
2668            .instance
2669            .tx()
2670            .pending_out_interrupts()
2671            .contains(DmaTxInterrupt::DescriptorError)
2672            || self
2673                .instance
2674                .rx()
2675                .pending_in_interrupts()
2676                .contains(DmaRxInterrupt::DescriptorError)
2677        {
2678            Err(DmaError::DescriptorError)
2679        } else {
2680            Ok(())
2681        }
2682    }
2683
2684    /// Check if the transfer is finished.
2685    pub fn is_done(&mut self) -> bool {
2686        self.instance.tx().is_done() && self.instance.rx().is_done()
2687    }
2688}
2689
2690impl<I> Drop for DmaTransferRxTx<'_, I>
2691where
2692    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
2693{
2694    fn drop(&mut self) {
2695        self.instance.peripheral_wait_dma(true, true);
2696    }
2697}
2698
2699/// DMA transaction for TX only circular transfers
2700///
2701/// # Safety
2702///
2703/// Never use [core::mem::forget] on an in-progress transfer
2704#[non_exhaustive]
2705#[must_use]
2706pub struct DmaTransferTxCircular<'a, I>
2707where
2708    I: dma_private::DmaSupportTx,
2709{
2710    instance: &'a mut I,
2711    state: TxCircularState,
2712}
2713
2714impl<'a, I> DmaTransferTxCircular<'a, I>
2715where
2716    I: dma_private::DmaSupportTx,
2717{
2718    #[allow(unused)] // currently used by peripherals not available on all chips
2719    pub(crate) fn new(instance: &'a mut I) -> Self {
2720        let state = TxCircularState::new(instance.chain());
2721        Self { instance, state }
2722    }
2723
2724    /// Number of bytes that can be pushed.
2725    pub fn available(&mut self) -> Result<usize, DmaError> {
2726        self.state.update(self.instance.tx())?;
2727        Ok(self.state.available)
2728    }
2729
2730    /// Push bytes into the DMA buffer.
2731    pub fn push(&mut self, data: &[u8]) -> Result<usize, DmaError> {
2732        self.state.update(self.instance.tx())?;
2733        self.state.push(data)
2734    }
2735
2736    /// Push bytes into the DMA buffer via the given closure.
2737    /// The closure *must* return the actual number of bytes written.
2738    /// The closure *might* get called with a slice which is smaller than the
2739    /// total available buffer.
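    ///
    /// A sketch, assuming `transfer` is an in-progress circular TX transfer and
    /// `data` is a byte slice waiting to be sent:
    ///
    /// ```rust,ignore
    /// let written = transfer.push_with(|buf| {
    ///     let n = data.len().min(buf.len());
    ///     buf[..n].copy_from_slice(&data[..n]);
    ///     n // number of bytes actually written
    /// })?;
    /// ```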
2740    pub fn push_with(&mut self, f: impl FnOnce(&mut [u8]) -> usize) -> Result<usize, DmaError> {
2741        self.state.update(self.instance.tx())?;
2742        self.state.push_with(f)
2743    }
2744
2745    /// Stop the DMA transfer
2746    #[allow(clippy::type_complexity)]
2747    pub fn stop(self) -> Result<(), DmaError> {
2748        self.instance.peripheral_dma_stop();
2749
2750        if self
2751            .instance
2752            .tx()
2753            .pending_out_interrupts()
2754            .contains(DmaTxInterrupt::DescriptorError)
2755        {
2756            Err(DmaError::DescriptorError)
2757        } else {
2758            Ok(())
2759        }
2760    }
2761}
2762
2763impl<I> Drop for DmaTransferTxCircular<'_, I>
2764where
2765    I: dma_private::DmaSupportTx,
2766{
2767    fn drop(&mut self) {
2768        self.instance.peripheral_dma_stop();
2769    }
2770}
2771
2772/// DMA transaction for RX only circular transfers
2773///
2774/// # Safety
2775///
2776/// Never use [core::mem::forget] on an in-progress transfer
2777#[non_exhaustive]
2778#[must_use]
2779pub struct DmaTransferRxCircular<'a, I>
2780where
2781    I: dma_private::DmaSupportRx,
2782{
2783    instance: &'a mut I,
2784    state: RxCircularState,
2785}
2786
2787impl<'a, I> DmaTransferRxCircular<'a, I>
2788where
2789    I: dma_private::DmaSupportRx,
2790{
2791    #[allow(unused)] // currently used by peripherals not available on all chips
2792    pub(crate) fn new(instance: &'a mut I) -> Self {
2793        let state = RxCircularState::new(instance.chain());
2794        Self { instance, state }
2795    }
2796
2797    /// Number of bytes that can be popped.
2798    ///
2799    /// You are expected to call this before trying to
2800    /// [DmaTransferRxCircular::pop] data.
2801    pub fn available(&mut self) -> Result<usize, DmaError> {
2802        self.state.update()?;
2803        Ok(self.state.available)
2804    }
2805
2806    /// Get available data.
2807    ///
2808    /// You are expected to check the amount of available data beforehand by
2809    /// calling [DmaTransferRxCircular::available], and the given buffer must be
2810    /// able to hold all available data.
2811    ///
2812    /// Fails with [DmaError::BufferTooSmall] if the given buffer is too small
2813    /// to hold all available data.
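    ///
    /// A sketch, assuming `transfer` is an in-progress circular RX transfer:
    ///
    /// ```rust,ignore
    /// let mut buf = [0u8; 128];
    /// let available = transfer.available()?;
    /// if available > 0 && available <= buf.len() {
    ///     let received = transfer.pop(&mut buf)?;
    ///     // process &buf[..received]
    /// }
    /// ```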
2814    pub fn pop(&mut self, data: &mut [u8]) -> Result<usize, DmaError> {
2815        self.state.update()?;
2816        self.state.pop(data)
2817    }
2818}
2819
2820impl<I> Drop for DmaTransferRxCircular<'_, I>
2821where
2822    I: dma_private::DmaSupportRx,
2823{
2824    fn drop(&mut self) {
2825        self.instance.peripheral_dma_stop();
2826    }
2827}
2828
2829pub(crate) mod asynch {
2830    use core::task::Poll;
2831
2832    use super::*;
2833
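    // Sketch of how peripheral drivers use these futures internally; the
    // surrounding driver state (`self.channel`) is illustrative only:
    //
    //     DmaRxFuture::new(&mut self.channel.rx).await?;
    //     DmaTxFuture::new(&mut self.channel.tx).await?;
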
2834    #[must_use = "futures do nothing unless you `.await` or poll them"]
2835    pub struct DmaTxFuture<'a, CH>
2836    where
2837        CH: DmaTxChannel,
2838    {
2839        pub(crate) tx: &'a mut ChannelTx<Async, CH>,
2840    }
2841
2842    impl<'a, CH> DmaTxFuture<'a, CH>
2843    where
2844        CH: DmaTxChannel,
2845    {
2846        #[cfg_attr(esp32c2, allow(dead_code))]
2847        pub fn new(tx: &'a mut ChannelTx<Async, CH>) -> Self {
2848            Self { tx }
2849        }
2850    }
2851
2852    impl<CH> core::future::Future for DmaTxFuture<'_, CH>
2853    where
2854        CH: DmaTxChannel,
2855    {
2856        type Output = Result<(), DmaError>;
2857
2858        fn poll(
2859            self: core::pin::Pin<&mut Self>,
2860            cx: &mut core::task::Context<'_>,
2861        ) -> Poll<Self::Output> {
2862            if self.tx.is_done() {
2863                self.tx.clear_interrupts();
2864                Poll::Ready(Ok(()))
2865            } else if self
2866                .tx
2867                .pending_out_interrupts()
2868                .contains(DmaTxInterrupt::DescriptorError)
2869            {
2870                self.tx.clear_interrupts();
2871                Poll::Ready(Err(DmaError::DescriptorError))
2872            } else {
2873                self.tx.waker().register(cx.waker());
2874                self.tx
2875                    .listen_out(DmaTxInterrupt::TotalEof | DmaTxInterrupt::DescriptorError);
2876                Poll::Pending
2877            }
2878        }
2879    }
2880
2881    impl<CH> Drop for DmaTxFuture<'_, CH>
2882    where
2883        CH: DmaTxChannel,
2884    {
2885        fn drop(&mut self) {
2886            self.tx
2887                .unlisten_out(DmaTxInterrupt::TotalEof | DmaTxInterrupt::DescriptorError);
2888        }
2889    }
2890
2891    #[must_use = "futures do nothing unless you `.await` or poll them"]
2892    pub struct DmaRxFuture<'a, CH>
2893    where
2894        CH: DmaRxChannel,
2895    {
2896        pub(crate) rx: &'a mut ChannelRx<Async, CH>,
2897    }
2898
2899    impl<'a, CH> DmaRxFuture<'a, CH>
2900    where
2901        CH: DmaRxChannel,
2902    {
2903        pub fn new(rx: &'a mut ChannelRx<Async, CH>) -> Self {
2904            Self { rx }
2905        }
2906    }
2907
2908    impl<CH> core::future::Future for DmaRxFuture<'_, CH>
2909    where
2910        CH: DmaRxChannel,
2911    {
2912        type Output = Result<(), DmaError>;
2913
2914        fn poll(
2915            self: core::pin::Pin<&mut Self>,
2916            cx: &mut core::task::Context<'_>,
2917        ) -> Poll<Self::Output> {
2918            if self.rx.is_done() {
2919                self.rx.clear_interrupts();
2920                Poll::Ready(Ok(()))
2921            } else if !self.rx.pending_in_interrupts().is_disjoint(
2922                DmaRxInterrupt::DescriptorError
2923                    | DmaRxInterrupt::DescriptorEmpty
2924                    | DmaRxInterrupt::ErrorEof,
2925            ) {
2926                self.rx.clear_interrupts();
2927                Poll::Ready(Err(DmaError::DescriptorError))
2928            } else {
2929                self.rx.waker().register(cx.waker());
2930                self.rx.listen_in(
2931                    DmaRxInterrupt::SuccessfulEof
2932                        | DmaRxInterrupt::DescriptorError
2933                        | DmaRxInterrupt::DescriptorEmpty
2934                        | DmaRxInterrupt::ErrorEof,
2935                );
2936                Poll::Pending
2937            }
2938        }
2939    }
2940
2941    impl<CH> Drop for DmaRxFuture<'_, CH>
2942    where
2943        CH: DmaRxChannel,
2944    {
2945        fn drop(&mut self) {
2946            self.rx.unlisten_in(
2947                DmaRxInterrupt::DescriptorError
2948                    | DmaRxInterrupt::DescriptorEmpty
2949                    | DmaRxInterrupt::ErrorEof,
2950            );
2951        }
2952    }
2953
2954    #[cfg(any(i2s0, i2s1))]
2955    pub struct DmaTxDoneChFuture<'a, CH>
2956    where
2957        CH: DmaTxChannel,
2958    {
2959        pub(crate) tx: &'a mut ChannelTx<Async, CH>,
2960        _a: (),
2961    }
2962
2963    #[cfg(any(i2s0, i2s1))]
2964    impl<'a, CH> DmaTxDoneChFuture<'a, CH>
2965    where
2966        CH: DmaTxChannel,
2967    {
2968        pub fn new(tx: &'a mut ChannelTx<Async, CH>) -> Self {
2969            Self { tx, _a: () }
2970        }
2971    }
2972
2973    #[cfg(any(i2s0, i2s1))]
2974    impl<CH> core::future::Future for DmaTxDoneChFuture<'_, CH>
2975    where
2976        CH: DmaTxChannel,
2977    {
2978        type Output = Result<(), DmaError>;
2979
2980        fn poll(
2981            self: core::pin::Pin<&mut Self>,
2982            cx: &mut core::task::Context<'_>,
2983        ) -> Poll<Self::Output> {
2984            if self
2985                .tx
2986                .pending_out_interrupts()
2987                .contains(DmaTxInterrupt::Done)
2988            {
2989                self.tx.clear_out(DmaTxInterrupt::Done);
2990                Poll::Ready(Ok(()))
2991            } else if self
2992                .tx
2993                .pending_out_interrupts()
2994                .contains(DmaTxInterrupt::DescriptorError)
2995            {
2996                self.tx.clear_interrupts();
2997                Poll::Ready(Err(DmaError::DescriptorError))
2998            } else {
2999                self.tx.waker().register(cx.waker());
3000                self.tx
3001                    .listen_out(DmaTxInterrupt::Done | DmaTxInterrupt::DescriptorError);
3002                Poll::Pending
3003            }
3004        }
3005    }
3006
3007    #[cfg(any(i2s0, i2s1))]
3008    impl<CH> Drop for DmaTxDoneChFuture<'_, CH>
3009    where
3010        CH: DmaTxChannel,
3011    {
3012        fn drop(&mut self) {
3013            self.tx
3014                .unlisten_out(DmaTxInterrupt::Done | DmaTxInterrupt::DescriptorError);
3015        }
3016    }
3017
3018    #[cfg(any(i2s0, i2s1))]
3019    pub struct DmaRxDoneChFuture<'a, CH>
3020    where
3021        CH: DmaRxChannel,
3022    {
3023        pub(crate) rx: &'a mut ChannelRx<Async, CH>,
3024        _a: (),
3025    }
3026
3027    #[cfg(any(i2s0, i2s1))]
3028    impl<'a, CH> DmaRxDoneChFuture<'a, CH>
3029    where
3030        CH: DmaRxChannel,
3031    {
3032        pub fn new(rx: &'a mut ChannelRx<Async, CH>) -> Self {
3033            Self { rx, _a: () }
3034        }
3035    }
3036
3037    #[cfg(any(i2s0, i2s1))]
3038    impl<CH> core::future::Future for DmaRxDoneChFuture<'_, CH>
3039    where
3040        CH: DmaRxChannel,
3041    {
3042        type Output = Result<(), DmaError>;
3043
3044        fn poll(
3045            self: core::pin::Pin<&mut Self>,
3046            cx: &mut core::task::Context<'_>,
3047        ) -> Poll<Self::Output> {
3048            if self
3049                .rx
3050                .pending_in_interrupts()
3051                .contains(DmaRxInterrupt::Done)
3052            {
3053                self.rx.clear_in(DmaRxInterrupt::Done);
3054                Poll::Ready(Ok(()))
3055            } else if !self.rx.pending_in_interrupts().is_disjoint(
3056                DmaRxInterrupt::DescriptorError
3057                    | DmaRxInterrupt::DescriptorEmpty
3058                    | DmaRxInterrupt::ErrorEof,
3059            ) {
3060                self.rx.clear_interrupts();
3061                Poll::Ready(Err(DmaError::DescriptorError))
3062            } else {
3063                self.rx.waker().register(cx.waker());
3064                self.rx.listen_in(
3065                    DmaRxInterrupt::Done
3066                        | DmaRxInterrupt::DescriptorError
3067                        | DmaRxInterrupt::DescriptorEmpty
3068                        | DmaRxInterrupt::ErrorEof,
3069                );
3070                Poll::Pending
3071            }
3072        }
3073    }
3074
3075    #[cfg(any(i2s0, i2s1))]
3076    impl<CH> Drop for DmaRxDoneChFuture<'_, CH>
3077    where
3078        CH: DmaRxChannel,
3079    {
3080        fn drop(&mut self) {
3081            self.rx.unlisten_in(
3082                DmaRxInterrupt::Done
3083                    | DmaRxInterrupt::DescriptorError
3084                    | DmaRxInterrupt::DescriptorEmpty
3085                    | DmaRxInterrupt::ErrorEof,
3086            );
3087        }
3088    }
3089
3090    pub(super) fn handle_in_interrupt<CH: DmaChannelExt>() {
3091        let rx = CH::rx_interrupts();
3092
3093        if !rx.is_async() {
3094            return;
3095        }
3096
3097        let pending = rx.pending_interrupts();
3098        let enabled = rx.is_listening();
3099
3100        if !pending.is_disjoint(enabled) {
3101            rx.unlisten(EnumSet::all());
3102            rx.waker().wake()
3103        }
3104    }
3105
3106    pub(super) fn handle_out_interrupt<CH: DmaChannelExt>() {
3107        let tx = CH::tx_interrupts();
3108
3109        if !tx.is_async() {
3110            return;
3111        }
3112
3113        let pending = tx.pending_interrupts();
3114        let enabled = tx.is_listening();
3115
3116        if !pending.is_disjoint(enabled) {
3117            tx.unlisten(EnumSet::all());
3118
3119            tx.waker().wake()
3120        }
3121    }
3122}