esp_hal/dma/
mod.rs

//! # Direct Memory Access (DMA)
//!
//! ## Overview
//!
//! The DMA driver provides an interface to efficiently transfer data between
//! different memory regions and peripherals within the ESP microcontroller
//! without involving the CPU. The DMA controller is responsible for managing
//! these data transfers.
//!
//! Note that this module is the common part of the DMA driver: the `ESP32` and
//! `ESP32-S2` use the older `PDMA` controller, while other chips use the newer
//! `GDMA` controller.
//!
//! ## Examples
//!
//! ### Initialize and utilize DMA controller in `SPI`
//!
//! ```rust, no_run
#![doc = crate::before_snippet!()]
//! # use esp_hal::dma_buffers;
//! # use esp_hal::spi::{master::{Config, Spi}, Mode};
#![cfg_attr(pdma, doc = "let dma_channel = peripherals.DMA_SPI2;")]
#![cfg_attr(gdma, doc = "let dma_channel = peripherals.DMA_CH0;")]
//! let sclk = peripherals.GPIO0;
//! let miso = peripherals.GPIO2;
//! let mosi = peripherals.GPIO4;
//! let cs = peripherals.GPIO5;
//!
//! let mut spi = Spi::new(
//!     peripherals.SPI2,
//!     Config::default()
//!         .with_frequency(Rate::from_khz(100))
//!         .with_mode(Mode::_0),
//! )?
//! .with_sck(sclk)
//! .with_mosi(mosi)
//! .with_miso(miso)
//! .with_cs(cs)
//! .with_dma(dma_channel);
//! # Ok(())
//! # }
//! ```
//!
//! ⚠️ Note: Descriptors should be sized as `(max_transfer_size + CHUNK_SIZE - 1) / CHUNK_SIZE`.
//! I.e., to transfer buffers of size `1..=CHUNK_SIZE`, you need 1 descriptor.
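//! For example, with the default `CHUNK_SIZE` of 4092 bytes, a transfer of up
//! to 10000 bytes needs `(10000 + 4092 - 1) / 4092 = 3` descriptors.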
//!
//! ⚠️ Note: For chips that support DMA to/from PSRAM (ESP32-S3), such transfers
//! have extra alignment requirements. The address and size of the buffer pointed
//! to by each descriptor must be a multiple of the cache line (block) size,
//! which is 32 bytes on ESP32-S3.
//!
//! For convenience you can use the [crate::dma_buffers] macro.
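//!
//! For example, to statically allocate matching buffers and descriptors for
//! transfers of up to 32000 bytes in each direction:
//!
//! ```rust, no_run
#![doc = crate::before_snippet!()]
//! # use esp_hal::dma_buffers;
//! let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
//!     dma_buffers!(32000);
//! # Ok(())
//! # }
//! ```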

use core::{cmp::min, fmt::Debug, marker::PhantomData, sync::atomic::compiler_fence};

use enumset::{EnumSet, EnumSetType};

pub use self::buffers::*;
#[cfg(gdma)]
pub use self::gdma::*;
#[cfg(gdma)]
pub use self::m2m::*;
#[cfg(pdma)]
pub use self::pdma::*;
use crate::{
    interrupt::InterruptHandler,
    peripheral::{Peripheral, PeripheralRef},
    peripherals::Interrupt,
    soc::{is_slice_in_dram, is_valid_memory_address, is_valid_ram_address},
    system,
    system::Cpu,
    Async,
    Blocking,
    DriverMode,
};

trait Word: crate::private::Sealed {}

macro_rules! impl_word {
    ($w:ty) => {
        impl $crate::private::Sealed for $w {}
        impl Word for $w {}
    };
}

impl_word!(u8);
impl_word!(u16);
impl_word!(u32);
impl_word!(i8);
impl_word!(i16);
impl_word!(i32);

impl<W, const S: usize> crate::private::Sealed for [W; S] where W: Word {}

impl<W, const S: usize> crate::private::Sealed for &[W; S] where W: Word {}

impl<W> crate::private::Sealed for &[W] where W: Word {}

impl<W> crate::private::Sealed for &mut [W] where W: Word {}

/// Trait for buffers that can be given to DMA for reading.
///
/// # Safety
///
/// Once the `read_buffer` method has been called, it is unsafe to call any
/// `&mut self` methods on this object as long as the returned value is in use
/// (by DMA).
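///
/// # Example
///
/// A minimal sketch of implementing `ReadBuffer` for a custom wrapper type
/// (`StaticBuf` here is a hypothetical type), upholding the contract above:
///
/// ```rust,ignore
/// struct StaticBuf(&'static [u8]);
///
/// unsafe impl ReadBuffer for StaticBuf {
///     unsafe fn read_buffer(&self) -> (*const u8, usize) {
///         // The data lives for 'static and is never mutated, so the DMA
///         // may read from it for as long as needed.
///         (self.0.as_ptr(), self.0.len())
///     }
/// }
/// ```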
pub unsafe trait ReadBuffer {
    /// Provide a buffer usable for DMA reads.
    ///
    /// The return value is:
    ///
    /// - pointer to the start of the buffer
    /// - buffer size in bytes
    ///
    /// # Safety
    ///
    /// Once this method has been called, it is unsafe to call any `&mut self`
    /// methods on this object as long as the returned value is in use (by DMA).
    unsafe fn read_buffer(&self) -> (*const u8, usize);
}

unsafe impl<W, const S: usize> ReadBuffer for [W; S]
where
    W: Word,
{
    unsafe fn read_buffer(&self) -> (*const u8, usize) {
        (self.as_ptr() as *const u8, core::mem::size_of_val(self))
    }
}

unsafe impl<W, const S: usize> ReadBuffer for &[W; S]
where
    W: Word,
{
    unsafe fn read_buffer(&self) -> (*const u8, usize) {
        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
    }
}

unsafe impl<W, const S: usize> ReadBuffer for &mut [W; S]
where
    W: Word,
{
    unsafe fn read_buffer(&self) -> (*const u8, usize) {
        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
    }
}

unsafe impl<W> ReadBuffer for &[W]
where
    W: Word,
{
    unsafe fn read_buffer(&self) -> (*const u8, usize) {
        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
    }
}

unsafe impl<W> ReadBuffer for &mut [W]
where
    W: Word,
{
    unsafe fn read_buffer(&self) -> (*const u8, usize) {
        (self.as_ptr() as *const u8, core::mem::size_of_val(*self))
    }
}

/// Trait for buffers that can be given to DMA for writing.
///
/// # Safety
///
/// Once the `write_buffer` method has been called, it is unsafe to call any
/// `&mut self` methods, except for `write_buffer`, on this object as long as
/// the returned value is in use (by DMA).
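///
/// # Example
///
/// A minimal sketch of implementing `WriteBuffer` for a custom wrapper type
/// (`StaticMutBuf` here is a hypothetical type), upholding the contract above:
///
/// ```rust,ignore
/// struct StaticMutBuf(&'static mut [u8]);
///
/// unsafe impl WriteBuffer for StaticMutBuf {
///     unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
///         // The buffer lives for 'static and the caller promises not to
///         // touch it while the DMA writes into it.
///         (self.0.as_mut_ptr(), self.0.len())
///     }
/// }
/// ```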
pub unsafe trait WriteBuffer {
    /// Provide a buffer usable for DMA writes.
    ///
    /// The return value is:
    ///
    /// - pointer to the start of the buffer
    /// - buffer size in bytes
    ///
    /// # Safety
    ///
    /// Once this method has been called, it is unsafe to call any `&mut self`
    /// methods, except for `write_buffer`, on this object as long as the
    /// returned value is in use (by DMA).
    unsafe fn write_buffer(&mut self) -> (*mut u8, usize);
}

unsafe impl<W, const S: usize> WriteBuffer for [W; S]
where
    W: Word,
{
    unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
        (self.as_mut_ptr() as *mut u8, core::mem::size_of_val(self))
    }
}

unsafe impl<W, const S: usize> WriteBuffer for &mut [W; S]
where
    W: Word,
{
    unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
        (self.as_mut_ptr() as *mut u8, core::mem::size_of_val(*self))
    }
}

unsafe impl<W> WriteBuffer for &mut [W]
where
    W: Word,
{
    unsafe fn write_buffer(&mut self) -> (*mut u8, usize) {
        (self.as_mut_ptr() as *mut u8, core::mem::size_of_val(*self))
    }
}

bitfield::bitfield! {
    /// DMA descriptor flags.
    #[derive(Clone, Copy, PartialEq, Eq)]
    pub struct DmaDescriptorFlags(u32);

    u16;

    /// Specifies the size of the buffer that this descriptor points to.
    pub size, set_size: 11, 0;

    /// Specifies the number of valid bytes in the buffer that this descriptor points to.
    ///
    /// This field in a transmit descriptor is written by software and indicates how many bytes can
    /// be read from the buffer.
    ///
    /// This field in a receive descriptor is written by hardware automatically and indicates how
    /// many valid bytes have been stored into the buffer.
    pub length, set_length: 23, 12;

    /// For receive descriptors, software needs to clear this bit to 0, and hardware will set it to 1 after receiving
    /// data containing the EOF flag.
    /// For transmit descriptors, software needs to set this bit to 1 as needed.
    /// If software configures this bit to 1 in a descriptor, the DMA will include the EOF flag in the data sent to
    /// the corresponding peripheral, indicating to the peripheral that this data segment marks the end of one
    /// transfer phase.
    pub suc_eof, set_suc_eof: 30;

    /// Specifies who is allowed to access the buffer that this descriptor points to.
    /// - 0: CPU can access the buffer;
    /// - 1: The GDMA controller can access the buffer.
    pub owner, set_owner: 31;
}

impl Debug for DmaDescriptorFlags {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("DmaDescriptorFlags")
            .field("size", &self.size())
            .field("length", &self.length())
            .field("suc_eof", &self.suc_eof())
            .field("owner", &(if self.owner() { "DMA" } else { "CPU" }))
            .finish()
    }
}

#[cfg(feature = "defmt")]
impl defmt::Format for DmaDescriptorFlags {
    fn format(&self, fmt: defmt::Formatter<'_>) {
        defmt::write!(
            fmt,
            "DmaDescriptorFlags {{ size: {}, length: {}, suc_eof: {}, owner: {} }}",
            self.size(),
            self.length(),
            self.suc_eof(),
            if self.owner() { "DMA" } else { "CPU" }
        );
    }
}

/// A DMA transfer descriptor.
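///
/// For illustration, preparing a descriptor for a transmit buffer could look
/// like the sketch below (`buf` is assumed to be a byte buffer in internal
/// RAM); the buffer types and macros in this module normally do this for you:
///
/// ```rust,ignore
/// let mut desc = DmaDescriptor::EMPTY;
/// desc.buffer = buf.as_mut_ptr();  // address of the buffer
/// desc.set_size(buf.len());        // capacity of the buffer
/// desc.set_length(buf.len());      // number of valid bytes to send
/// desc.reset_for_tx(true);         // give ownership to the DMA, mark EOF
/// ```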
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(C)]
pub struct DmaDescriptor {
    /// Descriptor flags.
    pub flags: DmaDescriptorFlags,

    /// Address of the buffer.
    pub buffer: *mut u8,

    /// Address of the next descriptor.
    /// If the current descriptor is the last one, this value is 0.
    /// This field can only point to internal RAM.
    pub next: *mut DmaDescriptor,
}

impl DmaDescriptor {
    /// An empty DMA descriptor used to initialize the descriptor list.
    pub const EMPTY: Self = Self {
        flags: DmaDescriptorFlags(0),
        buffer: core::ptr::null_mut(),
        next: core::ptr::null_mut(),
    };

    /// Resets the descriptor for a new receive transfer.
    pub fn reset_for_rx(&mut self) {
        // Give ownership to the DMA
        self.set_owner(Owner::Dma);

        // Clear this to allow hardware to set it when the peripheral returns an EOF
        // bit.
        self.set_suc_eof(false);

        // Clear this to allow hardware to set it when it's
        // done receiving data for this descriptor.
        self.set_length(0);
    }

    /// Resets the descriptor for a new transmit transfer. See
    /// [DmaDescriptorFlags::suc_eof] for more details on the `set_eof`
    /// parameter.
    pub fn reset_for_tx(&mut self, set_eof: bool) {
        // Give ownership to the DMA
        self.set_owner(Owner::Dma);

        // The `suc_eof` bit doesn't affect the transfer itself, but signals when the
        // hardware should trigger an interrupt request.
        self.set_suc_eof(set_eof);
    }

    /// Set the size of the buffer. See [DmaDescriptorFlags::size].
    pub fn set_size(&mut self, len: usize) {
        self.flags.set_size(len as u16)
    }

    /// Set the length of the descriptor. See [DmaDescriptorFlags::length].
    pub fn set_length(&mut self, len: usize) {
        self.flags.set_length(len as u16)
    }

    /// Returns the size of the buffer. See [DmaDescriptorFlags::size].
    pub fn size(&self) -> usize {
        self.flags.size() as usize
    }

    /// Returns the length of the descriptor. See [DmaDescriptorFlags::length].
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.flags.length() as usize
    }

    /// Set the suc_eof bit. See [DmaDescriptorFlags::suc_eof].
    pub fn set_suc_eof(&mut self, suc_eof: bool) {
        self.flags.set_suc_eof(suc_eof)
    }

    /// Set the owner. See [DmaDescriptorFlags::owner].
    pub fn set_owner(&mut self, owner: Owner) {
        let owner = match owner {
            Owner::Cpu => false,
            Owner::Dma => true,
        };
        self.flags.set_owner(owner)
    }

    /// Returns the owner. See [DmaDescriptorFlags::owner].
    pub fn owner(&self) -> Owner {
        match self.flags.owner() {
            false => Owner::Cpu,
            true => Owner::Dma,
        }
    }
}

// The pointers in the descriptor can be Sent.
// Marking this Send also allows DmaBuffer implementations to automatically be
// Send (where the compiler sees fit).
unsafe impl Send for DmaDescriptor {}

mod buffers;
#[cfg(gdma)]
mod gdma;
#[cfg(gdma)]
mod m2m;
#[cfg(pdma)]
mod pdma;

/// Kinds of interrupt to listen to.
#[derive(Debug, EnumSetType)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaInterrupt {
    /// RX is done
    RxDone,
    /// TX is done
    TxDone,
}

/// Types of interrupts emitted by the TX channel.
#[derive(Debug, EnumSetType)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaTxInterrupt {
    /// Triggered when all data corresponding to a linked list (including
    /// multiple descriptors) have been sent via transmit channel.
    TotalEof,

    /// Triggered when an error is detected in a transmit descriptor on transmit
    /// channel.
    DescriptorError,

    /// Triggered when EOF in a transmit descriptor is true and data
    /// corresponding to this descriptor have been sent via transmit
    /// channel.
    Eof,

    /// Triggered when all data corresponding to a transmit descriptor have been
    /// sent via transmit channel.
    Done,
}

/// Types of interrupts emitted by the RX channel.
#[derive(Debug, EnumSetType)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaRxInterrupt {
    /// Triggered when the size of the buffer pointed to by receive descriptors
    /// is smaller than the length of data to be received via the receive
    /// channel.
    DescriptorEmpty,

    /// Triggered when an error is detected in a receive descriptor on receive
    /// channel.
    DescriptorError,

    /// Triggered when an error is detected in the data segment corresponding to
    /// a descriptor received via the receive channel.
    /// This interrupt is used only for the UHCI0 peripheral (UART0 or UART1).
    ErrorEof,

    /// Triggered when the suc_eof bit in a receive descriptor is 1 and the data
    /// corresponding to this receive descriptor has been received via receive
    /// channel.
    SuccessfulEof,

    /// Triggered when all data corresponding to a receive descriptor have been
    /// received via receive channel.
    Done,
}

/// The default chunk size used for DMA transfers.
pub const CHUNK_SIZE: usize = 4092;

/// Convenience macro to create DMA buffers and descriptors.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_buffers;
///
/// // RX and TX buffers are 32000 bytes - passing only one parameter makes RX
/// // and TX the same size.
/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
///     dma_buffers!(32000, 32000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_buffers {
    ($rx_size:expr, $tx_size:expr) => {
        $crate::dma_buffers_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
    };
    ($size:expr) => {
        $crate::dma_buffers_chunk_size!($size, $crate::dma::CHUNK_SIZE)
    };
}

/// Convenience macro to create circular DMA buffers and descriptors.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_circular_buffers;
///
/// // RX and TX buffers are 32000 bytes - passing only one parameter makes RX
/// // and TX the same size.
/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
///     dma_circular_buffers!(32000, 32000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_circular_buffers {
    ($rx_size:expr, $tx_size:expr) => {
        $crate::dma_circular_buffers_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
    };

    ($size:expr) => {
        $crate::dma_circular_buffers_chunk_size!($size, $size, $crate::dma::CHUNK_SIZE)
    };
}

/// Convenience macro to create DMA descriptors.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_descriptors;
///
/// // Create RX and TX descriptors for transactions up to 32000 bytes - passing
/// // only one parameter assumes RX and TX are the same size.
/// let (rx_descriptors, tx_descriptors) = dma_descriptors!(32000, 32000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_descriptors {
    ($rx_size:expr, $tx_size:expr) => {
        $crate::dma_descriptors_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
    };

    ($size:expr) => {
        $crate::dma_descriptors_chunk_size!($size, $size, $crate::dma::CHUNK_SIZE)
    };
}

/// Convenience macro to create circular DMA descriptors.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_circular_descriptors;
///
/// // Create RX and TX descriptors for transactions up to 32000
/// // bytes - passing only one parameter assumes RX and TX are the same size.
/// let (rx_descriptors, tx_descriptors) =
///     dma_circular_descriptors!(32000, 32000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_circular_descriptors {
    ($rx_size:expr, $tx_size:expr) => {
        $crate::dma_circular_descriptors_chunk_size!($rx_size, $tx_size, $crate::dma::CHUNK_SIZE)
    };

    ($size:expr) => {
        $crate::dma_circular_descriptors_chunk_size!($size, $size, $crate::dma::CHUNK_SIZE)
    };
}

/// Declares a DMA buffer with a specific size, aligned to 4 bytes
#[doc(hidden)]
#[macro_export]
macro_rules! declare_aligned_dma_buffer {
    ($name:ident, $size:expr) => {
        // ESP32 requires word alignment for DMA buffers.
        // ESP32-S2 technically supports byte-aligned DMA buffers, but the
        // transfer ends up writing out of bounds if the buffer's length
        // is 2 or 3 (mod 4).
        static mut $name: [u32; ($size + 3) / 4] = [0; ($size + 3) / 4];
    };
}

/// Turns the potentially oversized static `u32` array reference into a
/// correctly sized `u8` one.
#[doc(hidden)]
#[macro_export]
macro_rules! as_mut_byte_array {
    ($name:expr, $size:expr) => {
        unsafe { &mut *($name.as_mut_ptr() as *mut [u8; $size]) }
    };
}
pub use as_mut_byte_array; // TODO: can be removed as soon as DMA is stabilized

/// Convenience macro to create DMA buffers and descriptors with specific chunk
/// size.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_buffers_chunk_size;
///
/// // TX and RX buffers are 32000 bytes - passing only one parameter makes TX
/// // and RX the same size.
/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
///     dma_buffers_chunk_size!(32000, 32000, 4032);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_buffers_chunk_size {
    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{
        $crate::dma_buffers_impl!($rx_size, $tx_size, $chunk_size, is_circular = false)
    }};

    ($size:expr, $chunk_size:expr) => {
        $crate::dma_buffers_chunk_size!($size, $size, $chunk_size)
    };
}

/// Convenience macro to create circular DMA buffers and descriptors with
/// specific chunk size.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_circular_buffers_chunk_size;
///
/// // RX and TX buffers are 32000 bytes - passing only one parameter makes RX
/// // and TX the same size.
/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) =
///     dma_circular_buffers_chunk_size!(32000, 32000, 4032);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_circular_buffers_chunk_size {
    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{
        $crate::dma_buffers_impl!($rx_size, $tx_size, $chunk_size, is_circular = true)
    }};

    ($size:expr, $chunk_size:expr) => {{
        $crate::dma_circular_buffers_chunk_size!($size, $size, $chunk_size)
    }};
}

/// Convenience macro to create DMA descriptors with specific chunk size
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_descriptors_chunk_size;
///
/// // Create RX and TX descriptors for transactions up to 32000 bytes - passing
/// // only one parameter assumes RX and TX are the same size.
/// let (rx_descriptors, tx_descriptors) =
///     dma_descriptors_chunk_size!(32000, 32000, 4032);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_descriptors_chunk_size {
    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{
        $crate::dma_descriptors_impl!($rx_size, $tx_size, $chunk_size, is_circular = false)
    }};

    ($size:expr, $chunk_size:expr) => {
        $crate::dma_descriptors_chunk_size!($size, $size, $chunk_size)
    };
}

/// Convenience macro to create circular DMA descriptors with specific chunk
/// size
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_circular_descriptors_chunk_size;
///
/// // Create RX and TX descriptors for transactions up to 32000 bytes - passing
/// // only one parameter assumes RX and TX are the same size.
/// let (rx_descriptors, tx_descriptors) =
///     dma_circular_descriptors_chunk_size!(32000, 32000, 4032);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_circular_descriptors_chunk_size {
    ($rx_size:expr, $tx_size:expr, $chunk_size:expr) => {{
        $crate::dma_descriptors_impl!($rx_size, $tx_size, $chunk_size, is_circular = true)
    }};

    ($size:expr, $chunk_size:expr) => {
        $crate::dma_circular_descriptors_chunk_size!($size, $size, $chunk_size)
    };
}

#[doc(hidden)]
#[macro_export]
macro_rules! dma_buffers_impl {
    ($rx_size:expr, $tx_size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
        let rx = $crate::dma_buffers_impl!($rx_size, $chunk_size, is_circular = $circular);
        let tx = $crate::dma_buffers_impl!($tx_size, $chunk_size, is_circular = $circular);
        (rx.0, rx.1, tx.0, tx.1)
    }};

    ($size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
        $crate::declare_aligned_dma_buffer!(BUFFER, $size);

        unsafe {
            (
                $crate::dma::as_mut_byte_array!(BUFFER, $size),
                $crate::dma_descriptors_impl!($size, $chunk_size, is_circular = $circular),
            )
        }
    }};

    ($size:expr, is_circular = $circular:tt) => {
        $crate::dma_buffers_impl!(
            $size,
            $crate::dma::BurstConfig::DEFAULT.max_compatible_chunk_size(),
            is_circular = $circular
        );
    };
}

#[doc(hidden)]
#[macro_export]
macro_rules! dma_descriptors_impl {
    ($rx_size:expr, $tx_size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
        let rx = $crate::dma_descriptors_impl!($rx_size, $chunk_size, is_circular = $circular);
        let tx = $crate::dma_descriptors_impl!($tx_size, $chunk_size, is_circular = $circular);
        (rx, tx)
    }};

    ($size:expr, $chunk_size:expr, is_circular = $circular:tt) => {{
        const COUNT: usize =
            $crate::dma_descriptor_count!($size, $chunk_size, is_circular = $circular);

        static mut DESCRIPTORS: [$crate::dma::DmaDescriptor; COUNT] =
            [$crate::dma::DmaDescriptor::EMPTY; COUNT];

        unsafe { &mut DESCRIPTORS }
    }};
}

#[doc(hidden)]
#[macro_export]
macro_rules! dma_descriptor_count {
    ($size:expr, $chunk_size:expr, is_circular = $is_circular:tt) => {{
        const {
            ::core::assert!($chunk_size <= 4095, "chunk size must be <= 4095");
            ::core::assert!($chunk_size > 0, "chunk size must be > 0");
        }

        // We allow 0 in the macros as a "not needed" case.
        if $size == 0 {
            0
        } else {
            $crate::dma::descriptor_count($size, $chunk_size, $is_circular)
        }
    }};
}

/// Convenience macro to create a DmaTxBuf from buffer size. The buffer and
/// descriptors are statically allocated and used to create the `DmaTxBuf`.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_tx_buffer;
///
/// let tx_buf = dma_tx_buffer!(32000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_tx_buffer {
    ($tx_size:expr) => {{
        let (tx_buffer, tx_descriptors) = $crate::dma_buffers_impl!($tx_size, is_circular = false);

        $crate::dma::DmaTxBuf::new(tx_descriptors, tx_buffer)
    }};
}

/// Convenience macro to create a [DmaRxStreamBuf] from buffer size and
/// optional chunk size (uses max if unspecified).
/// The buffer and descriptors are statically allocated and
/// used to create the [DmaRxStreamBuf].
///
/// Smaller chunk sizes are recommended for lower latency.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_rx_stream_buffer;
///
/// let buf = dma_rx_stream_buffer!(32000);
/// let buf = dma_rx_stream_buffer!(32000, 1000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_rx_stream_buffer {
    ($rx_size:expr) => {
        $crate::dma_rx_stream_buffer!($rx_size, 4095)
    };
    ($rx_size:expr, $chunk_size:expr) => {{
        let (buffer, descriptors) =
            $crate::dma_buffers_impl!($rx_size, $chunk_size, is_circular = false);

        $crate::dma::DmaRxStreamBuf::new(descriptors, buffer).unwrap()
    }};
}

/// Convenience macro to create a [DmaLoopBuf] from a buffer size.
///
/// ## Usage
/// ```rust,no_run
#[doc = crate::before_snippet!()]
/// use esp_hal::dma_loop_buffer;
///
/// let buf = dma_loop_buffer!(2000);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! dma_loop_buffer {
    ($size:expr) => {{
        const {
            ::core::assert!($size <= 4095, "size must be <= 4095");
            ::core::assert!($size > 0, "size must be > 0");
        }

        let (buffer, descriptors) = $crate::dma_buffers_impl!($size, $size, is_circular = false);

        $crate::dma::DmaLoopBuf::new(&mut descriptors[0], buffer).unwrap()
    }};
}

/// DMA Errors
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaError {
    /// The alignment of data is invalid
    InvalidAlignment(DmaAlignmentError),
    /// More descriptors are needed for the buffer size
    OutOfDescriptors,
    /// The DMA rejected the descriptor configuration. This
    /// could be because the source address of the data is not in RAM. Ensure
    /// your source data is in a valid address space.
    DescriptorError,
    /// The available free buffer is less than the amount of data to push
    Overflow,
    /// The given buffer is too small
    BufferTooSmall,
    /// Descriptors or buffers are not located in a supported memory region
    UnsupportedMemoryRegion,
    /// Invalid DMA chunk size
    InvalidChunkSize,
    /// Indicates writing to or reading from a circular DMA transaction is done
    /// too late and the DMA buffers already overrun / underrun.
    Late,
}

impl From<DmaBufError> for DmaError {
    fn from(error: DmaBufError) -> Self {
        // FIXME: use nested errors
        match error {
            DmaBufError::InsufficientDescriptors => DmaError::OutOfDescriptors,
            DmaBufError::UnsupportedMemoryRegion => DmaError::UnsupportedMemoryRegion,
            DmaBufError::InvalidAlignment(err) => DmaError::InvalidAlignment(err),
            DmaBufError::InvalidChunkSize => DmaError::InvalidChunkSize,
            DmaBufError::BufferTooSmall => DmaError::BufferTooSmall,
        }
    }
}

/// DMA Priorities
#[cfg(gdma)]
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaPriority {
    /// The lowest priority level (Priority 0).
    Priority0 = 0,
    /// Priority level 1.
    Priority1 = 1,
    /// Priority level 2.
    Priority2 = 2,
    /// Priority level 3.
    Priority3 = 3,
    /// Priority level 4.
    Priority4 = 4,
    /// Priority level 5.
    Priority5 = 5,
    /// Priority level 6.
    Priority6 = 6,
    /// Priority level 7.
    Priority7 = 7,
    /// Priority level 8.
    Priority8 = 8,
    /// The highest priority level (Priority 9).
    Priority9 = 9,
}

/// DMA Priorities
/// The values need to match the TRM
#[cfg(pdma)]
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum DmaPriority {
    /// The lowest priority level (Priority 0).
    Priority0 = 0,
}

/// DMA capable peripherals
/// The values need to match the TRM
#[derive(Debug, Clone, Copy, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[doc(hidden)]
pub enum DmaPeripheral {
    Spi2      = 0,
    #[cfg(any(pdma, esp32s3))]
    Spi3      = 1,
    #[cfg(any(esp32c2, esp32c6, esp32h2))]
    Mem2Mem1  = 1,
    #[cfg(any(esp32c3, esp32c6, esp32h2, esp32s3))]
    Uhci0     = 2,
    #[cfg(any(esp32, esp32s2, esp32c3, esp32c6, esp32h2, esp32s3))]
    I2s0      = 3,
    #[cfg(any(esp32, esp32s3))]
    I2s1      = 4,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem4  = 4,
    #[cfg(esp32s3)]
    LcdCam    = 5,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem5  = 5,
    #[cfg(not(esp32c2))]
    Aes       = 6,
    #[cfg(any(esp32s2, gdma))]
    Sha       = 7,
    #[cfg(any(esp32c3, esp32c6, esp32h2, esp32s3))]
    Adc       = 8,
    #[cfg(esp32s3)]
    Rmt       = 9,
    #[cfg(parl_io)]
    ParlIo    = 9,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem10 = 10,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem11 = 11,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem12 = 12,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem13 = 13,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem14 = 14,
    #[cfg(any(esp32c6, esp32h2))]
    Mem2Mem15 = 15,
}

/// The owner bit of a DMA descriptor.
#[derive(PartialEq, PartialOrd)]
pub enum Owner {
    /// Owned by CPU
    Cpu = 0,
    /// Owned by DMA
    Dma = 1,
}

impl From<u32> for Owner {
    fn from(value: u32) -> Self {
        match value {
            0 => Owner::Cpu,
            _ => Owner::Dma,
        }
    }
}

#[doc(hidden)]
pub trait DmaEligible {
    /// The most specific DMA channel type usable by this peripheral.
    type Dma: DmaChannel;

    fn dma_peripheral(&self) -> DmaPeripheral;
}

#[doc(hidden)]
#[derive(Debug)]
pub struct DescriptorChain {
    pub(crate) descriptors: &'static mut [DmaDescriptor],
    chunk_size: usize,
}

impl DescriptorChain {
    pub fn new(descriptors: &'static mut [DmaDescriptor]) -> Self {
        Self::new_with_chunk_size(descriptors, CHUNK_SIZE)
    }

    pub fn new_with_chunk_size(
        descriptors: &'static mut [DmaDescriptor],
        chunk_size: usize,
    ) -> Self {
        Self {
            descriptors,
            chunk_size,
        }
    }

    pub fn first_mut(&mut self) -> *mut DmaDescriptor {
        self.descriptors.as_mut_ptr()
    }

    pub fn first(&self) -> *const DmaDescriptor {
        self.descriptors.as_ptr()
    }

    pub fn last_mut(&mut self) -> *mut DmaDescriptor {
        self.descriptors.last_mut().unwrap()
    }

    pub fn last(&self) -> *const DmaDescriptor {
        self.descriptors.last().unwrap()
    }

    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    pub fn fill_for_rx(
        &mut self,
        circular: bool,
        data: *mut u8,
        len: usize,
    ) -> Result<(), DmaError> {
        self.fill(circular, data, len, |desc, _| {
            desc.reset_for_rx();
            // Descriptor::size has been set up by `fill`
        })
    }

    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    pub fn fill_for_tx(
        &mut self,
        is_circular: bool,
        data: *const u8,
        len: usize,
    ) -> Result<(), DmaError> {
        self.fill(is_circular, data.cast_mut(), len, |desc, chunk_size| {
            // In circular mode, we set the `suc_eof` bit for every buffer we send. We use
            // this for I2S to track progress of a transfer by checking OUTLINK_DSCR_ADDR.
            // In non-circular mode, we only set `suc_eof` for the last descriptor to signal
            // the end of the transfer.
            desc.reset_for_tx(desc.next.is_null() || is_circular);
            desc.set_length(chunk_size); // align to 32 bits?
        })
    }

    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    pub fn fill(
        &mut self,
        circular: bool,
        data: *mut u8,
        len: usize,
        prepare_descriptor: impl Fn(&mut DmaDescriptor, usize),
    ) -> Result<(), DmaError> {
        if !is_valid_ram_address(self.first() as usize)
            || !is_valid_ram_address(self.last() as usize)
            || !is_valid_memory_address(data as usize)
            || !is_valid_memory_address(unsafe { data.add(len) } as usize)
        {
            return Err(DmaError::UnsupportedMemoryRegion);
        }

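        // A circular descriptor chain needs at least three descriptors (see
        // `descriptor_count`), so small circular buffers are split into
        // smaller chunks here.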
        let max_chunk_size = if circular && len <= self.chunk_size * 2 {
            if len <= 3 {
                return Err(DmaError::BufferTooSmall);
            }
            len / 3 + len % 3
        } else {
            self.chunk_size
        };

        DescriptorSet::set_up_buffer_ptrs(
            unsafe { core::slice::from_raw_parts_mut(data, len) },
            self.descriptors,
            max_chunk_size,
            circular,
        )?;
        DescriptorSet::set_up_descriptors(
            self.descriptors,
            len,
            max_chunk_size,
            circular,
            prepare_descriptor,
        )?;

        Ok(())
    }
}

/// Computes the number of descriptors required for a given buffer size with
/// a given chunk size.
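///
/// For example (using the default `CHUNK_SIZE` of 4092 bytes):
///
/// ```rust,ignore
/// assert_eq!(descriptor_count(10000, 4092, false), 3); // 10000.div_ceil(4092)
/// assert_eq!(descriptor_count(100, 4092, false), 1);   // at least one descriptor
/// assert_eq!(descriptor_count(100, 4092, true), 3);    // circular chains need 3
/// ```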
pub const fn descriptor_count(buffer_size: usize, chunk_size: usize, is_circular: bool) -> usize {
    if is_circular && buffer_size <= chunk_size * 2 {
        return 3;
    }

    if buffer_size < chunk_size {
        // At least one descriptor is always required.
        return 1;
    }

    buffer_size.div_ceil(chunk_size)
}

#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct DescriptorSet<'a> {
    descriptors: &'a mut [DmaDescriptor],
}

impl<'a> DescriptorSet<'a> {
    /// Creates a new `DescriptorSet` from a slice of descriptors, clearing them
    /// and checking that they are located in a supported memory region.
    fn new(descriptors: &'a mut [DmaDescriptor]) -> Result<Self, DmaBufError> {
        if !is_slice_in_dram(descriptors) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }

        descriptors.fill(DmaDescriptor::EMPTY);

        Ok(unsafe { Self::new_unchecked(descriptors) })
    }

    /// Creates a new `DescriptorSet` from a slice of descriptors without
    /// checking where they are located.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the descriptors are located in a supported
    /// memory region.
    unsafe fn new_unchecked(descriptors: &'a mut [DmaDescriptor]) -> Self {
        Self { descriptors }
    }

    /// Consumes the `DescriptorSet` and returns the inner slice of descriptors.
    fn into_inner(self) -> &'a mut [DmaDescriptor] {
        self.descriptors
    }

    /// Returns a pointer to the first descriptor in the chain.
    fn head(&mut self) -> *mut DmaDescriptor {
        self.descriptors.as_mut_ptr()
    }

    /// Returns an iterator over the linked descriptors.
    fn linked_iter(&self) -> impl Iterator<Item = &DmaDescriptor> {
        let mut was_last = false;
        self.descriptors.iter().take_while(move |d| {
            if was_last {
                false
            } else {
                was_last = d.next.is_null();
                true
            }
        })
    }

    /// Returns an iterator over the linked descriptors.
    fn linked_iter_mut(&mut self) -> impl Iterator<Item = &mut DmaDescriptor> {
        let mut was_last = false;
        self.descriptors.iter_mut().take_while(move |d| {
            if was_last {
                false
            } else {
                was_last = d.next.is_null();
                true
            }
        })
    }

    /// Associate each descriptor with a chunk of the buffer.
    ///
    /// This function checks the alignment and location of the buffer.
    ///
    /// See [`Self::set_up_buffer_ptrs`] for more details.
    fn link_with_buffer(
        &mut self,
        buffer: &mut [u8],
        chunk_size: usize,
    ) -> Result<(), DmaBufError> {
        Self::set_up_buffer_ptrs(buffer, self.descriptors, chunk_size, false)
    }

    /// Prepares descriptors for transferring `len` bytes of data.
    ///
    /// See [`Self::set_up_descriptors`] for more details.
    fn set_length(
        &mut self,
        len: usize,
        chunk_size: usize,
        prepare: fn(&mut DmaDescriptor, usize),
    ) -> Result<(), DmaBufError> {
        Self::set_up_descriptors(self.descriptors, len, chunk_size, false, prepare)
    }

    /// Prepares descriptors for reading `len` bytes of data.
    ///
    /// See [`Self::set_up_descriptors`] for more details.
    fn set_rx_length(&mut self, len: usize, chunk_size: usize) -> Result<(), DmaBufError> {
        self.set_length(len, chunk_size, |desc, chunk_size| {
            desc.set_size(chunk_size);
        })
    }

    /// Prepares descriptors for writing `len` bytes of data.
    ///
    /// See [`Self::set_up_descriptors`] for more details.
    fn set_tx_length(&mut self, len: usize, chunk_size: usize) -> Result<(), DmaBufError> {
        self.set_length(len, chunk_size, |desc, chunk_size| {
            desc.set_length(chunk_size);
        })
    }

    /// Returns a slice of descriptors that can cover a buffer of length `len`.
    fn descriptors_for_buffer_len(
        descriptors: &mut [DmaDescriptor],
        len: usize,
        chunk_size: usize,
        is_circular: bool,
    ) -> Result<&mut [DmaDescriptor], DmaBufError> {
        // First, pick enough descriptors to cover the buffer.
        let required_descriptors = descriptor_count(len, chunk_size, is_circular);
        if descriptors.len() < required_descriptors {
            return Err(DmaBufError::InsufficientDescriptors);
        }
        Ok(&mut descriptors[..required_descriptors])
    }

    /// Prepares descriptors for transferring `len` bytes of data.
    ///
    /// `Prepare` means setting up the descriptor lengths and flags, as well as
    /// linking the descriptors into a linked list.
    ///
    /// The actual descriptor setup is done in a callback, because different
    /// transfer directions require different descriptor setup.
    fn set_up_descriptors(
        descriptors: &mut [DmaDescriptor],
        len: usize,
        chunk_size: usize,
        is_circular: bool,
        prepare: impl Fn(&mut DmaDescriptor, usize),
    ) -> Result<(), DmaBufError> {
        let descriptors =
            Self::descriptors_for_buffer_len(descriptors, len, chunk_size, is_circular)?;

        // Link up the descriptors.
        let mut next = if is_circular {
            descriptors.as_mut_ptr()
        } else {
            core::ptr::null_mut()
        };
        for desc in descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }

        // Prepare each descriptor.
        let mut remaining_length = len;
        for desc in descriptors.iter_mut() {
            let chunk_size = min(chunk_size, remaining_length);
            prepare(desc, chunk_size);
            remaining_length -= chunk_size;
        }
        debug_assert_eq!(remaining_length, 0);

        Ok(())
    }

    /// Associate each descriptor with a chunk of the buffer.
    ///
    /// This function does not check the alignment and location of the buffer,
    /// because some callers may not have enough information currently.
    ///
    /// This function does not set up descriptor lengths or states.
    ///
    /// This function also does not link descriptors into a linked list. This is
    /// intentional, because it is done in `set_up_descriptors` to support
    /// changing length without requiring buffer pointers to be set
    /// repeatedly.
    fn set_up_buffer_ptrs(
        buffer: &mut [u8],
        descriptors: &mut [DmaDescriptor],
        chunk_size: usize,
        is_circular: bool,
    ) -> Result<(), DmaBufError> {
        let descriptors =
            Self::descriptors_for_buffer_len(descriptors, buffer.len(), chunk_size, is_circular)?;

        let chunks = buffer.chunks_mut(chunk_size);
        for (desc, chunk) in descriptors.iter_mut().zip(chunks) {
            desc.set_size(chunk.len());
            desc.buffer = chunk.as_mut_ptr();
        }

        Ok(())
    }
}

/// Block size for transfers to/from PSRAM
#[cfg(psram_dma)]
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum DmaExtMemBKSize {
    /// External memory block size of 16 bytes.
    Size16 = 0,
    /// External memory block size of 32 bytes.
    Size32 = 1,
    /// External memory block size of 64 bytes.
    Size64 = 2,
}

#[cfg(psram_dma)]
impl From<ExternalBurstConfig> for DmaExtMemBKSize {
    fn from(size: ExternalBurstConfig) -> Self {
        match size {
            ExternalBurstConfig::Size16 => DmaExtMemBKSize::Size16,
            ExternalBurstConfig::Size32 => DmaExtMemBKSize::Size32,
            ExternalBurstConfig::Size64 => DmaExtMemBKSize::Size64,
        }
    }
}

pub(crate) struct TxCircularState {
    write_offset: usize,
    write_descr_ptr: *mut DmaDescriptor,
    pub(crate) available: usize,
    last_seen_handled_descriptor_ptr: *mut DmaDescriptor,
    buffer_start: *const u8,
    buffer_len: usize,

    first_desc_ptr: *mut DmaDescriptor,
}

impl TxCircularState {
    pub(crate) fn new(chain: &mut DescriptorChain) -> Self {
        Self {
            write_offset: 0,
            write_descr_ptr: chain.first_mut(),
            available: 0,
            last_seen_handled_descriptor_ptr: chain.first_mut(),
            buffer_start: chain.descriptors[0].buffer as _,
            buffer_len: chain.descriptors.iter().map(|d| d.len()).sum(),

            first_desc_ptr: chain.first_mut(),
        }
    }

    pub(crate) fn update<T>(&mut self, channel: &T) -> Result<(), DmaError>
    where
        T: Tx,
    {
        if channel
            .pending_out_interrupts()
            .contains(DmaTxInterrupt::Eof)
        {
            channel.clear_out(DmaTxInterrupt::Eof);

            // Check if all descriptors are owned by the CPU - this indicates that we
            // failed to push data fast enough. In the future we can enable
            // `check_owner` and check the interrupt instead.
            let mut current = self.last_seen_handled_descriptor_ptr;
            loop {
                let descr = unsafe { current.read_volatile() };
                if descr.owner() == Owner::Cpu {
                    current = descr.next;
                } else {
                    break;
                }

                if current == self.last_seen_handled_descriptor_ptr {
                    return Err(DmaError::Late);
                }
            }

            let descr_address = channel.last_out_dscr_address() as *mut DmaDescriptor;

            let mut ptr = self.last_seen_handled_descriptor_ptr;
            if descr_address >= self.last_seen_handled_descriptor_ptr {
                unsafe {
                    while ptr < descr_address {
                        let dw0 = ptr.read_volatile();
                        self.available += dw0.len();
                        ptr = ptr.offset(1);
                    }
                }
            } else {
                unsafe {
                    while !((*ptr).next.is_null() || (*ptr).next == self.first_desc_ptr) {
                        let dw0 = ptr.read_volatile();
                        self.available += dw0.len();
                        ptr = ptr.offset(1);
                    }

                    // add bytes pointed to by the last descriptor
                    let dw0 = ptr.read_volatile();
                    self.available += dw0.len();

                    // in circular mode we need to honor the now available bytes at start
                    if (*ptr).next == self.first_desc_ptr {
                        ptr = self.first_desc_ptr;
                        while ptr < descr_address {
                            let dw0 = ptr.read_volatile();
                            self.available += dw0.len();
                            ptr = ptr.offset(1);
                        }
                    }
                }
            }

            if self.available >= self.buffer_len {
                unsafe {
                    let dw0 = self.write_descr_ptr.read_volatile();
                    let segment_len = dw0.len();
                    let next_descriptor = dw0.next;
                    self.available -= segment_len;
                    self.write_offset = (self.write_offset + segment_len) % self.buffer_len;

                    self.write_descr_ptr = if next_descriptor.is_null() {
                        self.first_desc_ptr
                    } else {
                        next_descriptor
                    }
                }
            }

            self.last_seen_handled_descriptor_ptr = descr_address;
        }

        Ok(())
    }

    pub(crate) fn push(&mut self, data: &[u8]) -> Result<usize, DmaError> {
        let avail = self.available;

        if avail < data.len() {
            return Err(DmaError::Overflow);
        }

        let mut remaining = data.len();
        let mut offset = 0;
        while self.available >= remaining && remaining > 0 {
            let written = self.push_with(|buffer| {
                let len = usize::min(buffer.len(), data.len() - offset);
                buffer[..len].copy_from_slice(&data[offset..][..len]);
                len
            })?;
            offset += written;
            remaining -= written;
        }

        Ok(data.len())
    }

    pub(crate) fn push_with(
        &mut self,
        f: impl FnOnce(&mut [u8]) -> usize,
    ) -> Result<usize, DmaError> {
        // This might write less than the available capacity in case of a wrap
        // around; the caller needs to check and write the remaining part.
        let written = unsafe {
            let dst = self.buffer_start.add(self.write_offset).cast_mut();
            let block_size = usize::min(self.available, self.buffer_len - self.write_offset);
            let buffer = core::slice::from_raw_parts_mut(dst, block_size);
            f(buffer)
        };

        let mut forward = written;
        loop {
            unsafe {
                let mut descr = self.write_descr_ptr.read_volatile();
                descr.set_owner(Owner::Dma);
                self.write_descr_ptr.write_volatile(descr);

                let segment_len = descr.len();
                self.write_descr_ptr = if descr.next.is_null() {
                    self.first_desc_ptr
                } else {
                    descr.next
                };

                if forward <= segment_len {
                    break;
                }

                forward -= segment_len;
            }
        }

        self.write_offset = (self.write_offset + written) % self.buffer_len;
        self.available -= written;

        Ok(written)
    }
}

pub(crate) struct RxCircularState {
    read_descr_ptr: *mut DmaDescriptor,
    pub(crate) available: usize,
    last_seen_handled_descriptor_ptr: *mut DmaDescriptor,
    last_descr_ptr: *mut DmaDescriptor,
}

impl RxCircularState {
    pub(crate) fn new(chain: &mut DescriptorChain) -> Self {
        Self {
            read_descr_ptr: chain.first_mut(),
            available: 0,
            last_seen_handled_descriptor_ptr: core::ptr::null_mut(),
            last_descr_ptr: chain.last_mut(),
        }
    }

    pub(crate) fn update(&mut self) -> Result<(), DmaError> {
        if self.last_seen_handled_descriptor_ptr.is_null() {
            // initially start at last descriptor (so that next will be the first
            // descriptor)
            self.last_seen_handled_descriptor_ptr = self.last_descr_ptr;
        }

        let mut current_in_descr_ptr =
            unsafe { self.last_seen_handled_descriptor_ptr.read_volatile() }.next;
        let mut current_in_descr = unsafe { current_in_descr_ptr.read_volatile() };

        let last_seen_ptr = self.last_seen_handled_descriptor_ptr;
        while current_in_descr.owner() == Owner::Cpu {
            self.available += current_in_descr.len();
            self.last_seen_handled_descriptor_ptr = current_in_descr_ptr;

            current_in_descr_ptr =
                unsafe { self.last_seen_handled_descriptor_ptr.read_volatile() }.next;
            current_in_descr = unsafe { current_in_descr_ptr.read_volatile() };

            if current_in_descr_ptr == last_seen_ptr {
                return Err(DmaError::Late);
            }
        }

        Ok(())
    }

    pub(crate) fn pop(&mut self, data: &mut [u8]) -> Result<usize, DmaError> {
        let len = data.len();
        let mut avail = self.available;

        if avail > len {
            return Err(DmaError::BufferTooSmall);
        }

        let mut remaining_buffer = data;
        let mut descr_ptr = self.read_descr_ptr;

        if descr_ptr.is_null() {
            return Ok(0);
        }

        let mut descr = unsafe { descr_ptr.read_volatile() };

        while avail > 0 && !remaining_buffer.is_empty() && remaining_buffer.len() >= descr.len() {
            unsafe {
                let dst = remaining_buffer.as_mut_ptr();
                let src = descr.buffer;
                let count = descr.len();
                core::ptr::copy_nonoverlapping(src, dst, count);

                descr.set_owner(Owner::Dma);
                descr.set_suc_eof(false);
                descr.set_length(0);
                descr_ptr.write_volatile(descr);

                remaining_buffer = &mut remaining_buffer[count..];
                avail -= count;
                descr_ptr = descr.next;
            }

            if descr_ptr.is_null() {
                break;
            }

            descr = unsafe { descr_ptr.read_volatile() };
        }

        self.read_descr_ptr = descr_ptr;
        self.available = avail;
        Ok(len - remaining_buffer.len())
    }
}

#[doc(hidden)]
macro_rules! impl_dma_eligible {
    ([$dma_ch:ident] $name:ident => $dma:ident) => {
        impl $crate::dma::DmaEligible for $crate::peripherals::$name {
            type Dma = $dma_ch;

            fn dma_peripheral(&self) -> $crate::dma::DmaPeripheral {
                $crate::dma::DmaPeripheral::$dma
            }
        }
    };

    (
        $dma_ch:ident {
            $($(#[$cfg:meta])? $name:ident => $dma:ident,)*
        }
    ) => {
        $(
            $(#[$cfg])?
            $crate::dma::impl_dma_eligible!([$dma_ch] $name => $dma);
        )*
    };
}

pub(crate) use impl_dma_eligible; // TODO: can be removed as soon as DMA is stabilized

/// Helper type to get the DMA (Rx and Tx) channel for a peripheral.
pub type PeripheralDmaChannel<T> = <T as DmaEligible>::Dma;
/// Helper type to get the DMA Rx channel for a peripheral.
pub type PeripheralRxChannel<T> = <PeripheralDmaChannel<T> as DmaChannel>::Rx;
/// Helper type to get the DMA Tx channel for a peripheral.
pub type PeripheralTxChannel<T> = <PeripheralDmaChannel<T> as DmaChannel>::Tx;

#[doc(hidden)]
pub trait DmaRxChannel:
    RxRegisterAccess + InterruptAccess<DmaRxInterrupt> + Peripheral<P = Self>
{
}

#[doc(hidden)]
pub trait DmaTxChannel:
    TxRegisterAccess + InterruptAccess<DmaTxInterrupt> + Peripheral<P = Self>
{
}

/// A description of a DMA Channel.
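///
/// For example, on chips where splitting is supported, a channel can be
/// divided into its RX and TX halves (a sketch; `DMA_CH0` is a GDMA channel
/// name):
///
/// ```rust,ignore
/// let (rx, tx) = peripherals.DMA_CH0.split();
/// ```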
pub trait DmaChannel: Peripheral<P = Self> {
    /// A description of the RX half of a DMA Channel.
    type Rx: DmaRxChannel;

    /// A description of the TX half of a DMA Channel.
    type Tx: DmaTxChannel;

    /// Splits the DMA channel into its RX and TX halves.
    #[cfg(any(esp32c6, esp32h2, esp32s3))] // TODO relax this to allow splitting on all chips
    fn split(self) -> (Self::Rx, Self::Tx) {
        // This function is exposed safely on chips that have separate IN and OUT
        // interrupt handlers.
        // TODO: this includes the P4 as well.
        unsafe { self.split_internal(crate::private::Internal) }
    }

    /// Splits the DMA channel into its RX and TX halves.
    ///
    /// # Safety
    ///
    /// This function must only be used if the separate halves are used by the
    /// same peripheral.
    unsafe fn split_internal(self, _: crate::private::Internal) -> (Self::Rx, Self::Tx);
}

#[doc(hidden)]
pub trait DmaChannelExt: DmaChannel {
    fn rx_interrupts() -> impl InterruptAccess<DmaRxInterrupt>;
    fn tx_interrupts() -> impl InterruptAccess<DmaTxInterrupt>;
}

#[diagnostic::on_unimplemented(
    message = "The DMA channel isn't suitable for this peripheral",
    label = "This DMA channel",
1648    note = "Not all channels are usable with all peripherals"
1649)]
1650#[doc(hidden)]
1651pub trait DmaChannelConvert<DEG> {
1652    fn degrade(self) -> DEG;
1653}
1654
1655impl<DEG: DmaChannel> DmaChannelConvert<DEG> for DEG {
1656    fn degrade(self) -> DEG {
1657        self
1658    }
1659}
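
// Sketch of how a driver typically uses `degrade`: it accepts any compatible
// channel and immediately converts it into the concrete channel type it
// stores (names are illustrative):
//
// fn with_dma<CH: DmaChannelConvert<PeripheralDmaChannel<AnySpi>>>(ch: CH) {
//     let ch: PeripheralDmaChannel<AnySpi> = ch.degrade();
//     // ... store `ch` in the driver ...
// }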
1660
1661/// Trait implemented for DMA channels that are compatible with a particular
1662/// peripheral.
1663///
1664/// You can use this in places where a peripheral driver would expect a
1665/// `DmaChannel` implementation.
1666#[cfg_attr(pdma, doc = "")]
1667#[cfg_attr(
1668    pdma,
1669    doc = "Note that using mismatching channels (e.g. trying to use `DMA_SPI2` with SPI3) may compile, but will panic at runtime."
1670)]
1671#[cfg_attr(pdma, doc = "")]
1672/// ## Example
1673///
1674/// The following example demonstrates how this trait can be used to accept only
1675/// types compatible with a specific peripheral.
1676///
1677/// ```rust,no_run
1678#[doc = crate::before_snippet!()]
1679/// use esp_hal::spi::AnySpi;
1680/// use esp_hal::spi::master::{Spi, SpiDma, Config, Instance as SpiInstance};
1681/// use esp_hal::dma::DmaChannelFor;
1682/// use esp_hal::peripheral::Peripheral;
1683/// use esp_hal::Blocking;
1684///
1685/// fn configures_spi_dma<'d, CH>(
1686///     spi: Spi<'d, Blocking>,
1687///     channel: impl Peripheral<P = CH> + 'd,
1688/// ) -> SpiDma<'d, Blocking>
1689/// where
1690///     CH: DmaChannelFor<AnySpi> + 'd,
1691/// {
1692///     spi.with_dma(channel)
1693/// }
1694#[cfg_attr(pdma, doc = "let dma_channel = peripherals.DMA_SPI2;")]
1695#[cfg_attr(gdma, doc = "let dma_channel = peripherals.DMA_CH0;")]
1696#[doc = ""]
1697/// let spi = Spi::new(
1698///     peripherals.SPI2,
1699///     Config::default(),
1700/// )?;
1701///
1702/// let spi_dma = configures_spi_dma(spi, dma_channel);
1703/// # Ok(())
1704/// # }
1705/// ```
1706pub trait DmaChannelFor<P: DmaEligible>:
1707    DmaChannel + DmaChannelConvert<PeripheralDmaChannel<P>>
1708{
1709}
1710impl<P, CH> DmaChannelFor<P> for CH
1711where
1712    P: DmaEligible,
1713    CH: DmaChannel + DmaChannelConvert<PeripheralDmaChannel<P>>,
1714{
1715}
1716
1717/// Trait implemented for the RX half of split DMA channels that are compatible
1718/// with a particular peripheral. Accepts complete DMA channels or split halves.
1719///
1720/// This trait is similar in use to [`DmaChannelFor`].
1721///
1722/// You can use this in places where a peripheral driver would expect a
1723/// `DmaRxChannel` implementation.
1724pub trait RxChannelFor<P: DmaEligible>: DmaChannelConvert<PeripheralRxChannel<P>> {}
1725impl<P, RX> RxChannelFor<P> for RX
1726where
1727    P: DmaEligible,
1728    RX: DmaChannelConvert<PeripheralRxChannel<P>>,
1729{
1730}
1731
1732/// Trait implemented for the TX half of split DMA channels that are compatible
1733/// with a particular peripheral. Accepts complete DMA channels or split halves.
1734///
1735/// This trait is similar in use to [`DmaChannelFor`].
1736///
1737/// You can use this in places where a peripheral driver would expect a
1738/// `DmaTxChannel` implementation.
1739pub trait TxChannelFor<PER: DmaEligible>: DmaChannelConvert<PeripheralTxChannel<PER>> {}
1740impl<P, TX> TxChannelFor<P> for TX
1741where
1742    P: DmaEligible,
1743    TX: DmaChannelConvert<PeripheralTxChannel<P>>,
1744{
1745}
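
// Sketch of a driver function that only needs one direction; it can accept
// either a complete channel or just the matching half (illustrative, modeled
// on the `DmaChannelFor` example above):
//
// fn configures_i2s_rx<'d, CH>(channel: impl Peripheral<P = CH> + 'd)
// where
//     CH: RxChannelFor<AnyI2s> + 'd,
// {
//     // Passing a whole channel (e.g. `peripherals.DMA_CH0`) or the RX half
//     // obtained from `split()` both satisfy the bound.
// }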
1746
1747/// The functions here are not meant to be used outside the HAL
1748#[doc(hidden)]
1749pub trait Rx: crate::private::Sealed {
1750    unsafe fn prepare_transfer_without_start(
1751        &mut self,
1752        peri: DmaPeripheral,
1753        chain: &DescriptorChain,
1754    ) -> Result<(), DmaError>;
1755
1756    unsafe fn prepare_transfer<BUF: DmaRxBuffer>(
1757        &mut self,
1758        peri: DmaPeripheral,
1759        buffer: &mut BUF,
1760    ) -> Result<(), DmaError>;
1761
1762    fn start_transfer(&mut self) -> Result<(), DmaError>;
1763
1764    fn stop_transfer(&mut self);
1765
1766    #[cfg(gdma)]
1767    fn set_mem2mem_mode(&mut self, value: bool);
1768
1769    fn listen_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>);
1770
1771    fn unlisten_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>);
1772
1773    fn is_listening_in(&self) -> EnumSet<DmaRxInterrupt>;
1774
1775    fn clear_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>);
1776
1777    fn pending_in_interrupts(&self) -> EnumSet<DmaRxInterrupt>;
1778
1779    fn is_done(&self) -> bool;
1780
1781    fn has_error(&self) -> bool {
1782        self.pending_in_interrupts()
1783            .contains(DmaRxInterrupt::DescriptorError)
1784    }
1785
1786    fn has_dscr_empty_error(&self) -> bool {
1787        self.pending_in_interrupts()
1788            .contains(DmaRxInterrupt::DescriptorEmpty)
1789    }
1790
1791    fn has_eof_error(&self) -> bool {
1792        self.pending_in_interrupts()
1793            .contains(DmaRxInterrupt::ErrorEof)
1794    }
1795
1796    fn clear_interrupts(&self);
1797
1798    fn waker(&self) -> &'static crate::asynch::AtomicWaker;
1799}
1800
1801// NOTE(p4): because the P4 has two different GDMAs, we won't be able to use
1802// `GenericPeripheralGuard`.
1803cfg_if::cfg_if! {
1804    if #[cfg(pdma)] {
1805        type PeripheralGuard = system::GenericPeripheralGuard<{ system::Peripheral::Dma as u8}>;
1806    } else {
1807        type PeripheralGuard = system::GenericPeripheralGuard<{ system::Peripheral::Gdma as u8}>;
1808    }
1809}
1810
1811fn create_guard(_ch: &impl RegisterAccess) -> PeripheralGuard {
1812    // NOTE(p4): this function will read the channel's DMA peripheral from `_ch`
1813    system::GenericPeripheralGuard::new_with(init_dma)
1814}
1815
1816/// DMA receive channel
1817#[non_exhaustive]
1818#[doc(hidden)]
1819pub struct ChannelRx<'a, Dm, CH>
1820where
1821    Dm: DriverMode,
1822    CH: DmaRxChannel,
1823{
1824    pub(crate) rx_impl: PeripheralRef<'a, CH>,
1825    pub(crate) _phantom: PhantomData<Dm>,
1826    pub(crate) _guard: PeripheralGuard,
1827}
1828
1829impl<'a, CH> ChannelRx<'a, Blocking, CH>
1830where
1831    CH: DmaRxChannel,
1832{
1833    /// Creates a new RX channel half.
1834    pub fn new(rx_impl: impl Peripheral<P = CH> + 'a) -> Self {
1835        crate::into_ref!(rx_impl);
1836
1837        let _guard = create_guard(&*rx_impl);
1838
1839        #[cfg(gdma)]
1840        // clear the mem2mem mode to avoid failed DMA if this
1841        // channel was previously used for a mem2mem transfer.
1842        rx_impl.set_mem2mem_mode(false);
1843
1844        if let Some(interrupt) = rx_impl.peripheral_interrupt() {
1845            for cpu in Cpu::all() {
1846                crate::interrupt::disable(cpu, interrupt);
1847            }
1848        }
1849        rx_impl.set_async(false);
1850
1851        Self {
1852            rx_impl,
1853            _phantom: PhantomData,
1854            _guard,
1855        }
1856    }
1857
1858    /// Converts a blocking channel to an async channel.
1859    pub(crate) fn into_async(mut self) -> ChannelRx<'a, Async, CH> {
1860        if let Some(handler) = self.rx_impl.async_handler() {
1861            self.set_interrupt_handler(handler);
1862        }
1863        self.rx_impl.set_async(true);
1864        ChannelRx {
1865            rx_impl: self.rx_impl,
1866            _phantom: PhantomData,
1867            _guard: self._guard,
1868        }
1869    }
1870
1871    fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
1872        self.unlisten_in(EnumSet::all());
1873        self.clear_in(EnumSet::all());
1874
1875        if let Some(interrupt) = self.rx_impl.peripheral_interrupt() {
1876            for core in crate::system::Cpu::other() {
1877                crate::interrupt::disable(core, interrupt);
1878            }
1879            unsafe { crate::interrupt::bind_interrupt(interrupt, handler.handler()) };
1880            unwrap!(crate::interrupt::enable(interrupt, handler.priority()));
1881        }
1882    }
1883}
1884
1885impl<'a, CH> ChannelRx<'a, Async, CH>
1886where
1887    CH: DmaRxChannel,
1888{
1889    /// Converts an async channel into a blocking channel.
1890    pub(crate) fn into_blocking(self) -> ChannelRx<'a, Blocking, CH> {
1891        if let Some(interrupt) = self.rx_impl.peripheral_interrupt() {
1892            crate::interrupt::disable(Cpu::current(), interrupt);
1893        }
1894        self.rx_impl.set_async(false);
1895        ChannelRx {
1896            rx_impl: self.rx_impl,
1897            _phantom: PhantomData,
1898            _guard: self._guard,
1899        }
1900    }
1901}
1902
1903impl<Dm, CH> ChannelRx<'_, Dm, CH>
1904where
1905    Dm: DriverMode,
1906    CH: DmaRxChannel,
1907{
1908    /// Configure the channel.
1909    #[cfg(gdma)]
1910    pub fn set_priority(&mut self, priority: DmaPriority) {
1911        self.rx_impl.set_priority(priority);
1912    }
1913
1914    fn do_prepare(
1915        &mut self,
1916        preparation: Preparation,
1917        peri: DmaPeripheral,
1918    ) -> Result<(), DmaError> {
1919        debug_assert_eq!(preparation.direction, TransferDirection::In);
1920
1921        debug!("Preparing RX transfer {:?}", preparation);
1922        trace!("First descriptor {:?}", unsafe { &*preparation.start });
1923
1924        #[cfg(psram_dma)]
1925        if preparation.accesses_psram && !self.rx_impl.can_access_psram() {
1926            return Err(DmaError::UnsupportedMemoryRegion);
1927        }
1928
1929        #[cfg(psram_dma)]
1930        self.rx_impl
1931            .set_ext_mem_block_size(preparation.burst_transfer.external_memory.into());
1932        self.rx_impl.set_burst_mode(preparation.burst_transfer);
1933        self.rx_impl.set_descr_burst_mode(true);
1934        self.rx_impl.set_check_owner(preparation.check_owner);
1935
1936        compiler_fence(core::sync::atomic::Ordering::SeqCst);
1937
1938        self.rx_impl.clear_all();
1939        self.rx_impl.reset();
1940        self.rx_impl.set_link_addr(preparation.start as u32);
1941        self.rx_impl.set_peripheral(peri as u8);
1942
1943        Ok(())
1944    }
1945}
1946
1947impl<Dm, CH> crate::private::Sealed for ChannelRx<'_, Dm, CH>
1948where
1949    Dm: DriverMode,
1950    CH: DmaRxChannel,
1951{
1952}
1953
1954impl<Dm, CH> Rx for ChannelRx<'_, Dm, CH>
1955where
1956    Dm: DriverMode,
1957    CH: DmaRxChannel,
1958{
1959    // TODO: used by I2S, which should be rewritten to use the Preparation-based
1960    // API.
1961    unsafe fn prepare_transfer_without_start(
1962        &mut self,
1963        peri: DmaPeripheral,
1964        chain: &DescriptorChain,
1965    ) -> Result<(), DmaError> {
1966        // We check each descriptor buffer that points to PSRAM for
1967        // alignment and invalidate the cache for that buffer.
1968        // NOTE: for RX the `buffer` and `size` need to be aligned but the `len` does
1969        // not. TRM section 3.4.9
1970        // Note that DmaBuffer implementations are required to do this for us.
1971        cfg_if::cfg_if! {
1972            if #[cfg(psram_dma)] {
1973                let mut uses_psram = false;
1974                let psram_range = crate::soc::psram_range();
1975                for des in chain.descriptors.iter() {
1976                    // we are forcing the DMA alignment to the cache line size
1977                    // required when we are using dcache
1978                    let alignment = crate::soc::cache_get_dcache_line_size() as usize;
1979                    if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) {
1980                        uses_psram = true;
1981                        // both the size and address of the buffer must be aligned
1982                        if des.buffer as usize % alignment != 0 {
1983                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address));
1984                        }
1985                        if des.size() % alignment != 0 {
1986                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Size));
1987                        }
1988                        crate::soc::cache_invalidate_addr(des.buffer as u32, des.size() as u32);
1989                    }
1990                }
1991            }
1992        }
1993
1994        let preparation = Preparation {
1995            start: chain.first().cast_mut(),
1996            direction: TransferDirection::In,
1997            #[cfg(psram_dma)]
1998            accesses_psram: uses_psram,
1999            burst_transfer: BurstConfig::default(),
2000            check_owner: Some(false),
2001            auto_write_back: true,
2002        };
2003        self.do_prepare(preparation, peri)
2004    }
2005
2006    unsafe fn prepare_transfer<BUF: DmaRxBuffer>(
2007        &mut self,
2008        peri: DmaPeripheral,
2009        buffer: &mut BUF,
2010    ) -> Result<(), DmaError> {
2011        let preparation = buffer.prepare();
2012
2013        self.do_prepare(preparation, peri)
2014    }
2015
2016    fn start_transfer(&mut self) -> Result<(), DmaError> {
2017        self.rx_impl.start();
2018
2019        if self
2020            .pending_in_interrupts()
2021            .contains(DmaRxInterrupt::DescriptorError)
2022        {
2023            Err(DmaError::DescriptorError)
2024        } else {
2025            Ok(())
2026        }
2027    }
2028
2029    fn stop_transfer(&mut self) {
2030        self.rx_impl.stop()
2031    }
2032
2033    #[cfg(gdma)]
2034    fn set_mem2mem_mode(&mut self, value: bool) {
2035        self.rx_impl.set_mem2mem_mode(value);
2036    }
2037
2038    fn listen_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>) {
2039        self.rx_impl.listen(interrupts);
2040    }
2041
2042    fn unlisten_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>) {
2043        self.rx_impl.unlisten(interrupts);
2044    }
2045
2046    fn is_listening_in(&self) -> EnumSet<DmaRxInterrupt> {
2047        self.rx_impl.is_listening()
2048    }
2049
2050    fn clear_in(&self, interrupts: impl Into<EnumSet<DmaRxInterrupt>>) {
2051        self.rx_impl.clear(interrupts);
2052    }
2053
2054    fn pending_in_interrupts(&self) -> EnumSet<DmaRxInterrupt> {
2055        self.rx_impl.pending_interrupts()
2056    }
2057
2058    fn is_done(&self) -> bool {
2059        self.pending_in_interrupts()
2060            .contains(DmaRxInterrupt::SuccessfulEof)
2061    }
2062
2063    fn clear_interrupts(&self) {
2064        self.rx_impl.clear_all();
2065    }
2066
2067    fn waker(&self) -> &'static crate::asynch::AtomicWaker {
2068        self.rx_impl.waker()
2069    }
2070}
2071
2072/// The functions here are not meant to be used outside the HAL
2073#[doc(hidden)]
2074pub trait Tx: crate::private::Sealed {
2075    unsafe fn prepare_transfer_without_start(
2076        &mut self,
2077        peri: DmaPeripheral,
2078        chain: &DescriptorChain,
2079    ) -> Result<(), DmaError>;
2080
2081    unsafe fn prepare_transfer<BUF: DmaTxBuffer>(
2082        &mut self,
2083        peri: DmaPeripheral,
2084        buffer: &mut BUF,
2085    ) -> Result<(), DmaError>;
2086
2087    fn listen_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>);
2088
2089    fn unlisten_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>);
2090
2091    fn is_listening_out(&self) -> EnumSet<DmaTxInterrupt>;
2092
2093    fn clear_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>);
2094
2095    fn pending_out_interrupts(&self) -> EnumSet<DmaTxInterrupt>;
2096
2097    fn start_transfer(&mut self) -> Result<(), DmaError>;
2098
2099    fn stop_transfer(&mut self);
2100
2101    fn is_done(&self) -> bool {
2102        self.pending_out_interrupts()
2103            .contains(DmaTxInterrupt::TotalEof)
2104    }
2105
2106    fn has_error(&self) -> bool {
2107        self.pending_out_interrupts()
2108            .contains(DmaTxInterrupt::DescriptorError)
2109    }
2110
2111    fn clear_interrupts(&self);
2112
2113    fn waker(&self) -> &'static crate::asynch::AtomicWaker;
2114
2115    fn last_out_dscr_address(&self) -> usize;
2116}
2117
2118/// DMA transmit channel
2119#[doc(hidden)]
2120pub struct ChannelTx<'a, Dm, CH>
2121where
2122    Dm: DriverMode,
2123    CH: DmaTxChannel,
2124{
2125    pub(crate) tx_impl: PeripheralRef<'a, CH>,
2126    pub(crate) _phantom: PhantomData<Dm>,
2127    pub(crate) _guard: PeripheralGuard,
2128}
2129
2130impl<'a, CH> ChannelTx<'a, Blocking, CH>
2131where
2132    CH: DmaTxChannel,
2133{
2134    /// Creates a new TX channel half.
2135    pub fn new(tx_impl: impl Peripheral<P = CH> + 'a) -> Self {
2136        crate::into_ref!(tx_impl);
2137
2138        let _guard = create_guard(&*tx_impl);
2139
2140        if let Some(interrupt) = tx_impl.peripheral_interrupt() {
2141            for cpu in Cpu::all() {
2142                crate::interrupt::disable(cpu, interrupt);
2143            }
2144        }
2145        tx_impl.set_async(false);
2146        Self {
2147            tx_impl,
2148            _phantom: PhantomData,
2149            _guard,
2150        }
2151    }
2152
2153    /// Converts a blocking channel to an async channel.
2154    pub(crate) fn into_async(mut self) -> ChannelTx<'a, Async, CH> {
2155        if let Some(handler) = self.tx_impl.async_handler() {
2156            self.set_interrupt_handler(handler);
2157        }
2158        self.tx_impl.set_async(true);
2159        ChannelTx {
2160            tx_impl: self.tx_impl,
2161            _phantom: PhantomData,
2162            _guard: self._guard,
2163        }
2164    }
2165
2166    fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
2167        self.unlisten_out(EnumSet::all());
2168        self.clear_out(EnumSet::all());
2169
2170        if let Some(interrupt) = self.tx_impl.peripheral_interrupt() {
2171            for core in crate::system::Cpu::other() {
2172                crate::interrupt::disable(core, interrupt);
2173            }
2174            unsafe { crate::interrupt::bind_interrupt(interrupt, handler.handler()) };
2175            unwrap!(crate::interrupt::enable(interrupt, handler.priority()));
2176        }
2177    }
2178}
2179
2180impl<'a, CH> ChannelTx<'a, Async, CH>
2181where
2182    CH: DmaTxChannel,
2183{
2184    /// Converts an async channel into a blocking channel.
2185    pub(crate) fn into_blocking(self) -> ChannelTx<'a, Blocking, CH> {
2186        if let Some(interrupt) = self.tx_impl.peripheral_interrupt() {
2187            crate::interrupt::disable(Cpu::current(), interrupt);
2188        }
2189        self.tx_impl.set_async(false);
2190        ChannelTx {
2191            tx_impl: self.tx_impl,
2192            _phantom: PhantomData,
2193            _guard: self._guard,
2194        }
2195    }
2196}
2197
2198impl<Dm, CH> ChannelTx<'_, Dm, CH>
2199where
2200    Dm: DriverMode,
2201    CH: DmaTxChannel,
2202{
2203    /// Configure the channel priority.
2204    #[cfg(gdma)]
2205    pub fn set_priority(&mut self, priority: DmaPriority) {
2206        self.tx_impl.set_priority(priority);
2207    }
2208
2209    fn do_prepare(
2210        &mut self,
2211        preparation: Preparation,
2212        peri: DmaPeripheral,
2213    ) -> Result<(), DmaError> {
2214        debug_assert_eq!(preparation.direction, TransferDirection::Out);
2215
2216        debug!("Preparing TX transfer {:?}", preparation);
2217        trace!("First descriptor {:?}", unsafe { &*preparation.start });
2218
2219        #[cfg(psram_dma)]
2220        if preparation.accesses_psram && !self.tx_impl.can_access_psram() {
2221            return Err(DmaError::UnsupportedMemoryRegion);
2222        }
2223
2224        #[cfg(psram_dma)]
2225        self.tx_impl
2226            .set_ext_mem_block_size(preparation.burst_transfer.external_memory.into());
2227        self.tx_impl.set_burst_mode(preparation.burst_transfer);
2228        self.tx_impl.set_descr_burst_mode(true);
2229        self.tx_impl.set_check_owner(preparation.check_owner);
2230        self.tx_impl
2231            .set_auto_write_back(preparation.auto_write_back);
2232
2233        compiler_fence(core::sync::atomic::Ordering::SeqCst);
2234
2235        self.tx_impl.clear_all();
2236        self.tx_impl.reset();
2237        self.tx_impl.set_link_addr(preparation.start as u32);
2238        self.tx_impl.set_peripheral(peri as u8);
2239
2240        Ok(())
2241    }
2242}
2243
2244impl<Dm, CH> crate::private::Sealed for ChannelTx<'_, Dm, CH>
2245where
2246    Dm: DriverMode,
2247    CH: DmaTxChannel,
2248{
2249}
2250
2251impl<Dm, CH> Tx for ChannelTx<'_, Dm, CH>
2252where
2253    Dm: DriverMode,
2254    CH: DmaTxChannel,
2255{
2256    // TODO: used by I2S, which should be rewritten to use the Preparation-based
2257    // API.
2258    unsafe fn prepare_transfer_without_start(
2259        &mut self,
2260        peri: DmaPeripheral,
2261        chain: &DescriptorChain,
2262    ) -> Result<(), DmaError> {
2263        // Based on the ESP32-S3 TRM, the alignment check is not needed for TX.
2264
2265        // We check each descriptor buffer that points to PSRAM for
2266        // alignment and writeback the cache for that buffer.
2267        // Note that DmaBuffer implementations are required to do this for us.
2269        cfg_if::cfg_if! {
2270            if #[cfg(psram_dma)] {
2271                let mut uses_psram = false;
2272                let psram_range = crate::soc::psram_range();
2273                for des in chain.descriptors.iter() {
2274                    // we are forcing the DMA alignment to the cache line size
2275                    // required when we are using dcache
2276                    let alignment = crate::soc::cache_get_dcache_line_size() as usize;
2277                    if crate::soc::addr_in_range(des.buffer as usize, psram_range.clone()) {
2278                        uses_psram = true;
2279                        // both the size and address of the buffer must be aligned
2280                        if des.buffer as usize % alignment != 0 {
2281                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Address));
2282                        }
2283                        if des.size() % alignment != 0 {
2284                            return Err(DmaError::InvalidAlignment(DmaAlignmentError::Size));
2285                        }
2286                        crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32);
2287                    }
2288                }
2289            }
2290        }
2291
2292        let preparation = Preparation {
2293            start: chain.first().cast_mut(),
2294            direction: TransferDirection::Out,
2295            #[cfg(psram_dma)]
2296            accesses_psram: uses_psram,
2297            burst_transfer: BurstConfig::default(),
2298            check_owner: Some(false),
2299            // enable descriptor write back in circular mode
2300            auto_write_back: !(*chain.last()).next.is_null(),
2301        };
2302        self.do_prepare(preparation, peri)?;
2303
2304        Ok(())
2305    }
2306
2307    unsafe fn prepare_transfer<BUF: DmaTxBuffer>(
2308        &mut self,
2309        peri: DmaPeripheral,
2310        buffer: &mut BUF,
2311    ) -> Result<(), DmaError> {
2312        let preparation = buffer.prepare();
2313
2314        self.do_prepare(preparation, peri)
2315    }
2316
2317    fn start_transfer(&mut self) -> Result<(), DmaError> {
2318        self.tx_impl.start();
2319        while self.tx_impl.is_fifo_empty() && self.pending_out_interrupts().is_empty() {}
2320
2321        if self
2322            .pending_out_interrupts()
2323            .contains(DmaTxInterrupt::DescriptorError)
2324        {
2325            Err(DmaError::DescriptorError)
2326        } else {
2327            Ok(())
2328        }
2329    }
2330
2331    fn stop_transfer(&mut self) {
2332        self.tx_impl.stop()
2333    }
2334
2335    fn listen_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>) {
2336        self.tx_impl.listen(interrupts);
2337    }
2338
2339    fn unlisten_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>) {
2340        self.tx_impl.unlisten(interrupts);
2341    }
2342
2343    fn is_listening_out(&self) -> EnumSet<DmaTxInterrupt> {
2344        self.tx_impl.is_listening()
2345    }
2346
2347    fn clear_out(&self, interrupts: impl Into<EnumSet<DmaTxInterrupt>>) {
2348        self.tx_impl.clear(interrupts);
2349    }
2350
2351    fn pending_out_interrupts(&self) -> EnumSet<DmaTxInterrupt> {
2352        self.tx_impl.pending_interrupts()
2353    }
2354
2355    fn waker(&self) -> &'static crate::asynch::AtomicWaker {
2356        self.tx_impl.waker()
2357    }
2358
2359    fn clear_interrupts(&self) {
2360        self.tx_impl.clear_all();
2361    }
2362
2363    fn last_out_dscr_address(&self) -> usize {
2364        self.tx_impl.last_dscr_address()
2365    }
2366}
2367
2368#[doc(hidden)]
2369pub trait RegisterAccess: crate::private::Sealed {
2370    /// Reset the state machine of the channel and FIFO pointer.
2371    fn reset(&self);
2372
2373    /// Enable/disable INCR burst transfers when the channel
2374    /// accesses data in internal RAM.
2375    fn set_burst_mode(&self, burst_mode: BurstConfig);
2376
2377    /// Enable/disable burst transfers when the channel reads
2378    /// descriptors from internal RAM.
2379    fn set_descr_burst_mode(&self, burst_mode: bool);
2380
2381    /// Set the priority of the channel. The larger the value, the higher
2382    /// the priority.
2383    #[cfg(gdma)]
2384    fn set_priority(&self, priority: DmaPriority);
2385
2386    /// Select a peripheral for the channel.
2387    fn set_peripheral(&self, peripheral: u8);
2388
2389    /// Set the address of the first descriptor.
2390    fn set_link_addr(&self, address: u32);
2391
2392    /// Enable the channel for data transfer.
2393    fn start(&self);
2394
2395    /// Stop the channel from transferring data.
2396    fn stop(&self);
2397
2398    /// Mount a new descriptor.
2399    fn restart(&self);
2400
2401    /// Configure the bit to enable checking the owner attribute of the
2402    /// descriptor.
2403    fn set_check_owner(&self, check_owner: Option<bool>);
2404
2405    #[cfg(psram_dma)]
2406    fn set_ext_mem_block_size(&self, size: DmaExtMemBKSize);
2407
2408    #[cfg(pdma)]
2409    fn is_compatible_with(&self, peripheral: DmaPeripheral) -> bool;
2410
2411    #[cfg(psram_dma)]
2412    fn can_access_psram(&self) -> bool;
2413}
2414
2415#[doc(hidden)]
2416pub trait RxRegisterAccess: RegisterAccess {
2417    #[cfg(gdma)]
2418    fn set_mem2mem_mode(&self, value: bool);
2419
2420    fn peripheral_interrupt(&self) -> Option<Interrupt>;
2421    fn async_handler(&self) -> Option<InterruptHandler>;
2422}
2423
2424#[doc(hidden)]
2425pub trait TxRegisterAccess: RegisterAccess {
2426    /// Returns whether the DMA's FIFO is empty.
2427    fn is_fifo_empty(&self) -> bool;
2428
2429    /// Enable/disable outlink descriptor write-back.
2430    fn set_auto_write_back(&self, enable: bool);
2431
2432    /// Outlink descriptor address when EOF occurs on the TX channel.
2433    fn last_dscr_address(&self) -> usize;
2434
2435    fn peripheral_interrupt(&self) -> Option<Interrupt>;
2436    fn async_handler(&self) -> Option<InterruptHandler>;
2437}
2438
2439#[doc(hidden)]
2440pub trait InterruptAccess<T: EnumSetType>: crate::private::Sealed {
2441    fn listen(&self, interrupts: impl Into<EnumSet<T>>) {
2442        self.enable_listen(interrupts.into(), true)
2443    }
2444    fn unlisten(&self, interrupts: impl Into<EnumSet<T>>) {
2445        self.enable_listen(interrupts.into(), false)
2446    }
2447
2448    fn clear_all(&self) {
2449        self.clear(EnumSet::all());
2450    }
2451
2452    fn enable_listen(&self, interrupts: EnumSet<T>, enable: bool);
2453    fn is_listening(&self) -> EnumSet<T>;
2454    fn clear(&self, interrupts: impl Into<EnumSet<T>>);
2455    fn pending_interrupts(&self) -> EnumSet<T>;
2456    fn waker(&self) -> &'static crate::asynch::AtomicWaker;
2457
2458    fn is_async(&self) -> bool;
2459    fn set_async(&self, is_async: bool);
2460}
2461
2462/// DMA Channel
2463#[non_exhaustive]
2464pub struct Channel<'d, Dm, CH>
2465where
2466    Dm: DriverMode,
2467    CH: DmaChannel,
2468{
2469    /// RX half of the channel
2470    pub rx: ChannelRx<'d, Dm, CH::Rx>,
2471    /// TX half of the channel
2472    pub tx: ChannelTx<'d, Dm, CH::Tx>,
2473}
2474
2475impl<'d, CH> Channel<'d, Blocking, CH>
2476where
2477    CH: DmaChannel,
2478{
2479    /// Creates a new DMA channel driver.
2480    #[instability::unstable]
2481    pub fn new(channel: impl Peripheral<P = CH>) -> Self {
2482        let (rx, tx) = unsafe {
2483            channel
2484                .clone_unchecked()
2485                .split_internal(crate::private::Internal)
2486        };
2487        Self {
2488            rx: ChannelRx::new(rx),
2489            tx: ChannelTx::new(tx),
2490        }
2491    }
2492
2493    /// Sets the interrupt handler for RX and TX interrupts.
2494    ///
2495    /// Interrupts are not enabled at the peripheral level here.
2496    #[instability::unstable]
2497    pub fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
2498        self.rx.set_interrupt_handler(handler);
2499        self.tx.set_interrupt_handler(handler);
2500    }
2501
2502    /// Listen for the given interrupts
2503    pub fn listen(&mut self, interrupts: impl Into<EnumSet<DmaInterrupt>>) {
2504        for interrupt in interrupts.into() {
2505            match interrupt {
2506                DmaInterrupt::RxDone => self.rx.listen_in(DmaRxInterrupt::Done),
2507                DmaInterrupt::TxDone => self.tx.listen_out(DmaTxInterrupt::Done),
2508            }
2509        }
2510    }
2511
2512    /// Unlisten the given interrupts
2513    pub fn unlisten(&mut self, interrupts: impl Into<EnumSet<DmaInterrupt>>) {
2514        for interrupt in interrupts.into() {
2515            match interrupt {
2516                DmaInterrupt::RxDone => self.rx.unlisten_in(DmaRxInterrupt::Done),
2517                DmaInterrupt::TxDone => self.tx.unlisten_out(DmaTxInterrupt::Done),
2518            }
2519        }
2520    }
2521
2522    /// Gets asserted interrupts
2523    pub fn interrupts(&mut self) -> EnumSet<DmaInterrupt> {
2524        let mut res = EnumSet::new();
2525        if self.rx.is_done() {
2526            res.insert(DmaInterrupt::RxDone);
2527        }
2528        if self.tx.is_done() {
2529            res.insert(DmaInterrupt::TxDone);
2530        }
2531        res
2532    }
2533
2534    /// Resets asserted interrupts
2535    pub fn clear_interrupts(&mut self, interrupts: impl Into<EnumSet<DmaInterrupt>>) {
2536        for interrupt in interrupts.into() {
2537            match interrupt {
2538                DmaInterrupt::RxDone => self.rx.clear_in(DmaRxInterrupt::Done),
2539                DmaInterrupt::TxDone => self.tx.clear_out(DmaTxInterrupt::Done),
2540            }
2541        }
2542    }
2543
2544    /// Configure the channel priorities.
2545    #[cfg(gdma)]
2546    pub fn set_priority(&mut self, priority: DmaPriority) {
2547        self.tx.set_priority(priority);
2548        self.rx.set_priority(priority);
2549    }
2550
2551    /// Converts a blocking channel to an async channel.
2552    pub fn into_async(self) -> Channel<'d, Async, CH> {
2553        Channel {
2554            rx: self.rx.into_async(),
2555            tx: self.tx.into_async(),
2556        }
2557    }
2558}
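
// Sketch of constructing a channel driver manually and moving it to async
// (unstable API; a GDMA channel name is used for illustration):
//
// let channel = Channel::new(peripherals.DMA_CH0);
// let channel = channel.into_async();
// // `channel.rx` / `channel.tx` can now be used for async transfers.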
2559
2560impl<'d, CH> Channel<'d, Async, CH>
2561where
2562    CH: DmaChannel,
2563{
2564    /// Converts an async channel to a blocking channel.
2565    pub fn into_blocking(self) -> Channel<'d, Blocking, CH> {
2566        Channel {
2567            rx: self.rx.into_blocking(),
2568            tx: self.tx.into_blocking(),
2569        }
2570    }
2571}
2572
2573impl<'d, CH: DmaChannel> From<Channel<'d, Blocking, CH>> for Channel<'d, Async, CH> {
2574    fn from(channel: Channel<'d, Blocking, CH>) -> Self {
2575        channel.into_async()
2576    }
2577}
2578
2579impl<'d, CH: DmaChannel> From<Channel<'d, Async, CH>> for Channel<'d, Blocking, CH> {
2580    fn from(channel: Channel<'d, Async, CH>) -> Self {
2581        channel.into_blocking()
2582    }
2583}
2584
2585pub(crate) mod dma_private {
2586    use super::*;
2587
2588    pub trait DmaSupport {
2589        /// Wait until the transfer is done.
2590        ///
2591        /// Depending on the peripheral this might include checking the DMA
2592        /// channel and/or the peripheral.
2593        ///
2594        /// After this, all data should be processed by the peripheral - i.e. the
2595        /// peripheral should have processed its FIFO(s).
2596        ///
2597        /// Please note: This is called in the transfer's `wait` function _and_
2598        /// by its [Drop] implementation.
2599        fn peripheral_wait_dma(&mut self, is_rx: bool, is_tx: bool);
2600
2601        /// Only used by circular DMA transfers, in both the `stop` function
2602        /// _and_ its [Drop] implementation.
2603        fn peripheral_dma_stop(&mut self);
2604    }
2605
2606    pub trait DmaSupportTx: DmaSupport {
2607        type TX: Tx;
2608
2609        fn tx(&mut self) -> &mut Self::TX;
2610
2611        fn chain(&mut self) -> &mut DescriptorChain;
2612    }
2613
2614    pub trait DmaSupportRx: DmaSupport {
2615        type RX: Rx;
2616
2617        fn rx(&mut self) -> &mut Self::RX;
2618
2619        fn chain(&mut self) -> &mut DescriptorChain;
2620    }
2621}
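
// Sketch of what an implementor of the `dma_private` traits looks like
// (hypothetical driver type; method bodies elided):
//
// impl DmaSupport for MyDriver<'_> {
//     fn peripheral_wait_dma(&mut self, is_rx: bool, is_tx: bool) {
//         // block until the DMA channel(s) *and* the peripheral FIFOs are done
//     }
//     fn peripheral_dma_stop(&mut self) {
//         // stop a circular transfer at the peripheral level
//     }
// }
//
// impl DmaSupportTx for MyDriver<'_> {
//     type TX = ChannelTx<'static, Blocking, SomeTxChannel>; // illustrative
//     fn tx(&mut self) -> &mut Self::TX { &mut self.tx }
//     fn chain(&mut self) -> &mut DescriptorChain { &mut self.chain }
// }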
2622
2623/// DMA transaction for TX only transfers
2624///
2625/// # Safety
2626///
2627/// Never use [core::mem::forget] on an in-progress transfer
2628#[non_exhaustive]
2629#[must_use]
2630pub struct DmaTransferTx<'a, I>
2631where
2632    I: dma_private::DmaSupportTx,
2633{
2634    instance: &'a mut I,
2635}
2636
2637impl<'a, I> DmaTransferTx<'a, I>
2638where
2639    I: dma_private::DmaSupportTx,
2640{
2641    pub(crate) fn new(instance: &'a mut I) -> Self {
2642        Self { instance }
2643    }
2644
2645    /// Wait for the transfer to finish.
2646    pub fn wait(self) -> Result<(), DmaError> {
2647        self.instance.peripheral_wait_dma(false, true);
2648
2649        if self
2650            .instance
2651            .tx()
2652            .pending_out_interrupts()
2653            .contains(DmaTxInterrupt::DescriptorError)
2654        {
2655            Err(DmaError::DescriptorError)
2656        } else {
2657            Ok(())
2658        }
2659    }
2660
2661    /// Check if the transfer is finished.
2662    pub fn is_done(&mut self) -> bool {
2663        self.instance.tx().is_done()
2664    }
2665}
2666
2667impl<I> Drop for DmaTransferTx<'_, I>
2668where
2669    I: dma_private::DmaSupportTx,
2670{
2671    fn drop(&mut self) {
2672        self.instance.peripheral_wait_dma(false, true);
2673    }
2674}
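
// Typical use of a `DmaTransferTx` returned by a peripheral driver
// (sketch; `start_tx_transfer` is a hypothetical driver method):
//
// let mut transfer = driver.start_tx_transfer(&buffer)?;
// // ... do other work, optionally polling `transfer.is_done()` ...
// transfer.wait()?; // blocks until done and surfaces descriptor errors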
2675
2676/// DMA transaction for RX only transfers
2677///
2678/// # Safety
2679///
2680/// Never use [core::mem::forget] on an in-progress transfer
2681#[non_exhaustive]
2682#[must_use]
2683pub struct DmaTransferRx<'a, I>
2684where
2685    I: dma_private::DmaSupportRx,
2686{
2687    instance: &'a mut I,
2688}
2689
2690impl<'a, I> DmaTransferRx<'a, I>
2691where
2692    I: dma_private::DmaSupportRx,
2693{
2694    pub(crate) fn new(instance: &'a mut I) -> Self {
2695        Self { instance }
2696    }
2697
2698    /// Wait for the transfer to finish.
2699    pub fn wait(self) -> Result<(), DmaError> {
2700        self.instance.peripheral_wait_dma(true, false);
2701
2702        if self
2703            .instance
2704            .rx()
2705            .pending_in_interrupts()
2706            .contains(DmaRxInterrupt::DescriptorError)
2707        {
2708            Err(DmaError::DescriptorError)
2709        } else {
2710            Ok(())
2711        }
2712    }
2713
2714    /// Check if the transfer is finished.
2715    pub fn is_done(&mut self) -> bool {
2716        self.instance.rx().is_done()
2717    }
2718}
2719
2720impl<I> Drop for DmaTransferRx<'_, I>
2721where
2722    I: dma_private::DmaSupportRx,
2723{
2724    fn drop(&mut self) {
2725        self.instance.peripheral_wait_dma(true, false);
2726    }
2727}
2728
2729/// DMA transaction for TX+RX transfers
2730///
2731/// # Safety
2732///
2733/// Never use [core::mem::forget] on an in-progress transfer
2734#[non_exhaustive]
2735#[must_use]
2736pub struct DmaTransferRxTx<'a, I>
2737where
2738    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
2739{
2740    instance: &'a mut I,
2741}
2742
2743impl<'a, I> DmaTransferRxTx<'a, I>
2744where
2745    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
2746{
2747    #[allow(dead_code)]
2748    pub(crate) fn new(instance: &'a mut I) -> Self {
2749        Self { instance }
2750    }
2751
2752    /// Wait for the transfer to finish.
2753    pub fn wait(self) -> Result<(), DmaError> {
2754        self.instance.peripheral_wait_dma(true, true);
2755
2756        if self
2757            .instance
2758            .tx()
2759            .pending_out_interrupts()
2760            .contains(DmaTxInterrupt::DescriptorError)
2761            || self
2762                .instance
2763                .rx()
2764                .pending_in_interrupts()
2765                .contains(DmaRxInterrupt::DescriptorError)
2766        {
2767            Err(DmaError::DescriptorError)
2768        } else {
2769            Ok(())
2770        }
2771    }
2772
2773    /// Check if the transfer is finished.
2774    pub fn is_done(&mut self) -> bool {
2775        self.instance.tx().is_done() && self.instance.rx().is_done()
2776    }
2777}
2778
2779impl<I> Drop for DmaTransferRxTx<'_, I>
2780where
2781    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
2782{
2783    fn drop(&mut self) {
2784        self.instance.peripheral_wait_dma(true, true);
2785    }
2786}
2787
2788/// DMA transaction for TX only circular transfers
2789///
2790/// # Safety
2791///
2792/// Never use [core::mem::forget] on an in-progress transfer
2793#[non_exhaustive]
2794#[must_use]
2795pub struct DmaTransferTxCircular<'a, I>
2796where
2797    I: dma_private::DmaSupportTx,
2798{
2799    instance: &'a mut I,
2800    state: TxCircularState,
2801}
2802
2803impl<'a, I> DmaTransferTxCircular<'a, I>
2804where
2805    I: dma_private::DmaSupportTx,
2806{
2807    #[allow(unused)] // currently used by peripherals not available on all chips
2808    pub(crate) fn new(instance: &'a mut I) -> Self {
2809        let state = TxCircularState::new(instance.chain());
2810        Self { instance, state }
2811    }
2812
2813    /// Number of bytes that can be pushed.
2814    pub fn available(&mut self) -> Result<usize, DmaError> {
2815        self.state.update(self.instance.tx())?;
2816        Ok(self.state.available)
2817    }
2818
2819    /// Push bytes into the DMA buffer.
2820    pub fn push(&mut self, data: &[u8]) -> Result<usize, DmaError> {
2821        self.state.update(self.instance.tx())?;
2822        self.state.push(data)
2823    }
2824
2825    /// Push bytes into the DMA buffer via the given closure.
2826    /// The closure *must* return the actual number of bytes written.
2827    /// The closure *might* get called with a slice which is smaller than the
2828    /// total available buffer.
2829    pub fn push_with(&mut self, f: impl FnOnce(&mut [u8]) -> usize) -> Result<usize, DmaError> {
2830        self.state.update(self.instance.tx())?;
2831        self.state.push_with(f)
2832    }
2833
2834    /// Stop the DMA transfer
2835    #[allow(clippy::type_complexity)]
2836    pub fn stop(self) -> Result<(), DmaError> {
2837        self.instance.peripheral_dma_stop();
2838
2839        if self
2840            .instance
2841            .tx()
2842            .pending_out_interrupts()
2843            .contains(DmaTxInterrupt::DescriptorError)
2844        {
2845            Err(DmaError::DescriptorError)
2846        } else {
2847            Ok(())
2848        }
2849    }
2850}
2851
2852impl<I> Drop for DmaTransferTxCircular<'_, I>
2853where
2854    I: dma_private::DmaSupportTx,
2855{
2856    fn drop(&mut self) {
2857        self.instance.peripheral_dma_stop();
2858    }
2859}
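
// Sketch of feeding a circular TX transfer (`start_circular_tx` and `data`
// are hypothetical): check `available()` first, then push at most that much.
//
// let mut transfer = driver.start_circular_tx()?;
// let room = transfer.available()?;
// if room > 0 {
//     let n = transfer.push(&data[..room.min(data.len())])?;
//     // or: transfer.push_with(|buf| fill(buf)), where the closure must
//     // return the number of bytes actually written into `buf`
// }
// transfer.stop()?;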
2860
2861/// DMA transaction for RX only circular transfers
2862///
2863/// # Safety
2864///
2865/// Never use [core::mem::forget] on an in-progress transfer
2866#[non_exhaustive]
2867#[must_use]
2868pub struct DmaTransferRxCircular<'a, I>
2869where
2870    I: dma_private::DmaSupportRx,
2871{
2872    instance: &'a mut I,
2873    state: RxCircularState,
2874}
2875
2876impl<'a, I> DmaTransferRxCircular<'a, I>
2877where
2878    I: dma_private::DmaSupportRx,
2879{
2880    #[allow(unused)] // currently used by peripherals not available on all chips
2881    pub(crate) fn new(instance: &'a mut I) -> Self {
2882        let state = RxCircularState::new(instance.chain());
2883        Self { instance, state }
2884    }
2885
2886    /// Number of bytes that can be popped.
2887    ///
2888    /// Call this before trying to [DmaTransferRxCircular::pop]
2889    /// data.
2890    pub fn available(&mut self) -> Result<usize, DmaError> {
2891        self.state.update()?;
2892        Ok(self.state.available)
2893    }
2894
2895    /// Get available data.
2896    ///
2897    /// The amount of available data is expected to be checked beforehand via
2898    /// [DmaTransferRxCircular::available], and the given buffer must be able
2899    /// to hold all available data.
2900    ///
2901    /// Fails with [DmaError::BufferTooSmall] if the given buffer is too small
2902    /// to hold all available data.
2903    pub fn pop(&mut self, data: &mut [u8]) -> Result<usize, DmaError> {
2904        self.state.update()?;
2905        self.state.pop(data)
2906    }
2907}
2908
2909impl<I> Drop for DmaTransferRxCircular<'_, I>
2910where
2911    I: dma_private::DmaSupportRx,
2912{
2913    fn drop(&mut self) {
2914        self.instance.peripheral_dma_stop();
2915    }
2916}
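
// Sketch of draining a circular RX transfer (`start_circular_rx` and
// `process` are hypothetical). Check `available()` before `pop()` and make
// sure the target buffer can hold everything that is available, otherwise
// `pop` fails with `DmaError::BufferTooSmall`:
//
// let mut transfer = driver.start_circular_rx()?;
// let mut buf = [0u8; 4096];
// let avail = transfer.available()?;
// if avail > 0 {
//     let n = transfer.pop(&mut buf)?;
//     process(&buf[..n]);
// }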
2917
2918pub(crate) mod asynch {
2919    use core::task::Poll;
2920
2921    use super::*;
2922
2923    #[must_use = "futures do nothing unless you `.await` or poll them"]
2924    pub struct DmaTxFuture<'a, TX>
2925    where
2926        TX: Tx,
2927    {
2928        pub(crate) tx: &'a mut TX,
2929    }
2930
2931    impl<'a, TX> DmaTxFuture<'a, TX>
2932    where
2933        TX: Tx,
2934    {
2935        pub fn new(tx: &'a mut TX) -> Self {
2936            Self { tx }
2937        }
2938    }
2939
2940    impl<TX> core::future::Future for DmaTxFuture<'_, TX>
2941    where
2942        TX: Tx,
2943    {
2944        type Output = Result<(), DmaError>;
2945
2946        fn poll(
2947            self: core::pin::Pin<&mut Self>,
2948            cx: &mut core::task::Context<'_>,
2949        ) -> Poll<Self::Output> {
2950            if self.tx.is_done() {
2951                self.tx.clear_interrupts();
2952                Poll::Ready(Ok(()))
2953            } else if self
2954                .tx
2955                .pending_out_interrupts()
2956                .contains(DmaTxInterrupt::DescriptorError)
2957            {
2958                self.tx.clear_interrupts();
2959                Poll::Ready(Err(DmaError::DescriptorError))
2960            } else {
2961                self.tx.waker().register(cx.waker());
2962                self.tx
2963                    .listen_out(DmaTxInterrupt::TotalEof | DmaTxInterrupt::DescriptorError);
2964                Poll::Pending
2965            }
2966        }
2967    }
2968
2969    impl<TX> Drop for DmaTxFuture<'_, TX>
2970    where
2971        TX: Tx,
2972    {
2973        fn drop(&mut self) {
2974            self.tx
2975                .unlisten_out(DmaTxInterrupt::TotalEof | DmaTxInterrupt::DescriptorError);
2976        }
2977    }
2978
2979    #[must_use = "futures do nothing unless you `.await` or poll them"]
2980    pub struct DmaRxFuture<'a, RX>
2981    where
2982        RX: Rx,
2983    {
2984        pub(crate) rx: &'a mut RX,
2985    }
2986
2987    impl<'a, RX> DmaRxFuture<'a, RX>
2988    where
2989        RX: Rx,
2990    {
2991        pub fn new(rx: &'a mut RX) -> Self {
2992            Self { rx }
2993        }
2994    }
2995
2996    impl<RX> core::future::Future for DmaRxFuture<'_, RX>
2997    where
2998        RX: Rx,
2999    {
3000        type Output = Result<(), DmaError>;
3001
3002        fn poll(
3003            self: core::pin::Pin<&mut Self>,
3004            cx: &mut core::task::Context<'_>,
3005        ) -> Poll<Self::Output> {
3006            if self.rx.is_done() {
3007                self.rx.clear_interrupts();
3008                Poll::Ready(Ok(()))
3009            } else if !self.rx.pending_in_interrupts().is_disjoint(
3010                DmaRxInterrupt::DescriptorError
3011                    | DmaRxInterrupt::DescriptorEmpty
3012                    | DmaRxInterrupt::ErrorEof,
3013            ) {
3014                self.rx.clear_interrupts();
3015                Poll::Ready(Err(DmaError::DescriptorError))
3016            } else {
3017                self.rx.waker().register(cx.waker());
3018                self.rx.listen_in(
3019                    DmaRxInterrupt::SuccessfulEof
3020                        | DmaRxInterrupt::DescriptorError
3021                        | DmaRxInterrupt::DescriptorEmpty
3022                        | DmaRxInterrupt::ErrorEof,
3023                );
3024                Poll::Pending
3025            }
3026        }
3027    }
3028
3029    impl<RX> Drop for DmaRxFuture<'_, RX>
3030    where
3031        RX: Rx,
3032    {
3033        fn drop(&mut self) {
3034            self.rx.unlisten_in(
3035                DmaRxInterrupt::DescriptorError
3036                    | DmaRxInterrupt::DescriptorEmpty
3037                    | DmaRxInterrupt::ErrorEof,
3038            );
3039        }
3040    }
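
    // Sketch of how a driver awaits RX completion internally (the futures
    // are crate-private; `self.rx` is assumed to be an async `ChannelRx`
    // owned by the driver):
    //
    // DmaRxFuture::new(&mut self.rx).await?;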
3041
3042    #[cfg(any(i2s0, i2s1))]
3043    pub struct DmaTxDoneChFuture<'a, TX>
3044    where
3045        TX: Tx,
3046    {
3047        pub(crate) tx: &'a mut TX,
3048        _a: (),
3049    }
3050
3051    #[cfg(any(i2s0, i2s1))]
3052    impl<'a, TX> DmaTxDoneChFuture<'a, TX>
3053    where
3054        TX: Tx,
3055    {
3056        pub fn new(tx: &'a mut TX) -> Self {
3057            Self { tx, _a: () }
3058        }
3059    }
3060
3061    #[cfg(any(i2s0, i2s1))]
3062    impl<TX> core::future::Future for DmaTxDoneChFuture<'_, TX>
3063    where
3064        TX: Tx,
3065    {
3066        type Output = Result<(), DmaError>;
3067
3068        fn poll(
3069            self: core::pin::Pin<&mut Self>,
3070            cx: &mut core::task::Context<'_>,
3071        ) -> Poll<Self::Output> {
3072            if self
3073                .tx
3074                .pending_out_interrupts()
3075                .contains(DmaTxInterrupt::Done)
3076            {
3077                self.tx.clear_out(DmaTxInterrupt::Done);
3078                Poll::Ready(Ok(()))
3079            } else if self
3080                .tx
3081                .pending_out_interrupts()
3082                .contains(DmaTxInterrupt::DescriptorError)
3083            {
3084                self.tx.clear_interrupts();
3085                Poll::Ready(Err(DmaError::DescriptorError))
3086            } else {
3087                self.tx.waker().register(cx.waker());
3088                self.tx
3089                    .listen_out(DmaTxInterrupt::Done | DmaTxInterrupt::DescriptorError);
3090                Poll::Pending
3091            }
3092        }
3093    }
3094
3095    #[cfg(any(i2s0, i2s1))]
3096    impl<TX> Drop for DmaTxDoneChFuture<'_, TX>
3097    where
3098        TX: Tx,
3099    {
3100        fn drop(&mut self) {
3101            self.tx
3102                .unlisten_out(DmaTxInterrupt::Done | DmaTxInterrupt::DescriptorError);
3103        }
3104    }
3105
3106    #[cfg(any(i2s0, i2s1))]
3107    pub struct DmaRxDoneChFuture<'a, RX>
3108    where
3109        RX: Rx,
3110    {
3111        pub(crate) rx: &'a mut RX,
3112        _a: (),
3113    }
3114
3115    #[cfg(any(i2s0, i2s1))]
3116    impl<'a, RX> DmaRxDoneChFuture<'a, RX>
3117    where
3118        RX: Rx,
3119    {
3120        pub fn new(rx: &'a mut RX) -> Self {
3121            Self { rx, _a: () }
3122        }
3123    }
3124
3125    #[cfg(any(i2s0, i2s1))]
3126    impl<RX> core::future::Future for DmaRxDoneChFuture<'_, RX>
3127    where
3128        RX: Rx,
3129    {
3130        type Output = Result<(), DmaError>;
3131
3132        fn poll(
3133            self: core::pin::Pin<&mut Self>,
3134            cx: &mut core::task::Context<'_>,
3135        ) -> Poll<Self::Output> {
3136            if self
3137                .rx
3138                .pending_in_interrupts()
3139                .contains(DmaRxInterrupt::Done)
3140            {
3141                self.rx.clear_in(DmaRxInterrupt::Done);
3142                Poll::Ready(Ok(()))
3143            } else if !self.rx.pending_in_interrupts().is_disjoint(
3144                DmaRxInterrupt::DescriptorError
3145                    | DmaRxInterrupt::DescriptorEmpty
3146                    | DmaRxInterrupt::ErrorEof,
3147            ) {
3148                self.rx.clear_interrupts();
3149                Poll::Ready(Err(DmaError::DescriptorError))
3150            } else {
3151                self.rx.waker().register(cx.waker());
3152                self.rx.listen_in(
3153                    DmaRxInterrupt::Done
3154                        | DmaRxInterrupt::DescriptorError
3155                        | DmaRxInterrupt::DescriptorEmpty
3156                        | DmaRxInterrupt::ErrorEof,
3157                );
3158                Poll::Pending
3159            }
3160        }
3161    }
3162
3163    #[cfg(any(i2s0, i2s1))]
3164    impl<RX> Drop for DmaRxDoneChFuture<'_, RX>
3165    where
3166        RX: Rx,
3167    {
3168        fn drop(&mut self) {
3169            self.rx.unlisten_in(
3170                DmaRxInterrupt::Done
3171                    | DmaRxInterrupt::DescriptorError
3172                    | DmaRxInterrupt::DescriptorEmpty
3173                    | DmaRxInterrupt::ErrorEof,
3174            );
3175        }
3176    }
3177
3178    pub(super) fn handle_in_interrupt<CH: DmaChannelExt>() {
3179        let rx = CH::rx_interrupts();
3180
3181        if !rx.is_async() {
3182            return;
3183        }
3184
3185        if !rx.pending_interrupts().is_disjoint(
3186            DmaRxInterrupt::DescriptorError
3187                | DmaRxInterrupt::DescriptorEmpty
3188                | DmaRxInterrupt::ErrorEof,
3189        ) {
3190            rx.unlisten(
3191                DmaRxInterrupt::DescriptorError
3192                    | DmaRxInterrupt::DescriptorEmpty
3193                    | DmaRxInterrupt::ErrorEof
3194                    | DmaRxInterrupt::SuccessfulEof
3195                    | DmaRxInterrupt::Done,
3196            );
3197            rx.waker().wake()
3198        }
3199
3200        if rx
3201            .pending_interrupts()
3202            .contains(DmaRxInterrupt::SuccessfulEof)
3203        {
3204            rx.unlisten(DmaRxInterrupt::SuccessfulEof);
3205            rx.waker().wake()
3206        }
3207
3208        if rx.pending_interrupts().contains(DmaRxInterrupt::Done) {
3209            rx.unlisten(DmaRxInterrupt::Done);
3210            rx.waker().wake()
3211        }
3212    }
3213
3214    pub(super) fn handle_out_interrupt<CH: DmaChannelExt>() {
3215        let tx = CH::tx_interrupts();
3216
3217        if !tx.is_async() {
3218            return;
3219        }
3220
3221        if tx
3222            .pending_interrupts()
3223            .contains(DmaTxInterrupt::DescriptorError)
3224        {
3225            tx.unlisten(
3226                DmaTxInterrupt::DescriptorError | DmaTxInterrupt::TotalEof | DmaTxInterrupt::Done,
3227            );
3228            tx.waker().wake()
3229        }
3230
3231        if tx.pending_interrupts().contains(DmaTxInterrupt::TotalEof)
3232            && tx.is_listening().contains(DmaTxInterrupt::TotalEof)
3233        {
3234            tx.unlisten(DmaTxInterrupt::TotalEof);
3235            tx.waker().wake()
3236        }
3237
3238        if tx.pending_interrupts().contains(DmaTxInterrupt::Done) {
3239            tx.unlisten(DmaTxInterrupt::Done);
3240            tx.waker().wake()
3241        }
3242    }
3243}