//! DMA memory-to-memory driver (`esp_hal/dma/m2m.rs`).
1use core::{
2    mem::ManuallyDrop,
3    ops::{Deref, DerefMut},
4};
5
6#[cfg(not(esp32s2))]
7use crate::dma::{AnyGdmaChannel, AnyGdmaRxChannel, AnyGdmaTxChannel, DmaEligible};
8use crate::{
9    Async,
10    Blocking,
11    DriverMode,
12    dma::{
13        BurstConfig,
14        Channel,
15        ChannelRx,
16        ChannelTx,
17        DmaChannelConvert,
18        DmaDescriptor,
19        DmaError,
20        DmaPeripheral,
21        DmaRxBuf,
22        DmaRxBuffer,
23        DmaRxInterrupt,
24        DmaTxBuf,
25        DmaTxBuffer,
26        DmaTxInterrupt,
27    },
28};
29#[cfg(esp32s2)]
30use crate::{
31    dma::{CopyDmaRxChannel, CopyDmaTxChannel},
32    peripherals::DMA_COPY,
33};
34
// On the ESP32-S2, memory-to-memory transfers go through the dedicated COPY
// DMA peripheral; on the other (GDMA-based) chips any GDMA channel can be
// used instead. These aliases let the rest of the driver be chip-agnostic.
cfg_if::cfg_if! {
    if #[cfg(esp32s2)] {
        type Mem2MemChannel<'d> = DMA_COPY<'d>;
        type Mem2MemRxChannel<'d> = CopyDmaRxChannel<'d>;
        type Mem2MemTxChannel<'d> = CopyDmaTxChannel<'d>;
    } else {
        type Mem2MemChannel<'d> = AnyGdmaChannel<'d>;
        type Mem2MemRxChannel<'d> = AnyGdmaRxChannel<'d>;
        type Mem2MemTxChannel<'d> = AnyGdmaTxChannel<'d>;
    }
}
46
/// DMA Memory to Memory pseudo-Peripheral
///
/// This is a pseudo-peripheral that allows for memory to memory transfers.
/// It is not a real peripheral, but a way to use the DMA engine for memory
/// to memory transfers.
///
/// The two halves are public so they can be used (and started) independently;
/// [SimpleMem2Mem] drives both together for simple copy operations.
pub struct Mem2Mem<'d, Dm>
where
    Dm: DriverMode,
{
    /// RX Half
    pub rx: Mem2MemRx<'d, Dm>,
    /// TX Half
    pub tx: Mem2MemTx<'d, Dm>,
}
61
impl<'d> Mem2Mem<'d, Blocking> {
    /// Create a new Mem2Mem instance.
    ///
    /// On GDMA-based chips, a DMA-eligible peripheral must be supplied; its
    /// DMA peripheral number is used for the transfer. Taking the peripheral
    /// by value gives this constructor exclusive use of it, which is what
    /// makes calling [Self::new_unsafe] sound here.
    pub fn new(
        channel: impl DmaChannelConvert<Mem2MemChannel<'d>>,
        #[cfg(dma_kind = "gdma")] peripheral: impl DmaEligible,
    ) -> Self {
        unsafe {
            // SAFETY: `peripheral` is owned by this call, so nothing else can be
            // using its DMA peripheral number concurrently.
            Self::new_unsafe(
                channel,
                #[cfg(dma_kind = "gdma")]
                peripheral.dma_peripheral(),
            )
        }
    }

    /// Create a new Mem2Mem instance.
    ///
    /// # Safety
    ///
    /// You must ensure that you're not using DMA for the same peripheral and
    /// that you're the only one using the DmaPeripheral.
    pub unsafe fn new_unsafe(
        channel: impl DmaChannelConvert<Mem2MemChannel<'d>>,
        #[cfg(dma_kind = "gdma")] peripheral: DmaPeripheral,
    ) -> Self {
        // Degrade the concrete channel into the chip-appropriate "any" type.
        let channel = Channel::new(channel.degrade());

        cfg_if::cfg_if! {
            if #[cfg(dma_kind = "gdma")] {
                // GDMA needs the RX half switched into mem2mem mode explicitly.
                let mut channel = channel;
                channel.rx.set_mem2mem_mode(true);
            } else {
                // The S2's COPY DMA channel doesn't care about this. Once support for other
                // channels are added, this will need updating.
                let peripheral = DmaPeripheral::Spi2;
            }
        }

        // Split the channel into independent RX/TX halves, each carrying the
        // peripheral number it needs to start a transfer.
        Mem2Mem {
            rx: Mem2MemRx {
                channel: channel.rx,
                peripheral,
            },
            tx: Mem2MemTx {
                channel: channel.tx,
                peripheral,
            },
        }
    }

    /// Shortcut to create a [SimpleMem2Mem]
    pub fn with_descriptors(
        self,
        rx_descriptors: &'static mut [DmaDescriptor],
        tx_descriptors: &'static mut [DmaDescriptor],
        config: BurstConfig,
    ) -> Result<SimpleMem2Mem<'d, Blocking>, DmaError> {
        SimpleMem2Mem::new(self, rx_descriptors, tx_descriptors, config)
    }

    /// Convert Mem2Mem to an async Mem2Mem.
    pub fn into_async(self) -> Mem2Mem<'d, Async> {
        Mem2Mem {
            rx: self.rx.into_async(),
            tx: self.tx.into_async(),
        }
    }
}
130
/// The RX half of [Mem2Mem].
pub struct Mem2MemRx<'d, Dm: DriverMode> {
    // The RX DMA channel used to receive data into a buffer.
    channel: ChannelRx<Dm, Mem2MemRxChannel<'d>>,
    // DMA peripheral number the transfer is routed through.
    peripheral: DmaPeripheral,
}
136
137impl<'d> Mem2MemRx<'d, Blocking> {
138    /// Convert Mem2MemRx to an async Mem2MemRx.
139    pub fn into_async(self) -> Mem2MemRx<'d, Async> {
140        Mem2MemRx {
141            channel: self.channel.into_async(),
142            peripheral: self.peripheral,
143        }
144    }
145}
146
147impl<'d, Dm> Mem2MemRx<'d, Dm>
148where
149    Dm: DriverMode,
150{
151    /// Start the RX half of a memory to memory transfer.
152    pub fn receive<BUF>(
153        mut self,
154        mut buf: BUF,
155    ) -> Result<Mem2MemRxTransfer<'d, Dm, BUF>, (DmaError, Self, BUF)>
156    where
157        BUF: DmaRxBuffer,
158    {
159        let result = unsafe {
160            self.channel
161                .prepare_transfer(self.peripheral, &mut buf)
162                .and_then(|_| self.channel.start_transfer())
163        };
164
165        if let Err(e) = result {
166            return Err((e, self, buf));
167        }
168
169        Ok(Mem2MemRxTransfer {
170            m2m: ManuallyDrop::new(self),
171            buf_view: ManuallyDrop::new(buf.into_view()),
172        })
173    }
174}
175
/// Represents an ongoing (or potentially finished) DMA Memory-to-Memory RX
/// transfer.
pub struct Mem2MemRxTransfer<'d, M: DriverMode, BUF: DmaRxBuffer> {
    // Both fields are ManuallyDrop so that `release` can move them out by
    // value; the custom Drop impl handles the not-released case.
    m2m: ManuallyDrop<Mem2MemRx<'d, M>>,
    buf_view: ManuallyDrop<BUF::View>,
}
182
impl<'d, M: DriverMode, BUF: DmaRxBuffer> Mem2MemRxTransfer<'d, M, BUF> {
    /// Returns true when [Self::wait] will not block.
    pub fn is_done(&self) -> bool {
        // The transfer is over once the channel reports either a descriptor
        // error or that it has run out of descriptors.
        let done_interrupts = DmaRxInterrupt::DescriptorError | DmaRxInterrupt::DescriptorEmpty;
        !self
            .m2m
            .channel
            .pending_in_interrupts()
            .is_disjoint(done_interrupts)
    }

    /// Waits for the transfer to stop and returns the peripheral and buffer.
    ///
    /// Busy-polls [Self::is_done]; reports `DmaError::DescriptorError` if the
    /// channel flagged an error during the transfer.
    pub fn wait(self) -> (Result<(), DmaError>, Mem2MemRx<'d, M>, BUF::Final) {
        while !self.is_done() {}

        let (m2m, view) = self.release();

        let result = if m2m.channel.has_error() {
            Err(DmaError::DescriptorError)
        } else {
            Ok(())
        };

        (result, m2m, BUF::from_view(view))
    }

    /// Stops this transfer on the spot and returns the peripheral and buffer.
    pub fn stop(self) -> (Mem2MemRx<'d, M>, BUF::Final) {
        let (mut m2m, view) = self.release();

        m2m.channel.stop_transfer();

        (m2m, BUF::from_view(view))
    }

    // Disassembles `self` into its parts without running its Drop impl.
    fn release(mut self) -> (Mem2MemRx<'d, M>, BUF::View) {
        // SAFETY: Since forget is called on self, we know that self.m2m and
        // self.buf_view won't be touched again.
        let result = unsafe {
            let m2m = ManuallyDrop::take(&mut self.m2m);
            let view = ManuallyDrop::take(&mut self.buf_view);
            (m2m, view)
        };
        core::mem::forget(self);
        result
    }
}
230
231impl<M: DriverMode, BUF: DmaRxBuffer> Deref for Mem2MemRxTransfer<'_, M, BUF> {
232    type Target = BUF::View;
233
234    fn deref(&self) -> &Self::Target {
235        &self.buf_view
236    }
237}
238
239impl<M: DriverMode, BUF: DmaRxBuffer> DerefMut for Mem2MemRxTransfer<'_, M, BUF> {
240    fn deref_mut(&mut self) -> &mut Self::Target {
241        &mut self.buf_view
242    }
243}
244
impl<M: DriverMode, BUF: DmaRxBuffer> Drop for Mem2MemRxTransfer<'_, M, BUF> {
    fn drop(&mut self) {
        // Stop the hardware before releasing the buffer it may be writing to.
        self.m2m.channel.stop_transfer();

        // SAFETY: This is Drop, we know that self.m2m and self.buf_view
        // won't be touched again.
        let view = unsafe {
            ManuallyDrop::drop(&mut self.m2m);
            ManuallyDrop::take(&mut self.buf_view)
        };
        // Reassemble the buffer so its own drop logic runs.
        let _ = BUF::from_view(view);
    }
}
258
/// The TX half of [Mem2Mem].
pub struct Mem2MemTx<'d, Dm: DriverMode> {
    // The TX DMA channel used to send data out of a buffer.
    channel: ChannelTx<Dm, Mem2MemTxChannel<'d>>,
    // DMA peripheral number the transfer is routed through.
    peripheral: DmaPeripheral,
}
264
265impl<'d> Mem2MemTx<'d, Blocking> {
266    /// Convert Mem2MemTx to an async Mem2MemTx.
267    pub fn into_async(self) -> Mem2MemTx<'d, Async> {
268        Mem2MemTx {
269            channel: self.channel.into_async(),
270            peripheral: self.peripheral,
271        }
272    }
273}
274
275impl<'d, Dm: DriverMode> Mem2MemTx<'d, Dm> {
276    /// Start the TX half of a memory to memory transfer.
277    pub fn send<BUF>(
278        mut self,
279        mut buf: BUF,
280    ) -> Result<Mem2MemTxTransfer<'d, Dm, BUF>, (DmaError, Self, BUF)>
281    where
282        BUF: DmaTxBuffer,
283    {
284        let result = unsafe {
285            self.channel
286                .prepare_transfer(self.peripheral, &mut buf)
287                .and_then(|_| self.channel.start_transfer())
288        };
289
290        if let Err(e) = result {
291            return Err((e, self, buf));
292        }
293
294        Ok(Mem2MemTxTransfer {
295            m2m: ManuallyDrop::new(self),
296            buf_view: ManuallyDrop::new(buf.into_view()),
297        })
298    }
299}
300
/// Represents an ongoing (or potentially finished) DMA Memory-to-Memory TX
/// transfer.
pub struct Mem2MemTxTransfer<'d, Dm: DriverMode, BUF: DmaTxBuffer> {
    // Both fields are ManuallyDrop so that `release` can move them out by
    // value; the custom Drop impl handles the not-released case.
    m2m: ManuallyDrop<Mem2MemTx<'d, Dm>>,
    buf_view: ManuallyDrop<BUF::View>,
}
307
impl<'d, Dm: DriverMode, BUF: DmaTxBuffer> Mem2MemTxTransfer<'d, Dm, BUF> {
    /// Returns true when [Self::wait] will not block.
    pub fn is_done(&self) -> bool {
        // The transfer is over once the channel reports either a descriptor
        // error or that the final EOF descriptor has been sent.
        let done_interrupts = DmaTxInterrupt::DescriptorError | DmaTxInterrupt::TotalEof;
        !self
            .m2m
            .channel
            .pending_out_interrupts()
            .is_disjoint(done_interrupts)
    }

    /// Waits for the transfer to stop and returns the peripheral and buffer.
    ///
    /// Busy-polls [Self::is_done]; reports `DmaError::DescriptorError` if the
    /// channel flagged an error during the transfer.
    pub fn wait(self) -> (Result<(), DmaError>, Mem2MemTx<'d, Dm>, BUF::Final) {
        while !self.is_done() {}

        let (m2m, view) = self.release();

        let result = if m2m.channel.has_error() {
            Err(DmaError::DescriptorError)
        } else {
            Ok(())
        };

        (result, m2m, BUF::from_view(view))
    }

    /// Stops this transfer on the spot and returns the peripheral and buffer.
    pub fn stop(self) -> (Mem2MemTx<'d, Dm>, BUF::Final) {
        let (mut m2m, view) = self.release();

        m2m.channel.stop_transfer();

        (m2m, BUF::from_view(view))
    }

    // Disassembles `self` into its parts without running its Drop impl.
    fn release(mut self) -> (Mem2MemTx<'d, Dm>, BUF::View) {
        // SAFETY: Since forget is called on self, we know that self.m2m and
        // self.buf_view won't be touched again.
        let result = unsafe {
            let m2m = ManuallyDrop::take(&mut self.m2m);
            let view = ManuallyDrop::take(&mut self.buf_view);
            (m2m, view)
        };
        core::mem::forget(self);
        result
    }
}
355
356impl<Dm: DriverMode, BUF: DmaTxBuffer> Deref for Mem2MemTxTransfer<'_, Dm, BUF> {
357    type Target = BUF::View;
358
359    fn deref(&self) -> &Self::Target {
360        &self.buf_view
361    }
362}
363
364impl<Dm: DriverMode, BUF: DmaTxBuffer> DerefMut for Mem2MemTxTransfer<'_, Dm, BUF> {
365    fn deref_mut(&mut self) -> &mut Self::Target {
366        &mut self.buf_view
367    }
368}
369
impl<Dm: DriverMode, BUF: DmaTxBuffer> Drop for Mem2MemTxTransfer<'_, Dm, BUF> {
    fn drop(&mut self) {
        // Stop the hardware before releasing the buffer it may be reading from.
        self.m2m.channel.stop_transfer();

        // SAFETY: This is Drop, we know that self.m2m and self.buf_view
        // won't be touched again.
        let view = unsafe {
            ManuallyDrop::drop(&mut self.m2m);
            ManuallyDrop::take(&mut self.buf_view)
        };
        // Reassemble the buffer so its own drop logic runs.
        let _ = BUF::from_view(view);
    }
}
383
/// A simple and easy to use wrapper around [Mem2Mem].
/// More complex memory to memory transfers should use [Mem2Mem] directly.
pub struct SimpleMem2Mem<'d, Dm: DriverMode> {
    // Holds the Mem2Mem halves and descriptors while idle, or the in-flight
    // transfer handles while active.
    state: State<'d, Dm>,
    // Burst configuration applied to both the RX and TX buffers.
    config: BurstConfig,
}
390
// Internal state machine for SimpleMem2Mem.
enum State<'d, Dm: DriverMode> {
    // No transfer in flight: owns the peripheral halves and both descriptor
    // lists, ready for the next `start_transfer`.
    Idle(
        Mem2Mem<'d, Dm>,
        &'d mut [DmaDescriptor],
        &'d mut [DmaDescriptor],
    ),
    // A transfer is in flight; the RX/TX handles own the buffers/descriptors.
    Active(
        Mem2MemRxTransfer<'d, Dm, DmaRxBuf>,
        Mem2MemTxTransfer<'d, Dm, DmaTxBuf>,
    ),
    // Transient placeholder used while moving between the other two states;
    // observing it outside that window means a handle was leaked/forgotten.
    InUse,
}
403
404impl<'d, Dm: DriverMode> SimpleMem2Mem<'d, Dm> {
405    /// Creates a new [SimpleMem2Mem].
406    pub fn new(
407        mem2mem: Mem2Mem<'d, Dm>,
408        rx_descriptors: &'d mut [DmaDescriptor],
409        tx_descriptors: &'d mut [DmaDescriptor],
410        config: BurstConfig,
411    ) -> Result<Self, DmaError> {
412        if rx_descriptors.is_empty() || tx_descriptors.is_empty() {
413            return Err(DmaError::OutOfDescriptors);
414        }
415        Ok(Self {
416            state: State::Idle(mem2mem, rx_descriptors, tx_descriptors),
417            config,
418        })
419    }
420}
421
impl<'d, Dm: DriverMode> SimpleMem2Mem<'d, Dm> {
    /// Starts a memory to memory transfer.
    ///
    /// The DMA engine copies the contents of `tx_buffer` into `rx_buffer`.
    /// The returned handle must be waited on or dropped — never leaked with
    /// `core::mem::forget` — before the buffers may be reused.
    pub fn start_transfer(
        &mut self,
        rx_buffer: &mut [u8],
        tx_buffer: &[u8],
    ) -> Result<SimpleMem2MemTransfer<'_, 'd, Dm>, DmaError> {
        // Take ownership of the idle parts, leaving `InUse` as a marker so a
        // leaked previous transfer is detected rather than silently reused.
        let State::Idle(mem2mem, rx_descriptors, tx_descriptors) =
            core::mem::replace(&mut self.state, State::InUse)
        else {
            panic!("SimpleMem2MemTransfer was forgotten with core::mem::forget or similar");
        };

        // Raise these buffers to 'static. This is not safe, bad things will happen if
        // the user calls core::mem::forget on SimpleMem2MemTransfer. This is
        // just the unfortunate consequence of doing DMA without enforcing
        // 'static.
        let rx_buffer =
            unsafe { core::slice::from_raw_parts_mut(rx_buffer.as_mut_ptr(), rx_buffer.len()) };
        let tx_buffer =
            unsafe { core::slice::from_raw_parts_mut(tx_buffer.as_ptr() as _, tx_buffer.len()) };
        let rx_descriptors = unsafe {
            core::slice::from_raw_parts_mut(rx_descriptors.as_mut_ptr(), rx_descriptors.len())
        };
        let tx_descriptors = unsafe {
            core::slice::from_raw_parts_mut(tx_descriptors.as_mut_ptr(), tx_descriptors.len())
        };

        // Note: The ESP32-S2 insists that RX is started before TX. Contrary to the TRM
        // and every other chip.

        let dma_rx_buf = unwrap!(
            DmaRxBuf::new_with_config(rx_descriptors, rx_buffer, self.config),
            "There's no way to get the descriptors back yet"
        );

        let rx = match mem2mem.rx.receive(dma_rx_buf) {
            Ok(rx) => rx,
            Err((err, rx, buf)) => {
                // Undo: reclaim the descriptors and restore the idle state
                // before reporting the error.
                let (rx_descriptors, _rx_buffer) = buf.split();
                self.state = State::Idle(
                    Mem2Mem { rx, tx: mem2mem.tx },
                    rx_descriptors,
                    tx_descriptors,
                );
                return Err(err);
            }
        };

        let dma_tx_buf = unwrap!(
            DmaTxBuf::new_with_config(tx_descriptors, tx_buffer, self.config),
            "There's no way to get the descriptors back yet"
        );

        let tx = match mem2mem.tx.send(dma_tx_buf) {
            Ok(tx) => tx,
            Err((err, tx, buf)) => {
                // Undo: stop the already-started RX half too, then restore the
                // idle state with both descriptor lists.
                let (tx_descriptors, _tx_buffer) = buf.split();
                let (rx, buf) = rx.stop();
                let (rx_descriptors, _rx_buffer) = buf.split();
                self.state = State::Idle(Mem2Mem { rx, tx }, rx_descriptors, tx_descriptors);
                return Err(err);
            }
        };

        self.state = State::Active(rx, tx);

        Ok(SimpleMem2MemTransfer(self))
    }
}
492
493impl<Dm: DriverMode> Drop for SimpleMem2Mem<'_, Dm> {
494    fn drop(&mut self) {
495        if !matches!(&mut self.state, State::Idle(_, _, _)) {
496            panic!("SimpleMem2MemTransfer was forgotten with core::mem::forget or similar");
497        }
498    }
499}
500
/// Represents an ongoing (or potentially finished) DMA Memory-to-Memory
/// transfer.
///
/// Holds the parent exclusively borrowed; dropping this handle stops the
/// transfer and returns the parent to its idle state.
pub struct SimpleMem2MemTransfer<'a, 'd, Dm: DriverMode>(&'a mut SimpleMem2Mem<'d, Dm>);
504
impl<Dm: DriverMode> SimpleMem2MemTransfer<'_, '_, Dm> {
    /// Returns true when [Self::wait] will not block.
    pub fn is_done(&self) -> bool {
        // While this handle exists, the parent's state is always `Active`: it
        // is set by `start_transfer` and only replaced in this type's `drop`.
        let State::Active(rx, tx) = &self.0.state else {
            unreachable!()
        };

        // Wait for transmission to finish, and wait for the RX channel to receive the
        // one and only EOF that DmaTxBuf will send.
        tx.is_done()
            && rx
                .m2m
                .channel
                .pending_in_interrupts()
                .contains(DmaRxInterrupt::SuccessfulEof)
    }

    /// Wait for the transfer to finish.
    ///
    /// NOTE(review): this currently always returns `Ok(())` — DMA descriptor
    /// errors are not surfaced here; confirm whether they should be.
    pub fn wait(self) -> Result<(), DmaError> {
        while !self.is_done() {}
        Ok(())
    }
}
528
impl<Dm: DriverMode> Drop for SimpleMem2MemTransfer<'_, '_, Dm> {
    fn drop(&mut self) {
        // The state must be `Active` here; it was set by `start_transfer` and
        // this drop is the only place it transitions away from Active.
        let State::Active(rx, tx) = core::mem::replace(&mut self.0.state, State::InUse) else {
            unreachable!()
        };

        // Stop both halves and take back the buffers.
        let (tx, dma_tx_buf) = tx.stop();
        let (rx, dma_rx_buf) = rx.stop();

        // Reclaim the descriptor lists; the data buffers were only borrowed.
        let (tx_descriptors, _tx_buffer) = dma_tx_buf.split();
        let (rx_descriptors, _rx_buffer) = dma_rx_buf.split();

        // Return the parent to the idle state, ready for the next transfer.
        self.0.state = State::Idle(Mem2Mem { rx, tx }, rx_descriptors, tx_descriptors);
    }
}