esp_hal/dma/
m2m.rs

1use core::{
2    mem::ManuallyDrop,
3    ops::{Deref, DerefMut},
4};
5
6#[cfg(not(esp32s2))]
7use crate::dma::{
8    AnyGdmaChannel,
9    AnyGdmaRxChannel,
10    AnyGdmaTxChannel,
11    DmaChannelConvert,
12    DmaEligible,
13};
14use crate::{
15    Async,
16    Blocking,
17    DriverMode,
18    dma::{
19        BurstConfig,
20        Channel,
21        ChannelRx,
22        ChannelTx,
23        DmaDescriptor,
24        DmaError,
25        DmaPeripheral,
26        DmaRxBuf,
27        DmaRxBuffer,
28        DmaRxInterrupt,
29        DmaTxBuf,
30        DmaTxBuffer,
31        DmaTxInterrupt,
32    },
33};
34#[cfg(esp32s2)]
35use crate::{
36    dma::{CopyDmaRxChannel, CopyDmaTxChannel},
37    peripherals::DMA_COPY,
38};
39
/// DMA Memory to Memory pseudo-Peripheral
///
/// This is a pseudo-peripheral that allows for memory to memory transfers.
/// It is not a real peripheral, but a way to use the DMA engine for memory
/// to memory transfers.
///
/// The two halves can be used together (see [SimpleMem2Mem]) or split up and
/// driven independently.
pub struct Mem2Mem<'d, Dm>
where
    Dm: DriverMode,
{
    /// RX Half: receives data into a [DmaRxBuffer].
    pub rx: Mem2MemRx<'d, Dm>,
    /// TX Half: sends data out of a [DmaTxBuffer].
    pub tx: Mem2MemTx<'d, Dm>,
}
54
impl<'d> Mem2Mem<'d, Blocking> {
    /// Create a new Mem2Mem instance.
    ///
    /// Takes the DMA-eligible peripheral by value, which guarantees this is
    /// the only user of that peripheral's DMA selector.
    #[cfg(not(esp32s2))]
    pub fn new(
        channel: impl DmaChannelConvert<AnyGdmaChannel<'d>>,
        peripheral: impl DmaEligible,
    ) -> Self {
        // SAFETY: owning `peripheral` means nothing else can be using DMA for
        // the same peripheral at the same time.
        unsafe { Self::new_unsafe(channel, peripheral.dma_peripheral()) }
    }

    /// Create a new Mem2Mem instance.
    ///
    /// # Safety
    ///
    /// You must ensure that you're not using DMA for the same peripheral and
    /// that you're the only one using the DmaPeripheral.
    #[cfg(not(esp32s2))]
    pub unsafe fn new_unsafe(
        channel: impl DmaChannelConvert<AnyGdmaChannel<'d>>,
        peripheral: DmaPeripheral,
    ) -> Self {
        let mut channel = Channel::new(channel.degrade());

        // The RX channel must be switched into memory-to-memory mode for
        // loopback transfers on GDMA chips.
        channel.rx.set_mem2mem_mode(true);

        // Each half keeps its own copy of the peripheral selector so the
        // halves can be driven independently after splitting.
        Mem2Mem {
            rx: Mem2MemRx {
                channel: channel.rx,
                peripheral,
            },
            tx: Mem2MemTx {
                channel: channel.tx,
                peripheral,
            },
        }
    }

    /// Create a new Mem2Mem instance.
    #[cfg(esp32s2)]
    pub fn new(channel: DMA_COPY<'d>) -> Self {
        let channel = Channel::new(channel);

        // The S2's COPY DMA channel doesn't care about this. Once support for other
        // channels are added, this will need updating.
        let peripheral = DmaPeripheral::Spi2;

        Mem2Mem {
            rx: Mem2MemRx {
                channel: channel.rx,
                peripheral,
            },
            tx: Mem2MemTx {
                channel: channel.tx,
                peripheral,
            },
        }
    }

    /// Shortcut to create a [SimpleMem2Mem]
    ///
    /// Fails with [DmaError::OutOfDescriptors] when either descriptor list is
    /// empty.
    pub fn with_descriptors(
        self,
        rx_descriptors: &'static mut [DmaDescriptor],
        tx_descriptors: &'static mut [DmaDescriptor],
        config: BurstConfig,
    ) -> Result<SimpleMem2Mem<'d, Blocking>, DmaError> {
        SimpleMem2Mem::new(self, rx_descriptors, tx_descriptors, config)
    }

    /// Convert Mem2Mem to an async Mem2Mem.
    pub fn into_async(self) -> Mem2Mem<'d, Async> {
        Mem2Mem {
            rx: self.rx.into_async(),
            tx: self.tx.into_async(),
        }
    }
}
131
/// The RX half of [Mem2Mem].
pub struct Mem2MemRx<'d, Dm: DriverMode> {
    // DMA RX channel; the concrete channel type depends on the chip family.
    #[cfg(not(esp32s2))]
    channel: ChannelRx<Dm, AnyGdmaRxChannel<'d>>,
    #[cfg(esp32s2)]
    channel: ChannelRx<Dm, CopyDmaRxChannel<'d>>,
    // Peripheral selector passed to `prepare_transfer`.
    peripheral: DmaPeripheral,
}
140
141impl<'d> Mem2MemRx<'d, Blocking> {
142    /// Convert Mem2MemRx to an async Mem2MemRx.
143    pub fn into_async(self) -> Mem2MemRx<'d, Async> {
144        Mem2MemRx {
145            channel: self.channel.into_async(),
146            peripheral: self.peripheral,
147        }
148    }
149}
150
impl<'d, Dm> Mem2MemRx<'d, Dm>
where
    Dm: DriverMode,
{
    /// Start the RX half of a memory to memory transfer.
    ///
    /// Consumes `self` and the buffer. Both are handed back either through
    /// the error tuple (when starting fails) or via the returned
    /// [Mem2MemRxTransfer]'s `wait`/`stop` methods.
    pub fn receive<BUF>(
        mut self,
        mut buf: BUF,
    ) -> Result<Mem2MemRxTransfer<'d, Dm, BUF>, (DmaError, Self, BUF)>
    where
        BUF: DmaRxBuffer,
    {
        // SAFETY: the buffer is moved into the returned transfer object (or
        // returned to the caller on error), so it outlives the transfer.
        let result = unsafe {
            self.channel
                .prepare_transfer(self.peripheral, &mut buf)
                .and_then(|_| self.channel.start_transfer())
        };

        if let Err(e) = result {
            return Err((e, self, buf));
        }

        // ManuallyDrop: ownership is logically moved into the transfer object;
        // wait()/stop()/Drop decide when the halves are taken back out.
        Ok(Mem2MemRxTransfer {
            m2m: ManuallyDrop::new(self),
            buf_view: ManuallyDrop::new(buf.into_view()),
        })
    }
}
179
/// Represents an ongoing (or potentially finished) DMA Memory-to-Memory RX
/// transfer.
///
/// Dropping this stops the transfer and releases the buffer.
pub struct Mem2MemRxTransfer<'d, M: DriverMode, BUF: DmaRxBuffer> {
    // ManuallyDrop lets wait()/stop() move the halves out by value while Drop
    // can still clean up when the transfer is simply dropped.
    m2m: ManuallyDrop<Mem2MemRx<'d, M>>,
    buf_view: ManuallyDrop<BUF::View>,
}
186
impl<'d, M: DriverMode, BUF: DmaRxBuffer> Mem2MemRxTransfer<'d, M, BUF> {
    /// Returns true when [Self::wait] will not block.
    ///
    /// The transfer counts as done once either a descriptor error or a
    /// "descriptor empty" interrupt is pending on the RX channel.
    pub fn is_done(&self) -> bool {
        let done_interrupts = DmaRxInterrupt::DescriptorError | DmaRxInterrupt::DescriptorEmpty;
        !self
            .m2m
            .channel
            .pending_in_interrupts()
            .is_disjoint(done_interrupts)
    }

    /// Waits for the transfer to stop and returns the peripheral and buffer.
    ///
    /// The first tuple element is `Err(DmaError::DescriptorError)` when the
    /// channel reports an error, `Ok(())` otherwise.
    pub fn wait(self) -> (Result<(), DmaError>, Mem2MemRx<'d, M>, BUF) {
        // Busy-spin until the channel signals completion or error.
        while !self.is_done() {}

        let (m2m, view) = self.release();

        let result = if m2m.channel.has_error() {
            Err(DmaError::DescriptorError)
        } else {
            Ok(())
        };

        (result, m2m, BUF::from_view(view))
    }

    /// Stops this transfer on the spot and returns the peripheral and buffer.
    pub fn stop(self) -> (Mem2MemRx<'d, M>, BUF) {
        let (mut m2m, view) = self.release();

        m2m.channel.stop_transfer();

        (m2m, BUF::from_view(view))
    }

    // Disassembles self into its parts without running its Drop impl.
    fn release(mut self) -> (Mem2MemRx<'d, M>, BUF::View) {
        // SAFETY: Since forget is called on self, we know that self.m2m and
        // self.buf_view won't be touched again.
        let result = unsafe {
            let m2m = ManuallyDrop::take(&mut self.m2m);
            let view = ManuallyDrop::take(&mut self.buf_view);
            (m2m, view)
        };
        core::mem::forget(self);
        result
    }
}
234
235impl<M: DriverMode, BUF: DmaRxBuffer> Deref for Mem2MemRxTransfer<'_, M, BUF> {
236    type Target = BUF::View;
237
238    fn deref(&self) -> &Self::Target {
239        &self.buf_view
240    }
241}
242
243impl<M: DriverMode, BUF: DmaRxBuffer> DerefMut for Mem2MemRxTransfer<'_, M, BUF> {
244    fn deref_mut(&mut self) -> &mut Self::Target {
245        &mut self.buf_view
246    }
247}
248
impl<M: DriverMode, BUF: DmaRxBuffer> Drop for Mem2MemRxTransfer<'_, M, BUF> {
    fn drop(&mut self) {
        // Stop the DMA engine before giving the buffer back so it no longer
        // writes into the memory we're about to release.
        self.m2m.channel.stop_transfer();

        // SAFETY: This is Drop, we know that self.m2m and self.buf_view
        // won't be touched again.
        let view = unsafe {
            ManuallyDrop::drop(&mut self.m2m);
            ManuallyDrop::take(&mut self.buf_view)
        };
        // Reassemble the buffer so its own Drop logic runs.
        let _ = BUF::from_view(view);
    }
}
262
/// The TX half of [Mem2Mem].
pub struct Mem2MemTx<'d, Dm: DriverMode> {
    // DMA TX channel; the concrete channel type depends on the chip family.
    #[cfg(not(esp32s2))]
    channel: ChannelTx<Dm, AnyGdmaTxChannel<'d>>,
    #[cfg(esp32s2)]
    channel: ChannelTx<Dm, CopyDmaTxChannel<'d>>,
    // Peripheral selector passed to `prepare_transfer`.
    peripheral: DmaPeripheral,
}
271
272impl<'d> Mem2MemTx<'d, Blocking> {
273    /// Convert Mem2MemTx to an async Mem2MemTx.
274    pub fn into_async(self) -> Mem2MemTx<'d, Async> {
275        Mem2MemTx {
276            channel: self.channel.into_async(),
277            peripheral: self.peripheral,
278        }
279    }
280}
281
impl<'d, Dm: DriverMode> Mem2MemTx<'d, Dm> {
    /// Start the TX half of a memory to memory transfer.
    ///
    /// Consumes `self` and the buffer. Both are handed back either through
    /// the error tuple (when starting fails) or via the returned
    /// [Mem2MemTxTransfer]'s `wait`/`stop` methods.
    pub fn send<BUF>(
        mut self,
        mut buf: BUF,
    ) -> Result<Mem2MemTxTransfer<'d, Dm, BUF>, (DmaError, Self, BUF)>
    where
        BUF: DmaTxBuffer,
    {
        // SAFETY: the buffer is moved into the returned transfer object (or
        // returned to the caller on error), so it outlives the transfer.
        let result = unsafe {
            self.channel
                .prepare_transfer(self.peripheral, &mut buf)
                .and_then(|_| self.channel.start_transfer())
        };

        if let Err(e) = result {
            return Err((e, self, buf));
        }

        // ManuallyDrop: ownership is logically moved into the transfer object;
        // wait()/stop()/Drop decide when the halves are taken back out.
        Ok(Mem2MemTxTransfer {
            m2m: ManuallyDrop::new(self),
            buf_view: ManuallyDrop::new(buf.into_view()),
        })
    }
}
307
/// Represents an ongoing (or potentially finished) DMA Memory-to-Memory TX
/// transfer.
///
/// Dropping this stops the transfer and releases the buffer.
pub struct Mem2MemTxTransfer<'d, Dm: DriverMode, BUF: DmaTxBuffer> {
    // ManuallyDrop lets wait()/stop() move the halves out by value while Drop
    // can still clean up when the transfer is simply dropped.
    m2m: ManuallyDrop<Mem2MemTx<'d, Dm>>,
    buf_view: ManuallyDrop<BUF::View>,
}
314
impl<'d, Dm: DriverMode, BUF: DmaTxBuffer> Mem2MemTxTransfer<'d, Dm, BUF> {
    /// Returns true when [Self::wait] will not block.
    ///
    /// The transfer counts as done once either a descriptor error or the
    /// final EOF interrupt is pending on the TX channel.
    pub fn is_done(&self) -> bool {
        let done_interrupts = DmaTxInterrupt::DescriptorError | DmaTxInterrupt::TotalEof;
        !self
            .m2m
            .channel
            .pending_out_interrupts()
            .is_disjoint(done_interrupts)
    }

    /// Waits for the transfer to stop and returns the peripheral and buffer.
    ///
    /// The first tuple element is `Err(DmaError::DescriptorError)` when the
    /// channel reports an error, `Ok(())` otherwise.
    pub fn wait(self) -> (Result<(), DmaError>, Mem2MemTx<'d, Dm>, BUF) {
        // Busy-spin until the channel signals completion or error.
        while !self.is_done() {}

        let (m2m, view) = self.release();

        let result = if m2m.channel.has_error() {
            Err(DmaError::DescriptorError)
        } else {
            Ok(())
        };

        (result, m2m, BUF::from_view(view))
    }

    /// Stops this transfer on the spot and returns the peripheral and buffer.
    pub fn stop(self) -> (Mem2MemTx<'d, Dm>, BUF) {
        let (mut m2m, view) = self.release();

        m2m.channel.stop_transfer();

        (m2m, BUF::from_view(view))
    }

    // Disassembles self into its parts without running its Drop impl.
    fn release(mut self) -> (Mem2MemTx<'d, Dm>, BUF::View) {
        // SAFETY: Since forget is called on self, we know that self.m2m and
        // self.buf_view won't be touched again.
        let result = unsafe {
            let m2m = ManuallyDrop::take(&mut self.m2m);
            let view = ManuallyDrop::take(&mut self.buf_view);
            (m2m, view)
        };
        core::mem::forget(self);
        result
    }
}
362
363impl<Dm: DriverMode, BUF: DmaTxBuffer> Deref for Mem2MemTxTransfer<'_, Dm, BUF> {
364    type Target = BUF::View;
365
366    fn deref(&self) -> &Self::Target {
367        &self.buf_view
368    }
369}
370
371impl<Dm: DriverMode, BUF: DmaTxBuffer> DerefMut for Mem2MemTxTransfer<'_, Dm, BUF> {
372    fn deref_mut(&mut self) -> &mut Self::Target {
373        &mut self.buf_view
374    }
375}
376
impl<Dm: DriverMode, BUF: DmaTxBuffer> Drop for Mem2MemTxTransfer<'_, Dm, BUF> {
    fn drop(&mut self) {
        // Stop the DMA engine before giving the buffer back so it no longer
        // reads from the memory we're about to release.
        self.m2m.channel.stop_transfer();

        // SAFETY: This is Drop, we know that self.m2m and self.buf_view
        // won't be touched again.
        let view = unsafe {
            ManuallyDrop::drop(&mut self.m2m);
            ManuallyDrop::take(&mut self.buf_view)
        };
        // Reassemble the buffer so its own Drop logic runs.
        let _ = BUF::from_view(view);
    }
}
390
/// A simple and easy to use wrapper around [Mem2Mem].
/// More complex memory to memory transfers should use [Mem2Mem] directly.
pub struct SimpleMem2Mem<'d, Dm: DriverMode> {
    // Idle/Active state machine holding the channel halves and descriptors.
    state: State<'d, Dm>,
    // Burst configuration applied to both the RX and TX DMA buffers.
    config: BurstConfig,
}
397
/// Internal state machine for [SimpleMem2Mem].
enum State<'d, Dm: DriverMode> {
    /// No transfer running; owns the peripheral and the RX/TX descriptor
    /// lists (in that order).
    Idle(
        Mem2Mem<'d, Dm>,
        &'d mut [DmaDescriptor],
        &'d mut [DmaDescriptor],
    ),
    /// A transfer is in flight; both halves are held by transfer objects.
    Active(
        Mem2MemRxTransfer<'d, Dm, DmaRxBuf>,
        Mem2MemTxTransfer<'d, Dm, DmaTxBuf>,
    ),
    /// Transient marker used while moving between the other two states.
    InUse,
}
410
411impl<'d, Dm: DriverMode> SimpleMem2Mem<'d, Dm> {
412    /// Creates a new [SimpleMem2Mem].
413    pub fn new(
414        mem2mem: Mem2Mem<'d, Dm>,
415        rx_descriptors: &'d mut [DmaDescriptor],
416        tx_descriptors: &'d mut [DmaDescriptor],
417        config: BurstConfig,
418    ) -> Result<Self, DmaError> {
419        if rx_descriptors.is_empty() || tx_descriptors.is_empty() {
420            return Err(DmaError::OutOfDescriptors);
421        }
422        Ok(Self {
423            state: State::Idle(mem2mem, rx_descriptors, tx_descriptors),
424            config,
425        })
426    }
427}
428
impl<'d, Dm: DriverMode> SimpleMem2Mem<'d, Dm> {
    /// Starts a memory to memory transfer.
    ///
    /// Copies `tx_buffer` into `rx_buffer` via the DMA engine. The returned
    /// transfer object borrows `self` mutably, so another transfer cannot be
    /// started until this one is waited on or dropped.
    pub fn start_transfer(
        &mut self,
        rx_buffer: &mut [u8],
        tx_buffer: &[u8],
    ) -> Result<SimpleMem2MemTransfer<'_, 'd, Dm>, DmaError> {
        // Take ownership of the idle parts; the state is restored on every
        // error path below, or by the transfer object's Drop impl.
        let State::Idle(mem2mem, rx_descriptors, tx_descriptors) =
            core::mem::replace(&mut self.state, State::InUse)
        else {
            panic!("SimpleMem2MemTransfer was forgotten with core::mem::forget or similar");
        };

        // Raise these buffers to 'static. This is not safe, bad things will happen if
        // the user calls core::mem::forget on SimpleMem2MemTransfer. This is
        // just the unfortunate consequence of doing DMA without enforcing
        // 'static.
        let rx_buffer =
            unsafe { core::slice::from_raw_parts_mut(rx_buffer.as_mut_ptr(), rx_buffer.len()) };
        // NOTE(review): this also turns the shared `&[u8]` into a `&mut [u8]`;
        // it relies on the TX path never writing through it — confirm.
        let tx_buffer =
            unsafe { core::slice::from_raw_parts_mut(tx_buffer.as_ptr() as _, tx_buffer.len()) };
        let rx_descriptors = unsafe {
            core::slice::from_raw_parts_mut(rx_descriptors.as_mut_ptr(), rx_descriptors.len())
        };
        let tx_descriptors = unsafe {
            core::slice::from_raw_parts_mut(tx_descriptors.as_mut_ptr(), tx_descriptors.len())
        };

        // Note: The ESP32-S2 insists that RX is started before TX. Contrary to the TRM
        // and every other chip.

        let dma_rx_buf = unwrap!(
            DmaRxBuf::new_with_config(rx_descriptors, rx_buffer, self.config),
            "There's no way to get the descriptors back yet"
        );

        let rx = match mem2mem.rx.receive(dma_rx_buf) {
            Ok(rx) => rx,
            Err((err, rx, buf)) => {
                // Starting RX failed: reassemble the idle state before bailing.
                let (rx_descriptors, _rx_buffer) = buf.split();
                self.state = State::Idle(
                    Mem2Mem { rx, tx: mem2mem.tx },
                    rx_descriptors,
                    tx_descriptors,
                );
                return Err(err);
            }
        };

        let dma_tx_buf = unwrap!(
            DmaTxBuf::new_with_config(tx_descriptors, tx_buffer, self.config),
            "There's no way to get the descriptors back yet"
        );

        let tx = match mem2mem.tx.send(dma_tx_buf) {
            Ok(tx) => tx,
            Err((err, tx, buf)) => {
                // Starting TX failed: stop the already-running RX half, then
                // reassemble the idle state before bailing.
                let (tx_descriptors, _tx_buffer) = buf.split();
                let (rx, buf) = rx.stop();
                let (rx_descriptors, _rx_buffer) = buf.split();
                self.state = State::Idle(Mem2Mem { rx, tx }, rx_descriptors, tx_descriptors);
                return Err(err);
            }
        };

        self.state = State::Active(rx, tx);

        Ok(SimpleMem2MemTransfer(self))
    }
}
499
500impl<Dm: DriverMode> Drop for SimpleMem2Mem<'_, Dm> {
501    fn drop(&mut self) {
502        if !matches!(&mut self.state, State::Idle(_, _, _)) {
503            panic!("SimpleMem2MemTransfer was forgotten with core::mem::forget or similar");
504        }
505    }
506}
507
/// Represents an ongoing (or potentially finished) DMA Memory-to-Memory
/// transfer.
///
/// Stopping the transfer and restoring the wrapper's idle state happen in
/// this type's [Drop] impl.
pub struct SimpleMem2MemTransfer<'a, 'd, Dm: DriverMode>(&'a mut SimpleMem2Mem<'d, Dm>);
511
impl<Dm: DriverMode> SimpleMem2MemTransfer<'_, '_, Dm> {
    /// Returns true when [Self::wait] will not block.
    pub fn is_done(&self) -> bool {
        // While this transfer object exists the state is always Active; other
        // states only occur when no transfer object is alive.
        let State::Active(rx, tx) = &self.0.state else {
            unreachable!()
        };

        // Wait for transmission to finish, and wait for the RX channel to receive the
        // one and only EOF that DmaTxBuf will send.
        tx.is_done()
            && rx
                .m2m
                .channel
                .pending_in_interrupts()
                .contains(DmaRxInterrupt::SuccessfulEof)
    }

    /// Wait for the transfer to finish.
    ///
    /// NOTE(review): this currently always returns `Ok(())`; channel errors
    /// are not surfaced here — confirm whether that is intentional.
    pub fn wait(self) -> Result<(), DmaError> {
        // Busy-spin until both halves report completion.
        while !self.is_done() {}
        Ok(())
    }
}
535
impl<Dm: DriverMode> Drop for SimpleMem2MemTransfer<'_, '_, Dm> {
    fn drop(&mut self) {
        // Dropping the transfer (even before completion) stops both halves
        // and returns the wrapper to Idle so it can be reused.
        let State::Active(rx, tx) = core::mem::replace(&mut self.0.state, State::InUse) else {
            unreachable!()
        };

        let (tx, dma_tx_buf) = tx.stop();
        let (rx, dma_rx_buf) = rx.stop();

        // Recover the descriptor lists; the data buffers were only borrowed
        // for the duration of the transfer.
        let (tx_descriptors, _tx_buffer) = dma_tx_buf.split();
        let (rx_descriptors, _rx_buffer) = dma_rx_buf.split();

        self.0.state = State::Idle(Mem2Mem { rx, tx }, rx_descriptors, tx_descriptors);
    }
}