// esp-hal source: esp_hal/spi/master/dma.rs

1use core::{
2    cell::{Cell, UnsafeCell},
3    cmp::min,
4    mem::{ManuallyDrop, MaybeUninit},
5    sync::atomic::{Ordering, fence},
6};
7
8#[cfg(feature = "unstable")]
9use embedded_hal::spi::{ErrorType, SpiBus};
10
11use super::*;
12use crate::{
13    dma::{
14        Channel,
15        DmaChannelFor,
16        DmaDescriptor,
17        DmaEligible,
18        DmaRxBuf,
19        DmaRxBuffer,
20        DmaTxBuf,
21        DmaTxBuffer,
22        PeripheralDmaChannel,
23        asynch::DmaRxFuture,
24    },
25    private::DropGuard,
26    spi::DmaError,
27};
28
/// Upper bound on the number of bytes a single DMA transfer may move.
/// NOTE(review): presumably limited by the SPI peripheral's transfer
/// length field rather than the DMA engine itself — confirm against the
/// chip's technical reference manual.
const MAX_DMA_SIZE: usize = 32736;
30
impl<'d> Spi<'d, Blocking> {
    #[doc_replace(
        "dma_channel" => {
            cfg(any(esp32, esp32s2)) => "DMA_SPI2",
            _ => "DMA_CH0",
        }
    )]
    /// Configures the SPI instance to use DMA with the specified channel.
    ///
    /// This method prepares the SPI instance for DMA transfers using SPI
    /// and returns an instance of `SpiDma` that supports DMA
    /// operations.
    /// ```rust, no_run
    /// # {before_snippet}
    /// use esp_hal::{
    ///     dma::{DmaRxBuf, DmaTxBuf},
    ///     dma_buffers,
    ///     spi::{
    ///         Mode,
    ///         master::{Config, Spi},
    ///     },
    /// };
    /// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(32000);
    ///
    /// let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer)?;
    /// let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer)?;
    ///
    /// let mut spi = Spi::new(
    ///     peripherals.SPI2,
    ///     Config::default()
    ///         .with_frequency(Rate::from_khz(100))
    ///         .with_mode(Mode::_0),
    /// )?
    /// .with_dma(peripherals.__dma_channel__)
    /// .with_buffers(dma_rx_buf, dma_tx_buf);
    /// # {after_snippet}
    /// ```
    #[instability::unstable]
    pub fn with_dma(self, channel: impl DmaChannelFor<AnySpi<'d>>) -> SpiDma<'d, Blocking> {
        // `degrade` erases the concrete channel type so `SpiDma` stores a
        // uniform, type-erased channel handle.
        SpiDma::new(self, channel.degrade())
    }
}
73
#[doc_replace(
    "dma_channel" => {
        cfg(any(esp32, esp32s2)) => "DMA_SPI2",
        _ => "DMA_CH0",
    }
)]
/// A DMA capable SPI instance.
///
/// Using `SpiDma` is not recommended unless you wish
/// to manage buffers yourself. It's recommended to use
/// [`SpiDmaBus`] via `with_buffers` to get access
/// to a DMA capable SPI bus that implements the
/// embedded-hal traits.
/// ```rust, no_run
/// # {before_snippet}
/// use esp_hal::{
///     dma::{DmaRxBuf, DmaTxBuf},
///     dma_buffers,
///     spi::{
///         Mode,
///         master::{Config, Spi},
///     },
/// };
/// let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(32000);
///
/// let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer)?;
/// let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer)?;
///
/// let mut spi = Spi::new(
///     peripherals.SPI2,
///     Config::default()
///         .with_frequency(Rate::from_khz(100))
///         .with_mode(Mode::_0),
/// )?
/// .with_dma(peripherals.__dma_channel__)
/// .with_buffers(dma_rx_buf, dma_tx_buf);
/// #
/// # {after_snippet}
/// ```
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct SpiDma<'d, Dm>
where
    Dm: DriverMode,
{
    // The wrapped SPI peripheral driver.
    spi: SpiWrapper<'d>,
    // The type-erased DMA channel; `pub(crate)` so sibling modules can
    // reach the individual RX/TX halves.
    pub(crate) channel: Channel<Dm, PeripheralDmaChannel<AnySpi<'d>>>,
}
121
// Seals `SpiDma` so crate-internal traits cannot be implemented downstream.
impl<Dm> crate::private::Sealed for SpiDma<'_, Dm> where Dm: DriverMode {}
123
impl<'d> SpiDma<'d, Blocking> {
    /// Converts the SPI instance into async mode.
    #[instability::unstable]
    pub fn into_async(self) -> SpiDma<'d, Async> {
        // Install the async interrupt handler before handing out the
        // async driver so interrupt-driven futures can be woken.
        self.spi
            .set_interrupt_handler(self.spi.info().async_handler);
        SpiDma {
            spi: self.spi,
            channel: self.channel.into_async(),
        }
    }

    /// Creates a DMA-capable SPI driver from a blocking SPI instance and
    /// a type-erased DMA channel.
    pub(super) fn new(
        spi_driver: Spi<'d, Blocking>,
        channel: PeripheralDmaChannel<AnySpi<'d>>,
    ) -> Self {
        let spi = spi_driver.spi;

        let channel = Channel::new(channel);
        // Verify at runtime that the channel can serve this SPI instance
        // (relevant on chips with dedicated per-peripheral DMA channels).
        channel.runtime_ensure_compatible(&spi.spi);

        // Count the SPI master instances on this chip at compile time.
        for_each_spi_master!((all $($inst:tt),*) => {
            const SPI_NUM: usize = 0 $(+ { stringify!($inst); 1 })*;
        };);
        // Index into the per-instance static descriptor arrays below.
        // NOTE(review): maps SPI2 -> 0 and everything else -> 1; this
        // assumes at most two SPI master instances — confirm this stays
        // in bounds of `SPI_NUM` for all supported chips.
        let id = if spi.info() == unsafe { crate::peripherals::SPI2::steal().info() } {
            0
        } else {
            1
        };

        let state = spi.spi.dma_state();

        // Fresh driver: no transfer can be in flight yet.
        state.tx_transfer_in_progress.set(false);
        state.rx_transfer_in_progress.set(false);

        // One single-descriptor list per SPI instance, backing the
        // placeholder "empty" buffers used for one-directional transfers.
        static mut TX_DESCRIPTORS: [[DmaDescriptor; 1]; SPI_NUM] =
            [[DmaDescriptor::EMPTY]; SPI_NUM];
        static mut RX_DESCRIPTORS: [[DmaDescriptor; 1]; SPI_NUM] =
            [[DmaDescriptor::EMPTY]; SPI_NUM];

        // NOTE(review): the `static mut` accesses look sound because each
        // SPI instance uses its own slot and peripheral singletons prevent
        // two drivers for the same instance — confirm.
        let empty_rx_buffer = unwrap!(DmaRxBuf::new(unsafe { &mut RX_DESCRIPTORS[id] }, &mut []));

        cfg_if::cfg_if! {
            if #[cfg(all(esp32, spi_address_workaround))] {
                // The ESP32 address workaround emulates the address phase
                // via a small TX DMA transfer, so the "empty" TX buffer
                // needs 4 bytes of real, DMA-capable storage.
                static mut BUFFERS: [[u32; 1]; SPI_NUM] = [[0]; SPI_NUM];
                let buffer = crate::dma::as_mut_byte_array!(BUFFERS[id], 4);
                let empty_tx_buffer = unwrap!(DmaTxBuf::new(unsafe { &mut TX_DESCRIPTORS[id] }, buffer));
            } else {
                let empty_tx_buffer = unwrap!(DmaTxBuf::new(unsafe { &mut TX_DESCRIPTORS[id] }, &mut []));
            }
        }

        // The buffers must be set up when creating the driver.
        unsafe { (&mut *state.empty_tx_buffer.get()).write(empty_tx_buffer) };
        unsafe { (&mut *state.empty_rx_buffer.get()).write(empty_rx_buffer) };

        Self { spi, channel }
    }

    /// Listen for the given interrupts
    #[instability::unstable]
    pub fn listen(&mut self, interrupts: impl Into<EnumSet<SpiInterrupt>>) {
        self.driver().enable_listen(interrupts.into(), true);
    }

    /// Unlisten the given interrupts
    #[instability::unstable]
    pub fn unlisten(&mut self, interrupts: impl Into<EnumSet<SpiInterrupt>>) {
        self.driver().enable_listen(interrupts.into(), false);
    }

    /// Gets asserted interrupts
    #[instability::unstable]
    pub fn interrupts(&mut self) -> EnumSet<SpiInterrupt> {
        self.driver().interrupts()
    }

    /// Resets asserted interrupts
    #[instability::unstable]
    pub fn clear_interrupts(&mut self, interrupts: impl Into<EnumSet<SpiInterrupt>>) {
        self.driver().clear_interrupts(interrupts.into());
    }

    #[cfg_attr(
        not(multi_core),
        doc = "Registers an interrupt handler for the peripheral."
    )]
    #[cfg_attr(
        multi_core,
        doc = "Registers an interrupt handler for the peripheral on the current core."
    )]
    #[doc = ""]
    /// Note that this will replace any previously registered interrupt
    /// handlers.
    ///
    /// You can restore the default/unhandled interrupt handler by using
    /// [crate::interrupt::DEFAULT_INTERRUPT_HANDLER]
    ///
    /// # Panics
    ///
    /// Panics if passed interrupt handler is invalid (e.g. has priority
    /// `None`)
    #[instability::unstable]
    pub fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
        self.spi.set_interrupt_handler(handler);
    }
}
231
impl<'d> SpiDma<'d, Async> {
    /// Converts the SPI instance into blocking mode.
    #[instability::unstable]
    pub fn into_blocking(self) -> SpiDma<'d, Blocking> {
        // Remove the async interrupt handler on every core before
        // returning to blocking operation.
        self.spi.disable_peri_interrupt_on_all_cores();
        SpiDma {
            spi: self.spi,
            channel: self.channel.into_blocking(),
        }
    }

    /// Waits, without blocking the executor, until the current transfer
    /// (if any) has fully completed, then clears the in-progress flags.
    async fn wait_for_idle_async(&mut self) {
        // First wait for the RX DMA channel to finish delivering data.
        if self.dma_driver().state.rx_transfer_in_progress.get() {
            _ = DmaRxFuture::new(&mut self.channel.rx).await;
            self.dma_driver().state.rx_transfer_in_progress.set(false);
        }

        // Future that resolves once the SPI peripheral raises
        // `TransferDone`.
        struct Fut(Driver);
        impl Fut {
            const DONE_EVENTS: EnumSet<SpiInterrupt> =
                enumset::enum_set!(SpiInterrupt::TransferDone);
        }
        impl Future for Fut {
            type Output = ();

            fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                if !self.0.interrupts().is_disjoint(Self::DONE_EVENTS) {
                    #[cfg(any(esp32, esp32s2))]
                    // Need to poll for done-ness even after interrupt fires.
                    if self.0.busy() {
                        cx.waker().wake_by_ref();
                        return Poll::Pending;
                    }

                    self.0.clear_interrupts(Self::DONE_EVENTS);
                    return Poll::Ready(());
                }

                // Register the waker before (re-)enabling the interrupt so
                // a completion racing this poll is not missed.
                self.0.state.waker.register(cx.waker());
                self.0.enable_listen(Self::DONE_EVENTS, true);
                Poll::Pending
            }
        }
        impl Drop for Fut {
            fn drop(&mut self) {
                // Stop listening if the future is dropped (cancellation).
                self.0.enable_listen(Self::DONE_EVENTS, false);
            }
        }

        if !self.is_done() {
            Fut(self.driver()).await;
        }

        if self.dma_driver().state.tx_transfer_in_progress.get() {
            // In case DMA TX buffer is bigger than what the SPI consumes, stop the DMA.
            if !self.channel.tx.is_done() {
                self.channel.tx.stop_transfer();
            }
            self.dma_driver().state.tx_transfer_in_progress.set(false);
        }
    }
}
294
295impl<Dm> core::fmt::Debug for SpiDma<'_, Dm>
296where
297    Dm: DriverMode + core::fmt::Debug,
298{
299    /// Formats the `SpiDma` instance for debugging purposes.
300    ///
301    /// This method returns a debug struct with the name "SpiDma" without
302    /// exposing internal details.
303    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
304        f.debug_struct("SpiDma").field("spi", &self.spi).finish()
305    }
306}
307
#[instability::unstable]
impl crate::interrupt::InterruptConfigurable for SpiDma<'_, Blocking> {
    /// Sets the interrupt handler
    ///
    /// Interrupts are not enabled at the peripheral level here.
    fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
        // Resolves to the inherent `set_interrupt_handler` method
        // (inherent methods take precedence over trait methods), so this
        // is delegation, not recursion.
        self.set_interrupt_handler(handler);
    }
}
317
impl<Dm> SpiDma<'_, Dm>
where
    Dm: DriverMode,
{
    /// Borrows the wrapped SPI peripheral.
    fn spi(&self) -> &SpiWrapper<'_> {
        &self.spi
    }

    /// Builds a register-level driver handle for the SPI peripheral.
    fn driver(&self) -> Driver {
        Driver {
            info: self.spi.info(),
            state: self.spi.state(),
        }
    }

    /// Builds a DMA-aware driver handle (register driver + DMA state).
    fn dma_driver(&self) -> DmaDriver {
        DmaDriver {
            driver: self.driver(),
            dma_peripheral: self.spi().dma_peripheral(),
            state: self.spi().dma_state(),
        }
    }

    /// Returns `true` when the SPI peripheral is idle and any in-flight
    /// RX transfer has either completed or exhausted its descriptors.
    fn is_done(&self) -> bool {
        if self.driver().busy() {
            return false;
        }
        if self.dma_driver().state.rx_transfer_in_progress.get() {
            // If this is an asymmetric transfer and the RX side is smaller, the RX channel
            // will never be "done" as it won't have enough descriptors/buffer to receive
            // the EOF bit from the SPI. So instead the RX channel will hit
            // a "descriptor empty" which means the DMA is written as much
            // of the received data as possible into the buffer and
            // discarded the rest. The user doesn't care about this discarded data.

            if !self.channel.rx.is_done() && !self.channel.rx.has_dscr_empty_error() {
                return false;
            }
        }
        true
    }

    /// Busy-waits until the current transfer (if any) completes, then
    /// clears the in-progress flags.
    fn wait_for_idle(&mut self) {
        while !self.is_done() {
            // Wait for the SPI to become idle
        }
        self.dma_driver().state.rx_transfer_in_progress.set(false);
        self.dma_driver().state.tx_transfer_in_progress.set(false);
        // Acquire fence so subsequent reads of the DMA buffers are not
        // reordered before the completion check above.
        fence(Ordering::Acquire);
    }

    /// # Safety:
    ///
    /// The caller must ensure to not access the buffer contents while the
    /// transfer is in progress. Moving the buffer itself is allowed.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_transfer_dma<RX: DmaRxBuffer, TX: DmaTxBuffer>(
        &mut self,
        full_duplex: bool,
        bytes_to_read: usize,
        bytes_to_write: usize,
        rx_buffer: &mut RX,
        tx_buffer: &mut TX,
    ) -> Result<(), Error> {
        // Both directions are bounded by the peripheral's transfer limit.
        if bytes_to_read > MAX_DMA_SIZE || bytes_to_write > MAX_DMA_SIZE {
            return Err(Error::MaxDmaTransferSizeExceeded);
        }

        // Record which directions are active before kicking off the
        // hardware, so `is_done`/`wait_for_idle` see consistent state.
        self.dma_driver()
            .state
            .rx_transfer_in_progress
            .set(bytes_to_read > 0);
        self.dma_driver()
            .state
            .tx_transfer_in_progress
            .set(bytes_to_write > 0);
        unsafe {
            self.dma_driver().start_transfer_dma(
                full_duplex,
                bytes_to_read,
                bytes_to_write,
                rx_buffer,
                tx_buffer,
                &mut self.channel,
            )
        }
    }

    /// # Safety:
    ///
    /// The caller must ensure that the buffers are not accessed while the
    /// transfer is in progress. Moving the buffers is allowed.
    #[cfg(all(esp32, spi_address_workaround))]
    unsafe fn set_up_address_workaround(
        &mut self,
        cmd: Command,
        address: Address,
        dummy: u8,
    ) -> Result<(), Error> {
        if dummy > 0 {
            // FIXME: https://github.com/esp-rs/esp-hal/issues/2240
            error!("Dummy bits are not supported when there is no data to write");
            return Err(Error::Unsupported);
        }

        // Emulate the address phase by transmitting the address bytes as
        // data via the shared (4-byte) placeholder TX buffer.
        let buffer = unsafe { self.spi.dma_state().empty_tx_buffer() };

        let bytes_to_write = address.width().div_ceil(8);
        // The address register is read in big-endian order,
        // we have to prepare the emulated write in the same way.
        let addr_bytes = address.value().to_be_bytes();
        let addr_bytes = &addr_bytes[4 - bytes_to_write..][..bytes_to_write];
        buffer.fill(addr_bytes);

        self.driver().setup_half_duplex(
            true,
            cmd,
            Address::None,
            false,
            dummy,
            bytes_to_write == 0,
            address.mode(),
        )?;

        let empty_rx_buffer = unsafe { self.dma_driver().empty_rx_buffer() };

        unsafe { self.start_transfer_dma(false, 0, bytes_to_write, empty_rx_buffer, buffer) }
    }

    /// Aborts the SPI transaction and stops both DMA channels, clearing
    /// the in-progress flags.
    fn cancel_transfer(&mut self) {
        let state = self.dma_driver().state;
        if state.tx_transfer_in_progress.get() || state.rx_transfer_in_progress.get() {
            self.dma_driver().abort_transfer();

            // We need to stop the DMA transfer, too.
            if state.tx_transfer_in_progress.get() {
                self.channel.tx.stop_transfer();
                state.tx_transfer_in_progress.set(false);
            }
            if state.rx_transfer_in_progress.get() {
                self.channel.rx.stop_transfer();
                state.rx_transfer_in_progress.set(false);
            }
        }
    }
}
464
#[instability::unstable]
impl<Dm> embassy_embedded_hal::SetConfig for SpiDma<'_, Dm>
where
    Dm: DriverMode,
{
    type Config = Config;
    type ConfigError = ConfigError;

    /// Applies a new bus configuration; forwards to [`SpiDma::apply_config`].
    fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
        self.apply_config(config)
    }
}
477
/// A structure representing a DMA transfer for SPI.
///
/// This structure holds references to the SPI instance, DMA buffers, and
/// transfer status.
#[instability::unstable]
pub struct SpiDmaTransfer<'d, Dm, Buf>
where
    Dm: DriverMode,
{
    // Wrapped in `ManuallyDrop` so `wait` can move the fields out by
    // value (and `forget` self) without destructors running twice; the
    // `Drop` impl is responsible for releasing them otherwise.
    spi_dma: ManuallyDrop<SpiDma<'d, Dm>>,
    dma_buf: ManuallyDrop<Buf>,
}
490
impl<Buf> SpiDmaTransfer<'_, Async, Buf> {
    /// Waits for the DMA transfer to complete asynchronously.
    ///
    /// This method awaits the completion of both RX and TX operations.
    /// Unlike [`Self::wait`], it does not consume the transfer or return
    /// the driver and buffer.
    #[instability::unstable]
    pub async fn wait_for_done(&mut self) {
        self.spi_dma.wait_for_idle_async().await;
    }
}
500
impl<'d, Dm, Buf> SpiDmaTransfer<'d, Dm, Buf>
where
    Dm: DriverMode,
{
    /// Wraps the driver and buffer of an in-flight transfer.
    fn new(spi_dma: SpiDma<'d, Dm>, dma_buf: Buf) -> Self {
        Self {
            spi_dma: ManuallyDrop::new(spi_dma),
            dma_buf: ManuallyDrop::new(dma_buf),
        }
    }

    /// Checks if the transfer is complete.
    ///
    /// This method returns `true` if both RX and TX operations are done,
    /// and the SPI instance is no longer busy.
    pub fn is_done(&self) -> bool {
        self.spi_dma.is_done()
    }

    /// Waits for the DMA transfer to complete.
    ///
    /// This method blocks until the transfer is finished and returns the
    /// `SpiDma` instance and the associated buffer.
    #[instability::unstable]
    pub fn wait(mut self) -> (SpiDma<'d, Dm>, Buf) {
        self.spi_dma.wait_for_idle();
        // SAFETY: each field is taken exactly once, and `forget` below
        // prevents `Drop` from running and releasing them a second time.
        let retval = unsafe {
            (
                ManuallyDrop::take(&mut self.spi_dma),
                ManuallyDrop::take(&mut self.dma_buf),
            )
        };
        core::mem::forget(self);
        retval
    }

    /// Cancels the DMA transfer.
    ///
    /// Does nothing if the transfer has already completed.
    #[instability::unstable]
    pub fn cancel(&mut self) {
        if !self.spi_dma.is_done() {
            self.spi_dma.cancel_transfer();
        }
    }
}
545
546impl<Dm, Buf> Drop for SpiDmaTransfer<'_, Dm, Buf>
547where
548    Dm: DriverMode,
549{
550    fn drop(&mut self) {
551        if !self.is_done() {
552            self.spi_dma.cancel_transfer();
553            self.spi_dma.wait_for_idle();
554
555            unsafe {
556                ManuallyDrop::drop(&mut self.spi_dma);
557                ManuallyDrop::drop(&mut self.dma_buf);
558            }
559        }
560    }
561}
562
impl<'d, Dm> SpiDma<'d, Dm>
where
    Dm: DriverMode,
{
    /// # Safety:
    ///
    /// The caller must ensure that the buffers are not accessed while the
    /// transfer is in progress. Moving the buffers is allowed.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_dma_write(
        &mut self,
        bytes_to_write: usize,
        buffer: &mut impl DmaTxBuffer,
    ) -> Result<(), Error> {
        // A write is a transfer with zero read bytes; the RX side gets
        // the shared placeholder buffer.
        let empty_rx_buffer = unsafe { self.dma_driver().empty_rx_buffer() };

        unsafe { self.start_dma_transfer(0, bytes_to_write, empty_rx_buffer, buffer) }
    }

    /// Configures the DMA buffers for the SPI instance.
    ///
    /// This method sets up both RX and TX buffers for DMA transfers.
    /// It returns an instance of `SpiDmaBus` that can be used for SPI
    /// communication.
    #[instability::unstable]
    pub fn with_buffers(self, dma_rx_buf: DmaRxBuf, dma_tx_buf: DmaTxBuf) -> SpiDmaBus<'d, Dm> {
        SpiDmaBus::new(self, dma_rx_buf, dma_tx_buf)
    }

    /// Perform a DMA write.
    ///
    /// This will return a [SpiDmaTransfer] owning the buffer and the
    /// SPI instance. The maximum amount of data to be sent is 32736
    /// bytes.
    ///
    /// On error the driver and the buffer are handed back to the caller.
    #[allow(clippy::type_complexity)]
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    #[instability::unstable]
    pub fn write<TX: DmaTxBuffer>(
        mut self,
        bytes_to_write: usize,
        mut buffer: TX,
    ) -> Result<SpiDmaTransfer<'d, Dm, TX>, (Error, Self, TX)> {
        // Any previous transfer must have finished before reconfiguring.
        self.wait_for_idle();
        if let Err(e) = self.driver().setup_full_duplex() {
            return Err((e, self, buffer));
        };
        match unsafe { self.start_dma_write(bytes_to_write, &mut buffer) } {
            Ok(_) => Ok(SpiDmaTransfer::new(self, buffer)),
            Err(e) => Err((e, self, buffer)),
        }
    }

    /// # Safety:
    ///
    /// The caller must ensure that the buffers are not accessed while the
    /// transfer is in progress. Moving the buffers is allowed.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_dma_read(
        &mut self,
        bytes_to_read: usize,
        buffer: &mut impl DmaRxBuffer,
    ) -> Result<(), Error> {
        // A read is a transfer with zero write bytes; the TX side gets
        // the shared placeholder buffer.
        let empty_tx_buffer = unsafe { self.dma_driver().empty_tx_buffer() };

        unsafe { self.start_dma_transfer(bytes_to_read, 0, buffer, empty_tx_buffer) }
    }

    /// Perform a DMA read.
    ///
    /// This will return a [SpiDmaTransfer] owning the buffer and
    /// the SPI instance. The maximum amount of data to be
    /// received is 32736 bytes.
    ///
    /// On error the driver and the buffer are handed back to the caller.
    #[allow(clippy::type_complexity)]
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    #[instability::unstable]
    pub fn read<RX: DmaRxBuffer>(
        mut self,
        bytes_to_read: usize,
        mut buffer: RX,
    ) -> Result<SpiDmaTransfer<'d, Dm, RX>, (Error, Self, RX)> {
        self.wait_for_idle();
        if let Err(e) = self.driver().setup_full_duplex() {
            return Err((e, self, buffer));
        };
        match unsafe { self.start_dma_read(bytes_to_read, &mut buffer) } {
            Ok(_) => Ok(SpiDmaTransfer::new(self, buffer)),
            Err(e) => Err((e, self, buffer)),
        }
    }

    /// # Safety:
    ///
    /// The caller must ensure that the buffers are not accessed while the
    /// transfer is in progress. Moving the buffers is allowed.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_dma_transfer(
        &mut self,
        bytes_to_read: usize,
        bytes_to_write: usize,
        rx_buffer: &mut impl DmaRxBuffer,
        tx_buffer: &mut impl DmaTxBuffer,
    ) -> Result<(), Error> {
        unsafe {
            self.start_transfer_dma(true, bytes_to_read, bytes_to_write, rx_buffer, tx_buffer)
        }
    }

    /// Perform a DMA transfer
    ///
    /// This will return a [SpiDmaTransfer] owning the buffers and
    /// the SPI instance. The maximum amount of data to be
    /// sent/received is 32736 bytes.
    ///
    /// On error the driver and both buffers are handed back to the caller.
    #[allow(clippy::type_complexity)]
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    #[instability::unstable]
    pub fn transfer<RX: DmaRxBuffer, TX: DmaTxBuffer>(
        mut self,
        bytes_to_read: usize,
        mut rx_buffer: RX,
        bytes_to_write: usize,
        mut tx_buffer: TX,
    ) -> Result<SpiDmaTransfer<'d, Dm, (RX, TX)>, (Error, Self, RX, TX)> {
        self.wait_for_idle();
        if let Err(e) = self.driver().setup_full_duplex() {
            return Err((e, self, rx_buffer, tx_buffer));
        };
        match unsafe {
            self.start_dma_transfer(
                bytes_to_read,
                bytes_to_write,
                &mut rx_buffer,
                &mut tx_buffer,
            )
        } {
            Ok(_) => Ok(SpiDmaTransfer::new(self, (rx_buffer, tx_buffer))),
            Err(e) => Err((e, self, rx_buffer, tx_buffer)),
        }
    }

    /// # Safety:
    ///
    /// The caller must ensure that the buffers are not accessed while the
    /// transfer is in progress. Moving the buffers is allowed.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_half_duplex_read(
        &mut self,
        data_mode: DataMode,
        cmd: Command,
        address: Address,
        dummy: u8,
        bytes_to_read: usize,
        buffer: &mut impl DmaRxBuffer,
    ) -> Result<(), Error> {
        self.driver().setup_half_duplex(
            false,
            cmd,
            address,
            false,
            dummy,
            bytes_to_read == 0,
            data_mode,
        )?;

        let empty_tx_buffer = unsafe { self.dma_driver().empty_tx_buffer() };

        unsafe { self.start_transfer_dma(false, bytes_to_read, 0, buffer, empty_tx_buffer) }
    }

    /// Perform a half-duplex read operation using DMA.
    ///
    /// On error the driver and the buffer are handed back to the caller.
    #[allow(clippy::type_complexity)]
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    #[instability::unstable]
    pub fn half_duplex_read<RX: DmaRxBuffer>(
        mut self,
        data_mode: DataMode,
        cmd: Command,
        address: Address,
        dummy: u8,
        bytes_to_read: usize,
        mut buffer: RX,
    ) -> Result<SpiDmaTransfer<'d, Dm, RX>, (Error, Self, RX)> {
        self.wait_for_idle();

        match unsafe {
            self.start_half_duplex_read(data_mode, cmd, address, dummy, bytes_to_read, &mut buffer)
        } {
            Ok(_) => Ok(SpiDmaTransfer::new(self, buffer)),
            Err(e) => Err((e, self, buffer)),
        }
    }

    /// # Safety:
    ///
    /// The caller must ensure that the buffers are not accessed while the
    /// transfer is in progress. Moving the buffers is allowed.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_half_duplex_write(
        &mut self,
        data_mode: DataMode,
        cmd: Command,
        address: Address,
        dummy: u8,
        bytes_to_write: usize,
        buffer: &mut impl DmaTxBuffer,
    ) -> Result<(), Error> {
        #[cfg(all(esp32, spi_address_workaround))]
        {
            // On the ESP32, if we don't have data, the address is always sent
            // on a single line, regardless of its data mode.
            if bytes_to_write == 0 && address.mode() != DataMode::SingleTwoDataLines {
                return unsafe { self.set_up_address_workaround(cmd, address, dummy) };
            }
        }

        self.driver().setup_half_duplex(
            true,
            cmd,
            address,
            false,
            dummy,
            bytes_to_write == 0,
            data_mode,
        )?;

        let empty_rx_buffer = unsafe { self.dma_driver().empty_rx_buffer() };

        unsafe { self.start_transfer_dma(false, 0, bytes_to_write, empty_rx_buffer, buffer) }
    }

    /// Perform a half-duplex write operation using DMA.
    ///
    /// On error the driver and the buffer are handed back to the caller.
    #[allow(clippy::type_complexity)]
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    #[instability::unstable]
    pub fn half_duplex_write<TX: DmaTxBuffer>(
        mut self,
        data_mode: DataMode,
        cmd: Command,
        address: Address,
        dummy: u8,
        bytes_to_write: usize,
        mut buffer: TX,
    ) -> Result<SpiDmaTransfer<'d, Dm, TX>, (Error, Self, TX)> {
        self.wait_for_idle();

        match unsafe {
            self.start_half_duplex_write(
                data_mode,
                cmd,
                address,
                dummy,
                bytes_to_write,
                &mut buffer,
            )
        } {
            Ok(_) => Ok(SpiDmaTransfer::new(self, buffer)),
            Err(e) => Err((e, self, buffer)),
        }
    }

    /// Change the bus configuration.
    ///
    /// # Errors
    ///
    /// If frequency passed in config exceeds
    #[cfg_attr(not(esp32h2), doc = " 80MHz")]
    #[cfg_attr(esp32h2, doc = " 48MHz")]
    /// or is below 70kHz,
    /// [`ConfigError::UnsupportedFrequency`] error will be returned.
    #[instability::unstable]
    pub fn apply_config(&mut self, config: &Config) -> Result<(), ConfigError> {
        self.driver().apply_config(config)
    }
}
836
/// A DMA-capable SPI bus.
///
/// This structure is responsible for managing SPI transfers using DMA
/// buffers.
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[instability::unstable]
pub struct SpiDmaBus<'d, Dm>
where
    Dm: DriverMode,
{
    spi_dma: SpiDma<'d, Dm>,
    // Owned DMA bounce buffers; user slices are copied through these in
    // capacity-sized chunks.
    rx_buf: DmaRxBuf,
    tx_buf: DmaTxBuf,
}
852
// Seals `SpiDmaBus` so crate-internal traits cannot be implemented downstream.
impl<Dm> crate::private::Sealed for SpiDmaBus<'_, Dm> where Dm: DriverMode {}
854
impl<'d> SpiDmaBus<'d, Blocking> {
    /// Converts the SPI instance into async mode.
    #[instability::unstable]
    pub fn into_async(self) -> SpiDmaBus<'d, Async> {
        SpiDmaBus {
            spi_dma: self.spi_dma.into_async(),
            rx_buf: self.rx_buf,
            tx_buf: self.tx_buf,
        }
    }

    /// Listen for the given interrupts
    ///
    /// Delegates to the inner [`SpiDma`].
    #[instability::unstable]
    pub fn listen(&mut self, interrupts: impl Into<EnumSet<SpiInterrupt>>) {
        self.spi_dma.listen(interrupts.into());
    }

    /// Unlisten the given interrupts
    ///
    /// Delegates to the inner [`SpiDma`].
    #[instability::unstable]
    pub fn unlisten(&mut self, interrupts: impl Into<EnumSet<SpiInterrupt>>) {
        self.spi_dma.unlisten(interrupts.into());
    }

    /// Gets asserted interrupts
    #[instability::unstable]
    pub fn interrupts(&mut self) -> EnumSet<SpiInterrupt> {
        self.spi_dma.interrupts()
    }

    /// Resets asserted interrupts
    #[instability::unstable]
    pub fn clear_interrupts(&mut self, interrupts: impl Into<EnumSet<SpiInterrupt>>) {
        self.spi_dma.clear_interrupts(interrupts.into());
    }
}
890
891impl<'d> SpiDmaBus<'d, Async> {
    /// Converts the SPI instance into blocking mode.
    #[instability::unstable]
    pub fn into_blocking(self) -> SpiDmaBus<'d, Blocking> {
        SpiDmaBus {
            spi_dma: self.spi_dma.into_blocking(),
            rx_buf: self.rx_buf,
            tx_buf: self.tx_buf,
        }
    }
901
    /// Fill the given buffer with data from the bus.
    ///
    /// Reads are chunked through the internal RX DMA buffer, so `words`
    /// may be larger than that buffer's capacity.
    #[instability::unstable]
    pub async fn read_async(&mut self, words: &mut [u8]) -> Result<(), Error> {
        self.spi_dma.wait_for_idle_async().await;
        self.spi_dma.driver().setup_full_duplex()?;
        let chunk_size = self.rx_buf.capacity();

        let empty_tx_buffer = unsafe { self.spi_dma.dma_driver().empty_tx_buffer() };

        for chunk in words.chunks_mut(chunk_size) {
            // If this future is dropped mid-transfer, the guard cancels
            // the DMA so the buffers are not written to afterwards.
            let mut spi = DropGuard::new(&mut self.spi_dma, |spi| spi.cancel_transfer());

            unsafe { spi.start_dma_transfer(chunk.len(), 0, &mut self.rx_buf, empty_tx_buffer)? };

            spi.wait_for_idle_async().await;

            // Copy the received bytes out of the DMA buffer.
            chunk.copy_from_slice(&self.rx_buf.as_slice()[..chunk.len()]);

            spi.defuse();
        }

        Ok(())
    }
925
    /// Transmit the given buffer to the bus.
    ///
    /// Writes are chunked through the internal TX DMA buffer, so `words`
    /// may be larger than that buffer's capacity.
    #[instability::unstable]
    pub async fn write_async(&mut self, words: &[u8]) -> Result<(), Error> {
        self.spi_dma.wait_for_idle_async().await;
        self.spi_dma.driver().setup_full_duplex()?;

        let empty_rx_buffer = unsafe { self.spi_dma.dma_driver().empty_rx_buffer() };

        // If this future is dropped mid-transfer, the guard cancels the
        // DMA so the buffers are not read afterwards.
        let mut spi = DropGuard::new(&mut self.spi_dma, |spi| spi.cancel_transfer());
        let chunk_size = self.tx_buf.capacity();

        for chunk in words.chunks(chunk_size) {
            // Stage the next chunk in the DMA buffer before starting.
            self.tx_buf.as_mut_slice()[..chunk.len()].copy_from_slice(chunk);

            unsafe { spi.start_dma_transfer(0, chunk.len(), empty_rx_buffer, &mut self.tx_buf)? };

            spi.wait_for_idle_async().await;
        }
        spi.defuse();

        Ok(())
    }
948
949    /// Transfer by writing out a buffer and reading the response from
950    /// the bus into another buffer.
951    #[instability::unstable]
952    pub async fn transfer_async(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
953        self.spi_dma.wait_for_idle_async().await;
954        self.spi_dma.driver().setup_full_duplex()?;
955
956        let mut spi = DropGuard::new(&mut self.spi_dma, |spi| spi.cancel_transfer());
957        let chunk_size = min(self.tx_buf.capacity(), self.rx_buf.capacity());
958
959        let common_length = min(read.len(), write.len());
960        let (read_common, read_remainder) = read.split_at_mut(common_length);
961        let (write_common, write_remainder) = write.split_at(common_length);
962
963        for (read_chunk, write_chunk) in read_common
964            .chunks_mut(chunk_size)
965            .zip(write_common.chunks(chunk_size))
966        {
967            self.tx_buf.as_mut_slice()[..write_chunk.len()].copy_from_slice(write_chunk);
968
969            unsafe {
970                spi.start_dma_transfer(
971                    read_chunk.len(),
972                    write_chunk.len(),
973                    &mut self.rx_buf,
974                    &mut self.tx_buf,
975                )?;
976            }
977            spi.wait_for_idle_async().await;
978
979            read_chunk.copy_from_slice(&self.rx_buf.as_slice()[..read_chunk.len()]);
980        }
981
982        spi.defuse();
983
984        if !read_remainder.is_empty() {
985            self.read_async(read_remainder).await
986        } else if !write_remainder.is_empty() {
987            self.write_async(write_remainder).await
988        } else {
989            Ok(())
990        }
991    }
992
993    /// Transfer by writing out a buffer and reading the response from
994    /// the bus into the same buffer.
995    #[instability::unstable]
996    pub async fn transfer_in_place_async(&mut self, words: &mut [u8]) -> Result<(), Error> {
997        self.spi_dma.wait_for_idle_async().await;
998        self.spi_dma.driver().setup_full_duplex()?;
999
1000        let mut spi = DropGuard::new(&mut self.spi_dma, |spi| spi.cancel_transfer());
1001        for chunk in words.chunks_mut(self.tx_buf.capacity()) {
1002            self.tx_buf.as_mut_slice()[..chunk.len()].copy_from_slice(chunk);
1003
1004            unsafe {
1005                spi.start_dma_transfer(
1006                    chunk.len(),
1007                    chunk.len(),
1008                    &mut self.rx_buf,
1009                    &mut self.tx_buf,
1010                )?;
1011            }
1012            spi.wait_for_idle_async().await;
1013            chunk.copy_from_slice(&self.rx_buf.as_slice()[..chunk.len()]);
1014        }
1015
1016        spi.defuse();
1017
1018        Ok(())
1019    }
1020}
1021
impl<'d, Dm> SpiDmaBus<'d, Dm>
where
    Dm: DriverMode,
{
    /// Creates a new `SpiDmaBus` with the specified SPI instance and DMA
    /// buffers.
    pub fn new(spi_dma: SpiDma<'d, Dm>, rx_buf: DmaRxBuf, tx_buf: DmaTxBuf) -> Self {
        Self {
            spi_dma,
            rx_buf,
            tx_buf,
        }
    }

    /// Splits [SpiDmaBus] back into [SpiDma], [DmaRxBuf] and [DmaTxBuf].
    #[instability::unstable]
    pub fn split(mut self) -> (SpiDma<'d, Dm>, DmaRxBuf, DmaTxBuf) {
        // Make sure no transfer is still using the buffers before handing
        // them back to the caller.
        self.wait_for_idle();
        (self.spi_dma, self.rx_buf, self.tx_buf)
    }

    // Blocks until any in-flight transfer has completed.
    fn wait_for_idle(&mut self) {
        self.spi_dma.wait_for_idle();
    }

    /// Change the bus configuration.
    ///
    /// # Errors
    ///
    /// If frequency passed in config exceeds
    #[cfg_attr(not(esp32h2), doc = " 80MHz")]
    #[cfg_attr(esp32h2, doc = " 48MHz")]
    /// or is below 70kHz,
    /// [`ConfigError::UnsupportedFrequency`] error will be returned.
    #[instability::unstable]
    pub fn apply_config(&mut self, config: &Config) -> Result<(), ConfigError> {
        self.spi_dma.apply_config(config)
    }

    /// Reads data from the SPI bus using DMA.
    ///
    /// `words` is received in chunks no larger than the RX DMA buffer's
    /// capacity.
    #[instability::unstable]
    pub fn read(&mut self, words: &mut [u8]) -> Result<(), Error> {
        self.wait_for_idle();
        self.spi_dma.driver().setup_full_duplex()?;

        // SAFETY: presumably the shared placeholder TX buffer is not aliased
        // while `self` is exclusively borrowed — TODO confirm against
        // `DmaState::empty_tx_buffer`'s aliasing contract.
        let empty_tx_buffer = unsafe { self.spi_dma.dma_driver().empty_tx_buffer() };

        for chunk in words.chunks_mut(self.rx_buf.capacity()) {
            unsafe {
                self.spi_dma.start_dma_transfer(
                    chunk.len(),
                    0,
                    &mut self.rx_buf,
                    empty_tx_buffer,
                )?;
            }

            self.wait_for_idle();
            // Copy the received bytes out of the DMA buffer.
            chunk.copy_from_slice(&self.rx_buf.as_slice()[..chunk.len()]);
        }

        Ok(())
    }

    /// Writes data to the SPI bus using DMA.
    ///
    /// `words` is sent in chunks no larger than the TX DMA buffer's
    /// capacity.
    #[instability::unstable]
    pub fn write(&mut self, words: &[u8]) -> Result<(), Error> {
        self.wait_for_idle();
        self.spi_dma.driver().setup_full_duplex()?;
        // SAFETY: presumably the shared placeholder RX buffer is not aliased
        // while `self` is exclusively borrowed — TODO confirm against
        // `DmaState::empty_rx_buffer`'s aliasing contract.
        let empty_rx_buffer = unsafe { self.spi_dma.dma_driver().empty_rx_buffer() };

        for chunk in words.chunks(self.tx_buf.capacity()) {
            // Stage the chunk in the DMA-capable buffer before starting.
            self.tx_buf.as_mut_slice()[..chunk.len()].copy_from_slice(chunk);

            unsafe {
                self.spi_dma.start_dma_transfer(
                    0,
                    chunk.len(),
                    empty_rx_buffer,
                    &mut self.tx_buf,
                )?;
            }

            self.wait_for_idle();
        }

        Ok(())
    }

    /// Transfers data to and from the SPI bus simultaneously using DMA.
    ///
    /// The common prefix of `read` and `write` is transferred full-duplex;
    /// any remainder of the longer slice is handled by a plain read or
    /// write afterwards.
    #[instability::unstable]
    pub fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Error> {
        self.wait_for_idle();
        self.spi_dma.driver().setup_full_duplex()?;
        // Full-duplex chunks must fit both DMA buffers.
        let chunk_size = min(self.tx_buf.capacity(), self.rx_buf.capacity());

        let common_length = min(read.len(), write.len());
        let (read_common, read_remainder) = read.split_at_mut(common_length);
        let (write_common, write_remainder) = write.split_at(common_length);

        for (read_chunk, write_chunk) in read_common
            .chunks_mut(chunk_size)
            .zip(write_common.chunks(chunk_size))
        {
            self.tx_buf.as_mut_slice()[..write_chunk.len()].copy_from_slice(write_chunk);

            unsafe {
                self.spi_dma.start_dma_transfer(
                    read_chunk.len(),
                    write_chunk.len(),
                    &mut self.rx_buf,
                    &mut self.tx_buf,
                )?;
            }
            self.wait_for_idle();

            read_chunk.copy_from_slice(&self.rx_buf.as_slice()[..read_chunk.len()]);
        }

        // At most one of the remainders is non-empty (the slices were split
        // at the common length).
        if !read_remainder.is_empty() {
            self.read(read_remainder)
        } else if !write_remainder.is_empty() {
            self.write(write_remainder)
        } else {
            Ok(())
        }
    }

    /// Transfers data in place on the SPI bus using DMA.
    ///
    /// Each chunk of `words` is written out and overwritten with the bytes
    /// received during the same full-duplex transfer.
    #[instability::unstable]
    pub fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Error> {
        self.wait_for_idle();
        self.spi_dma.driver().setup_full_duplex()?;
        let chunk_size = min(self.tx_buf.capacity(), self.rx_buf.capacity());

        for chunk in words.chunks_mut(chunk_size) {
            self.tx_buf.as_mut_slice()[..chunk.len()].copy_from_slice(chunk);

            unsafe {
                self.spi_dma.start_dma_transfer(
                    chunk.len(),
                    chunk.len(),
                    &mut self.rx_buf,
                    &mut self.tx_buf,
                )?;
            }
            self.wait_for_idle();
            chunk.copy_from_slice(&self.rx_buf.as_slice()[..chunk.len()]);
        }

        Ok(())
    }

    /// Half-duplex read.
    ///
    /// Unlike [`Self::read`], this is not chunked: `buffer` must fit in the
    /// RX DMA buffer or a [`DmaError::Overflow`] error is returned.
    #[instability::unstable]
    pub fn half_duplex_read(
        &mut self,
        data_mode: DataMode,
        cmd: Command,
        address: Address,
        dummy: u8,
        buffer: &mut [u8],
    ) -> Result<(), Error> {
        if buffer.len() > self.rx_buf.capacity() {
            return Err(Error::from(DmaError::Overflow));
        }
        self.wait_for_idle();

        unsafe {
            self.spi_dma.start_half_duplex_read(
                data_mode,
                cmd,
                address,
                dummy,
                buffer.len(),
                &mut self.rx_buf,
            )?;
        }

        self.wait_for_idle();

        buffer.copy_from_slice(&self.rx_buf.as_slice()[..buffer.len()]);

        Ok(())
    }

    /// Half-duplex write.
    ///
    /// Unlike [`Self::write`], this is not chunked: `buffer` must fit in the
    /// TX DMA buffer or a [`DmaError::Overflow`] error is returned.
    #[instability::unstable]
    pub fn half_duplex_write(
        &mut self,
        data_mode: DataMode,
        cmd: Command,
        address: Address,
        dummy: u8,
        buffer: &[u8],
    ) -> Result<(), Error> {
        if buffer.len() > self.tx_buf.capacity() {
            return Err(Error::from(DmaError::Overflow));
        }
        self.wait_for_idle();
        self.tx_buf.as_mut_slice()[..buffer.len()].copy_from_slice(buffer);

        unsafe {
            self.spi_dma.start_half_duplex_write(
                data_mode,
                cmd,
                address,
                dummy,
                buffer.len(),
                &mut self.tx_buf,
            )?;
        }

        self.wait_for_idle();

        Ok(())
    }
}
1240
#[instability::unstable]
impl crate::interrupt::InterruptConfigurable for SpiDmaBus<'_, Blocking> {
    /// Sets the interrupt handler
    ///
    /// Interrupts are not enabled at the peripheral level here.
    fn set_interrupt_handler(&mut self, handler: InterruptHandler) {
        // Delegates to the wrapped `SpiDma` driver.
        self.spi_dma.set_interrupt_handler(handler);
    }
}
1250
1251#[instability::unstable]
1252impl<Dm> embassy_embedded_hal::SetConfig for SpiDmaBus<'_, Dm>
1253where
1254    Dm: DriverMode,
1255{
1256    type Config = Config;
1257    type ConfigError = ConfigError;
1258
1259    fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
1260        self.apply_config(config)
1261    }
1262}
1263
/// Internal helper bundling the low-level SPI driver with the DMA-specific
/// state needed to run transfers.
pub(super) struct DmaDriver {
    // Register-level SPI driver.
    driver: Driver,
    // Identifies this SPI instance to the DMA engine.
    dma_peripheral: crate::dma::DmaPeripheral,
    // Per-peripheral shared state (transfer flags, placeholder buffers).
    state: &'static DmaState,
}
1269
impl DmaDriver {
    // Returns the shared placeholder RX buffer used when a transfer has no
    // receive half.
    //
    // # Safety
    //
    // The caller must uphold the safety requirements of
    // `DmaState::empty_rx_buffer` (aliasing rules for the returned
    // reference).
    unsafe fn empty_rx_buffer(&self) -> &'static mut DmaRxBuf {
        unsafe { self.state.empty_rx_buffer() }
    }

    // Returns the shared placeholder TX buffer used when a transfer has no
    // transmit half.
    //
    // # Safety
    //
    // The caller must uphold the safety requirements of
    // `DmaState::empty_tx_buffer` (aliasing rules for the returned
    // reference).
    unsafe fn empty_tx_buffer(&self) -> &'static mut DmaTxBuf {
        unsafe { self.state.empty_tx_buffer() }
    }

    // Stops an in-progress transfer by shrinking the peripheral's data
    // length counter.
    fn abort_transfer(&self) {
        // The SPI peripheral is controlling how much data we transfer, so let's
        // update its counter.
        // 0 doesn't take effect on ESP32 and cuts the currently transmitted byte
        // immediately.
        // 1 seems to stop after transmitting the current byte which is somewhat less
        // impolite.
        self.driver.configure_datalen(1, 1);
        self.driver.update();
    }

    // Shorthand for the SPI peripheral's register block.
    fn regs(&self) -> &RegisterBlock {
        self.driver.regs()
    }

    // Configures the peripheral and DMA channel, then kicks off a transfer
    // of `rx_len`/`tx_len` bytes. A length of 0 disables that direction.
    //
    // # Safety
    //
    // Starts a DMA transfer into/out of the given buffers; the caller must
    // keep them valid and un-aliased until the transfer completes or is
    // cancelled.
    #[cfg_attr(place_spi_master_driver_in_ram, ram)]
    unsafe fn start_transfer_dma<Dm: DriverMode>(
        &self,
        _full_duplex: bool,
        rx_len: usize,
        tx_len: usize,
        rx_buffer: &mut impl DmaRxBuffer,
        tx_buffer: &mut impl DmaTxBuffer,
        channel: &mut Channel<Dm, PeripheralDmaChannel<AnySpi<'_>>>,
    ) -> Result<(), Error> {
        #[cfg(esp32s2)]
        {
            // without this a transfer after a write will fail
            self.regs().dma_out_link().write(|w| unsafe { w.bits(0) });
            self.regs().dma_in_link().write(|w| unsafe { w.bits(0) });
        }

        self.driver.configure_datalen(rx_len, tx_len);

        // enable the MISO and MOSI if needed
        self.regs()
            .user()
            .modify(|_, w| w.usr_miso().bit(rx_len > 0).usr_mosi().bit(tx_len > 0));

        self.enable_dma();

        if rx_len > 0 {
            unsafe {
                channel
                    .rx
                    .prepare_transfer(self.dma_peripheral, rx_buffer)
                    .and_then(|_| channel.rx.start_transfer())?;
            }
        } else {
            #[cfg(esp32)]
            {
                // see https://github.com/espressif/esp-idf/commit/366e4397e9dae9d93fe69ea9d389b5743295886f
                // see https://github.com/espressif/esp-idf/commit/0c3653b1fd7151001143451d4aa95dbf15ee8506
                if _full_duplex {
                    self.regs()
                        .dma_in_link()
                        .modify(|_, w| unsafe { w.inlink_addr().bits(0) });
                    self.regs()
                        .dma_in_link()
                        .modify(|_, w| w.inlink_start().set_bit());
                }
            }
        }
        if tx_len > 0 {
            unsafe {
                channel
                    .tx
                    .prepare_transfer(self.dma_peripheral, tx_buffer)
                    .and_then(|_| channel.tx.start_transfer())?;
            }
        }

        #[cfg(dma_kind = "gdma")]
        self.reset_dma();

        self.driver.start_operation();

        Ok(())
    }

    // Enables DMA for the SPI peripheral (GDMA chips) and/or resets the DMA
    // state (PDMA chips).
    fn enable_dma(&self) {
        #[cfg(dma_kind = "gdma")]
        // for non GDMA this is done in `assign_tx_device` / `assign_rx_device`
        self.regs().dma_conf().modify(|_, w| {
            w.dma_tx_ena().set_bit();
            w.dma_rx_ena().set_bit()
        });

        #[cfg(dma_kind = "pdma")]
        self.reset_dma();
    }

    // Pulses the DMA/FIFO reset bits and clears pending DMA interrupts.
    fn reset_dma(&self) {
        #[cfg(dma_kind = "pdma")]
        self.regs().dma_conf().toggle(|w, bit| {
            w.out_rst().bit(bit);
            w.in_rst().bit(bit);
            w.ahbm_fifo_rst().bit(bit);
            w.ahbm_rst().bit(bit)
        });

        #[cfg(dma_kind = "gdma")]
        self.regs().dma_conf().toggle(|w, bit| {
            w.rx_afifo_rst().bit(bit);
            w.buf_afifo_rst().bit(bit);
            w.dma_afifo_rst().bit(bit)
        });

        self.clear_dma_interrupts();
    }

    // Clears all DMA-related interrupt flags (GDMA register layout).
    #[cfg(dma_kind = "gdma")]
    fn clear_dma_interrupts(&self) {
        self.regs().dma_int_clr().write(|w| {
            w.dma_infifo_full_err().clear_bit_by_one();
            w.dma_outfifo_empty_err().clear_bit_by_one();
            w.trans_done().clear_bit_by_one();
            w.mst_rx_afifo_wfull_err().clear_bit_by_one();
            w.mst_tx_afifo_rempty_err().clear_bit_by_one()
        });
    }

    // Clears all DMA-related interrupt flags (PDMA register layout).
    #[cfg(dma_kind = "pdma")]
    fn clear_dma_interrupts(&self) {
        self.regs().dma_int_clr().write(|w| {
            w.inlink_dscr_empty().clear_bit_by_one();
            w.outlink_dscr_error().clear_bit_by_one();
            w.inlink_dscr_error().clear_bit_by_one();
            w.in_done().clear_bit_by_one();
            w.in_err_eof().clear_bit_by_one();
            w.in_suc_eof().clear_bit_by_one();
            w.out_done().clear_bit_by_one();
            w.out_eof().clear_bit_by_one();
            w.out_total_eof().clear_bit_by_one()
        });
    }
}
1416
impl<'d> DmaEligible for AnySpi<'d> {
    // The channel type depends on the chip's DMA architecture (GDMA vs PDMA).
    #[cfg(dma_kind = "gdma")]
    type Dma = crate::dma::AnyGdmaChannel<'d>;
    #[cfg(dma_kind = "pdma")]
    type Dma = crate::dma::AnySpiDmaChannel<'d>;

    fn dma_peripheral(&self) -> crate::dma::DmaPeripheral {
        let (info, _state) = self.dma_parts();
        info.dma_peripheral
    }
}
1428
#[instability::unstable]
impl embedded_hal_async::spi::SpiBus for SpiDmaBus<'_, Async> {
    // All trait methods delegate to the inherent `*_async` implementations.
    async fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
        self.read_async(words).await
    }

    async fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
        self.write_async(words).await
    }

    async fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> {
        self.transfer_async(read, write).await
    }

    async fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
        self.transfer_in_place_async(words).await
    }

    async fn flush(&mut self) -> Result<(), Self::Error> {
        // All operations currently flush so this is no-op.
        Ok(())
    }
}
1452
1453#[instability::unstable]
1454impl<Dm> ErrorType for SpiDmaBus<'_, Dm>
1455where
1456    Dm: DriverMode,
1457{
1458    type Error = Error;
1459}
1460
#[instability::unstable]
impl<Dm> SpiBus for SpiDmaBus<'_, Dm>
where
    Dm: DriverMode,
{
    // All trait methods delegate to the inherent blocking implementations
    // of the same name.
    fn read(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
        self.read(words)
    }

    fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
        self.write(words)
    }

    fn transfer(&mut self, read: &mut [u8], write: &[u8]) -> Result<(), Self::Error> {
        self.transfer(read, write)
    }

    fn transfer_in_place(&mut self, words: &mut [u8]) -> Result<(), Self::Error> {
        self.transfer_in_place(words)
    }

    fn flush(&mut self) -> Result<(), Self::Error> {
        // All operations currently flush so this is no-op.
        Ok(())
    }
}
1487
// Static, per-peripheral DMA metadata (see `dma_parts`).
struct DmaInfo {
    // Which DMA peripheral endpoint this SPI instance maps to.
    dma_peripheral: crate::dma::DmaPeripheral,
}
// Mutable, per-peripheral DMA state, stored in a `static` (see `dma_parts`).
struct DmaState {
    // Whether a TX / RX transfer is currently running.
    tx_transfer_in_progress: Cell<bool>,
    rx_transfer_in_progress: Cell<bool>,

    // Placeholder buffers used for the unused direction of a one-way
    // transfer; lazily initialized (hence `MaybeUninit`).
    empty_rx_buffer: UnsafeCell<MaybeUninit<DmaRxBuf>>,
    empty_tx_buffer: UnsafeCell<MaybeUninit<DmaTxBuf>>,
}
1498
impl DmaState {
    // Syntactic helper to get a mutable reference to the "empty" RX DMA buffer.
    //
    // # Safety
    //
    // The caller must ensure that Rust's aliasing rules are upheld.
    //
    // NOTE(review): `assume_init_mut` additionally requires the buffer to
    // have been initialized — presumably during driver construction;
    // confirm before calling.
    #[allow(
        clippy::mut_from_ref,
        reason = "Safety requirements ensure this is okay"
    )]
    unsafe fn empty_rx_buffer(&self) -> &mut DmaRxBuf {
        unsafe { (&mut *self.empty_rx_buffer.get()).assume_init_mut() }
    }

    // Syntactic helper to get a mutable reference to the "empty" TX DMA buffer.
    //
    // # Safety
    //
    // The caller must ensure that Rust's aliasing rules are upheld.
    //
    // NOTE(review): `assume_init_mut` additionally requires the buffer to
    // have been initialized — presumably during driver construction;
    // confirm before calling.
    #[allow(
        clippy::mut_from_ref,
        reason = "Safety requirements ensure this is okay"
    )]
    unsafe fn empty_tx_buffer(&self) -> &mut DmaTxBuf {
        unsafe { (&mut *self.empty_tx_buffer.get()).assume_init_mut() }
    }
}
1526
// SAFETY: State belongs to the currently constructed driver instance. As such, it'll not be
// accessed concurrently in multiple threads. The `Sync` impl is required only because
// `DmaState` is stored in `static`s (see `dma_parts`).
unsafe impl Sync for DmaState {}
1530
// Generates `dma_parts`/`dma_state`/`dma_info` accessors with one static
// `DmaInfo`/`DmaState` pair per SPI peripheral variant.
for_each_spi_master!(
    (all $( ($peri:ident, $sys:ident, $sclk:ident $_cs:tt $_sio:tt $(, $is_qspi:tt)?)),* ) => {
        impl AnySpi<'_> {
            #[inline(always)]
            fn dma_parts(&self) -> (&'static DmaInfo, &'static DmaState) {
                match &self.0 {
                    $(
                        super::any::Inner::$sys(_spi) => {
                            // Statics are declared inside the match arm, so
                            // each peripheral gets its own instances.
                            static DMA_INFO: DmaInfo = DmaInfo {
                                dma_peripheral: crate::dma::DmaPeripheral::$sys,
                            };

                            static DMA_STATE: DmaState = DmaState {
                                tx_transfer_in_progress: Cell::new(false),
                                rx_transfer_in_progress: Cell::new(false),

                                empty_rx_buffer: UnsafeCell::new(MaybeUninit::uninit()),
                                empty_tx_buffer: UnsafeCell::new(MaybeUninit::uninit()),
                            };

                            (&DMA_INFO, &DMA_STATE)
                        }
                    )*
                }
            }

            // Convenience accessor for just the `DmaState` half.
            #[inline(always)]
            fn dma_state(&self) -> &'static DmaState {
                let (_, state) = self.dma_parts();
                state
            }

            // Convenience accessor for just the `DmaInfo` half.
            #[inline(always)]
            fn dma_info(&self) -> &'static DmaInfo {
                let (info, _) = self.dma_parts();
                info
            }
        }
    };
);
1571
impl SpiWrapper<'_> {
    // Forwards to the wrapped SPI instance's per-peripheral DMA state.
    fn dma_state(&self) -> &'static DmaState {
        self.spi.dma_state()
    }

    // Forwards to the wrapped SPI instance's DMA peripheral identifier.
    #[inline(always)]
    fn dma_peripheral(&self) -> crate::dma::DmaPeripheral {
        self.spi.dma_peripheral()
    }
}