diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md index b273f6e0a..7ecd4fba1 100644 --- a/esp-hal/CHANGELOG.md +++ b/esp-hal/CHANGELOG.md @@ -12,9 +12,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - uart: Constructors now require TX and RX pins (#1592) - uart: Added `Uart::new_with_default_pins` constructor (#1592) - uart: Added `UartTx` and `UartRx` constructors (#1592) - - Add Flex / AnyFlex GPIO pin driver (#1659) - Add new `DmaError::UnsupportedMemoryRegion` - used memory regions are checked when preparing a transfer now (#1670) +- Add `DmaTransferTxOwned`, `DmaTransferRxOwned`, `DmaTransferTxRxOwned`; functions to do owning transfers added to SPI (#1672) ### Fixed diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs index 0eb86ed34..2b59c2266 100644 --- a/esp-hal/src/dma/mod.rs +++ b/esp-hal/src/dma/mod.rs @@ -113,6 +113,7 @@ impl DmaDescriptor { } } +use embedded_dma::{ReadBuffer, WriteBuffer}; use enumset::{EnumSet, EnumSetType}; #[cfg(gdma)] @@ -1690,6 +1691,226 @@ where } } +/// DMA transaction for TX transfers with moved-in/moved-out peripheral and +/// buffer +#[non_exhaustive] +#[must_use] +pub struct DmaTransferTxOwned<I, T> +where + I: dma_private::DmaSupportTx, + T: ReadBuffer, +{ + instance: I, + tx_buffer: T, +} + +impl<I, T> DmaTransferTxOwned<I, T> +where + I: dma_private::DmaSupportTx, + T: ReadBuffer, +{ + pub(crate) fn new(instance: I, tx_buffer: T) -> Self { + Self { + instance, + tx_buffer, + } + } + + /// Wait for the transfer to finish and return the peripheral and the + /// buffer. + pub fn wait(mut self) -> Result<(I, T), (DmaError, I, T)> { + self.instance.peripheral_wait_dma(true, false); + + let err = self.instance.tx().has_error(); + + // We need to have a `Drop` implementation, because we accept + // managed buffers that can free their memory on drop. Because of that + // we can't move out of the `Transfer`'s fields, so we use `ptr::read` + // and `mem::forget`. 
+ // + // NOTE(unsafe) There is no panic branch between getting the resources + // and forgetting `self`. + + let (instance, tx_buffer) = unsafe { + let instance = core::ptr::read(&self.instance); + let tx_buffer = core::ptr::read(&self.tx_buffer); + core::mem::forget(self); + + (instance, tx_buffer) + }; + + if err { + Err((DmaError::DescriptorError, instance, tx_buffer)) + } else { + Ok((instance, tx_buffer)) + } + } + + /// Check if the transfer is finished. + pub fn is_done(&mut self) -> bool { + self.instance.tx().is_done() + } +} + +impl<I, T> Drop for DmaTransferTxOwned<I, T> +where + I: dma_private::DmaSupportTx, + T: ReadBuffer, +{ + fn drop(&mut self) { + self.instance.peripheral_wait_dma(true, false); + } +} + +/// DMA transaction for RX transfers with moved-in/moved-out peripheral and +/// buffer +#[non_exhaustive] +#[must_use] +pub struct DmaTransferRxOwned<I, R> +where + I: dma_private::DmaSupportRx, + R: WriteBuffer, +{ + instance: I, + rx_buffer: R, +} + +impl<I, R> DmaTransferRxOwned<I, R> +where + I: dma_private::DmaSupportRx, + R: WriteBuffer, +{ + pub(crate) fn new(instance: I, rx_buffer: R) -> Self { + Self { + instance, + rx_buffer, + } + } + + /// Wait for the transfer to finish and return the peripheral and the + /// buffers. + pub fn wait(mut self) -> Result<(I, R), (DmaError, I, R)> { + self.instance.peripheral_wait_dma(false, true); + + let err = self.instance.rx().has_error(); + + // We need to have a `Drop` implementation, because we accept + // managed buffers that can free their memory on drop. Because of that + // we can't move out of the `Transfer`'s fields, so we use `ptr::read` + // and `mem::forget`. + // + // NOTE(unsafe) There is no panic branch between getting the resources + // and forgetting `self`. 
+ + let (instance, rx_buffer) = unsafe { + let instance = core::ptr::read(&self.instance); + let rx_buffer = core::ptr::read(&self.rx_buffer); + core::mem::forget(self); + + (instance, rx_buffer) + }; + + if err { + Err((DmaError::DescriptorError, instance, rx_buffer)) + } else { + Ok((instance, rx_buffer)) + } + } + + /// Check if the transfer is finished. + pub fn is_done(&mut self) -> bool { + self.instance.rx().is_done() + } +} + +impl<I, R> Drop for DmaTransferRxOwned<I, R> +where + I: dma_private::DmaSupportRx, + R: WriteBuffer, +{ + fn drop(&mut self) { + self.instance.peripheral_wait_dma(false, true); + } +} + +/// DMA transaction for TX+RX transfers with moved-in/moved-out peripheral and +/// buffers +#[non_exhaustive] +#[must_use] +pub struct DmaTransferTxRxOwned<I, T, R> +where + I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, + T: ReadBuffer, + R: WriteBuffer, +{ + instance: I, + tx_buffer: T, + rx_buffer: R, +} + +impl<I, T, R> DmaTransferTxRxOwned<I, T, R> +where + I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, + T: ReadBuffer, + R: WriteBuffer, +{ + pub(crate) fn new(instance: I, tx_buffer: T, rx_buffer: R) -> Self { + Self { + instance, + tx_buffer, + rx_buffer, + } + } + + /// Wait for the transfer to finish and return the peripheral and the + /// buffers. + #[allow(clippy::type_complexity)] + pub fn wait(mut self) -> Result<(I, T, R), (DmaError, I, T, R)> { + self.instance.peripheral_wait_dma(true, true); + + let err = self.instance.tx().has_error() || self.instance.rx().has_error(); + + // We need to have a `Drop` implementation, because we accept + // managed buffers that can free their memory on drop. Because of that + // we can't move out of the `Transfer`'s fields, so we use `ptr::read` + // and `mem::forget`. + // + // NOTE(unsafe) There is no panic branch between getting the resources + // and forgetting `self`. 
+ + let (instance, tx_buffer, rx_buffer) = unsafe { + let instance = core::ptr::read(&self.instance); + let tx_buffer = core::ptr::read(&self.tx_buffer); + let rx_buffer = core::ptr::read(&self.rx_buffer); + core::mem::forget(self); + + (instance, tx_buffer, rx_buffer) + }; + + if err { + Err((DmaError::DescriptorError, instance, tx_buffer, rx_buffer)) + } else { + Ok((instance, tx_buffer, rx_buffer)) + } + } + + /// Check if the transfer is finished. + pub fn is_done(&mut self) -> bool { + self.instance.tx().is_done() && self.instance.rx().is_done() + } +} + +impl<I, T, R> Drop for DmaTransferTxRxOwned<I, T, R> +where + I: dma_private::DmaSupportTx + dma_private::DmaSupportRx, + T: ReadBuffer, + R: WriteBuffer, +{ + fn drop(&mut self) { + self.instance.peripheral_wait_dma(true, true); + } +} + /// DMA transaction for TX only circular transfers #[non_exhaustive] #[must_use] diff --git a/esp-hal/src/spi/master.rs b/esp-hal/src/spi/master.rs index 0eb6a7310..3e2b24612 100644 --- a/esp-hal/src/spi/master.rs +++ b/esp-hal/src/spi/master.rs @@ -846,8 +846,11 @@ pub mod dma { Channel, ChannelTypes, DmaTransferRx, + DmaTransferRxOwned, DmaTransferTx, + DmaTransferTxOwned, DmaTransferTxRx, + DmaTransferTxRxOwned, Spi2Peripheral, SpiPeripheral, TxPrivate, @@ -1068,6 +1071,32 @@ pub mod dma { &'t mut self, words: &'t TXBUF, ) -> Result, super::Error> + where + TXBUF: ReadBuffer, + { + self.dma_write_start(words)?; + Ok(DmaTransferTx::new(self)) + } + + /// Perform a DMA write. + /// + /// This will return a [DmaTransferTxOwned] owning the buffer and the + /// SPI instance. The maximum amount of data to be sent is 32736 + /// bytes. 
+ #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] + pub fn dma_write_owned( + mut self, + words: TXBUF, + ) -> Result, super::Error> + where + TXBUF: ReadBuffer, + { + self.dma_write_start(&words)?; + Ok(DmaTransferTxOwned::new(self, words)) + } + + #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] + fn dma_write_start<'t, TXBUF>(&'t mut self, words: &'t TXBUF) -> Result<(), super::Error> where TXBUF: ReadBuffer, { @@ -1081,7 +1110,7 @@ pub mod dma { self.spi .start_write_bytes_dma(ptr, len, &mut self.channel.tx, false)?; } - Ok(DmaTransferTx::new(self)) + Ok(()) } /// Perform a DMA read. @@ -1093,6 +1122,32 @@ pub mod dma { &'t mut self, words: &'t mut RXBUF, ) -> Result, super::Error> + where + RXBUF: WriteBuffer, + { + self.dma_read_start(words)?; + Ok(DmaTransferRx::new(self)) + } + + /// Perform a DMA read. + /// + /// This will return a [DmaTransferRxOwned] owning the buffer and + /// the SPI instance. The maximum amount of data to be + /// received is 32736 bytes. + #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] + pub fn dma_read_owned( + mut self, + mut words: RXBUF, + ) -> Result, super::Error> + where + RXBUF: WriteBuffer, + { + self.dma_read_start(&mut words)?; + Ok(DmaTransferRxOwned::new(self, words)) + } + + #[cfg_attr(feature = "place-spi-driver-in-ram", ram)] + fn dma_read_start<'t, RXBUF>(&'t mut self, words: &'t mut RXBUF) -> Result<(), super::Error> where RXBUF: WriteBuffer, { @@ -1106,19 +1161,50 @@ pub mod dma { self.spi .start_read_bytes_dma(ptr, len, &mut self.channel.rx, false)?; } - Ok(DmaTransferRx::new(self)) + + Ok(()) } /// Perform a DMA transfer. /// - /// This will return a [DmaTransferTxRx] owning the buffer(s) and the - /// SPI instance. The maximum amount of data to be sent/received - /// is 32736 bytes. + /// This will return a [DmaTransferTxRx]. + /// The maximum amount of data to be sent/received is 32736 bytes. 
pub fn dma_transfer<'t, TXBUF, RXBUF>( &'t mut self, words: &'t TXBUF, read_buffer: &'t mut RXBUF, ) -> Result, super::Error> + where + TXBUF: ReadBuffer, + RXBUF: WriteBuffer, + { + self.dma_transfer_start(words, read_buffer)?; + Ok(DmaTransferTxRx::new(self)) + } + + /// Perform a DMA transfer + /// + /// This will return a [DmaTransferTxRxOwned] owning the buffers and + /// the SPI instance. The maximum amount of data to be + /// sent/received is 32736 bytes. + pub fn dma_transfer_owned( + mut self, + words: TXBUF, + mut read_buffer: RXBUF, + ) -> Result, super::Error> + where + TXBUF: ReadBuffer, + RXBUF: WriteBuffer, + { + self.dma_transfer_start(&words, &mut read_buffer)?; + Ok(DmaTransferTxRxOwned::new(self, words, read_buffer)) + } + + fn dma_transfer_start<'t, TXBUF, RXBUF>( + &'t mut self, + words: &'t TXBUF, + read_buffer: &'t mut RXBUF, + ) -> Result<(), super::Error> where TXBUF: ReadBuffer, RXBUF: WriteBuffer, @@ -1140,7 +1226,8 @@ pub mod dma { &mut self.channel.rx, )?; } - Ok(DmaTransferTxRx::new(self)) + + Ok(()) } } diff --git a/hil-test/tests/spi_full_duplex_dma.rs b/hil-test/tests/spi_full_duplex_dma.rs index e9568aae3..8542a9d19 100644 --- a/hil-test/tests/spi_full_duplex_dma.rs +++ b/hil-test/tests/spi_full_duplex_dma.rs @@ -267,4 +267,52 @@ mod tests { )) )); } + + #[test] + #[timeout(3)] + fn test_symmetric_dma_transfer_owned() { + const DMA_BUFFER_SIZE: usize = 4096; + + let peripherals = Peripherals::take(); + let system = SystemControl::new(peripherals.SYSTEM); + let clocks = ClockControl::boot_defaults(system.clock_control).freeze(); + + let io = Io::new(peripherals.GPIO, peripherals.IO_MUX); + let sclk = io.pins.gpio0; + let miso = io.pins.gpio2; + let mosi = io.pins.gpio4; + let cs = io.pins.gpio5; + + let dma = Dma::new(peripherals.DMA); + + #[cfg(any(feature = "esp32", feature = "esp32s2"))] + let dma_channel = dma.spi2channel; + #[cfg(not(any(feature = "esp32", feature = "esp32s2")))] + let dma_channel = dma.channel0; + + let 
(tx_buffer, mut tx_descriptors, rx_buffer, mut rx_descriptors) = + dma_buffers!(DMA_BUFFER_SIZE); + + let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks) + .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs)) + .with_dma(dma_channel.configure( + false, + &mut tx_descriptors, + &mut rx_descriptors, + DmaPriority::Priority0, + )); + + // DMA buffers require a static lifetime + let send = tx_buffer; + let receive = rx_buffer; + + send.copy_from_slice(&[0x55u8; 4096]); + for byte in 0..send.len() { + send[byte] = byte as u8; + } + + let transfer = spi.dma_transfer_owned(send, receive).unwrap(); + let (_, send, receive) = transfer.wait().unwrap(); + assert_eq!(send, receive); + } }