Reimplement SPI DMA operations in terms of transfer (#2308)

* Reimplement SPI DMA operations in terms of transfer

* Deduplicate reset_dma

* Create empty constructors

* Introduce EmptyBuf

* Fix comment

* Undo unnecessary change

* Remove unnecessary slicing
This commit is contained in:
Dániel Buga 2024-10-09 01:15:21 +02:00 committed by GitHub
parent b27482df3c
commit dc88cb13e8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 139 additions and 162 deletions

View File

@ -913,3 +913,54 @@ impl DmaRxStreamBufView {
} }
} }
} }
// Single shared DMA descriptor backing every `EmptyBuf` preparation.
// NOTE(review): this is a `static mut` whose address is handed out via
// `as_mut_ptr()` from both the TX and RX impls below — presumably sound
// because the reported length is always 0 and the descriptor is never
// walked for a zero-length transfer; confirm against how the DMA driver
// consumes `Preparation::start`.
static mut EMPTY: [DmaDescriptor; 1] = [DmaDescriptor::EMPTY];
/// An empty buffer that can be used when you don't need to transfer any data.
///
/// Intended for one-directional DMA operations: pass `EmptyBuf` as the unused
/// half (e.g. the TX side of a pure read). Its `length()` is always 0, which
/// lets drivers skip starting that direction entirely.
pub struct EmptyBuf;

// SAFETY(review): the `DmaTxBuffer` contract is declared outside this chunk.
// This impl describes no payload memory at all — it only exposes the shared
// `EMPTY` descriptor and reports a zero length — so no data is ever read by
// the DMA engine; confirm this satisfies the trait's safety requirements.
unsafe impl DmaTxBuffer for EmptyBuf {
    // Stateless buffer: it serves as its own in-flight view.
    type View = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            // Hand the engine the shared empty descriptor; with length 0
            // nothing is transferred from it.
            start: unsafe { EMPTY.as_mut_ptr() },
            block_size: None,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }

    // Always 0: there is no data to send.
    fn length(&self) -> usize {
        0
    }
}
// SAFETY(review): the `DmaRxBuffer` contract is declared outside this chunk.
// As with the TX impl, no writable payload memory is described — only the
// shared `EMPTY` descriptor with a reported length of 0 — so the DMA engine
// never writes through it; confirm this satisfies the trait's requirements.
unsafe impl DmaRxBuffer for EmptyBuf {
    // Stateless buffer: it serves as its own in-flight view.
    type View = EmptyBuf;

    fn prepare(&mut self) -> Preparation {
        Preparation {
            // Same shared empty descriptor as the TX side; with length 0
            // nothing is received into it.
            start: unsafe { EMPTY.as_mut_ptr() },
            block_size: None,
        }
    }

    fn into_view(self) -> EmptyBuf {
        self
    }

    fn from_view(view: Self::View) -> Self {
        view
    }

    // Always 0: there is no data to receive.
    fn length(&self) -> usize {
        0
    }
}

View File

@ -944,6 +944,7 @@ mod dma {
DmaRxBuffer, DmaRxBuffer,
DmaTxBuf, DmaTxBuf,
DmaTxBuffer, DmaTxBuffer,
EmptyBuf,
Rx, Rx,
Tx, Tx,
}, },
@ -1121,40 +1122,6 @@ mod dma {
.await; .await;
} }
/// # Safety:
///
/// The caller must ensure to not access the buffer contents while the
/// transfer is in progress. Moving the buffer itself is allowed.
#[cfg_attr(place_spi_driver_in_ram, ram)]
unsafe fn start_write_bytes_dma<TX: DmaTxBuffer>(
&mut self,
buffer: &mut TX,
full_duplex: bool,
) -> Result<(), Error> {
self.tx_transfer_in_progress = buffer.length() > 0;
unsafe {
self.spi
.start_write_bytes_dma(buffer, &mut self.channel.tx, full_duplex)
}
}
/// # Safety:
///
/// The caller must ensure to not access the buffer contents while the
/// transfer is in progress. Moving the buffer itself is allowed.
#[cfg_attr(place_spi_driver_in_ram, ram)]
unsafe fn start_read_bytes_dma<RX: DmaRxBuffer>(
&mut self,
buffer: &mut RX,
full_duplex: bool,
) -> Result<(), Error> {
self.rx_transfer_in_progress = buffer.length() > 0;
unsafe {
self.spi
.start_read_bytes_dma(buffer, &mut self.channel.rx, full_duplex)
}
}
/// # Safety: /// # Safety:
/// ///
/// The caller must ensure to not access the buffer contents while the /// The caller must ensure to not access the buffer contents while the
@ -1162,6 +1129,7 @@ mod dma {
#[cfg_attr(place_spi_driver_in_ram, ram)] #[cfg_attr(place_spi_driver_in_ram, ram)]
unsafe fn start_transfer_dma<RX: DmaRxBuffer, TX: DmaTxBuffer>( unsafe fn start_transfer_dma<RX: DmaRxBuffer, TX: DmaTxBuffer>(
&mut self, &mut self,
full_duplex: bool,
rx_buffer: &mut RX, rx_buffer: &mut RX,
tx_buffer: &mut TX, tx_buffer: &mut TX,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -1169,6 +1137,7 @@ mod dma {
self.tx_transfer_in_progress = tx_buffer.length() > 0; self.tx_transfer_in_progress = tx_buffer.length() > 0;
unsafe { unsafe {
self.spi.start_transfer_dma( self.spi.start_transfer_dma(
full_duplex,
rx_buffer, rx_buffer,
tx_buffer, tx_buffer,
&mut self.channel.rx, &mut self.channel.rx,
@ -1203,10 +1172,12 @@ mod dma {
self.tx_transfer_in_progress = true; self.tx_transfer_in_progress = true;
unsafe { unsafe {
self.spi.start_write_bytes_dma( self.spi.start_transfer_dma(
&mut self.address_buffer,
&mut self.channel.tx,
false, false,
&mut EmptyBuf,
&mut self.address_buffer,
&mut self.channel.rx,
&mut self.channel.tx,
) )
} }
} }
@ -1387,7 +1358,13 @@ mod dma {
return Err(Error::MaxDmaTransferSizeExceeded); return Err(Error::MaxDmaTransferSizeExceeded);
} }
self.start_write_bytes_dma(buffer, true) self.spi.start_transfer_dma(
true,
&mut EmptyBuf,
buffer,
&mut self.channel.rx,
&mut self.channel.tx,
)
} }
/// Perform a DMA write. /// Perform a DMA write.
@ -1420,7 +1397,13 @@ mod dma {
return Err(Error::MaxDmaTransferSizeExceeded); return Err(Error::MaxDmaTransferSizeExceeded);
} }
self.start_read_bytes_dma(buffer, true) self.spi.start_transfer_dma(
false,
buffer,
&mut EmptyBuf,
&mut self.channel.rx,
&mut self.channel.tx,
)
} }
/// Perform a DMA read. /// Perform a DMA read.
@ -1458,7 +1441,7 @@ mod dma {
return Err(Error::MaxDmaTransferSizeExceeded); return Err(Error::MaxDmaTransferSizeExceeded);
} }
self.start_transfer_dma(rx_buffer, tx_buffer) self.start_transfer_dma(true, rx_buffer, tx_buffer)
} }
/// Perform a DMA transfer /// Perform a DMA transfer
@ -1515,7 +1498,13 @@ mod dma {
data_mode, data_mode,
); );
self.start_read_bytes_dma(buffer, false) self.spi.start_transfer_dma(
false,
buffer,
&mut EmptyBuf,
&mut self.channel.rx,
&mut self.channel.tx,
)
} }
/// Perform a half-duplex read operation using DMA. /// Perform a half-duplex read operation using DMA.
@ -1576,7 +1565,13 @@ mod dma {
data_mode, data_mode,
); );
self.start_write_bytes_dma(buffer, false) self.spi.start_transfer_dma(
false,
&mut EmptyBuf,
buffer,
&mut self.channel.rx,
&mut self.channel.tx,
)
} }
/// Perform a half-duplex write operation using DMA. /// Perform a half-duplex write operation using DMA.
@ -1704,7 +1699,11 @@ mod dma {
for chunk in words.chunks_mut(self.rx_buf.capacity()) { for chunk in words.chunks_mut(self.rx_buf.capacity()) {
self.rx_buf.set_length(chunk.len()); self.rx_buf.set_length(chunk.len());
unsafe { self.spi_dma.start_dma_read(&mut self.rx_buf)? }; unsafe {
self.spi_dma
.start_dma_transfer(&mut self.rx_buf, &mut EmptyBuf)?;
}
self.wait_for_idle(); self.wait_for_idle();
let bytes_read = self.rx_buf.read_received_data(chunk); let bytes_read = self.rx_buf.read_received_data(chunk);
@ -1721,8 +1720,10 @@ mod dma {
self.tx_buf.fill(chunk); self.tx_buf.fill(chunk);
unsafe { unsafe {
self.spi_dma.start_dma_write(&mut self.tx_buf)?; self.spi_dma
.start_dma_transfer(&mut EmptyBuf, &mut self.tx_buf)?;
} }
self.wait_for_idle(); self.wait_for_idle();
} }
@ -1945,9 +1946,8 @@ mod dma {
let mut spi = DropGuard::new(&mut self.spi_dma, |spi| spi.cancel_transfer()); let mut spi = DropGuard::new(&mut self.spi_dma, |spi| spi.cancel_transfer());
unsafe { unsafe { spi.start_dma_transfer(&mut self.rx_buf, &mut EmptyBuf)? };
spi.start_dma_read(&mut self.rx_buf)?;
}
spi.wait_for_idle_async().await; spi.wait_for_idle_async().await;
let bytes_read = self.rx_buf.read_received_data(chunk); let bytes_read = self.rx_buf.read_received_data(chunk);
@ -1969,9 +1969,8 @@ mod dma {
for chunk in words.chunks(chunk_size) { for chunk in words.chunks(chunk_size) {
self.tx_buf.fill(chunk); self.tx_buf.fill(chunk);
unsafe { unsafe { spi.start_dma_transfer(&mut EmptyBuf, &mut self.tx_buf)? };
spi.start_dma_write(&mut self.tx_buf)?;
}
spi.wait_for_idle_async().await; spi.wait_for_idle_async().await;
} }
spi.defuse(); spi.defuse();
@ -2215,9 +2214,10 @@ mod ehal1 {
#[doc(hidden)] #[doc(hidden)]
pub trait InstanceDma: Instance + DmaEligible { pub trait InstanceDma: Instance + DmaEligible {
#[allow(clippy::too_many_arguments)] #[cfg_attr(place_spi_driver_in_ram, ram)]
unsafe fn start_transfer_dma<RX: Rx, TX: Tx>( unsafe fn start_transfer_dma<RX: Rx, TX: Tx>(
&mut self, &mut self,
_full_duplex: bool,
rx_buffer: &mut impl DmaRxBuffer, rx_buffer: &mut impl DmaRxBuffer,
tx_buffer: &mut impl DmaTxBuffer, tx_buffer: &mut impl DmaTxBuffer,
rx: &mut RX, rx: &mut RX,
@ -2232,102 +2232,40 @@ pub trait InstanceDma: Instance + DmaEligible {
reg_block.dma_in_link().write(|w| w.bits(0)); reg_block.dma_in_link().write(|w| w.bits(0));
} }
self.configure_datalen(rx_buffer.length(), tx_buffer.length()); let rx_len = rx_buffer.length();
let tx_len = tx_buffer.length();
self.configure_datalen(rx_len, tx_len);
// re-enable the MISO and MOSI // enable the MISO and MOSI if needed
reg_block reg_block
.user() .user()
.modify(|_, w| w.usr_miso().bit(true).usr_mosi().bit(true)); .modify(|_, w| w.usr_miso().bit(rx_len > 0).usr_mosi().bit(tx_len > 0));
self.enable_dma(); self.enable_dma();
rx.prepare_transfer(self.dma_peripheral(), rx_buffer) if rx_len > 0 {
.and_then(|_| rx.start_transfer())?; rx.prepare_transfer(self.dma_peripheral(), rx_buffer)
tx.prepare_transfer(self.dma_peripheral(), tx_buffer) .and_then(|_| rx.start_transfer())?;
.and_then(|_| tx.start_transfer())?; } else {
#[cfg(esp32)]
#[cfg(gdma)] {
self.reset_dma(); // see https://github.com/espressif/esp-idf/commit/366e4397e9dae9d93fe69ea9d389b5743295886f
// see https://github.com/espressif/esp-idf/commit/0c3653b1fd7151001143451d4aa95dbf15ee8506
self.start_operation(); if _full_duplex {
reg_block
Ok(()) .dma_in_link()
} .modify(|_, w| unsafe { w.inlink_addr().bits(0) });
reg_block
#[cfg_attr(place_spi_driver_in_ram, ram)] .dma_in_link()
unsafe fn start_write_bytes_dma<TX: Tx>( .modify(|_, w| w.inlink_start().set_bit());
&mut self, }
buffer: &mut impl DmaTxBuffer,
tx: &mut TX,
full_duplex: bool,
) -> Result<(), Error> {
let reg_block = self.register_block();
self.configure_datalen(0, buffer.length());
// disable MISO and re-enable MOSI (DON'T do it for half-duplex)
if full_duplex {
reg_block
.user()
.modify(|_, w| w.usr_miso().bit(false).usr_mosi().bit(true));
}
#[cfg(esp32)]
{
// see https://github.com/espressif/esp-idf/commit/366e4397e9dae9d93fe69ea9d389b5743295886f
// see https://github.com/espressif/esp-idf/commit/0c3653b1fd7151001143451d4aa95dbf15ee8506
if full_duplex {
reg_block
.dma_in_link()
.modify(|_, w| unsafe { w.inlink_addr().bits(0) });
reg_block
.dma_in_link()
.modify(|_, w| w.inlink_start().set_bit());
} }
} }
if tx_len > 0 {
self.enable_dma(); tx.prepare_transfer(self.dma_peripheral(), tx_buffer)
.and_then(|_| tx.start_transfer())?;
tx.prepare_transfer(self.dma_peripheral(), buffer)
.and_then(|_| tx.start_transfer())?;
#[cfg(gdma)]
self.reset_dma();
self.start_operation();
Ok(())
}
#[cfg_attr(place_spi_driver_in_ram, ram)]
unsafe fn start_read_bytes_dma<RX: Rx, BUF: DmaRxBuffer>(
&mut self,
buffer: &mut BUF,
rx: &mut RX,
full_duplex: bool,
) -> Result<(), Error> {
let reg_block = self.register_block();
#[cfg(esp32s2)]
{
// without this a read after a write will fail
reg_block.dma_out_link().write(|w| w.bits(0));
reg_block.dma_in_link().write(|w| w.bits(0));
} }
self.configure_datalen(buffer.length(), 0);
// re-enable MISO and disable MOSI (DON'T do it for half-duplex)
if full_duplex {
reg_block
.user()
.modify(|_, w| w.usr_miso().bit(true).usr_mosi().bit(false));
}
self.enable_dma();
rx.prepare_transfer(self.dma_peripheral(), buffer)
.and_then(|_| rx.start_transfer())?;
#[cfg(gdma)] #[cfg(gdma)]
self.reset_dma(); self.reset_dma();
@ -2351,36 +2289,24 @@ pub trait InstanceDma: Instance + DmaEligible {
} }
fn reset_dma(&self) { fn reset_dma(&self) {
#[cfg(pdma)] fn set_reset_bit(reg_block: &RegisterBlock, bit: bool) {
{ #[cfg(pdma)]
let reg_block = self.register_block();
reg_block.dma_conf().modify(|_, w| { reg_block.dma_conf().modify(|_, w| {
w.out_rst().set_bit(); w.out_rst().bit(bit);
w.in_rst().set_bit(); w.in_rst().bit(bit);
w.ahbm_fifo_rst().set_bit(); w.ahbm_fifo_rst().bit(bit);
w.ahbm_rst().set_bit() w.ahbm_rst().bit(bit)
}); });
#[cfg(gdma)]
reg_block.dma_conf().modify(|_, w| { reg_block.dma_conf().modify(|_, w| {
w.out_rst().clear_bit(); w.rx_afifo_rst().bit(bit);
w.in_rst().clear_bit(); w.buf_afifo_rst().bit(bit);
w.ahbm_fifo_rst().clear_bit(); w.dma_afifo_rst().bit(bit)
w.ahbm_rst().clear_bit()
});
}
#[cfg(gdma)]
{
let reg_block = self.register_block();
reg_block.dma_conf().modify(|_, w| {
w.rx_afifo_rst().set_bit();
w.buf_afifo_rst().set_bit();
w.dma_afifo_rst().set_bit()
});
reg_block.dma_conf().modify(|_, w| {
w.rx_afifo_rst().clear_bit();
w.buf_afifo_rst().clear_bit();
w.dma_afifo_rst().clear_bit()
}); });
} }
let reg_block = self.register_block();
set_reset_bit(reg_block, true);
set_reset_bit(reg_block, false);
self.clear_dma_interrupts(); self.clear_dma_interrupts();
} }