//! SPI Full Duplex DMA Test

//% CHIPS: esp32 esp32c2 esp32c3 esp32c6 esp32h2 esp32s2 esp32s3

#![no_std]
#![no_main]

use esp_hal::{
    dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf},
    dma_buffers,
    gpio::Io,
    peripherals::SPI2,
    prelude::*,
    spi::{
        master::{Spi, SpiDma},
        FullDuplexMode,
        SpiMode,
    },
    Blocking,
};
use hil_test as _;

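// ESP32 and ESP32-S2 tie DMA channels to specific peripherals, so SPI2 gets its
// dedicated `Spi2DmaChannel`; the other supported chips have a general-purpose
// GDMA whose `DmaChannel0` can serve any peripheral. The alias below keeps the
// rest of the test chip-agnostic.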
cfg_if::cfg_if! {
    if #[cfg(any(
        feature = "esp32",
        feature = "esp32s2",
    ))] {
        use esp_hal::dma::Spi2DmaChannel as DmaChannel0;
    } else {
        use esp_hal::dma::DmaChannel0;
    }
}

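// Everything a test case needs: the blocking SpiDma driver that the `#[init]`
// function below constructs and hands to each `#[test]`.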
struct Context {
    spi: SpiDma<'static, SPI2, DmaChannel0, FullDuplexMode, Blocking>,
}

#[cfg(test)]
#[embedded_test::tests]
mod tests {
    use defmt::assert_eq;

    use super::*;

    #[init]
    fn init() -> Context {
        let peripherals = esp_hal::init(esp_hal::Config::default());

        let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
        let sclk = io.pins.gpio0;
        let (_, mosi) = hil_test::common_test_pins!(io);

        let dma = Dma::new(peripherals.DMA);

        cfg_if::cfg_if! {
            if #[cfg(any(feature = "esp32", feature = "esp32s2"))] {
                let dma_channel = dma.spi2channel;
            } else {
                let dma_channel = dma.channel0;
            }
        }

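        // Route the MOSI pin's signal back in as MISO through the GPIO matrix, so
        // every transmitted byte is also received and can be checked without any
        // external wiring.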
        let miso = mosi.peripheral_input();
        let spi = Spi::new(peripherals.SPI2, 10000.kHz(), SpiMode::Mode0)
            .with_sck(sclk)
            .with_mosi(mosi)
            .with_miso(miso)
            .with_dma(dma_channel.configure(false, DmaPriority::Priority0));

        Context { spi }
    }

    #[test]
    fn test_symmetric_dma_transfer(ctx: Context) {
        // This test case sends a large amount of data several times to verify that
        // https://github.com/esp-rs/esp-hal/issues/2151 is and remains fixed.
        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(32000);
        let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
        let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

        for (i, v) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() {
            *v = (i % 255) as u8;
        }

        let mut spi = ctx.spi;
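        // Each pass stamps the first and last byte with the loop index, so data left
        // over from an earlier transfer would fail the comparison below.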
        for i in 0..4 {
            dma_tx_buf.as_mut_slice()[0] = i as u8;
            *dma_tx_buf.as_mut_slice().last_mut().unwrap() = i as u8;
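            // `dma_transfer` takes the driver and both buffers by value; `wait()`
            // blocks until the transfer completes and hands them back for reuse.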
            let transfer = spi
                .dma_transfer(dma_rx_buf, dma_tx_buf)
                .map_err(|e| e.0)
                .unwrap();

            (spi, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
            assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice());
        }
    }

    #[test]
    #[timeout(3)]
    #[cfg(not(feature = "esp32s2"))]
    fn test_asymmetric_dma_transfer(ctx: Context) {
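        // The RX DMA buffer (2 bytes) is smaller than the TX buffer (4 bytes); only
        // the start of the received data is compared at the end.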
        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(2, 4);
        let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
        let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

        dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]);

        let transfer = ctx
            .spi
            .dma_transfer(dma_rx_buf, dma_tx_buf)
            .map_err(|e| e.0)
            .unwrap();
        let (_, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
        assert_eq!(dma_tx_buf.as_slice()[0..1], dma_rx_buf.as_slice()[0..1]);
    }

    #[test]
    #[timeout(3)]
    fn test_symmetric_dma_transfer_huge_buffer(ctx: Context) {
        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4096);
        let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
        let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

        for (i, d) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() {
            *d = i as _;
        }

        let transfer = ctx
            .spi
            .dma_transfer(dma_rx_buf, dma_tx_buf)
            .map_err(|e| e.0)
            .unwrap();
        let (_, (dma_rx_buf, dma_tx_buf)) = transfer.wait();
        assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice());
    }

    #[test]
    #[timeout(3)]
    fn test_dma_bus_symmetric_transfer(ctx: Context) {
        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4);
        let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
        let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

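        // `with_buffers` turns the raw SpiDma driver into a buffered bus that owns
        // these DMA buffers, so the tests below can transfer plain slices through it.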
        let mut spi = ctx.spi.with_buffers(dma_rx_buf, dma_tx_buf);

        let tx_buf = [0xde, 0xad, 0xbe, 0xef];
        let mut rx_buf = [0; 4];

        spi.transfer(&mut rx_buf, &tx_buf).unwrap();

        assert_eq!(tx_buf, rx_buf);
    }

    #[test]
    #[timeout(3)]
    fn test_dma_bus_asymmetric_transfer(ctx: Context) {
        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(4);
        let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
        let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

        let mut spi = ctx.spi.with_buffers(dma_rx_buf, dma_tx_buf);

        let tx_buf = [0xde, 0xad, 0xbe, 0xef];
        let mut rx_buf = [0; 4];

        spi.transfer(&mut rx_buf, &tx_buf).unwrap();

        assert_eq!(&tx_buf[0..1], &rx_buf[0..1]);
    }

    #[test]
    #[timeout(3)]
    fn test_dma_bus_symmetric_transfer_huge_buffer(ctx: Context) {
        const DMA_BUFFER_SIZE: usize = 4096;

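        // The bus's internal DMA buffers (40 bytes) are far smaller than the
        // DMA_BUFFER_SIZE-byte transfer, forcing the driver to split it into chunks.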
        let (rx_buffer, rx_descriptors, tx_buffer, tx_descriptors) = dma_buffers!(40);
        let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
        let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();

        let mut spi = ctx.spi.with_buffers(dma_rx_buf, dma_tx_buf);

        let tx_buf = core::array::from_fn(|i| i as _);
        let mut rx_buf = [0; DMA_BUFFER_SIZE];

        spi.transfer(&mut rx_buf, &tx_buf).unwrap();

        assert_eq!(tx_buf, rx_buf);
    }
}