[3/3] DMA Move API: Introduce DMA buffer objects (#1856)

* [3/3] DMA Move API: Introduce DMA buffer objects

* Remove FlashSafeDma

* Add async HIL test

* Handle set_length(0) correctly

* Fix tx/rx booleans

* Unlucky

* Preserve previous blocking semantics

* Add delay between starting DMA TX and SPI driver

* Update CHANGELOG

* merge tidy

* Add with_buffers builder

---------

Co-authored-by: Dominic Fischer <git@dominicfischer.me>
This commit is contained in:
Dominic Fischer 2024-08-20 12:47:55 +01:00 committed by GitHub
parent 05582d3ca9
commit 41f9925e2c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 1518 additions and 1290 deletions

View File

@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Introduce DMA buffer objects (#1856)
- Added new `Io::new_no_bind_interrupt` constructor (#1861)
- Added touch pad support for esp32 (#1873, #1956)
- Allow configuration of period updating method for MCPWM timers (#1898)
@ -20,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Peripheral driver constructors don't take `InterruptHandler`s anymore. Use `set_interrupt_handler` to explicitly set the interrupt handler now. (#1819)
- Migrate SPI driver to use DMA buffer objects (#1856)
- Use the peripheral ref pattern for `OneShotTimer` and `PeriodicTimer` (#1855)
- Improve SYSTIMER API (#1870)
- DMA: don't require `Sealed` to implement `ReadBuffer` and `WriteBuffer` (#1921)
@ -44,6 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- This package no longer re-exports the `esp_hal_procmacros::main` macro (#1828)
- The `AesFlavour` trait no longer has the `ENCRYPT_MODE`/`DECRYPT_MODE` associated constants (#1849)
- Removed `FlashSafeDma` (#1856)
## [0.19.0] - 2024-07-15

View File

@ -28,15 +28,12 @@
//! let mosi = io.pins.gpio4;
//! let cs = io.pins.gpio5;
//!
//! let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) =
//! dma_buffers!(32000);
//!
//! let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
//! .with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
//! .with_dma(dma_channel.configure(
//! false,
//! DmaPriority::Priority0,
//! ), tx_descriptors, rx_descriptors);
//! ));
//! # }
//! ```
//!
@ -50,7 +47,13 @@
//!
//! For convenience you can use the [crate::dma_buffers] macro.
use core::{fmt::Debug, marker::PhantomData, ptr::addr_of_mut, sync::atomic::compiler_fence};
use core::{
cmp::min,
fmt::Debug,
marker::PhantomData,
ptr::addr_of_mut,
sync::atomic::compiler_fence,
};
trait Word: crate::private::Sealed {}
@ -162,7 +165,7 @@ pub unsafe trait WriteBuffer {
///
/// Once this method has been called, it is unsafe to call any `&mut self`
/// methods, except for `write_buffer`, on this object as long as the
/// returned value is in use (by DMA).
/// returned value is in use (by DMA).
unsafe fn write_buffer(&mut self) -> (*mut u8, usize);
}
@ -275,7 +278,7 @@ use enumset::{EnumSet, EnumSetType};
pub use self::gdma::*;
#[cfg(pdma)]
pub use self::pdma::*;
use crate::{interrupt::InterruptHandler, Mode};
use crate::{interrupt::InterruptHandler, soc::is_slice_in_dram, Mode};
#[cfg(gdma)]
mod gdma;
@ -504,8 +507,7 @@ pub enum DmaError {
OutOfDescriptors,
/// DescriptorError the DMA rejected the descriptor configuration. This
/// could be because the source address of the data is not in RAM. Ensure
/// your source data is in a valid address space, or try using
/// [`crate::FlashSafeDma`] wrapper.
/// your source data is in a valid address space.
DescriptorError,
/// The available free buffer is less than the amount of data to push
Overflow,
@ -1123,6 +1125,12 @@ pub trait RxPrivate: crate::private::Sealed {
chain: &DescriptorChain,
) -> Result<(), DmaError>;
unsafe fn prepare_transfer(
&mut self,
peri: DmaPeripheral,
first_desc: *mut DmaDescriptor,
) -> Result<(), DmaError>;
fn start_transfer(&mut self) -> Result<(), DmaError>;
#[cfg(esp32s3)]
@ -1198,14 +1206,14 @@ where
unsafe fn prepare_transfer_without_start(
&mut self,
descriptors: &DescriptorChain,
first_desc: *mut DmaDescriptor,
peri: DmaPeripheral,
) -> Result<(), DmaError> {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
R::clear_in_interrupts();
R::reset_in();
R::set_in_descriptors(descriptors.first() as u32);
R::set_in_descriptors(first_desc as u32);
R::set_in_peripheral(peri as u8);
Ok(())
@ -1299,7 +1307,22 @@ where
}
}
self.rx_impl.prepare_transfer_without_start(chain, peri)
self.rx_impl
.prepare_transfer_without_start(chain.first() as _, peri)
}
unsafe fn prepare_transfer(
&mut self,
peri: DmaPeripheral,
first_desc: *mut DmaDescriptor,
) -> Result<(), DmaError> {
// TODO: Figure out burst mode for DmaBuf.
if self.burst_mode {
return Err(DmaError::InvalidAlignment);
}
self.rx_impl
.prepare_transfer_without_start(first_desc, peri)
}
fn start_transfer(&mut self) -> Result<(), DmaError> {
@ -1427,6 +1450,12 @@ pub trait TxPrivate: crate::private::Sealed {
chain: &DescriptorChain,
) -> Result<(), DmaError>;
unsafe fn prepare_transfer(
&mut self,
peri: DmaPeripheral,
desc: *mut DmaDescriptor,
) -> Result<(), DmaError>;
fn start_transfer(&mut self) -> Result<(), DmaError>;
#[cfg(esp32s3)]
@ -1482,14 +1511,14 @@ where
unsafe fn prepare_transfer_without_start(
&mut self,
descriptors: &DescriptorChain,
first_desc: *mut DmaDescriptor,
peri: DmaPeripheral,
) -> Result<(), DmaError> {
compiler_fence(core::sync::atomic::Ordering::SeqCst);
R::clear_out_interrupts();
R::reset_out();
R::set_out_descriptors(descriptors.first() as u32);
R::set_out_descriptors(first_desc as u32);
R::set_out_peripheral(peri as u8);
Ok(())
@ -1606,7 +1635,16 @@ where
crate::soc::cache_writeback_addr(des.buffer as u32, des.size() as u32);
}
}
self.tx_impl.prepare_transfer_without_start(chain, peri)
self.tx_impl
.prepare_transfer_without_start(chain.first() as _, peri)
}
unsafe fn prepare_transfer(
&mut self,
peri: DmaPeripheral,
desc: *mut DmaDescriptor,
) -> Result<(), DmaError> {
self.tx_impl.prepare_transfer_without_start(desc, peri)
}
fn start_transfer(&mut self) -> Result<(), DmaError> {
@ -1840,6 +1878,519 @@ where
}
}
/// Error returned from Dma[Tx|Rx|TxRx]Buf operations.
#[derive(Debug)]
pub enum DmaBufError {
    /// More descriptors are needed for the buffer size (each descriptor can
    /// cover at most 4092 bytes of buffer)
    InsufficientDescriptors,
    /// Descriptors or buffers are not located in a supported memory region
    /// (only DRAM is DMA-accessible)
    UnsupportedMemoryRegion,
}
/// DMA transmit buffer
///
/// This is a contiguous buffer linked together by DMA descriptors of length
/// 4092. It can only be used for transmitting data to a peripheral's FIFO.
/// See [DmaRxBuf] for receiving data.
#[derive(Debug)]
pub struct DmaTxBuf {
    // Descriptor chain in DRAM; each descriptor's size and buffer pointer are
    // set once in `new` and never change afterwards.
    descriptors: &'static mut [DmaDescriptor],
    // Backing storage in DRAM that the descriptors point into.
    buffer: &'static mut [u8],
}
impl DmaTxBuf {
    /// Creates a new [DmaTxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        // One descriptor is needed per CHUNK_SIZE-byte chunk of the buffer.
        let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE);
        if descriptors.len() < min_descriptors {
            return Err(DmaBufError::InsufficientDescriptors);
        }
        // The DMA engine can only access DRAM.
        if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        // Setup size and buffer pointer as these will not change for the remainder of
        // this object's lifetime
        let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE));
        for (desc, chunk) in chunk_iter {
            desc.set_size(chunk.len());
            desc.buffer = chunk.as_mut_ptr();
        }
        let mut buf = Self {
            descriptors,
            buffer,
        };
        // Link the descriptor chain to cover the whole buffer by default.
        buf.set_length(buf.capacity());
        Ok(buf)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors, self.buffer)
    }

    /// Returns the size of the underlying buffer
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Return the number of bytes that would be transmitted by this buf.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        // Sum the configured lengths of descriptors that are linked into the
        // chain; a null `next` pointer marks the end of the chain.
        let mut result = 0;
        for desc in self.descriptors.iter() {
            result += desc.len();
            if desc.next.is_null() {
                break;
            }
        }
        result
    }

    /// Reset the descriptors to only transmit `len` amount of bytes from this
    /// buf.
    ///
    /// The number of bytes in data must be less than or equal to the buffer
    /// size.
    pub fn set_length(&mut self, len: usize) {
        assert!(len <= self.buffer.len());
        // Get the minimum number of descriptors needed for this length of data.
        // `.max(1)` keeps at least one descriptor in the chain so that
        // `set_length(0)` still produces a valid (empty) transfer.
        let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1);
        let required_descriptors = &mut self.descriptors[0..descriptor_count];
        // Link up the relevant descriptors.
        let mut next = core::ptr::null_mut();
        for desc in required_descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }
        let mut remaining_length = len;
        for desc in required_descriptors.iter_mut() {
            // As this is a simple dma buffer implementation we won't
            // be making use of this feature.
            desc.set_suc_eof(false);
            // This isn't strictly needed for this simple implementation,
            // but it is useful for debugging.
            desc.set_owner(Owner::Dma);
            // Each descriptor carries at most its configured chunk size.
            let chunk_size = min(remaining_length, desc.flags.size() as usize);
            desc.set_length(chunk_size);
            remaining_length -= chunk_size;
        }
        debug_assert_eq!(remaining_length, 0);
        // The final descriptor signals end-of-frame to the DMA.
        required_descriptors.last_mut().unwrap().set_suc_eof(true);
    }

    /// Fills the TX buffer with the bytes provided in `data` and reset the
    /// descriptors to only cover the filled section.
    ///
    /// The number of bytes in data must be less than or equal to the buffer
    /// size.
    pub fn fill(&mut self, data: &[u8]) {
        self.set_length(data.len());
        self.as_mut_slice()[..data.len()].copy_from_slice(data);
    }

    /// Returns the buf as a mutable slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.buffer[..]
    }

    /// Returns the buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    // Pointer to the first descriptor in the chain, for handing to the DMA.
    pub(crate) fn first(&self) -> *mut DmaDescriptor {
        self.descriptors.as_ptr() as _
    }
}
/// DMA receive buffer
///
/// This is a contiguous buffer linked together by DMA descriptors of length
/// 4092. It can only be used for receiving data from a peripheral's FIFO.
/// See [DmaTxBuf] for transmitting data.
pub struct DmaRxBuf {
    // Descriptor chain in DRAM; each descriptor's size and buffer pointer are
    // set once in `new` and never change afterwards.
    descriptors: &'static mut [DmaDescriptor],
    // Backing storage in DRAM that the descriptors point into.
    buffer: &'static mut [u8],
}
impl DmaRxBuf {
    /// Creates a new [DmaRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
    pub fn new(
        descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        // One descriptor is needed per CHUNK_SIZE-byte chunk of the buffer.
        let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE);
        if descriptors.len() < min_descriptors {
            return Err(DmaBufError::InsufficientDescriptors);
        }
        // The DMA engine can only access DRAM.
        if !is_slice_in_dram(descriptors) || !is_slice_in_dram(buffer) {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        // Setup size and buffer pointer as these will not change for the remainder of
        // this object's lifetime
        let chunk_iter = descriptors.iter_mut().zip(buffer.chunks_mut(CHUNK_SIZE));
        for (desc, chunk) in chunk_iter {
            desc.set_size(chunk.len());
            desc.buffer = chunk.as_mut_ptr();
        }
        let mut buf = Self {
            descriptors,
            buffer,
        };
        // Configure the chain to receive into the whole buffer by default.
        buf.set_length(buf.capacity());
        Ok(buf)
    }

    /// Consume the buf, returning the descriptors and buffer.
    pub fn split(self) -> (&'static mut [DmaDescriptor], &'static mut [u8]) {
        (self.descriptors, self.buffer)
    }

    /// Returns the size of the underlying buffer
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the maximum number of bytes that this buf has been configured to
    /// receive.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        // Sum the configured sizes (capacities, not received lengths) of the
        // linked descriptors; a null `next` pointer ends the chain.
        let mut result = 0;
        for desc in self.descriptors.iter() {
            result += desc.flags.size() as usize;
            if desc.next.is_null() {
                break;
            }
        }
        result
    }

    /// Reset the descriptors to only receive `len` amount of bytes into this
    /// buf.
    ///
    /// The number of bytes in data must be less than or equal to the buffer
    /// size.
    pub fn set_length(&mut self, len: usize) {
        assert!(len <= self.buffer.len());
        // Get the minimum number of descriptors needed for this length of data.
        // `.max(1)` keeps at least one descriptor so `set_length(0)` is valid.
        let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1);
        let required_descriptors = &mut self.descriptors[..descriptor_count];
        // Link up the relevant descriptors.
        let mut next = core::ptr::null_mut();
        for desc in required_descriptors.iter_mut().rev() {
            desc.next = next;
            next = desc;
        }
        // Get required part of the buffer.
        let mut remaining_length = len;
        for desc in required_descriptors.iter_mut() {
            // Clear this to allow hardware to set it when the peripheral returns an EOF
            // bit.
            desc.set_suc_eof(false);
            // This isn't strictly needed for this simple implementation,
            // but it is useful for debugging.
            desc.set_owner(Owner::Dma);
            // Clear this to allow hardware to set it when it's
            // done receiving data for this descriptor.
            desc.set_length(0);
            let chunk_size = min(CHUNK_SIZE, remaining_length);
            desc.set_size(chunk_size);
            remaining_length -= chunk_size;
        }
        debug_assert_eq!(remaining_length, 0);
    }

    /// Returns the entire underlying buffer as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire underlying buffer as a slice that can be written.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        &mut self.buffer[..]
    }

    /// Return the number of bytes that was received by this buf.
    pub fn number_of_received_bytes(&self) -> usize {
        // `desc.len()` holds the count of bytes written to each descriptor's
        // chunk; a null `next` pointer ends the chain.
        let mut result = 0;
        for desc in self.descriptors.iter() {
            result += desc.len();
            if desc.next.is_null() {
                break;
            }
        }
        result
    }

    /// Reads the received data into the provided `buf`.
    ///
    /// If `buf.len()` is less than the amount of received data then only the
    /// first `buf.len()` bytes of received data is written into `buf`.
    ///
    /// Returns the number of bytes written to `buf`.
    pub fn read_received_data(&self, buf: &mut [u8]) -> usize {
        let mut remaining = &mut buf[..];
        let mut buffer_offset = 0;
        for desc in self.descriptors.iter() {
            if remaining.is_empty() {
                break;
            }
            // Copy at most the bytes this descriptor actually received.
            let amount_to_copy = min(desc.len(), remaining.len());
            let (to_fill, to_remain) = remaining.split_at_mut(amount_to_copy);
            to_fill.copy_from_slice(&self.buffer[buffer_offset..][..amount_to_copy]);
            remaining = to_remain;
            if desc.next.is_null() {
                break;
            }
            // Advance by the descriptor's capacity: each descriptor's data
            // starts at a fixed offset in the underlying buffer.
            buffer_offset += desc.flags.size() as usize;
        }
        let remaining_bytes = remaining.len();
        buf.len() - remaining_bytes
    }

    /// Returns the received data as an iterator of slices.
    pub fn received_data(&self) -> impl Iterator<Item = &[u8]> {
        let mut descriptors = self.descriptors.iter();
        #[allow(clippy::redundant_slicing)] // Clippy can't see why this is needed.
        let mut buf = &self.buffer[..];
        core::iter::from_fn(move || {
            let mut chunk_size = 0;
            let mut skip_size = 0;
            while let Some(desc) = descriptors.next() {
                chunk_size += desc.len();
                skip_size += desc.flags.size() as usize;
                // If this is the end of the linked list, we can skip the remaining descriptors.
                if desc.next.is_null() {
                    while descriptors.next().is_some() {
                        // Drain the iterator so the next call to from_fn returns
                        // None.
                    }
                    break;
                }
                // This typically happens when the DMA gets an EOF bit from the peripheral.
                // It can also happen if the DMA is restarted.
                if desc.len() < desc.flags.size() as usize {
                    break;
                }
            }
            if chunk_size == 0 {
                return None;
            }
            let chunk = &buf[..chunk_size];
            buf = &buf[skip_size..];
            Some(chunk)
        })
    }

    // Pointer to the first descriptor in the chain, for handing to the DMA.
    pub(crate) fn first(&self) -> *mut DmaDescriptor {
        self.descriptors.as_ptr() as _
    }
}
/// DMA transmit and receive buffer.
///
/// This is a (single) contiguous buffer linked together by two sets of DMA
/// descriptors of length 4092 each.
/// It can be used for simultaneously transmitting to and receiving from a
/// peripheral's FIFO. These are typically full-duplex transfers.
pub struct DmaTxRxBuf {
    // TX descriptor chain; covers the same `buffer` as `rx_descriptors`.
    tx_descriptors: &'static mut [DmaDescriptor],
    // RX descriptor chain; covers the same `buffer` as `tx_descriptors`.
    rx_descriptors: &'static mut [DmaDescriptor],
    // Shared backing storage in DRAM.
    buffer: &'static mut [u8],
}
impl DmaTxRxBuf {
    /// Creates a new [DmaTxRxBuf] from some descriptors and a buffer.
    ///
    /// There must be enough descriptors for the provided buffer.
    /// Each descriptor can handle 4092 bytes worth of buffer.
    ///
    /// Both the descriptors and buffer must be in DMA-capable memory.
    /// Only DRAM is supported.
    pub fn new(
        tx_descriptors: &'static mut [DmaDescriptor],
        rx_descriptors: &'static mut [DmaDescriptor],
        buffer: &'static mut [u8],
    ) -> Result<Self, DmaBufError> {
        // Each descriptor set must be able to cover the whole buffer.
        let min_descriptors = buffer.len().div_ceil(CHUNK_SIZE);
        if tx_descriptors.len() < min_descriptors {
            return Err(DmaBufError::InsufficientDescriptors);
        }
        if rx_descriptors.len() < min_descriptors {
            return Err(DmaBufError::InsufficientDescriptors);
        }
        // The DMA engine can only access DRAM.
        if !is_slice_in_dram(tx_descriptors)
            || !is_slice_in_dram(rx_descriptors)
            || !is_slice_in_dram(buffer)
        {
            return Err(DmaBufError::UnsupportedMemoryRegion);
        }
        // Reset the provided descriptors
        tx_descriptors.fill(DmaDescriptor::EMPTY);
        rx_descriptors.fill(DmaDescriptor::EMPTY);
        // Both descriptor sets point at the same chunks of the shared buffer.
        let descriptors = tx_descriptors.iter_mut().zip(rx_descriptors.iter_mut());
        let chunks = buffer.chunks_mut(CHUNK_SIZE);
        for ((tx_desc, rx_desc), chunk) in descriptors.zip(chunks) {
            tx_desc.set_size(chunk.len());
            tx_desc.buffer = chunk.as_mut_ptr();
            rx_desc.set_size(chunk.len());
            rx_desc.buffer = chunk.as_mut_ptr();
        }
        let mut buf = Self {
            tx_descriptors,
            rx_descriptors,
            buffer,
        };
        // Link both chains to cover the whole buffer by default.
        buf.set_length(buf.capacity());
        Ok(buf)
    }

    /// Consume the buf, returning the tx descriptors, rx descriptors and
    /// buffer.
    pub fn split(
        self,
    ) -> (
        &'static mut [DmaDescriptor],
        &'static mut [DmaDescriptor],
        &'static mut [u8],
    ) {
        (self.tx_descriptors, self.rx_descriptors, self.buffer)
    }

    /// Return the size of the underlying buffer.
    pub fn capacity(&self) -> usize {
        self.buffer.len()
    }

    /// Returns the entire buf as a slice that can be read.
    pub fn as_slice(&self) -> &[u8] {
        self.buffer
    }

    /// Returns the entire buf as a slice that can be written.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        &mut self.buffer[..]
    }

    /// Reset the descriptors to only transmit/receive `len` amount of bytes
    /// with this buf.
    ///
    /// `len` must be less than or equal to the buffer size.
    pub fn set_length(&mut self, len: usize) {
        assert!(len <= self.buffer.len());
        // Get the minimum number of descriptors needed for this length of data.
        // `.max(1)` keeps at least one descriptor so `set_length(0)` is valid.
        let descriptor_count = len.div_ceil(CHUNK_SIZE).max(1);
        let relevant_tx_descriptors = &mut self.tx_descriptors[..descriptor_count];
        let relevant_rx_descriptors = &mut self.rx_descriptors[..descriptor_count];
        // Link up the relevant descriptors.
        for descriptors in [
            &mut relevant_tx_descriptors[..],
            &mut relevant_rx_descriptors[..],
        ] {
            let mut next = core::ptr::null_mut();
            for desc in descriptors.iter_mut().rev() {
                desc.next = next;
                next = desc;
            }
        }
        // Configure the TX half of the chain.
        let mut remaining_length = len;
        for desc in relevant_tx_descriptors.iter_mut() {
            // As this is a simple dma buffer implementation we won't
            // be making use of this feature.
            desc.set_suc_eof(false);
            // This isn't strictly needed for this simple implementation,
            // but it is useful for debugging.
            desc.set_owner(Owner::Dma);
            let chunk_size = min(desc.size(), remaining_length);
            desc.set_length(chunk_size);
            remaining_length -= chunk_size;
        }
        debug_assert_eq!(remaining_length, 0);
        // The final TX descriptor signals end-of-frame to the DMA.
        relevant_tx_descriptors
            .last_mut()
            .unwrap()
            .set_suc_eof(true);
        // Configure the RX half of the chain.
        let mut remaining_length = len;
        for desc in relevant_rx_descriptors.iter_mut() {
            // Clear this to allow hardware to set it when the peripheral returns an EOF
            // bit.
            desc.set_suc_eof(false);
            // This isn't strictly needed for this simple implementation,
            // but it is useful for debugging.
            desc.set_owner(Owner::Dma);
            // Clear this to allow hardware to set it when it is
            // done receiving data for this descriptor.
            desc.set_length(0);
            let chunk_size = min(CHUNK_SIZE, remaining_length);
            desc.set_size(chunk_size);
            remaining_length -= chunk_size;
        }
        debug_assert_eq!(remaining_length, 0);
    }
}
pub(crate) mod dma_private {
use super::*;
@ -1992,6 +2543,7 @@ impl<'a, I> DmaTransferTxRx<'a, I>
where
I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
{
#[allow(dead_code)]
pub(crate) fn new(instance: &'a mut I) -> Self {
Self { instance }
}
@ -2022,238 +2574,6 @@ where
}
}
/// DMA transaction for TX transfers with moved-in/moved-out peripheral and
/// buffer
///
/// # Safety
///
/// Never use [core::mem::forget] on an in-progress transfer
#[non_exhaustive]
#[must_use]
pub struct DmaTransferTxOwned<I, T>
where
    I: dma_private::DmaSupportTx,
    T: ReadBuffer,
{
    // Owned driver; returned to the caller by `wait`.
    instance: I,
    // Owned source buffer; kept alive for the duration of the transfer.
    tx_buffer: T,
}
impl<I, T> DmaTransferTxOwned<I, T>
where
    I: dma_private::DmaSupportTx,
    T: ReadBuffer,
{
    // Crate-internal constructor; takes ownership of the driver and buffer.
    pub(crate) fn new(instance: I, tx_buffer: T) -> Self {
        Self {
            instance,
            tx_buffer,
        }
    }

    /// Wait for the transfer to finish and return the peripheral and the
    /// buffer.
    pub fn wait(mut self) -> Result<(I, T), (DmaError, I, T)> {
        self.instance.peripheral_wait_dma(true, false);
        let err = self.instance.tx().has_error();
        // We need to have a `Drop` implementation, because we accept
        // managed buffers that can free their memory on drop. Because of that
        // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
        // and `mem::forget`.
        //
        // NOTE(unsafe) There is no panic branch between getting the resources
        // and forgetting `self`.
        let (instance, tx_buffer) = unsafe {
            let instance = core::ptr::read(&self.instance);
            let tx_buffer = core::ptr::read(&self.tx_buffer);
            core::mem::forget(self);
            (instance, tx_buffer)
        };
        if err {
            Err((DmaError::DescriptorError, instance, tx_buffer))
        } else {
            Ok((instance, tx_buffer))
        }
    }

    /// Check if the transfer is finished.
    pub fn is_done(&mut self) -> bool {
        self.instance.tx().is_done()
    }
}
impl<I, T> Drop for DmaTransferTxOwned<I, T>
where
    I: dma_private::DmaSupportTx,
    T: ReadBuffer,
{
    fn drop(&mut self) {
        // Block until the TX transfer is finished so the DMA is no longer
        // using the buffer when it is dropped.
        self.instance.peripheral_wait_dma(true, false);
    }
}
/// DMA transaction for RX transfers with moved-in/moved-out peripheral and
/// buffer
///
/// # Safety
///
/// Never use [core::mem::forget] on an in-progress transfer
#[non_exhaustive]
#[must_use]
pub struct DmaTransferRxOwned<I, R>
where
    I: dma_private::DmaSupportRx,
    R: WriteBuffer,
{
    // Owned driver; returned to the caller by `wait`.
    instance: I,
    // Owned destination buffer; kept alive for the duration of the transfer.
    rx_buffer: R,
}
impl<I, R> DmaTransferRxOwned<I, R>
where
    I: dma_private::DmaSupportRx,
    R: WriteBuffer,
{
    // Crate-internal constructor; takes ownership of the driver and buffer.
    pub(crate) fn new(instance: I, rx_buffer: R) -> Self {
        Self {
            instance,
            rx_buffer,
        }
    }

    /// Wait for the transfer to finish and return the peripheral and the
    /// buffers.
    pub fn wait(mut self) -> Result<(I, R), (DmaError, I, R)> {
        self.instance.peripheral_wait_dma(false, true);
        let err = self.instance.rx().has_error();
        // We need to have a `Drop` implementation, because we accept
        // managed buffers that can free their memory on drop. Because of that
        // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
        // and `mem::forget`.
        //
        // NOTE(unsafe) There is no panic branch between getting the resources
        // and forgetting `self`.
        let (instance, rx_buffer) = unsafe {
            let instance = core::ptr::read(&self.instance);
            let rx_buffer = core::ptr::read(&self.rx_buffer);
            core::mem::forget(self);
            (instance, rx_buffer)
        };
        if err {
            Err((DmaError::DescriptorError, instance, rx_buffer))
        } else {
            Ok((instance, rx_buffer))
        }
    }

    /// Check if the transfer is finished.
    pub fn is_done(&mut self) -> bool {
        self.instance.rx().is_done()
    }
}
impl<I, R> Drop for DmaTransferRxOwned<I, R>
where
    I: dma_private::DmaSupportRx,
    R: WriteBuffer,
{
    fn drop(&mut self) {
        // Block until the RX transfer is finished so the DMA is no longer
        // using the buffer when it is dropped.
        self.instance.peripheral_wait_dma(false, true);
    }
}
/// DMA transaction for TX+RX transfers with moved-in/moved-out peripheral and
/// buffers
///
/// # Safety
///
/// Never use [core::mem::forget] on an in-progress transfer
#[non_exhaustive]
#[must_use]
pub struct DmaTransferTxRxOwned<I, T, R>
where
    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
    T: ReadBuffer,
    R: WriteBuffer,
{
    // Owned driver; returned to the caller by `wait`.
    instance: I,
    // Owned source buffer for the TX half of the transfer.
    tx_buffer: T,
    // Owned destination buffer for the RX half of the transfer.
    rx_buffer: R,
}
impl<I, T, R> DmaTransferTxRxOwned<I, T, R>
where
    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
    T: ReadBuffer,
    R: WriteBuffer,
{
    // Crate-internal constructor; takes ownership of the driver and buffers.
    pub(crate) fn new(instance: I, tx_buffer: T, rx_buffer: R) -> Self {
        Self {
            instance,
            tx_buffer,
            rx_buffer,
        }
    }

    /// Wait for the transfer to finish and return the peripheral and the
    /// buffers.
    #[allow(clippy::type_complexity)]
    pub fn wait(mut self) -> Result<(I, T, R), (DmaError, I, T, R)> {
        self.instance.peripheral_wait_dma(true, true);
        let err = self.instance.tx().has_error() || self.instance.rx().has_error();
        // We need to have a `Drop` implementation, because we accept
        // managed buffers that can free their memory on drop. Because of that
        // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
        // and `mem::forget`.
        //
        // NOTE(unsafe) There is no panic branch between getting the resources
        // and forgetting `self`.
        let (instance, tx_buffer, rx_buffer) = unsafe {
            let instance = core::ptr::read(&self.instance);
            let tx_buffer = core::ptr::read(&self.tx_buffer);
            let rx_buffer = core::ptr::read(&self.rx_buffer);
            core::mem::forget(self);
            (instance, tx_buffer, rx_buffer)
        };
        if err {
            Err((DmaError::DescriptorError, instance, tx_buffer, rx_buffer))
        } else {
            Ok((instance, tx_buffer, rx_buffer))
        }
    }

    /// Check if the transfer is finished.
    pub fn is_done(&mut self) -> bool {
        self.instance.tx().is_done() && self.instance.rx().is_done()
    }
}
impl<I, T, R> Drop for DmaTransferTxRxOwned<I, T, R>
where
    I: dma_private::DmaSupportTx + dma_private::DmaSupportRx,
    T: ReadBuffer,
    R: WriteBuffer,
{
    fn drop(&mut self) {
        // Block until both halves of the transfer are finished so the DMA is
        // no longer using the buffers when they are dropped.
        self.instance.peripheral_wait_dma(true, true);
    }
}
/// DMA transaction for TX only circular transfers
///
/// # Safety
@ -2400,10 +2720,6 @@ pub(crate) mod asynch {
pub fn new(tx: &'a mut TX) -> Self {
Self { tx, _a: () }
}
pub fn tx(&mut self) -> &mut TX {
self.tx
}
}
impl<'a, TX> core::future::Future for DmaTxFuture<'a, TX>
@ -2457,6 +2773,7 @@ pub(crate) mod asynch {
Self { rx, _a: () }
}
#[allow(dead_code)] // Dead on the C2
pub fn rx(&mut self) -> &mut RX {
self.rx
}

View File

@ -572,61 +572,6 @@ mod critical_section_impl {
}
}
/// FlashSafeDma
///
/// The embedded-hal traits make no guarantees about
/// where the buffers are placed. The DMA implementation in Espressif chips has
/// a limitation in that it can only access the RAM address space, meaning data
/// to be transmitted from the flash address space must be copied into RAM
/// first.
///
/// This wrapper struct should be used when a peripheral using the DMA engine
/// needs to transmit data from flash (ROM) via the embedded-hal traits. This is
/// often a `const` variable.
///
/// Example usage using [`spi::master::dma::SpiDma`]
/// ```rust, ignore
/// const ARRAY_IN_FLASH = [0xAA; 128];
///
/// let spi = SpiDma::new(/* */);
///
/// spi.write(&ARRAY_IN_FLASH[..]).unwrap(); // error when transmission starts
///
/// let spi = FlashSafeDma::new(spi);
///
/// spi.write(&ARRAY_IN_FLASH[..]).unwrap(); // success
/// ```
pub struct FlashSafeDma<T, const SIZE: usize> {
    // The wrapped DMA-capable driver.
    inner: T,
    // RAM buffer of SIZE bytes that flash-resident data can be staged through.
    #[allow(unused)]
    buffer: [u8; SIZE],
}
impl<T, const SIZE: usize> FlashSafeDma<T, SIZE> {
    /// Create a new instance wrapping the given driver, allocating a zeroed
    /// RAM buffer of `SIZE` bytes alongside it
    pub fn new(inner: T) -> Self {
        Self {
            inner,
            buffer: [0u8; SIZE],
        }
    }

    /// Return a mutable reference to the wrapped driver
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner
    }

    /// Return an immutable reference to the wrapped driver
    pub fn inner(&self) -> &T {
        &self.inner
    }

    /// Consume the wrapper, returning the wrapped driver
    pub fn free(self) -> T {
        self.inner
    }
}
/// Default (unhandled) interrupt handler
pub const DEFAULT_INTERRUPT_HANDLER: interrupt::InterruptHandler = interrupt::InterruptHandler::new(
unsafe { core::mem::transmute::<*const (), extern "C" fn()>(EspDefaultHandler as *const ()) },

View File

@ -74,6 +74,13 @@ pub(crate) fn is_valid_ram_address(address: u32) -> bool {
(self::constants::SOC_DRAM_LOW..=self::constants::SOC_DRAM_HIGH).contains(&address)
}
#[allow(unused)]
/// Returns whether the entire byte range occupied by `slice` lies within the
/// chip's DRAM address range (and is therefore DMA-accessible).
pub(crate) fn is_slice_in_dram<T>(slice: &[T]) -> bool {
    let start = slice.as_ptr() as u32;
    // Measure the slice in bytes, not elements: `slice.len()` alone would
    // under-report the end address for multi-byte element types such as
    // `DmaDescriptor`, wrongly accepting slices that extend past DRAM.
    let end = start + core::mem::size_of_val(slice) as u32;
    self::constants::SOC_DRAM_LOW <= start && end <= self::constants::SOC_DRAM_HIGH
}
#[allow(unused)]
pub(crate) fn is_valid_psram_address(address: u32) -> bool {
#[cfg(psram)]

File diff suppressed because it is too large Load Diff

View File

@ -24,7 +24,7 @@ use esp_backtrace as _;
use esp_hal::{
clock::ClockControl,
dma::*,
dma_descriptors,
dma_buffers,
gpio::Io,
peripherals::Peripherals,
prelude::*,
@ -59,15 +59,14 @@ async fn main(_spawner: Spawner) {
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (descriptors, rx_descriptors) = dma_descriptors!(32000);
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure_for_async(false, DmaPriority::Priority0),
descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0))
.with_buffers(dma_tx_buf, dma_rx_buf);
let send_buffer = [0, 1, 2, 3, 4, 5, 6, 7];
loop {

View File

@ -31,7 +31,7 @@ use esp_backtrace as _;
use esp_hal::{
clock::ClockControl,
delay::Delay,
dma::{Dma, DmaPriority},
dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf},
dma_buffers,
gpio::Io,
peripherals::Peripherals,
@ -77,6 +77,8 @@ fn main() -> ! {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(256, 320);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(
@ -87,30 +89,23 @@ fn main() -> ! {
Some(sio3),
Some(cs),
)
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let delay = Delay::new(&clocks);
// DMA buffer require a static life-time
let (zero_buf, _, _, _) = dma_buffers!(0);
let send = tx_buffer;
let mut receive = rx_buffer;
// write enable
dma_tx_buf.set_length(0);
let transfer = spi
.write(
SpiDataMode::Single,
Command::Command8(0x06, SpiDataMode::Single),
Address::None,
0,
&zero_buf,
dma_tx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
(spi, dma_tx_buf) = transfer.wait();
delay.delay_millis(250);
// erase sector
@ -120,10 +115,11 @@ fn main() -> ! {
Command::Command8(0x20, SpiDataMode::Single),
Address::Address24(0x000000, SpiDataMode::Single),
0,
&zero_buf,
dma_tx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
(spi, dma_tx_buf) = transfer.wait();
delay.delay_millis(250);
// write enable
@ -133,25 +129,28 @@ fn main() -> ! {
Command::Command8(0x06, SpiDataMode::Single),
Address::None,
0,
&zero_buf,
dma_tx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
(spi, dma_tx_buf) = transfer.wait();
delay.delay_millis(250);
// write data / program page
send.fill(b'!');
send[0..][..5].copy_from_slice(&b"Hello"[..]);
dma_tx_buf.set_length(dma_tx_buf.capacity());
dma_tx_buf.as_mut_slice().fill(b'!');
dma_tx_buf.as_mut_slice()[0..][..5].copy_from_slice(&b"Hello"[..]);
let transfer = spi
.write(
SpiDataMode::Quad,
Command::Command8(0x32, SpiDataMode::Single),
Address::Address24(0x000000, SpiDataMode::Single),
0,
&send,
dma_tx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
(spi, _) = transfer.wait();
delay.delay_millis(250);
loop {
@ -162,17 +161,18 @@ fn main() -> ! {
Command::Command8(0xeb, SpiDataMode::Single),
Address::Address32(0x000000 << 8, SpiDataMode::Quad),
4,
&mut receive,
dma_rx_buf,
)
.map_err(|e| e.0)
.unwrap();
// here we could do something else while DMA transfer is in progress
// the buffers and spi is moved into the transfer and we can get it back via
// `wait`
transfer.wait().unwrap();
(spi, dma_rx_buf) = transfer.wait();
println!("{:x?}", &receive);
for b in &mut receive.iter() {
println!("{:x?}", dma_rx_buf.as_slice());
for b in &mut dma_rx_buf.as_slice().iter() {
if *b >= 32 && *b <= 127 {
print!("{}", *b as char);
} else {

View File

@ -22,7 +22,7 @@ use esp_backtrace as _;
use esp_hal::{
clock::ClockControl,
delay::Delay,
dma::{Dma, DmaPriority},
dma::{Dma, DmaPriority, DmaRxBuf, DmaTxBuf},
dma_buffers,
gpio::Io,
peripherals::Peripherals,
@ -55,32 +55,30 @@ fn main() -> ! {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(32000);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let delay = Delay::new(&clocks);
// DMA buffer require a static life-time
let mut send = tx_buffer;
let mut receive = rx_buffer;
let mut i = 0;
for (i, v) in send.iter_mut().enumerate() {
for (i, v) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() {
*v = (i % 255) as u8;
}
loop {
send[0] = i;
send[send.len() - 1] = i;
dma_tx_buf.as_mut_slice()[0] = i;
*dma_tx_buf.as_mut_slice().last_mut().unwrap() = i;
i = i.wrapping_add(1);
let mut transfer = spi.dma_transfer(&mut send, &mut receive).unwrap();
let transfer = spi
.dma_transfer(dma_tx_buf, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
// here we could do something else while DMA transfer is in progress
let mut n = 0;
// Check is_done until the transfer is almost done (32000 bytes at 100kHz is
@ -90,11 +88,11 @@ fn main() -> ! {
n += 1;
}
transfer.wait().unwrap();
(spi, (dma_tx_buf, dma_rx_buf)) = transfer.wait();
println!(
"{:x?} .. {:x?}",
&receive[..10],
&receive[receive.len() - 10..]
&dma_rx_buf.as_slice()[..10],
&dma_rx_buf.as_slice().last_chunk::<10>().unwrap()
);
delay.delay_millis(250);

View File

@ -40,6 +40,10 @@ use hil_test as _;
#[embedded_test::tests]
mod tests {
use defmt::assert_eq;
use esp_hal::{
dma::{DmaRxBuf, DmaTxBuf},
spi::master::dma::SpiDmaBus,
};
use super::*;
@ -66,23 +70,21 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let mut send = tx_buffer;
let mut receive = rx_buffer;
dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]);
send.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);
let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap();
transfer.wait().unwrap();
assert_eq!(send, receive);
let transfer = spi
.dma_transfer(dma_tx_buf, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
(_, (dma_tx_buf, dma_rx_buf)) = transfer.wait();
assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice());
}
#[test]
@ -108,23 +110,21 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4, 2);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let mut send = tx_buffer;
let mut receive = rx_buffer;
dma_tx_buf.fill(&[0xde, 0xad, 0xbe, 0xef]);
send.copy_from_slice(&[0xde, 0xad, 0xbe, 0xef]);
let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap();
transfer.wait().unwrap();
assert_eq!(send[0..1], receive[0..1]);
let transfer = spi
.dma_transfer(dma_tx_buf, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
(_, (dma_tx_buf, dma_rx_buf)) = transfer.wait();
assert_eq!(dma_tx_buf.as_slice()[0..1], dma_rx_buf.as_slice()[0..1]);
}
#[test]
@ -150,176 +150,23 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
let mut send = tx_buffer;
let mut receive = rx_buffer;
send.copy_from_slice(&[0x55u8; 4096]);
for byte in 0..send.len() {
send[byte] = byte as u8;
}
let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap();
transfer.wait().unwrap();
assert_eq!(send, receive);
}
#[test]
#[timeout(3)]
fn test_try_using_non_dma_memory_tx_buffer() {
const DMA_BUFFER_SIZE: usize = 4096;
let peripherals = Peripherals::take();
let system = SystemControl::new(peripherals.SYSTEM);
let clocks = ClockControl::boot_defaults(system.clock_control).freeze();
let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
let sclk = io.pins.gpio0;
let miso = io.pins.gpio2;
let mosi = io.pins.gpio3;
let cs = io.pins.gpio8;
let dma = Dma::new(peripherals.DMA);
#[cfg(any(feature = "esp32", feature = "esp32s2"))]
let dma_channel = dma.spi2channel;
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (_, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let tx_buffer = {
// using `static`, not `static mut`, places the array in .rodata
static TX_BUFFER: [u8; DMA_BUFFER_SIZE] = [42u8; DMA_BUFFER_SIZE];
unsafe {
core::slice::from_raw_parts(
&mut *(core::ptr::addr_of!(TX_BUFFER) as *mut u8),
DMA_BUFFER_SIZE,
)
}
};
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
let mut receive = rx_buffer;
assert!(matches!(
spi.dma_transfer(&tx_buffer, &mut receive),
Err(esp_hal::spi::Error::DmaError(
esp_hal::dma::DmaError::UnsupportedMemoryRegion
))
));
}
#[test]
#[timeout(3)]
fn test_try_using_non_dma_memory_rx_buffer() {
const DMA_BUFFER_SIZE: usize = 4096;
let peripherals = Peripherals::take();
let system = SystemControl::new(peripherals.SYSTEM);
let clocks = ClockControl::boot_defaults(system.clock_control).freeze();
let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
let sclk = io.pins.gpio0;
let miso = io.pins.gpio2;
let mosi = io.pins.gpio3;
let cs = io.pins.gpio8;
let dma = Dma::new(peripherals.DMA);
#[cfg(any(feature = "esp32", feature = "esp32s2"))]
let dma_channel = dma.spi2channel;
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, _, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let rx_buffer = {
// using `static`, not `static mut`, places the array in .rodata
static RX_BUFFER: [u8; DMA_BUFFER_SIZE] = [42u8; DMA_BUFFER_SIZE];
unsafe {
core::slice::from_raw_parts_mut(
&mut *(core::ptr::addr_of!(RX_BUFFER) as *mut u8),
DMA_BUFFER_SIZE,
)
}
};
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
let mut receive = rx_buffer;
assert!(matches!(
spi.dma_transfer(&tx_buffer, &mut receive),
Err(esp_hal::spi::Error::DmaError(
esp_hal::dma::DmaError::UnsupportedMemoryRegion
))
));
}
#[test]
#[timeout(3)]
fn test_symmetric_dma_transfer_owned() {
const DMA_BUFFER_SIZE: usize = 4096;
let peripherals = Peripherals::take();
let system = SystemControl::new(peripherals.SYSTEM);
let clocks = ClockControl::boot_defaults(system.clock_control).freeze();
let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
let sclk = io.pins.gpio0;
let miso = io.pins.gpio2;
let mosi = io.pins.gpio3;
let cs = io.pins.gpio8;
let dma = Dma::new(peripherals.DMA);
#[cfg(any(feature = "esp32", feature = "esp32s2"))]
let dma_channel = dma.spi2channel;
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let send = tx_buffer;
let receive = rx_buffer;
send.copy_from_slice(&[0x55u8; 4096]);
for byte in 0..send.len() {
send[byte] = byte as u8;
for (i, d) in dma_tx_buf.as_mut_slice().iter_mut().enumerate() {
*d = i as _;
}
let transfer = spi.dma_transfer_owned(send, receive).unwrap();
let (_, send, receive) = transfer.wait().unwrap();
assert_eq!(send, receive);
let transfer = spi
.dma_transfer(dma_tx_buf, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
(_, (dma_tx_buf, dma_rx_buf)) = transfer.wait();
assert_eq!(dma_tx_buf.as_slice(), dma_rx_buf.as_slice());
}
#[test]
@ -365,14 +212,12 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let unit = pcnt.unit0;
unit.channel0.set_edge_signal(PcntSource::from_pin(
@ -382,21 +227,19 @@ mod tests {
unit.channel0
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
let mut receive = rx_buffer;
// Fill the buffer where each byte has 3 pos edges.
tx_buffer.fill(0b0110_1010);
dma_tx_buf.as_mut_slice().fill(0b0110_1010);
assert_eq!(out_pin.is_set_low(), true);
for i in 1..4 {
receive.copy_from_slice(&[5, 5, 5, 5, 5]);
let transfer = spi.dma_read(&mut receive).unwrap();
transfer.wait().unwrap();
assert_eq!(receive, &[0, 0, 0, 0, 0]);
dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]);
let transfer = spi.dma_read(dma_rx_buf).map_err(|e| e.0).unwrap();
(spi, dma_rx_buf) = transfer.wait();
assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]);
let transfer = spi.dma_write(&tx_buffer).unwrap();
transfer.wait().unwrap();
let transfer = spi.dma_write(dma_tx_buf).map_err(|e| e.0).unwrap();
(spi, dma_tx_buf) = transfer.wait();
assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _);
}
}
@ -444,14 +287,12 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let mut dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let mut dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let unit = pcnt.unit0;
unit.channel0.set_edge_signal(PcntSource::from_pin(
@ -461,22 +302,138 @@ mod tests {
unit.channel0
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
let mut receive = rx_buffer;
// Fill the buffer where each byte has 3 pos edges.
tx_buffer.fill(0b0110_1010);
dma_tx_buf.as_mut_slice().fill(0b0110_1010);
assert_eq!(out_pin.is_set_low(), true);
for i in 1..4 {
receive.copy_from_slice(&[5, 5, 5, 5, 5]);
let transfer = spi.dma_read(&mut receive).unwrap();
transfer.wait().unwrap();
assert_eq!(receive, &[0, 0, 0, 0, 0]);
dma_rx_buf.as_mut_slice().copy_from_slice(&[5, 5, 5, 5, 5]);
let transfer = spi.dma_read(dma_rx_buf).map_err(|e| e.0).unwrap();
(spi, dma_rx_buf) = transfer.wait();
assert_eq!(dma_rx_buf.as_slice(), &[0, 0, 0, 0, 0]);
let transfer = spi.dma_transfer(&tx_buffer, &mut receive).unwrap();
transfer.wait().unwrap();
let transfer = spi
.dma_transfer(dma_tx_buf, dma_rx_buf)
.map_err(|e| e.0)
.unwrap();
(spi, (dma_tx_buf, dma_rx_buf)) = transfer.wait();
assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _);
}
}
#[test]
#[timeout(3)]
fn test_dma_bus_symmetric_transfer() {
const DMA_BUFFER_SIZE: usize = 4;
let peripherals = Peripherals::take();
let system = SystemControl::new(peripherals.SYSTEM);
let clocks = ClockControl::boot_defaults(system.clock_control).freeze();
let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
let sclk = io.pins.gpio0;
let miso = io.pins.gpio2;
let mosi = io.pins.gpio3;
let cs = io.pins.gpio8;
let dma = Dma::new(peripherals.DMA);
#[cfg(any(feature = "esp32", feature = "esp32s2"))]
let dma_channel = dma.spi2channel;
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(dma_channel.configure(false, DmaPriority::Priority0))
.with_buffers(dma_tx_buf, dma_rx_buf);
let tx_buf = [0xde, 0xad, 0xbe, 0xef];
let mut rx_buf = [0; 4];
spi.transfer(&mut rx_buf, &tx_buf).unwrap();
assert_eq!(tx_buf, rx_buf);
}
#[test]
#[timeout(3)]
fn test_dma_bus_asymmetric_transfer() {
let peripherals = Peripherals::take();
let system = SystemControl::new(peripherals.SYSTEM);
let clocks = ClockControl::boot_defaults(system.clock_control).freeze();
let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
let sclk = io.pins.gpio0;
let miso = io.pins.gpio2;
let mosi = io.pins.gpio3;
let cs = io.pins.gpio8;
let dma = Dma::new(peripherals.DMA);
#[cfg(any(feature = "esp32", feature = "esp32s2"))]
let dma_channel = dma.spi2channel;
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(4);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(dma_channel.configure(false, DmaPriority::Priority0))
.with_buffers(dma_tx_buf, dma_rx_buf);
let tx_buf = [0xde, 0xad, 0xbe, 0xef];
let mut rx_buf = [0; 4];
spi.transfer(&mut rx_buf, &tx_buf).unwrap();
assert_eq!(&tx_buf[0..1], &rx_buf[0..1]);
}
#[test]
#[timeout(3)]
fn test_dma_bus_symmetric_transfer_huge_buffer() {
const DMA_BUFFER_SIZE: usize = 4096;
let peripherals = Peripherals::take();
let system = SystemControl::new(peripherals.SYSTEM);
let clocks = ClockControl::boot_defaults(system.clock_control).freeze();
let io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
let sclk = io.pins.gpio0;
let miso = io.pins.gpio2;
let mosi = io.pins.gpio3;
let cs = io.pins.gpio8;
let dma = Dma::new(peripherals.DMA);
#[cfg(any(feature = "esp32", feature = "esp32s2"))]
let dma_channel = dma.spi2channel;
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(40);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(dma_channel.configure(false, DmaPriority::Priority0))
.with_buffers(dma_tx_buf, dma_rx_buf);
let tx_buf = core::array::from_fn(|i| i as _);
let mut rx_buf = [0; DMA_BUFFER_SIZE];
spi.transfer(&mut rx_buf, &tx_buf).unwrap();
assert_eq!(tx_buf, rx_buf);
}
}

View File

@ -43,6 +43,7 @@ use hil_test as _;
#[embedded_test::tests(executor = esp_hal_embassy::Executor::new())]
mod tests {
use defmt::assert_eq;
use esp_hal::dma::{DmaRxBuf, DmaTxBuf};
use super::*;
@ -75,14 +76,13 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure_for_async(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0))
.with_buffers(dma_tx_buf, dma_rx_buf);
let unit = pcnt.unit0;
unit.channel0.set_edge_signal(PcntSource::from_pin(
@ -92,19 +92,19 @@ mod tests {
unit.channel0
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
let receive = rx_buffer;
let mut receive = [0; DMA_BUFFER_SIZE];
// Fill the buffer where each byte has 3 pos edges.
tx_buffer.fill(0b0110_1010);
let transmit = [0b0110_1010; DMA_BUFFER_SIZE];
assert_eq!(out_pin.is_set_low(), true);
for i in 1..4 {
receive.copy_from_slice(&[5, 5, 5, 5, 5]);
SpiBus::read(&mut spi, receive).await.unwrap();
assert_eq!(receive, &[0, 0, 0, 0, 0]);
SpiBus::read(&mut spi, &mut receive).await.unwrap();
assert_eq!(receive, [0, 0, 0, 0, 0]);
SpiBus::write(&mut spi, tx_buffer).await.unwrap();
SpiBus::write(&mut spi, &transmit).await.unwrap();
assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _);
}
}
@ -138,14 +138,13 @@ mod tests {
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, rx_buffer, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE);
let dma_tx_buf = DmaTxBuf::new(tx_descriptors, tx_buffer).unwrap();
let dma_rx_buf = DmaRxBuf::new(rx_descriptors, rx_buffer).unwrap();
let mut spi = Spi::new(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_pins(Some(sclk), Some(mosi), Some(miso), Some(cs))
.with_dma(
dma_channel.configure_for_async(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure_for_async(false, DmaPriority::Priority0))
.with_buffers(dma_tx_buf, dma_rx_buf);
let unit = pcnt.unit0;
unit.channel0.set_edge_signal(PcntSource::from_pin(
@ -155,19 +154,19 @@ mod tests {
unit.channel0
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
let receive = rx_buffer;
let mut receive = [0; DMA_BUFFER_SIZE];
// Fill the buffer where each byte has 3 pos edges.
tx_buffer.fill(0b0110_1010);
let transmit = [0b0110_1010; DMA_BUFFER_SIZE];
assert_eq!(out_pin.is_set_low(), true);
for i in 1..4 {
receive.copy_from_slice(&[5, 5, 5, 5, 5]);
SpiBus::read(&mut spi, receive).await.unwrap();
assert_eq!(receive, &[0, 0, 0, 0, 0]);
SpiBus::read(&mut spi, &mut receive).await.unwrap();
assert_eq!(receive, [0, 0, 0, 0, 0]);
SpiBus::transfer(&mut spi, receive, tx_buffer)
SpiBus::transfer(&mut spi, &mut receive, &transmit)
.await
.unwrap();
assert_eq!(unit.get_value(), (i * 3 * DMA_BUFFER_SIZE) as _);

View File

@ -20,7 +20,7 @@ use hil_test as _;
mod tests {
use esp_hal::{
clock::ClockControl,
dma::{Dma, DmaPriority},
dma::{Dma, DmaPriority, DmaRxBuf},
dma_buffers,
gpio::{Io, Level, Output},
peripherals::Peripherals,
@ -58,19 +58,13 @@ mod tests {
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (_, tx_descriptors, mut rx_buffer, rx_descriptors) = dma_buffers!(0, DMA_BUFFER_SIZE);
let (buffer, descriptors, _, _) = dma_buffers!(DMA_BUFFER_SIZE, 0);
let mut dma_rx_buf = DmaRxBuf::new(descriptors, buffer).unwrap();
let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_sck(sclk)
.with_miso(miso)
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
// Fill with neither 0x00 nor 0xFF.
rx_buffer.fill(5);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
// SPI should read '0's from the MISO pin
miso_mirror.set_low();
@ -81,12 +75,13 @@ mod tests {
Command::None,
Address::None,
0,
&mut rx_buffer,
dma_rx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
(spi, dma_rx_buf) = transfer.wait();
assert_eq!(rx_buffer, &[0x00; DMA_BUFFER_SIZE]);
assert_eq!(dma_rx_buf.as_slice(), &[0x00; DMA_BUFFER_SIZE]);
// SPI should read '1's from the MISO pin
miso_mirror.set_high();
@ -97,11 +92,13 @@ mod tests {
Command::None,
Address::None,
0,
&mut rx_buffer,
dma_rx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
assert_eq!(rx_buffer, &[0xFF; DMA_BUFFER_SIZE]);
(_, dma_rx_buf) = transfer.wait();
assert_eq!(dma_rx_buf.as_slice(), &[0xFF; DMA_BUFFER_SIZE]);
}
}

View File

@ -20,7 +20,7 @@ use hil_test as _;
mod tests {
use esp_hal::{
clock::ClockControl,
dma::{Dma, DmaPriority},
dma::{Dma, DmaPriority, DmaTxBuf},
dma_buffers,
gpio::{Io, Pull},
pcnt::{
@ -62,16 +62,13 @@ mod tests {
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (tx_buffer, tx_descriptors, _, rx_descriptors) = dma_buffers!(DMA_BUFFER_SIZE, 0);
let (buffer, descriptors, _, _) = dma_buffers!(DMA_BUFFER_SIZE, 0);
let mut dma_tx_buf = DmaTxBuf::new(descriptors, buffer).unwrap();
let mut spi = Spi::new_half_duplex(peripherals.SPI2, 100.kHz(), SpiMode::Mode0, &clocks)
.with_sck(sclk)
.with_mosi(mosi)
.with_dma(
dma_channel.configure(false, DmaPriority::Priority0),
tx_descriptors,
rx_descriptors,
);
.with_dma(dma_channel.configure(false, DmaPriority::Priority0));
let unit = pcnt.unit0;
unit.channel0.set_edge_signal(PcntSource::from_pin(
@ -82,7 +79,7 @@ mod tests {
.set_input_mode(EdgeMode::Hold, EdgeMode::Increment);
// Fill the buffer where each byte has 3 pos edges.
tx_buffer.fill(0b0110_1010);
dma_tx_buf.fill(&[0b0110_1010; DMA_BUFFER_SIZE]);
let transfer = spi
.write(
@ -90,10 +87,11 @@ mod tests {
Command::None,
Address::None,
0,
&tx_buffer,
dma_tx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
(spi, dma_tx_buf) = transfer.wait();
assert_eq!(unit.get_value(), (3 * DMA_BUFFER_SIZE) as _);
@ -103,10 +101,11 @@ mod tests {
Command::None,
Address::None,
0,
&tx_buffer,
dma_tx_buf,
)
.map_err(|e| e.0)
.unwrap();
transfer.wait().unwrap();
transfer.wait();
assert_eq!(unit.get_value(), (6 * DMA_BUFFER_SIZE) as _);
}