Improve SHA driver API (#2049)

Co-authored-by: Dominic Fischer <git@dominicfischer.me>
Dominic Fischer 2024-09-05 12:39:46 +01:00 committed by GitHub
parent 5370afb1eb
commit b6aceb1de8
3 changed files with 482 additions and 403 deletions


@@ -13,15 +13,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added `esp_hal::init` to simplify HAL initialisation (#1970, #1999)
### Changed
- Make saving and restoring SHA digest state an explicit operation (#2049) (see the usage sketch after this list)
- `Delay::new()` is now a `const` function (#1999)
- You can now create an `AnyPin` out of an `ErasedPin`. (#2072)
### Fixed
- SHA driver can now be safely used in multiple contexts concurrently (#2049)
- Fixed an issue with DMA transfers potentially not waking up the correct async task (#2065)
### Removed
- Removed `digest::Digest` implementation from SHA (#2049)
- Removed `NoPinType` in favour of `DummyPin`. (#2068)
- Removed the `async`, `embedded-hal-02`, `embedded-hal`, `embedded-io`, `embedded-io-async`, and `ufmt` features (#2070)
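
For reference, a minimal sketch of the reworked blocking flow introduced by this change (names taken from the driver and test code in this diff; `peripherals` is assumed to come from `esp_hal::init`, and error handling is elided):

```rust
use esp_hal::sha::{Sha, Sha256};
use nb::block;

// Construct the driver once from the SHA peripheral...
let mut sha = Sha::new(peripherals.SHA);
// ...and start a digest for a specific algorithm.
let mut hasher = sha.start::<Sha256>();

let mut remaining = "HELLO, ESPRESSIF!".as_bytes();
while !remaining.is_empty() {
    // `update` returns the data it could not consume yet.
    remaining = block!(hasher.update(remaining)).unwrap();
}

let mut output = [0u8; 32];
block!(hasher.finish(&mut output)).unwrap();
```

Interleaved hashing now goes through the explicit `ShaDigest::restore(..)` / `digest.save(..)` pair instead of implicit per-instance state; that flow is sketched further down, next to the driver code.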


@@ -35,7 +35,8 @@
//! # use esp_hal::sha::Sha256;
//! # use nb::block;
//! let mut source_data = "HELLO, ESPRESSIF!".as_bytes();
//! let mut hasher = Sha256::new();
//! let mut sha = Sha::new(peripherals.SHA);
//! let mut hasher = sha.start::<Sha256>();
//! // Short hashes can be created by decreasing the output buffer to the
//! // desired length
//! let mut output = [0u8; 32];
@@ -55,36 +56,52 @@
//! ## Implementation State
//! - DMA-SHA Mode is not supported.
use core::{convert::Infallible, marker::PhantomData};
use core::{borrow::BorrowMut, convert::Infallible, marker::PhantomData, mem::size_of};
/// Re-export digest for convenience
#[cfg(feature = "digest")]
pub use digest::Digest;
use crate::{
peripheral::{Peripheral, PeripheralRef},
peripherals::SHA,
reg_access::{AlignmentHelper, SocDependentEndianess},
system::PeripheralClockControl,
};
/// Context for a SHA Accelerator driver instance
#[derive(Debug, Clone)]
pub struct Context<DM: crate::Mode> {
alignment_helper: AlignmentHelper<SocDependentEndianess>,
cursor: usize,
first_run: bool,
finished: bool,
/// Buffered bytes (SHA_M_n_REG) to be processed.
buffer: [u32; 32],
/// Saved digest (SHA_H_n_REG) for interleaving operation
#[cfg(not(esp32))]
saved_digest: Option<[u8; 64]>,
phantom: PhantomData<DM>,
/// The SHA Accelerator driver instance
pub struct Sha<'d> {
sha: PeripheralRef<'d, SHA>,
}
impl crate::private::Sealed for Context<crate::Blocking> {}
impl<'d> Sha<'d> {
/// Create a new instance of the SHA Accelerator driver.
pub fn new(sha: impl Peripheral<P = SHA> + 'd) -> Self {
crate::into_ref!(sha);
PeripheralClockControl::reset(crate::system::Peripheral::Sha);
PeripheralClockControl::enable(crate::system::Peripheral::Sha);
Self { sha }
}
/// Start a new digest.
pub fn start<'a, A: ShaAlgorithm>(&'a mut self) -> ShaDigest<'d, A, &'a mut Self> {
ShaDigest::new(self)
}
/// Start a new digest and take ownership of the driver.
/// This is useful when the digest needs to be stored outside a function body,
/// e.g. in a `static` or in a struct field.
pub fn start_owned<A: ShaAlgorithm>(self) -> ShaDigest<'d, A, Self> {
ShaDigest::new(self)
}
}
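// Illustrative sketch (not part of this diff): `start_owned` moves the driver
// into the digest so it can live outside a function body, e.g. in a struct.
// The `OwnedHasher` name below is made up for the example.
struct OwnedHasher<'d> {
    digest: ShaDigest<'d, Sha256, Sha<'d>>,
}

impl<'d> OwnedHasher<'d> {
    fn new(sha: Sha<'d>) -> Self {
        Self {
            digest: sha.start_owned::<Sha256>(),
        }
    }

    fn feed(&mut self, data: &[u8]) {
        let mut remaining = data;
        while !remaining.is_empty() {
            remaining = nb::block!(self.digest.update(remaining)).unwrap();
        }
    }
}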
impl<'d> crate::private::Sealed for Sha<'d> {}
#[cfg(not(esp32))]
impl crate::InterruptConfigurable for Context<crate::Blocking> {
impl<'d> crate::InterruptConfigurable for Sha<'d> {
fn set_interrupt_handler(&mut self, handler: crate::interrupt::InterruptHandler) {
unsafe {
crate::interrupt::bind_interrupt(crate::peripherals::Interrupt::SHA, handler.handler());
@@ -94,23 +111,6 @@ impl crate::InterruptConfigurable for Context<crate::Blocking> {
}
}
impl<DM: crate::Mode> Context<DM> {
/// Indicates if the SHA context is in the first run.
///
/// Returns `true` if this is the first time processing data with the SHA
/// instance, otherwise returns `false`.
pub fn first_run(&self) -> bool {
self.first_run
}
/// Indicates if the SHA context has finished processing the data.
///
/// Returns `true` if the SHA calculation is complete, otherwise returns `false`.
pub fn finished(&self) -> bool {
self.finished
}
}
// A few notes on this implementation with regards to 'memcpy',
// - It seems that ptr::write_bytes already acts as volatile, while ptr::copy_*
// does not (in this case)
@@ -125,239 +125,162 @@ impl<DM: crate::Mode> Context<DM> {
// - This means that we need to buffer bytes coming in up to 4 u8's in order
// to create a full u32
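// Illustrative sketch (not part of this diff) of the point above: the message
// registers are written one aligned u32 at a time, so incoming u8s are first
// packed into a full word and only then written with a volatile store.
// `word_reg` is a hypothetical `*mut u32` pointing at one SHA_M_n_REG word;
// native endianness is used here, while the real helper is SoC-dependent.
fn write_message_word(word_reg: *mut u32, bytes: [u8; 4]) {
    let word = u32::from_ne_bytes(bytes);
    unsafe { core::ptr::write_volatile(word_reg, word) };
}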
/// Trait for defining the behavior of a SHA algorithm instance.
///
/// This trait encapsulates the operations and configuration for a specific SHA
/// algorithm and provides methods for processing data buffers and calculating
/// the final hash.
/// An active digest
///
/// This implementation might fail after `u32::MAX / 8` bytes; to raise that
/// limit, see how `length` and `self.cursor` are used in `finish()`.
pub trait Sha<DM: crate::Mode>: core::ops::DerefMut<Target = Context<DM>> {
/// Constant containing the name of the algorithm as a string.
const ALGORITHM: &'static str;
pub struct ShaDigest<'d, A, S: BorrowMut<Sha<'d>>> {
sha: S,
alignment_helper: AlignmentHelper<SocDependentEndianess>,
cursor: usize,
first_run: bool,
finished: bool,
phantom: PhantomData<(&'d (), A)>,
}
/// Setup SHA Mode
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> ShaDigest<'d, A, S> {
/// Creates a new digest
#[allow(unused_mut)]
pub fn new(mut sha: S) -> Self {
#[cfg(not(esp32))]
fn mode_as_bits() -> u8;
// Setup SHA Mode.
sha.borrow_mut()
.sha
.mode()
.write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });
/// Returns the length of the chunk that the algorithm processes at a time.
///
/// For example, in SHA-256, this would typically return 64 bytes.
fn chunk_length(&self) -> usize;
Self {
sha,
alignment_helper: AlignmentHelper::default(),
cursor: 0,
first_run: true,
finished: false,
phantom: PhantomData,
}
}
/// Returns the length of the resulting digest produced by the algorithm.
///
/// For example, in SHA-256, this would return 32 bytes.
fn digest_length(&self) -> usize;
/// The ESP32 requires a control register to be written to calculate the
/// final SHA hash.
#[cfg(esp32)]
fn load_reg(&self);
/// Checks if the SHA peripheral is busy processing data.
///
/// Returns `true` if the SHA peripheral is busy, `false` otherwise.
fn is_busy(&self) -> bool;
/// Processes the data buffer and updates the hash state.
///
/// This method is platform-specific and differs for ESP32 and non-ESP32
/// platforms.
#[cfg(esp32)]
fn process_buffer(&mut self);
/// Processes the data buffer and updates the hash state.
///
/// This method is platform-specific and differs for ESP32 and non-ESP32
/// platforms.
/// Restores a previously saved digest.
#[cfg(not(esp32))]
fn process_buffer(&mut self) {
// Safety: This is safe because digest state is restored and saved between
// operations.
let sha = unsafe { crate::peripherals::SHA::steal() };
// Setup SHA Mode before processing current buffer.
sha.mode()
.write(|w| unsafe { w.mode().bits(Self::mode_as_bits()) });
if self.first_run {
// Set SHA_START_REG
sha.start().write(|w| unsafe { w.bits(1) });
self.first_run = false;
} else {
// Restore previously saved hash if interleaving operation
if let Some(ref saved_digest) = self.saved_digest.take() {
self.alignment_helper
.volatile_write_regset(h_mem(&sha, 0), saved_digest, 64);
}
// SET SHA_CONTINUE_REG
sha.continue_().write(|w| unsafe { w.bits(1) });
}
pub fn restore(mut sha: S, ctx: &mut Context<A>) -> Self {
// Setup SHA Mode.
sha.borrow_mut()
.sha
.mode()
.write(|w| unsafe { w.mode().bits(A::MODE_AS_BITS) });
// Wait until buffer has completely processed
while self.is_busy() {}
// Save the content of the current hash for interleaving operation.
let mut saved_digest = [0u8; 64];
self.alignment_helper.volatile_read_regset(
h_mem(&sha, 0),
&mut saved_digest,
64 / self.alignment_helper.align_size(),
);
self.saved_digest.replace(saved_digest);
}
/// Flushes any remaining data from the internal buffer to the SHA
/// peripheral.
///
/// Returns a `Result` indicating whether the flush was successful or if the
/// operation would block.
fn flush_data(&mut self) -> nb::Result<(), Infallible> {
if self.is_busy() {
return Err(nb::Error::WouldBlock);
}
// Safety: This is safe because the buffer is processed after being flushed to
// memory.
let sha = unsafe { crate::peripherals::SHA::steal() };
let chunk_len = self.chunk_length();
let ctx = self.deref_mut();
// Flush aligned buffer in memory before flushing alignment_helper
// Restore the message buffer
unsafe {
core::ptr::copy_nonoverlapping(
ctx.buffer.as_ptr(),
m_mem(&sha, 0),
(ctx.cursor % chunk_len) / ctx.alignment_helper.align_size(),
m_mem(&sha.borrow_mut().sha, 0),
32,
);
}
let flushed = ctx.alignment_helper.flush_to(
m_mem(&sha, 0),
(ctx.cursor % chunk_len) / ctx.alignment_helper.align_size(),
);
let mut ah = ctx.alignment_helper.clone();
self.cursor = self.cursor.wrapping_add(flushed);
if flushed > 0 && self.cursor % chunk_len == 0 {
self.process_buffer();
while self.is_busy() {}
// Restore previously saved hash
ah.volatile_write_regset(h_mem(&sha.borrow_mut().sha, 0), &ctx.saved_digest, 64);
Self {
sha,
alignment_helper: ah,
cursor: ctx.cursor,
first_run: ctx.first_run,
finished: ctx.finished,
phantom: PhantomData,
}
}
Ok(())
/// Returns true if the hardware is processing the next message.
pub fn is_busy(&self) -> bool {
cfg_if::cfg_if! {
if #[cfg(esp32)] {
A::is_busy(&self.sha.borrow().sha)
} else {
self.sha.borrow().sha.busy().read().state().bit_is_set()
}
}
}
/// Writes data into the SHA buffer.
/// This function ensures that incoming data is aligned to u32 (due to
/// issues with `cpy_mem<u8>`)
fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
let mod_cursor = self.cursor % self.chunk_length();
let chunk_len = self.chunk_length();
let ctx = self.deref_mut();
// Buffer the incoming bytes into u32 aligned words.
let (remaining, bound_reached) = ctx.alignment_helper.aligned_volatile_copy(
ctx.buffer.as_mut_ptr(),
incoming,
chunk_len / ctx.alignment_helper.align_size(),
mod_cursor / ctx.alignment_helper.align_size(),
);
self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());
// If bound reached we write the buffer to memory and process it.
if bound_reached {
// Safety: This is safe because the bound has been reached and the buffer will
// be fully processed then saved.
unsafe {
let sha = crate::peripherals::SHA::steal();
core::ptr::copy_nonoverlapping(self.buffer.as_ptr(), m_mem(&sha, 0), 32);
}
self.process_buffer();
}
Ok(remaining)
}
/// Updates the SHA context with the provided data buffer.
fn update<'a>(&mut self, buffer: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
/// Updates the SHA digest with the provided data buffer.
pub fn update<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
if self.is_busy() {
return Err(nb::Error::WouldBlock);
}
self.finished = false;
let remaining = self.write_data(buffer)?;
Ok(remaining)
self.write_data(incoming)
}
/// Finishes the calculation (if not already finished) and copies the result to `output`.
/// After `finish()` is called, subsequent `update()`s contribute to a new hash,
/// which can in turn be finalized with another `finish()`.
///
/// Typically output is expected to be the size of digest_length(), but
/// smaller inputs can be given to get a "short hash"
fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
// The main purpose of this function is to dynamically generate padding for the
// input. Padding: Append "1" bit, Pad zeros until 512/1024 filled
// then set the message length in the LSB (overwriting the padding)
// If not enough free space for length+1, add length at end of a new zero'd
// block
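// Worked example (illustrative): for SHA-256 (64-byte chunks) and a 10-byte
// message, this appends 0x80, then 45 zero bytes so the cursor reaches byte
// 56, and finally the 8-byte big-endian bit length (10 * 8 = 80) in bytes
// 56..64, completing exactly one 64-byte block.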
/// Typically, `output` is expected to be [ShaAlgorithm::DIGEST_LENGTH] bytes
/// long, but a smaller output buffer can be given to get a "short hash"
pub fn finish(&mut self, output: &mut [u8]) -> nb::Result<(), Infallible> {
if self.is_busy() {
return Err(nb::Error::WouldBlock);
}
let sha = unsafe { crate::peripherals::SHA::steal() };
let chunk_len = self.chunk_length();
// Store message length for padding
let length = (self.cursor as u64 * 8).to_be_bytes();
nb::block!(Sha::update(self, &[0x80]))?; // Append "1" bit
nb::block!(self.flush_data())?; // Flush partial data, ensures aligned cursor
nb::block!(self.update(&[0x80]))?; // Append "1" bit
// Flush partial data, ensures aligned cursor
{
while self.is_busy() {}
let flushed = self.alignment_helper.flush_to(
m_mem(&self.sha.borrow_mut().sha, 0),
(self.cursor % A::CHUNK_LENGTH) / self.alignment_helper.align_size(),
);
self.cursor = self.cursor.wrapping_add(flushed);
if flushed > 0 && self.cursor % A::CHUNK_LENGTH == 0 {
self.process_buffer();
while self.is_busy() {}
}
}
debug_assert!(self.cursor % 4 == 0);
let mod_cursor = self.cursor % chunk_len;
if (chunk_len - mod_cursor) < chunk_len / 8 {
let mod_cursor = self.cursor % A::CHUNK_LENGTH;
if (A::CHUNK_LENGTH - mod_cursor) < A::CHUNK_LENGTH / 8 {
// Zero out remaining data if buffer is almost full (>=448/896), and process
// buffer
let pad_len = chunk_len - mod_cursor;
let ctx = self.deref_mut();
ctx.alignment_helper.volatile_write_bytes(
m_mem(&sha, 0),
let pad_len = A::CHUNK_LENGTH - mod_cursor;
self.alignment_helper.volatile_write_bytes(
m_mem(&self.sha.borrow_mut().sha, 0),
0_u8,
pad_len / ctx.alignment_helper.align_size(),
mod_cursor / ctx.alignment_helper.align_size(),
pad_len / self.alignment_helper.align_size(),
mod_cursor / self.alignment_helper.align_size(),
);
self.process_buffer();
self.cursor = self.cursor.wrapping_add(pad_len);
debug_assert_eq!(self.cursor % chunk_len, 0);
debug_assert_eq!(self.cursor % A::CHUNK_LENGTH, 0);
// Spin-wait for finish
while self.is_busy() {}
}
let mod_cursor = self.cursor % chunk_len; // Should be zero if branched above
let pad_len = chunk_len - mod_cursor - core::mem::size_of::<u64>();
let mod_cursor = self.cursor % A::CHUNK_LENGTH; // Should be zero if branched above
let pad_len = A::CHUNK_LENGTH - mod_cursor - size_of::<u64>();
let ctx = self.deref_mut();
ctx.alignment_helper.volatile_write_bytes(
m_mem(&sha, 0),
0_u8,
pad_len / ctx.alignment_helper.align_size(),
mod_cursor / ctx.alignment_helper.align_size(),
self.alignment_helper.volatile_write_bytes(
m_mem(&self.sha.borrow_mut().sha, 0),
0,
pad_len / self.alignment_helper.align_size(),
mod_cursor / self.alignment_helper.align_size(),
);
ctx.alignment_helper.aligned_volatile_copy(
m_mem(&sha, 0),
self.alignment_helper.aligned_volatile_copy(
m_mem(&self.sha.borrow_mut().sha, 0),
&length,
chunk_len / ctx.alignment_helper.align_size(),
(chunk_len - core::mem::size_of::<u64>()) / ctx.alignment_helper.align_size(),
A::CHUNK_LENGTH / self.alignment_helper.align_size(),
(A::CHUNK_LENGTH - size_of::<u64>()) / self.alignment_helper.align_size(),
);
self.process_buffer();
@@ -367,13 +290,14 @@ pub trait Sha<DM: crate::Mode>: core::ops::DerefMut<Target = Context<DM>> {
// ESP32 requires additional load to retrieve output
#[cfg(esp32)]
{
self.load_reg();
A::load(&mut self.sha.borrow_mut().sha);
// Spin wait for result, 8-20 clock cycles according to manual
while self.is_busy() {}
}
self.alignment_helper.volatile_read_regset(
h_mem(&sha, 0),
h_mem(&self.sha.borrow_mut().sha, 0),
output,
core::cmp::min(output.len(), 32) / self.alignment_helper.align_size(),
);
@@ -384,6 +308,224 @@ pub trait Sha<DM: crate::Mode>: core::ops::DerefMut<Target = Context<DM>> {
Ok(())
}
/// Save the current state of the digest for later continuation.
#[cfg(not(esp32))]
pub fn save(&mut self, context: &mut Context<A>) -> nb::Result<(), Infallible> {
if self.is_busy() {
return Err(nb::Error::WouldBlock);
}
context.alignment_helper = self.alignment_helper.clone();
context.cursor = self.cursor;
context.first_run = self.first_run;
context.finished = self.finished;
// Save the content of the current hash.
self.alignment_helper.volatile_read_regset(
h_mem(&self.sha.borrow_mut().sha, 0),
&mut context.saved_digest,
64 / self.alignment_helper.align_size(),
);
// Save the content of the current (probably partially written) message.
unsafe {
core::ptr::copy_nonoverlapping(
m_mem(&self.sha.borrow_mut().sha, 0),
context.buffer.as_mut_ptr(),
32,
);
}
Ok(())
}
/// Discard the current digest and return the peripheral.
pub fn cancel(self) -> S {
self.sha
}
/// Processes the data buffer and updates the hash state.
///
/// This method is platform-specific and differs for ESP32 and non-ESP32
/// platforms.
fn process_buffer(&mut self) {
#[cfg(not(esp32))]
if self.first_run {
// Set SHA_START_REG
self.sha
.borrow_mut()
.sha
.start()
.write(|w| unsafe { w.bits(1) });
self.first_run = false;
} else {
// SET SHA_CONTINUE_REG
self.sha
.borrow_mut()
.sha
.continue_()
.write(|w| unsafe { w.bits(1) });
}
#[cfg(esp32)]
if self.first_run {
A::start(&mut self.sha.borrow_mut().sha);
self.first_run = false;
} else {
A::r#continue(&mut self.sha.borrow_mut().sha);
}
}
fn write_data<'a>(&mut self, incoming: &'a [u8]) -> nb::Result<&'a [u8], Infallible> {
if self.is_busy() {
return Err(nb::Error::WouldBlock);
}
self.finished = false;
let mod_cursor = self.cursor % A::CHUNK_LENGTH;
let chunk_len = A::CHUNK_LENGTH;
let (remaining, bound_reached) = self.alignment_helper.aligned_volatile_copy(
m_mem(&self.sha.borrow().sha, 0),
incoming,
chunk_len / self.alignment_helper.align_size(),
mod_cursor / self.alignment_helper.align_size(),
);
self.cursor = self.cursor.wrapping_add(incoming.len() - remaining.len());
if bound_reached {
self.process_buffer();
}
Ok(remaining)
}
}
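// Illustrative sketch (not part of this diff): interleaving two digests on the
// single SHA peripheral via the explicit save/restore API (not available on
// the original ESP32). Error handling is elided.
#[cfg(not(esp32))]
fn interleave_sketch(sha: &mut Sha<'_>, part_a: &[u8], part_b: &[u8]) -> [u8; 32] {
    let mut ctx_a = Context::<Sha256>::new();
    let mut ctx_b = Context::<Sha256>::new();

    // Feed some data into the first digest, then park its state in `ctx_a`.
    let mut digest = ShaDigest::restore(&mut *sha, &mut ctx_a);
    let mut remaining = part_a;
    while !remaining.is_empty() {
        remaining = nb::block!(digest.update(remaining)).unwrap();
    }
    nb::block!(digest.save(&mut ctx_a)).unwrap();

    // The hardware is now free for the second digest.
    let mut digest = ShaDigest::restore(&mut *sha, &mut ctx_b);
    let mut remaining = part_b;
    while !remaining.is_empty() {
        remaining = nb::block!(digest.update(remaining)).unwrap();
    }
    nb::block!(digest.save(&mut ctx_b)).unwrap();

    // Resume the first digest and finish it; `ctx_b` could be resumed later
    // in the same way.
    let mut output = [0u8; 32];
    let mut digest = ShaDigest::restore(&mut *sha, &mut ctx_a);
    nb::block!(digest.finish(&mut output)).unwrap();
    output
}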
#[cfg(not(esp32))]
/// Context for a SHA Accelerator driver instance
#[derive(Debug, Clone)]
pub struct Context<A: ShaAlgorithm> {
alignment_helper: AlignmentHelper<SocDependentEndianess>,
cursor: usize,
first_run: bool,
finished: bool,
/// Buffered bytes (SHA_M_n_REG) to be processed.
buffer: [u32; 32],
/// Saved digest (SHA_H_n_REG) for interleaving operation
saved_digest: [u8; 64],
phantom: PhantomData<A>,
}
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Context<A> {
/// Create a new empty context
pub fn new() -> Self {
Self {
cursor: 0,
first_run: true,
finished: false,
alignment_helper: AlignmentHelper::default(),
buffer: [0; 32],
saved_digest: [0; 64],
phantom: PhantomData,
}
}
/// Indicates if the SHA context is in the first run.
///
/// Returns `true` if this is the first time processing data with the SHA
/// instance, otherwise returns `false`.
pub fn first_run(&self) -> bool {
self.first_run
}
/// Indicates if the SHA context has finished processing the data.
///
/// Returns `true` if the SHA calculation is complete, otherwise returns `false`.
pub fn finished(&self) -> bool {
self.finished
}
}
#[cfg(not(esp32))]
impl<A: ShaAlgorithm> Default for Context<A> {
fn default() -> Self {
Self::new()
}
}
/// This trait encapsulates the configuration for a specific SHA algorithm.
pub trait ShaAlgorithm: crate::private::Sealed {
/// Constant containing the name of the algorithm as a string.
const ALGORITHM: &'static str;
/// The length of the chunk that the algorithm processes at a time.
///
/// For example, in SHA-256, this would typically be 64 bytes.
const CHUNK_LENGTH: usize;
/// The length of the resulting digest produced by the algorithm.
///
/// For example, in SHA-256, this would be 32 bytes.
const DIGEST_LENGTH: usize;
#[cfg(feature = "digest")]
#[doc(hidden)]
type DigestOutputSize: digest::generic_array::ArrayLength<u8> + 'static;
#[cfg(not(esp32))]
#[doc(hidden)]
const MODE_AS_BITS: u8;
#[cfg(esp32)]
#[doc(hidden)]
// Initiate the operation
fn start(sha: &mut crate::peripherals::SHA);
#[cfg(esp32)]
#[doc(hidden)]
// Continue the operation
fn r#continue(sha: &mut crate::peripherals::SHA);
#[cfg(esp32)]
#[doc(hidden)]
// Calculate the final hash
fn load(sha: &mut crate::peripherals::SHA);
#[cfg(esp32)]
#[doc(hidden)]
// Check if peripheral is busy
fn is_busy(sha: &crate::peripherals::SHA) -> bool;
}
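// Illustrative sketch (not part of this diff): code can stay generic over the
// concrete algorithm through this trait, e.g. a helper that hashes an entire
// slice with any `ShaAlgorithm`. Error handling is elided.
fn hash_all<A: ShaAlgorithm>(sha: &mut Sha<'_>, mut input: &[u8], output: &mut [u8]) {
    // `output` may be shorter than `A::DIGEST_LENGTH` to request a short hash.
    debug_assert!(output.len() <= A::DIGEST_LENGTH);
    let mut digest = sha.start::<A>();
    while !input.is_empty() {
        input = nb::block!(digest.update(input)).unwrap();
    }
    nb::block!(digest.finish(output)).unwrap();
}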
/// Implement the `digest` traits if the `digest` feature is present.
/// Note: the `digest` crate has a blanket implementation of [digest::Digest] for any
/// type that implements `FixedOutput + Default + Update + HashMarker`.
#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::HashMarker for ShaDigest<'d, A, S> {}
#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::OutputSizeUser for ShaDigest<'d, A, S> {
type OutputSize = A::DigestOutputSize;
}
#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::Update for ShaDigest<'d, A, S> {
fn update(&mut self, data: &[u8]) {
let mut remaining = data.as_ref();
while !remaining.is_empty() {
remaining = nb::block!(Self::update(self, remaining)).unwrap();
}
}
}
#[cfg(feature = "digest")]
impl<'d, A: ShaAlgorithm, S: BorrowMut<Sha<'d>>> digest::FixedOutput for ShaDigest<'d, A, S> {
fn finalize_into(mut self, out: &mut digest::Output<Self>) {
nb::block!(self.finish(out)).unwrap();
}
}
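// Illustrative sketch (not part of this diff): since `ShaDigest` has no
// `Default` impl, the blanket `digest::Digest` impl does not apply; callers
// use `digest::Update` and `digest::FixedOutput` directly, as the HIL test
// below does. `output` must be exactly `A::DIGEST_LENGTH` bytes here.
#[cfg(feature = "digest")]
fn hash_with_digest_traits<A: ShaAlgorithm>(sha: &mut Sha<'_>, data: &[u8], output: &mut [u8]) {
    use digest::{FixedOutput, Update};
    let mut hasher = sha.start::<A>();
    Update::update(&mut hasher, data);
    output.copy_from_slice(&hasher.finalize_fixed());
}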
/// This macro implements the Sha<'a, DM> trait for a specified Sha algorithm
@@ -399,133 +541,52 @@ macro_rules! impl_sha {
/// The struct provides various functionalities such as initializing the hashing
/// process, updating the internal state with new data, and finalizing the
/// hashing operation to generate the final digest.
pub struct $name<DM: crate::Mode>(Context<DM>);
#[non_exhaustive]
pub struct $name;
impl $name<crate::Blocking> {
/// Create a new instance in [crate::Blocking] mode.
#[cfg_attr(not(esp32), doc = "Optionally an interrupt handler can be bound.")]
pub fn new() -> $name<crate::Blocking> {
Self::default()
}
}
impl crate::private::Sealed for $name {}
/// Automatically implement Deref + DerefMut to get access to inner context
impl<DM: crate::Mode> core::ops::Deref for $name<DM> {
type Target = Context<DM>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<DM: crate::Mode> core::ops::DerefMut for $name<DM> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Implement Default to create hasher out of thin air
impl core::default::Default for $name<crate::Blocking> {
fn default() -> Self {
PeripheralClockControl::reset(crate::system::Peripheral::Sha);
PeripheralClockControl::enable(crate::system::Peripheral::Sha);
Self(Context {
cursor: 0,
first_run: true,
finished: false,
alignment_helper: AlignmentHelper::default(),
buffer: [0u32; 32],
#[cfg(not(esp32))]
saved_digest: None,
phantom: PhantomData,
})
}
}
impl $crate::sha::Sha<crate::Blocking> for $name<crate::Blocking> {
impl $crate::sha::ShaAlgorithm for $name {
const ALGORITHM: &'static str = stringify!($name);
const CHUNK_LENGTH: usize = $chunk_length;
const DIGEST_LENGTH: usize = $digest_length;
#[cfg(not(esp32))]
fn mode_as_bits() -> u8 {
$mode_bits
}
const MODE_AS_BITS: u8 = $mode_bits;
fn chunk_length(&self) -> usize {
$chunk_length
}
#[cfg(feature = "digest")]
// We use paste to append `U` to the digest size to match a const defined in
// digest
type DigestOutputSize = paste::paste!(digest::consts::[< U $digest_length >]);
fn digest_length(&self) -> usize {
$digest_length
}
// ESP32 uses different registers for its operation
#[cfg(esp32)]
fn load_reg(&self) {
// Safety: This is safe because digest state is restored and saved between
// operations.
let sha = unsafe { crate::peripherals::SHA::steal() };
fn start(sha: &mut crate::peripherals::SHA) {
paste::paste! {
unsafe { sha.[< $name:lower _load >]().write(|w| w.bits(1)) };
sha.[< $name:lower _start >]().write(|w| w.[< $name:lower _start >]().set_bit());
}
}
fn is_busy(&self) -> bool {
let sha = unsafe { crate::peripherals::SHA::steal() };
cfg_if::cfg_if! {
if #[cfg(esp32)] {
#[cfg(esp32)]
fn r#continue(sha: &mut crate::peripherals::SHA) {
paste::paste! {
sha.[< $name:lower _continue >]().write(|w| w.[< $name:lower _continue >]().set_bit());
}
}
#[cfg(esp32)]
fn load(sha: &mut crate::peripherals::SHA) {
paste::paste! {
sha.[< $name:lower _load >]().write(|w| w.[< $name:lower _load >]().set_bit());
}
}
#[cfg(esp32)]
fn is_busy(sha: &crate::peripherals::SHA) -> bool {
paste::paste! {
sha.[< $name:lower _busy >]().read().[< $name:lower _busy >]().bit_is_set()
}
} else {
sha.busy().read().bits() != 0
}
}
}
#[cfg(esp32)]
fn process_buffer(&mut self) {
let sha = unsafe { crate::peripherals::SHA::steal() };
paste::paste! {
if self.first_run {
sha.[< $name:lower _start >]().write(|w| unsafe { w.bits(1) });
self.first_run = false;
} else {
sha.[< $name:lower _continue >]().write(|w| unsafe { w.bits(1) });
}
}
}
}
/// implement digest traits if digest feature is present.
/// Note: digest has a blanket trait implementation for [digest::Digest] for any
/// element that implements FixedOutput + Default + Update + HashMarker
#[cfg(feature = "digest")]
impl<DM: crate::Mode> digest::HashMarker for $name<DM> {}
#[cfg(feature = "digest")]
impl<DM: crate::Mode> digest::OutputSizeUser for $name<DM> {
// We use paste to append `U` to the digest size to match a const defined in
// digest
paste::paste! {
type OutputSize = digest::consts::[< U $digest_length >];
}
}
#[cfg(feature = "digest")]
impl digest::Update for $name<crate::Blocking> {
fn update(&mut self, data: &[u8]) {
let mut remaining = data.as_ref();
while remaining.len() > 0 {
remaining = nb::block!(Sha::update(self, remaining)).unwrap();
}
}
}
#[cfg(feature = "digest")]
impl digest::FixedOutput for $name<crate::Blocking> {
fn finalize_into(mut self, out: &mut digest::Output<Self>) {
nb::block!(self.finish(out)).unwrap()
}
}
};


@@ -5,7 +5,7 @@
#![no_std]
#![no_main]
use digest::Digest;
use digest::{Digest, Update};
#[cfg(not(feature = "esp32"))]
use esp_hal::sha::Sha224;
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
@@ -15,8 +15,7 @@ use esp_hal::sha::{Sha512_224, Sha512_256};
use esp_hal::{
prelude::*,
rng::Rng,
sha::{Sha, Sha1, Sha256},
Blocking,
sha::{Sha, Sha1, Sha256, ShaAlgorithm, ShaDigest},
};
use hil_test as _;
use nb::block;
@@ -33,26 +32,26 @@ fn assert_sw_hash<D: Digest>(input: &[u8], expected_output: &[u8]) {
defmt::assert_eq!(expected_output, &soft_result[..]);
}
fn hash_sha<S: Sha<Blocking> + Default>(mut input: &[u8], output: &mut [u8]) {
let mut hasher = S::default();
fn hash_sha<S: ShaAlgorithm>(sha: &mut Sha<'static>, mut input: &[u8], output: &mut [u8]) {
let mut digest = sha.start::<S>();
while !input.is_empty() {
input = block!(Sha::update(&mut hasher, input)).unwrap();
input = block!(digest.update(input)).unwrap();
}
block!(hasher.finish(output)).unwrap();
block!(digest.finish(output)).unwrap();
}
fn hash_digest<S: Digest>(input: &[u8], output: &mut [u8]) {
let mut hasher = S::new();
digest::Digest::update(&mut hasher, input);
output.copy_from_slice(&digest::Digest::finalize(hasher));
fn hash_digest<'a, S: ShaAlgorithm>(sha: &'a mut Sha<'static>, input: &[u8], output: &mut [u8]) {
let mut hasher = ShaDigest::<S, _>::new(sha);
Update::update(&mut hasher, input);
output.copy_from_slice(&digest::FixedOutput::finalize_fixed(hasher));
}
/// A simple test using the Sha trait. This will compare the result with a
/// software implementation.
#[track_caller]
fn assert_sha<S: Sha<Blocking> + Default, const N: usize>(input: &[u8]) {
fn assert_sha<S: ShaAlgorithm, const N: usize>(sha: &mut Sha<'static>, input: &[u8]) {
let mut output = [0u8; N];
hash_sha::<S>(input, &mut output);
hash_sha::<S>(sha, input, &mut output);
// Compare against Software result.
match N {
@@ -68,9 +67,9 @@ fn assert_sha<S: Sha<Blocking> + Default, const N: usize>(input: &[u8]) {
/// A simple test using the Digest trait. This will compare the result with a
/// software implementation.
#[track_caller]
fn assert_digest<D: Digest, const N: usize>(input: &[u8]) {
fn assert_digest<'a, S: ShaAlgorithm, const N: usize>(sha: &'a mut Sha<'static>, input: &[u8]) {
let mut output = [0u8; N];
hash_digest::<D>(input, &mut output);
hash_digest::<S>(sha, input, &mut output);
// Compare against Software result.
match N {
@@ -86,7 +85,7 @@ fn assert_digest<D: Digest, const N: usize>(input: &[u8]) {
#[allow(unused_mut)]
fn with_random_data(
mut rng: Rng,
f: impl Fn(
mut f: impl FnMut(
(&[u8], &mut [u8]),
(&[u8], &mut [u8]),
(&[u8], &mut [u8]),
@@ -153,6 +152,11 @@ fn with_random_data(
}
}
pub struct Context {
rng: Rng,
sha: Sha<'static>,
}
#[cfg(test)]
#[embedded_test::tests]
mod tests {
@@ -162,7 +166,7 @@ mod tests {
use super::*;
#[init]
fn init() -> Rng {
fn init() -> Context {
cfg_if::cfg_if! {
if #[cfg(feature = "esp32")] {
// FIXME: max speed fails...?
@@ -174,79 +178,81 @@ mod tests {
}
let peripherals = esp_hal::init(config);
Rng::new(peripherals.RNG)
Context {
rng: Rng::new(peripherals.RNG),
sha: Sha::new(peripherals.SHA),
}
}
#[test]
#[cfg(any(feature = "esp32s2", feature = "esp32s3"))]
fn test_sha_512_224() {
fn test_sha_512_224(mut ctx: Context) {
let expected_output = [
0x19, 0xf2, 0xb3, 0x88, 0x22, 0x86, 0x94, 0x38, 0xee, 0x24, 0xc1, 0xc3, 0xb0, 0xb1,
0x21, 0x6a, 0xf4, 0x81, 0x14, 0x8f, 0x4, 0x34, 0xfd, 0xd7, 0x54, 0x3, 0x2b, 0x88,
];
let mut output = [0u8; 28];
hash_sha::<Sha512_224<Blocking>>(SOURCE_DATA, &mut output);
hash_sha::<Sha512_224>(&mut ctx.sha, SOURCE_DATA, &mut output);
assert_eq!(output, expected_output);
let mut output = [0u8; 28];
hash_digest::<Sha512_224<Blocking>>(SOURCE_DATA, &mut output);
hash_digest::<Sha512_224>(&mut ctx.sha, SOURCE_DATA, &mut output);
assert_eq!(output, expected_output);
}
#[test]
#[cfg(any(feature = "esp32s2", feature = "esp32s3"))]
fn test_sha_512_256() {
fn test_sha_512_256(mut ctx: Context) {
let expected_output = [
0xb7, 0x49, 0x4e, 0xe1, 0xdb, 0xcd, 0xe5, 0x47, 0x5a, 0x61, 0x25, 0xac, 0x27, 0xc2,
0x1b, 0x53, 0xcd, 0x6b, 0x16, 0x33, 0xb4, 0x94, 0xac, 0xa4, 0x2a, 0xe6, 0x99, 0x2f,
0xe7, 0xd, 0x83, 0x19,
];
let mut output = [0u8; 32];
hash_sha::<Sha512_256<Blocking>>(SOURCE_DATA, &mut output);
hash_sha::<Sha512_256>(&mut ctx.sha, SOURCE_DATA, &mut output);
assert_eq!(output, expected_output);
let mut output = [0u8; 32];
hash_digest::<Sha512_256<Blocking>>(SOURCE_DATA, &mut output);
hash_digest::<Sha512_256>(&mut ctx.sha, SOURCE_DATA, &mut output);
assert_eq!(output, expected_output);
}
/// A test that hashes inputs of every length between 1 and 200 bytes,
/// inclusive.
#[test]
fn test_digest_of_size_1_to_200() {
fn test_digest_of_size_1_to_200(mut ctx: Context) {
for i in 1..=200 {
assert_sha::<Sha1<Blocking>, 20>(&SOURCE_DATA[..i]);
assert_digest::<Sha1<Blocking>, 20>(&SOURCE_DATA[..i]);
assert_sha::<Sha1, 20>(&mut ctx.sha, &SOURCE_DATA[..i]);
assert_digest::<Sha1, 20>(&mut ctx.sha, &SOURCE_DATA[..i]);
#[cfg(not(feature = "esp32"))]
{
assert_sha::<Sha224<Blocking>, 28>(&SOURCE_DATA[..i]);
assert_digest::<Sha224<Blocking>, 28>(&SOURCE_DATA[..i]);
assert_sha::<Sha224, 28>(&mut ctx.sha, &SOURCE_DATA[..i]);
assert_digest::<Sha224, 28>(&mut ctx.sha, &SOURCE_DATA[..i]);
}
assert_sha::<Sha256<Blocking>, 32>(&SOURCE_DATA[..i]);
assert_digest::<Sha256<Blocking>, 32>(&SOURCE_DATA[..i]);
assert_sha::<Sha256, 32>(&mut ctx.sha, &SOURCE_DATA[..i]);
assert_digest::<Sha256, 32>(&mut ctx.sha, &SOURCE_DATA[..i]);
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
{
assert_sha::<Sha384<Blocking>, 48>(&SOURCE_DATA[..i]);
assert_digest::<Sha384<Blocking>, 48>(&SOURCE_DATA[..i]);
assert_sha::<Sha384, 48>(&mut ctx.sha, &SOURCE_DATA[..i]);
assert_digest::<Sha384, 48>(&mut ctx.sha, &SOURCE_DATA[..i]);
assert_sha::<Sha512<Blocking>, 64>(&SOURCE_DATA[..i]);
assert_digest::<Sha512<Blocking>, 64>(&SOURCE_DATA[..i]);
assert_sha::<Sha512, 64>(&mut ctx.sha, &SOURCE_DATA[..i]);
assert_digest::<Sha512, 64>(&mut ctx.sha, &SOURCE_DATA[..i]);
}
}
}
#[cfg(not(feature = "esp32"))]
/// A rolling test that loops between hashers at every step to test
/// interleaving. This specifically tests the SHA driver implementation.
#[test]
fn test_sha_rolling(rng: Rng) {
fn test_sha_rolling(mut ctx: Context) {
#[allow(unused)]
with_random_data(rng, |sha1_p, sha224_p, sha256_p, sha384_p, sha512_p| {
with_random_data(ctx.rng, |sha1_p, sha224_p, sha256_p, sha384_p, sha512_p| {
let mut sha1_remaining = sha1_p.0;
#[cfg(not(feature = "esp32"))]
let mut sha224_remaining = sha224_p.0;
let mut sha256_remaining = sha256_p.0;
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
@@ -254,43 +260,50 @@ mod tests {
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
let mut sha512_remaining = sha512_p.0;
let mut sha1 = esp_hal::sha::Sha1::default();
#[cfg(not(feature = "esp32"))]
let mut sha224 = esp_hal::sha::Sha224::default();
let mut sha256 = esp_hal::sha::Sha256::default();
let mut sha1 = esp_hal::sha::Context::<esp_hal::sha::Sha1>::new();
let mut sha224 = esp_hal::sha::Context::<esp_hal::sha::Sha224>::new();
let mut sha256 = esp_hal::sha::Context::<esp_hal::sha::Sha256>::new();
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
let mut sha384 = esp_hal::sha::Sha384::default();
let mut sha384 = esp_hal::sha::Context::<esp_hal::sha::Sha384>::new();
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
let mut sha512 = esp_hal::sha::Sha512::default();
let mut sha512 = esp_hal::sha::Context::<esp_hal::sha::Sha512>::new();
loop {
let mut all_done = true;
if !sha1_remaining.is_empty() {
sha1_remaining = block!(Sha::update(&mut sha1, sha1_remaining)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha1);
sha1_remaining = block!(digest.update(sha1_remaining)).unwrap();
block!(digest.save(&mut sha1));
all_done = false;
}
#[cfg(not(feature = "esp32"))]
if !sha224_remaining.is_empty() {
sha224_remaining = block!(Sha::update(&mut sha224, sha224_remaining)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha224);
sha224_remaining = block!(digest.update(sha224_remaining)).unwrap();
block!(digest.save(&mut sha224));
all_done = false;
}
if !sha256_remaining.is_empty() {
sha256_remaining = block!(Sha::update(&mut sha256, sha256_remaining)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha256);
sha256_remaining = block!(digest.update(sha256_remaining)).unwrap();
block!(digest.save(&mut sha256));
all_done = false;
}
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
{
if !sha384_remaining.is_empty() {
sha384_remaining =
block!(Sha::update(&mut sha384, sha384_remaining)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha384);
sha384_remaining = block!(digest.update(sha384_remaining)).unwrap();
block!(digest.save(&mut sha384));
all_done = false;
}
if !sha512_remaining.is_empty() {
sha512_remaining =
block!(Sha::update(&mut sha512, sha512_remaining)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha512);
sha512_remaining = block!(digest.update(sha512_remaining)).unwrap();
block!(digest.save(&mut sha512));
all_done = false;
}
}
@@ -300,16 +313,18 @@ mod tests {
}
}
block!(sha1.finish(sha1_p.1)).unwrap();
#[cfg(not(feature = "esp32"))]
{
block!(sha224.finish(sha224_p.1)).unwrap();
}
block!(sha256.finish(sha256_p.1)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha1);
block!(digest.finish(sha1_p.1)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha224);
block!(digest.finish(sha224_p.1)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha256);
block!(digest.finish(sha256_p.1)).unwrap();
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
{
block!(sha384.finish(sha384_p.1)).unwrap();
block!(sha512.finish(sha512_p.1)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha384);
block!(digest.finish(sha384_p.1)).unwrap();
let mut digest = ShaDigest::restore(&mut ctx.sha, &mut sha512);
block!(digest.finish(sha512_p.1)).unwrap();
}
});
}
@@ -317,40 +332,40 @@ mod tests {
/// A rolling test that loops between hashers at every step to test
/// interleaving. This specifically tests the Digest trait implementation.
#[test]
fn test_for_digest_rolling(rng: Rng) {
fn test_for_digest_rolling(mut ctx: Context) {
#[allow(unused)]
with_random_data(rng, |sha1_p, sha224_p, sha256_p, sha384_p, sha512_p| {
with_random_data(ctx.rng, |sha1_p, sha224_p, sha256_p, sha384_p, sha512_p| {
// The Digest::update will consume the entirety of remaining. We don't need to
// loop until remaining is fully consumed.
let mut sha1 = esp_hal::sha::Sha1::default();
Digest::update(&mut sha1, sha1_p.0);
let sha1_output = Digest::finalize(sha1);
let mut sha1 = ctx.sha.start::<esp_hal::sha::Sha1>();
Update::update(&mut sha1, sha1_p.0);
let sha1_output = digest::FixedOutput::finalize_fixed(sha1);
sha1_p.1.copy_from_slice(&sha1_output);
#[cfg(not(feature = "esp32"))]
{
let mut sha224 = esp_hal::sha::Sha224::default();
Digest::update(&mut sha224, sha224_p.0);
let sha224_output = Digest::finalize(sha224);
let mut sha224 = ctx.sha.start::<esp_hal::sha::Sha224>();
Update::update(&mut sha224, sha224_p.0);
let sha224_output = digest::FixedOutput::finalize_fixed(sha224);
sha224_p.1.copy_from_slice(&sha224_output);
}
let mut sha256 = esp_hal::sha::Sha256::default();
Digest::update(&mut sha256, sha256_p.0);
let sha256_output = Digest::finalize(sha256);
let mut sha256 = ctx.sha.start::<esp_hal::sha::Sha256>();
Update::update(&mut sha256, sha256_p.0);
let sha256_output = digest::FixedOutput::finalize_fixed(sha256);
sha256_p.1.copy_from_slice(&sha256_output);
#[cfg(any(feature = "esp32", feature = "esp32s2", feature = "esp32s3"))]
{
let mut sha384 = esp_hal::sha::Sha384::default();
Digest::update(&mut sha384, sha384_p.0);
let sha384_output = Digest::finalize(sha384);
let mut sha384 = ctx.sha.start::<esp_hal::sha::Sha384>();
Update::update(&mut sha384, sha384_p.0);
let sha384_output = digest::FixedOutput::finalize_fixed(sha384);
sha384_p.1.copy_from_slice(&sha384_output);
let mut sha512 = esp_hal::sha::Sha512::default();
Digest::update(&mut sha512, sha512_p.0);
let sha512_output = Digest::finalize(sha512);
let mut sha512 = ctx.sha.start::<esp_hal::sha::Sha512>();
Update::update(&mut sha512, sha512_p.0);
let sha512_output = digest::FixedOutput::finalize_fixed(sha512);
sha512_p.1.copy_from_slice(&sha512_output);
}
});