Clean up critical-section impl.

Adds new `Sio::core()` function.
This commit is contained in:
Jonathan 'theJPster' Pallant 2022-01-30 16:07:40 +00:00
parent 7e2f8d274c
commit 35a10f2bc6
2 changed files with 23 additions and 10 deletions

View file

@@ -26,7 +26,7 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
// Store the initial interrupt state and current core id in stack variables
let interrupts_active = cortex_m::register::primask::read().is_active();
// We reserved 0 as our `LOCK_UNOWNED` value, so add 1 to core_id so we get 1 for core0, 2 for core1.
let core = (*pac::SIO::ptr()).cpuid.read().bits() as u8 + 1_u8;
let core = crate::Sio::core() + 1_u8;
// Do we already own the spinlock?
if LOCK_OWNER.load(Ordering::Acquire) == core {
// We already own the lock, so we must have called acquire within a critical_section.
@@ -41,9 +41,11 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
// Ensure the compiler doesn't re-order accesses and violate safety here
core::sync::atomic::compiler_fence(Ordering::SeqCst);
// Read the spinlock reserved for `critical_section`
if (*pac::SIO::ptr()).spinlock[31].read().bits() != 0 {
if let Some(lock) = crate::sio::Spinlock31::try_claim() {
// We just acquired the lock.
// Store which core we are so we can tell if we're called recursively
// 1. Forget it, so we don't immediately unlock
core::mem::forget(lock);
// 2. Store which core we are so we can tell if we're called recursively
LOCK_OWNER.store(core, Ordering::Relaxed);
break;
}
@@ -67,7 +69,7 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
// Ensure the compiler doesn't re-order accesses and violate safety here
core::sync::atomic::compiler_fence(Ordering::SeqCst);
// Release the spinlock to allow others to enter critical_section again
(*pac::SIO::ptr()).spinlock[31].write_with_zero(|w| w.bits(1));
crate::sio::Spinlock31::release();
// Re-enable interrupts if they were enabled when we first called acquire()
// We only do this on the outermost `critical_section` to ensure interrupts stay disabled
// for the whole time that we have the lock

View file

@@ -76,6 +76,12 @@ impl Sio {
hwdivider: HwDivider { _private: () },
}
}
/// Returns whether we are running on Core 0 (`0`) or Core 1 (`1`).
pub fn core() -> u8 {
    // Safety: CPUID is a read-only register, so reading it is always safe
    // and has no side effects.
    let cpuid = unsafe { (*pac::SIO::ptr()).cpuid.read().bits() };
    cpuid as u8
}
}
impl SioFifo {
@@ -270,6 +276,15 @@ where
pub fn claim_async() -> nb::Result<Self, Infallible> {
Self::try_claim().ok_or(nb::Error::WouldBlock)
}
/// Release a locked spin-lock.
///
/// # Safety
///
/// Only call this function if you currently hold the spin-lock; releasing
/// a lock you do not own would let another claimant enter its critical
/// region early.
pub unsafe fn release() {
    // Writing any value to the spinlock register releases the lock.
    (*pac::SIO::ptr()).spinlock[N].write_with_zero(|w| w.bits(1));
}
}
impl<const N: usize> Drop for Spinlock<N>
@@ -277,12 +292,8 @@ where
Spinlock<N>: SpinlockValid,
{
fn drop(&mut self) {
// Safety: At this point we should be the only one accessing this spinlock register
// so writing to this address is fine
let sio = unsafe { &*pac::SIO::ptr() };
// Write (any value): release the lock
sio.spinlock[N].write(|b| unsafe { b.bits(1) });
// This is safe because we own the object, and hence hold the lock.
unsafe { Self::release() }
}
}