From 35a10f2bc69384566ce72538f60ddb88174a60c3 Mon Sep 17 00:00:00 2001
From: Jonathan 'theJPster' Pallant
Date: Sun, 30 Jan 2022 16:07:40 +0000
Subject: [PATCH] Clean up critical-section impl. Adds new `Sio::core()`
 function.

---
 rp2040-hal/src/critical_section_impl.rs | 10 ++++++----
 rp2040-hal/src/sio.rs                   | 23 +++++++++++++++++------
 2 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/rp2040-hal/src/critical_section_impl.rs b/rp2040-hal/src/critical_section_impl.rs
index 711d395..e26cfd9 100644
--- a/rp2040-hal/src/critical_section_impl.rs
+++ b/rp2040-hal/src/critical_section_impl.rs
@@ -26,7 +26,7 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
         // Store the initial interrupt state and current core id in stack variables
         let interrupts_active = cortex_m::register::primask::read().is_active();
         // We reserved 0 as our `LOCK_UNOWNED` value, so add 1 to core_id so we get 1 for core0, 2 for core1.
-        let core = (*pac::SIO::ptr()).cpuid.read().bits() as u8 + 1_u8;
+        let core = crate::Sio::core() + 1_u8;
         // Do we already own the spinlock?
         if LOCK_OWNER.load(Ordering::Acquire) == core {
             // We already own the lock, so we must have called acquire within a critical_section.
@@ -41,9 +41,11 @@
                 // Ensure the compiler doesn't re-order accesses and violate safety here
                 core::sync::atomic::compiler_fence(Ordering::SeqCst);
                 // Read the spinlock reserved for `critical_section`
-                if (*pac::SIO::ptr()).spinlock[31].read().bits() != 0 {
+                if let Some(lock) = crate::sio::Spinlock31::try_claim() {
                     // We just acquired the lock.
-                    // Store which core we are so we can tell if we're called recursively
+                    // 1. Forget it, so we don't immediately unlock
+                    core::mem::forget(lock);
+                    // 2. Store which core we are so we can tell if we're called recursively
                     LOCK_OWNER.store(core, Ordering::Relaxed);
                     break;
                 }
@@ -67,7 +69,7 @@
             // Ensure the compiler doesn't re-order accesses and violate safety here
             core::sync::atomic::compiler_fence(Ordering::SeqCst);
             // Release the spinlock to allow others to enter critical_section again
-            (*pac::SIO::ptr()).spinlock[31].write_with_zero(|w| w.bits(1));
+            crate::sio::Spinlock31::release();
             // Re-enable interrupts if they were enabled when we first called acquire()
             // We only do this on the outermost `critical_section` to ensure interrupts stay disabled
             // for the whole time that we have the lock
diff --git a/rp2040-hal/src/sio.rs b/rp2040-hal/src/sio.rs
index e8148a8..6d12b0b 100644
--- a/rp2040-hal/src/sio.rs
+++ b/rp2040-hal/src/sio.rs
@@ -76,6 +76,12 @@
             hwdivider: HwDivider { _private: () },
         }
     }
+
+    /// Returns whether we are running on Core 0 (`0`) or Core 1 (`1`).
+    pub fn core() -> u8 {
+        // Safety: it is always safe to read this read-only register
+        unsafe { (*pac::SIO::ptr()).cpuid.read().bits() as u8 }
+    }
 }
 
 impl SioFifo {
@@ -270,6 +276,15 @@ where
     pub fn claim_async() -> nb::Result<Self, Infallible> {
         Self::try_claim().ok_or(nb::Error::WouldBlock)
     }
+
+    /// Clear a locked spin-lock.
+    ///
+    /// Safety: Only call this function if you hold the spin-lock.
+    pub unsafe fn release() {
+        let sio = &*pac::SIO::ptr();
+        // Write (any value): release the lock
+        sio.spinlock[N].write_with_zero(|b| b.bits(1));
+    }
 }
 
 impl<const N: usize> Drop for Spinlock<N>
@@ -277,12 +292,8 @@ where
     Spinlock<N>: SpinlockValid,
 {
     fn drop(&mut self) {
-        // Safety: At this point we should be the only one accessing this spinlock register
-        // so writing to this address is fine
-        let sio = unsafe { &*pac::SIO::ptr() };
-
-        // Write (any value): release the lock
-        sio.spinlock[N].write(|b| unsafe { b.bits(1) });
+        // This is safe because we own the object, and hence hold the lock.
+        unsafe { Self::release() }
     }
 }
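
Usage note (illustrative, not part of the patch): a minimal sketch of how
downstream code might exercise the APIs touched above, assuming the usual
`critical-section` and `rp2040-hal` crates. The function names `which_core`,
`with_shared_state` and `with_hardware_lock` are hypothetical.

    use rp2040_hal::sio::Spinlock0;
    use rp2040_hal::Sio;

    fn which_core() -> u8 {
        // `Sio::core()` (added above) reads the SIO CPUID register without
        // needing to own the `Sio` peripheral: 0 on core 0, 1 on core 1.
        Sio::core()
    }

    fn with_shared_state() {
        // Backed by the `RpSpinlockCs` implementation above: interrupts on the
        // current core are masked and SIO spinlock 31 is held for the closure's
        // duration; nested calls on the same core are detected via `LOCK_OWNER`.
        critical_section::with(|_cs| {
            // ... access data shared between the two cores here ...
        });
    }

    fn with_hardware_lock() {
        // Ordinary spinlock use is unchanged: dropping the guard now simply
        // calls the new `Spinlock::<N>::release()` internally.
        if let Some(_lock) = Spinlock0::try_claim() {
            // ... the lock is freed when `_lock` goes out of scope ...
        }
    }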