Merge pull request #281 from rp-rs/update_pac

Use new spinlock API provided by PAC 0.3.0
Jonathan 'theJPster' Pallant 2022-01-31 10:40:02 +00:00 committed by GitHub
commit 354a2a5e5e
3 changed files with 254 additions and 94 deletions
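The key PAC change: rp2040-pac 0.2.0 exposed each spinlock as an individually named register (`spinlock0` through `spinlock31`), while 0.3.0 exposes them as one 32-entry register array. That array is what makes the const-generic `Spinlock<N>` in this diff possible. A minimal before/after sketch (assuming the HAL's usual `rp2040_hal::pac` re-export):

```rust
use rp2040_hal::pac; // re-export of rp2040-pac (assumed path)

let sio = unsafe { &*pac::SIO::ptr() };
// rp2040-pac 0.2.0: one named field per lock
// let claimed = sio.spinlock31.read().bits() != 0;
// rp2040-pac 0.3.0: the same locks as an indexed 32-entry array
// (note: on RP2040 hardware, reading a spinlock register is the claim attempt)
let claimed = sio.spinlock[31].read().bits() != 0;
```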

View file

@@ -16,7 +16,7 @@ eh1_0_alpha = { version = "=1.0.0-alpha.6", package="embedded-hal", optional=true }
embedded-time = "0.12.0"
itertools = { version = "0.10.1", default-features = false }
nb = "1.0"
rp2040-pac = "0.2.0"
rp2040-pac = "0.3.0"
paste = "1.0"
pio = "0.1.0"
usb-device = "0.2.8"

View file

@@ -26,7 +26,7 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
// Store the initial interrupt state and current core id in stack variables
let interrupts_active = cortex_m::register::primask::read().is_active();
// We reserved 0 as our `LOCK_UNOWNED` value, so add 1 to core_id so we get 1 for core0, 2 for core1.
let core = (*pac::SIO::ptr()).cpuid.read().bits() as u8 + 1_u8;
let core = crate::Sio::core() + 1_u8;
// Do we already own the spinlock?
if LOCK_OWNER.load(Ordering::Acquire) == core {
// We already own the lock, so we must have called acquire within a critical_section.
@@ -41,9 +41,11 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
// Ensure the compiler doesn't re-order accesses and violate safety here
core::sync::atomic::compiler_fence(Ordering::SeqCst);
// Read the spinlock reserved for `critical_section`
if (*pac::SIO::ptr()).spinlock31.read().bits() != 0 {
if let Some(lock) = crate::sio::Spinlock31::try_claim() {
// We just acquired the lock.
// Store which core we are so we can tell if we're called recursively
// 1. Forget it, so we don't immediately unlock
core::mem::forget(lock);
// 2. Store which core we are so we can tell if we're called recursively
LOCK_OWNER.store(core, Ordering::Relaxed);
break;
}
@@ -67,7 +69,7 @@ unsafe impl critical_section::Impl for RpSpinlockCs {
// Ensure the compiler doesn't re-order accesses and violate safety here
core::sync::atomic::compiler_fence(Ordering::SeqCst);
// Release the spinlock to allow others to enter critical_section again
(*pac::SIO::ptr()).spinlock31.write_with_zero(|w| w.bits(1));
crate::sio::Spinlock31::release();
// Re-enable interrupts if they were enabled when we first called acquire()
// We only do this on the outermost `critical_section` to ensure interrupts stay disabled
// for the whole time that we have the lock
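For context, this impl is what runs behind the `critical_section` crate's `with` entry point; user code never touches Spinlock 31 directly. A hedged sketch of the behaviour it guarantees (assuming the `critical-section` 0.2 `with` API the HAL targets here):

```rust
static mut SHARED: u32 = 0;

fn bump_shared() {
    critical_section::with(|_cs| {
        // Safe against the other core (Spinlock 31 is held) and against
        // interrupts (PRIMASK is set). Nested `with` calls on the same
        // core do not deadlock: LOCK_OWNER records that we already own it.
        unsafe { SHARED += 1 };
    });
}
```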

View file

@@ -76,6 +76,12 @@ impl Sio {
hwdivider: HwDivider { _private: () },
}
}
/// Returns whether we are running on Core 0 (`0`) or Core 1 (`1`).
pub fn core() -> u8 {
// Safety: it is always safe to read this read-only register
unsafe { (*pac::SIO::ptr()).cpuid.read().bits() as u8 }
}
}
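The new `Sio::core()` helper gives a cheap way to branch on the current core without constructing a `Sio` instance. A small hypothetical dispatch sketch (assuming the HAL's usual `rp2040_hal::Sio` re-export):

```rust
use rp2040_hal::Sio; // re-export of sio::Sio (assumed path)

fn per_core_init() {
    // CPUID reads 0 on core 0 and 1 on core 1, so this is a cheap dispatch.
    if Sio::core() == 0 {
        // core 0 work, e.g. bring up peripherals
    } else {
        // core 1 work, e.g. run the compute loop
    }
}
```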
impl SioFifo {
@@ -207,16 +213,75 @@ impl HwDivider {
}
}
/// Trait for all the spinlocks. See the documentation of e.g. [`Spinlock0`] for more information
pub trait Spinlock: typelevel::Sealed + Sized {
/// This type is just used to limit us to Spinlocks `0..=31`
pub trait SpinlockValid {}
/// Hardware based spinlock.
///
/// You can claim this lock by calling either [`claim`], [`try_claim`] or
/// [`claim_async`]. These spin-locks are hardware backed, so if you lock
/// e.g. `Spinlock<6>`, then any other part of your application using
/// `Spinlock<6>` will contend for the same lock, without them needing to
/// share a reference or otherwise communicate with each other.
///
/// When the obtained spinlock goes out of scope, it is automatically unlocked.
///
/// ```no_run
/// use rp2040_hal::sio::Spinlock0;
/// static mut SOME_GLOBAL_VAR: u32 = 0;
///
/// /// This function is safe to call from two different cores, but is not safe
/// /// to call from an interrupt routine!
/// fn update_global_var() {
/// // Do not say `let _ = ` here - it will immediately unlock!
/// let _lock = Spinlock0::claim();
/// // Do your thing here that Core 0 and Core 1 might want to do at the
/// // same time, like update this global variable:
/// unsafe { SOME_GLOBAL_VAR += 1 };
/// // The lock is dropped here.
/// }
/// ```
///
/// **Warning**: These spinlocks are not re-entrant, meaning that the
/// following code will cause a deadlock:
///
/// ```no_run
/// use rp2040_hal::sio::Spinlock0;
/// let lock_1 = Spinlock0::claim();
/// let lock_2 = Spinlock0::claim(); // deadlock here
/// ```
///
/// **Note:** The `critical-section` implementation uses Spinlock 31.
///
/// [`claim`]: #method.claim
/// [`try_claim`]: #method.try_claim
/// [`claim_async`]: #method.claim_async
pub struct Spinlock<const N: usize>(core::marker::PhantomData<()>)
where
Spinlock<N>: SpinlockValid;
impl<const N: usize> Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
/// Try to claim the spinlock. Will return `Some(Self)` if the lock is obtained, and `None` if the lock is
/// already in use somewhere else.
fn try_claim() -> Option<Self>;
pub fn try_claim() -> Option<Self> {
// Safety: We're only reading from this register
let sio = unsafe { &*pac::SIO::ptr() };
let lock = sio.spinlock[N].read().bits();
if lock > 0 {
Some(Self(core::marker::PhantomData))
} else {
None
}
}
/// Claim the spinlock; this will block the current thread until the lock is available.
///
/// Note that calling this multiple times in a row will cause a deadlock.
fn claim() -> Self {
pub fn claim() -> Self {
loop {
if let Some(result) = Self::try_claim() {
break result;
@@ -225,98 +290,191 @@ pub trait Spinlock: typelevel::Sealed + Sized {
}
/// Try to claim the spinlock. Will return `WouldBlock` until the spinlock is available.
fn claim_async() -> nb::Result<Self, Infallible> {
pub fn claim_async() -> nb::Result<Self, Infallible> {
Self::try_claim().ok_or(nb::Error::WouldBlock)
}
}
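`claim_async` returns an `nb::Result`, so it slots into the usual `nb` polling patterns. A short usage sketch using the `nb::block!` macro from the `nb` crate already in the dependency list above:

```rust
use rp2040_hal::sio::Spinlock0;

// Spin until the lock is free; equivalent to `Spinlock0::claim()`.
let _lock = nb::block!(Spinlock0::claim_async()).unwrap();
// ... guarded work here ...
// `_lock` is dropped (and the spinlock released) at end of scope.
```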
macro_rules! impl_spinlock {
($($spinlock_name:ident => $register:ident,)*) => {
$(
/// Hardware based spinlock.
///
/// You can claim this lock by calling either [`claim`], [`try_claim`] or [`claim_async`].
/// This will automatically lock ALL spinlocks of type `
#[doc = stringify!($spinlock_name)]
/// `.
///
/// When the obtained spinlock goes out of scope, it is automatically unlocked.
///
/// **warning**: These spinlocks are not re-entrant, meaning that the following code will cause a deadlock:
///
/// ```no_run
/// use rp2040_hal::sio::{Spinlock0, Spinlock};
/// let lock_1 = Spinlock0::claim();
/// let lock_2 = Spinlock0::claim(); // deadlock here
/// ```
///
/// [`claim`]: #method.claim
/// [`try_claim`]: #method.try_claim
/// [`claim_async`]: #method.claim_async
pub struct $spinlock_name(core::marker::PhantomData<()>);
impl Spinlock for $spinlock_name {
fn try_claim() -> Option<$spinlock_name> {
// Safety: We're only reading from this register
let sio = unsafe { &*pac::SIO::ptr() };
let lock = sio.$register.read().bits();
if lock > 0 {
Some(Self(core::marker::PhantomData))
} else {
None
}
}
}
impl typelevel::Sealed for $spinlock_name {}
impl Drop for $spinlock_name {
fn drop(&mut self) {
// Safety: At this point we should be the only one accessing this spinlock register
// so writing to this address is fine
let sio = unsafe { &*pac::SIO::ptr() };
// Write (any value): release the lock
sio.$register.write(|b| unsafe { b.bits(1) });
}
}
)*
}
}
/// Clear a locked spin-lock.
///
/// # Safety
///
/// Only call this function if you hold the spin-lock.
pub unsafe fn release() {
let sio = &*pac::SIO::ptr();
// Write (any value): release the lock
sio.spinlock[N].write_with_zero(|b| b.bits(1));
}
}
impl_spinlock! {
Spinlock0 => spinlock0,
Spinlock1 => spinlock1,
Spinlock2 => spinlock2,
Spinlock3 => spinlock3,
Spinlock4 => spinlock4,
Spinlock5 => spinlock5,
Spinlock6 => spinlock6,
Spinlock7 => spinlock7,
Spinlock8 => spinlock8,
Spinlock9 => spinlock9,
Spinlock10 => spinlock10,
Spinlock11 => spinlock11,
Spinlock12 => spinlock12,
Spinlock13 => spinlock13,
Spinlock14 => spinlock14,
Spinlock15 => spinlock15,
Spinlock16 => spinlock16,
Spinlock17 => spinlock17,
Spinlock18 => spinlock18,
Spinlock19 => spinlock19,
Spinlock20 => spinlock20,
Spinlock21 => spinlock21,
Spinlock22 => spinlock22,
Spinlock23 => spinlock23,
Spinlock24 => spinlock24,
Spinlock25 => spinlock25,
Spinlock26 => spinlock26,
Spinlock27 => spinlock27,
Spinlock28 => spinlock28,
Spinlock29 => spinlock29,
Spinlock30 => spinlock30,
Spinlock31 => spinlock31,
impl<const N: usize> Drop for Spinlock<N>
where
Spinlock<N>: SpinlockValid,
{
fn drop(&mut self) {
// This is safe because we own the object, and hence hold the lock.
unsafe { Self::release() }
}
}
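Because unlocking lives in `Drop`, a lock can be held past the end of its scope only by forgetting the guard and later calling the unsafe `release()`, which is exactly the pattern the `critical_section` implementation above uses. A cautionary sketch:

```rust
use rp2040_hal::sio::Spinlock0;

let lock = Spinlock0::claim();
// Keep the hardware lock held even after this scope would drop `lock`:
core::mem::forget(lock);

// ... some time later, wherever the lock is logically handed off to:
// Safety: we still hold Spinlock 0, because the guard above was forgotten.
unsafe { Spinlock0::release() };
```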
/// Spinlock number 0
pub type Spinlock0 = Spinlock<0>;
impl SpinlockValid for Spinlock<0> {}
/// Spinlock number 1
pub type Spinlock1 = Spinlock<1>;
impl SpinlockValid for Spinlock<1> {}
/// Spinlock number 2
pub type Spinlock2 = Spinlock<2>;
impl SpinlockValid for Spinlock<2> {}
/// Spinlock number 3
pub type Spinlock3 = Spinlock<3>;
impl SpinlockValid for Spinlock<3> {}
/// Spinlock number 4
pub type Spinlock4 = Spinlock<4>;
impl SpinlockValid for Spinlock<4> {}
/// Spinlock number 5
pub type Spinlock5 = Spinlock<5>;
impl SpinlockValid for Spinlock<5> {}
/// Spinlock number 6
pub type Spinlock6 = Spinlock<6>;
impl SpinlockValid for Spinlock<6> {}
/// Spinlock number 7
pub type Spinlock7 = Spinlock<7>;
impl SpinlockValid for Spinlock<7> {}
/// Spinlock number 8
pub type Spinlock8 = Spinlock<8>;
impl SpinlockValid for Spinlock<8> {}
/// Spinlock number 9
pub type Spinlock9 = Spinlock<9>;
impl SpinlockValid for Spinlock<9> {}
/// Spinlock number 10
pub type Spinlock10 = Spinlock<10>;
impl SpinlockValid for Spinlock<10> {}
/// Spinlock number 11
pub type Spinlock11 = Spinlock<11>;
impl SpinlockValid for Spinlock<11> {}
/// Spinlock number 12
pub type Spinlock12 = Spinlock<12>;
impl SpinlockValid for Spinlock<12> {}
/// Spinlock number 13
pub type Spinlock13 = Spinlock<13>;
impl SpinlockValid for Spinlock<13> {}
/// Spinlock number 14
pub type Spinlock14 = Spinlock<14>;
impl SpinlockValid for Spinlock<14> {}
/// Spinlock number 15
pub type Spinlock15 = Spinlock<15>;
impl SpinlockValid for Spinlock<15> {}
/// Spinlock number 16
pub type Spinlock16 = Spinlock<16>;
impl SpinlockValid for Spinlock<16> {}
/// Spinlock number 17
pub type Spinlock17 = Spinlock<17>;
impl SpinlockValid for Spinlock<17> {}
/// Spinlock number 18
pub type Spinlock18 = Spinlock<18>;
impl SpinlockValid for Spinlock<18> {}
/// Spinlock number 19
pub type Spinlock19 = Spinlock<19>;
impl SpinlockValid for Spinlock<19> {}
/// Spinlock number 20
pub type Spinlock20 = Spinlock<20>;
impl SpinlockValid for Spinlock<20> {}
/// Spinlock number 21
pub type Spinlock21 = Spinlock<21>;
impl SpinlockValid for Spinlock<21> {}
/// Spinlock number 22
pub type Spinlock22 = Spinlock<22>;
impl SpinlockValid for Spinlock<22> {}
/// Spinlock number 23
pub type Spinlock23 = Spinlock<23>;
impl SpinlockValid for Spinlock<23> {}
/// Spinlock number 24
pub type Spinlock24 = Spinlock<24>;
impl SpinlockValid for Spinlock<24> {}
/// Spinlock number 25
pub type Spinlock25 = Spinlock<25>;
impl SpinlockValid for Spinlock<25> {}
/// Spinlock number 26
pub type Spinlock26 = Spinlock<26>;
impl SpinlockValid for Spinlock<26> {}
/// Spinlock number 27
pub type Spinlock27 = Spinlock<27>;
impl SpinlockValid for Spinlock<27> {}
/// Spinlock number 28
pub type Spinlock28 = Spinlock<28>;
impl SpinlockValid for Spinlock<28> {}
/// Spinlock number 29
pub type Spinlock29 = Spinlock<29>;
impl SpinlockValid for Spinlock<29> {}
/// Spinlock number 30
pub type Spinlock30 = Spinlock<30>;
impl SpinlockValid for Spinlock<30> {}
/// Spinlock number 31 - used by critical section implementation
pub(crate) type Spinlock31 = Spinlock<31>;
impl SpinlockValid for Spinlock<31> {}
/// Returns the current state of the spinlocks. Each index corresponds to the associated spinlock, e.g. if index `5` is set to `true`, it means that [`Spinlock5`] is currently locked.
///