Allow starting multiple state machines in sync (#301)

* Add example of synchronized PIOs

* Synchronize state machines using WAIT IRQ instruction

* Use "irq wait 0" instead of "wait 1 irq 0"

This way, the initial value of the interrupt flag doesn't matter

* Start state machines synchronized without IRQ WAIT instruction

* Improve API
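
A minimal sketch of the resulting API (based on the example added below; sm0 and sm1 are state machines already built via PIOBuilder and still stopped):

    // Group two stopped state machines, restart their clock dividers, and
    // enable them in the same clock cycle.
    let group = sm0.with(sm1).sync().start();
    // The whole group can be stopped and restarted together...
    let group = group.stop();
    // ...and split back into its individual (running) state machines.
    let (sm0, sm1) = group.start().free();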

Co-authored-by: Andrew Straw <strawman@astraw.com>
Jan Niehusmann 2022-03-18 10:55:31 +01:00 committed by GitHub
parent f8de8755cc
commit 6026ea4ae3
2 changed files with 442 additions and 0 deletions


@@ -0,0 +1,96 @@
//! This example toggles the GPIO0 and GPIO1 pins, with each controlled from a
//! separate PIO state machine.
//!
//! Despite running in separate state machines, the clocks are synchronized and
//! the rise and fall times will be simultaneous.
#![no_std]
#![no_main]
use cortex_m_rt::entry;
use hal::gpio::{FunctionPio0, Pin};
use hal::pac;
use hal::pio::PIOExt;
use hal::Sio;
use panic_halt as _;
use rp2040_hal as hal;
#[link_section = ".boot2"]
#[used]
pub static BOOT2: [u8; 256] = rp2040_boot2::BOOT_LOADER_W25Q080;
#[entry]
fn main() -> ! {
let mut pac = pac::Peripherals::take().unwrap();
let sio = Sio::new(pac.SIO);
let pins = hal::gpio::Pins::new(
pac.IO_BANK0,
pac.PADS_BANK0,
sio.gpio_bank0,
&mut pac.RESETS,
);
// configure pins for Pio0.
let _: Pin<_, FunctionPio0> = pins.gpio0.into_mode();
let _: Pin<_, FunctionPio0> = pins.gpio1.into_mode();
// Pin IDs for use inside the PIO
let pin0 = 0;
let pin1 = 1;
// Define a simple PIO program.
let program = pio_proc::pio_asm!(
"
.wrap_target
set pins, 1 [31]
set pins, 0 [31]
.wrap
"
);
// Initialize and start PIO
let (mut pio, sm0, sm1, _, _) = pac.PIO0.split(&mut pac.RESETS);
// I'm "measuring" the phase offset between the two pins by connecting
// then through a LED. If there is a clock offset, there will be a
// short time with a voltage between the pins, so the LED will flash up.
// With a slow clock this is not visible, so use a reasonably fast clock.
let div = 256f32;
let installed = pio.install(&program.program).unwrap();
let (mut sm0, _, _) = rp2040_hal::pio::PIOBuilder::from_program(installed)
.set_pins(pin0, 1)
.clock_divisor(div)
.build(sm0);
// The GPIO pin needs to be configured as an output.
sm0.set_pindirs([(pin0, hal::pio::PinDir::Output)]);
// NOTE: with the current rp-hal, I need to call pio.install() twice. This
// should be investigated further as it seems wrong.
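// (Presumably this is because PIOBuilder::from_program() takes ownership of
// the InstalledProgram, so each state machine currently needs its own install.)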
let installed = pio.install(&program.program).unwrap();
let (mut sm1, _, _) = rp2040_hal::pio::PIOBuilder::from_program(installed)
.set_pins(pin1, 1)
.clock_divisor(div)
.build(sm1);
// The GPIO pin needs to be configured as an output.
sm1.set_pindirs([(pin1, hal::pio::PinDir::Output)]);
// Start both SMs at the same time
let group = sm0.with(sm1).sync().start();
cortex_m::asm::delay(10_000_000);
// Stop both SMs at the same time
let group = group.stop();
cortex_m::asm::delay(10_000_000);
// Start them again and extract the individual state machines
let (sm0, sm1) = group.start().free();
cortex_m::asm::delay(10_000_000);
// Stop the two state machines separately
let _sm0 = sm0.stop();
cortex_m::asm::delay(10_000_000);
let _sm1 = sm1.stop();
#[allow(clippy::empty_loop)]
loop {}
}


@@ -655,6 +655,352 @@ impl<P: PIOExt, SM: StateMachineIndex> StateMachine<(P, SM), Stopped> {
}
}
impl<P: PIOExt, SM: StateMachineIndex, State> StateMachine<(P, SM), State> {
/// Create a group of state machines, which can be started/stopped synchronously
pub fn with<SM2: StateMachineIndex>(
self,
other_sm: StateMachine<(P, SM2), State>,
) -> StateMachineGroup2<P, SM, SM2, State> {
StateMachineGroup2 {
sm1: self,
sm2: other_sm,
}
}
}
/// Group of 2 state machines, which can be started/stopped synchronously.
pub struct StateMachineGroup2<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
State,
> {
sm1: StateMachine<(P, SM1Idx), State>,
sm2: StateMachine<(P, SM2Idx), State>,
}
/// Group of 3 state machines, which can be started/stopped synchronously.
pub struct StateMachineGroup3<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
State,
> {
sm1: StateMachine<(P, SM1Idx), State>,
sm2: StateMachine<(P, SM2Idx), State>,
sm3: StateMachine<(P, SM3Idx), State>,
}
/// Group of 4 state machines, which can be started/stopped synchronously.
pub struct StateMachineGroup4<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
SM4Idx: StateMachineIndex,
State,
> {
sm1: StateMachine<(P, SM1Idx), State>,
sm2: StateMachine<(P, SM2Idx), State>,
sm3: StateMachine<(P, SM3Idx), State>,
sm4: StateMachine<(P, SM4Idx), State>,
}
impl<P: PIOExt, SM1Idx: StateMachineIndex, SM2Idx: StateMachineIndex, State>
StateMachineGroup2<P, SM1Idx, SM2Idx, State>
{
/// Split the group, releasing the contained state machines
#[allow(clippy::type_complexity)]
pub fn free(
self,
) -> (
StateMachine<(P, SM1Idx), State>,
StateMachine<(P, SM2Idx), State>,
) {
(self.sm1, self.sm2)
}
/// Add another state machine to the group
pub fn with<SM3Idx: StateMachineIndex>(
self,
other_sm: StateMachine<(P, SM3Idx), State>,
) -> StateMachineGroup3<P, SM1Idx, SM2Idx, SM3Idx, State> {
StateMachineGroup3 {
sm1: self.sm1,
sm2: self.sm2,
sm3: other_sm,
}
}
fn mask(&self) -> u32 {
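// Bit mask with one bit set per state machine of this group, matching the
// per-SM bit positions used by the PIO CTRL register fields.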
(1 << SM1Idx::id()) | (1 << SM2Idx::id())
}
}
impl<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
State,
> StateMachineGroup3<P, SM1Idx, SM2Idx, SM3Idx, State>
{
/// Split the group, releasing the contained state machines
#[allow(clippy::type_complexity)]
pub fn free(
self,
) -> (
StateMachine<(P, SM1Idx), State>,
StateMachine<(P, SM2Idx), State>,
StateMachine<(P, SM3Idx), State>,
) {
(self.sm1, self.sm2, self.sm3)
}
/// Add another state machine to the group
pub fn with<SM4Idx: StateMachineIndex>(
self,
other_sm: StateMachine<(P, SM4Idx), State>,
) -> StateMachineGroup4<P, SM1Idx, SM2Idx, SM3Idx, SM4Idx, State> {
StateMachineGroup4 {
sm1: self.sm1,
sm2: self.sm2,
sm3: self.sm3,
sm4: other_sm,
}
}
fn mask(&self) -> u32 {
(1 << SM1Idx::id()) | (1 << SM2Idx::id()) | (1 << SM3Idx::id())
}
}
impl<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
SM4Idx: StateMachineIndex,
State,
> StateMachineGroup4<P, SM1Idx, SM2Idx, SM3Idx, SM4Idx, State>
{
/// Split the group, releasing the contained state machines
#[allow(clippy::type_complexity)]
pub fn free(
self,
) -> (
StateMachine<(P, SM1Idx), State>,
StateMachine<(P, SM2Idx), State>,
StateMachine<(P, SM3Idx), State>,
StateMachine<(P, SM4Idx), State>,
) {
(self.sm1, self.sm2, self.sm3, self.sm4)
}
fn mask(&self) -> u32 {
(1 << SM1Idx::id()) | (1 << SM2Idx::id()) | (1 << SM3Idx::id()) | (1 << SM4Idx::id())
}
}
impl<P: PIOExt, SM1Idx: StateMachineIndex, SM2Idx: StateMachineIndex>
StateMachineGroup2<P, SM1Idx, SM2Idx, Stopped>
{
/// Start grouped state machines
pub fn start(mut self) -> StateMachineGroup2<P, SM1Idx, SM2Idx, Running> {
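// Setting the SM_ENABLE bits (CTRL bits 3:0 on the RP2040) for all state
// machines of the group in a single write starts them in the same clock cycle.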
self.sm1.sm.set_ctrl_bits(self.mask());
StateMachineGroup2 {
sm1: StateMachine {
sm: self.sm1.sm,
program: self.sm1.program,
_phantom: core::marker::PhantomData,
},
sm2: StateMachine {
sm: self.sm2.sm,
program: self.sm2.program,
_phantom: core::marker::PhantomData,
},
}
}
/// Sync grouped state machines
pub fn sync(mut self) -> Self {
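// Writing the group mask shifted into the CLKDIV_RESTART field (CTRL bits 11:8
// on the RP2040) resets the clock dividers of the selected state machines, so
// they subsequently run in phase.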
self.sm1.sm.set_ctrl_bits(self.mask() << 8);
self
}
}
impl<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
> StateMachineGroup3<P, SM1Idx, SM2Idx, SM3Idx, Stopped>
{
/// Start grouped state machines
pub fn start(mut self) -> StateMachineGroup3<P, SM1Idx, SM2Idx, SM3Idx, Running> {
self.sm1.sm.set_ctrl_bits(self.mask());
StateMachineGroup3 {
sm1: StateMachine {
sm: self.sm1.sm,
program: self.sm1.program,
_phantom: core::marker::PhantomData,
},
sm2: StateMachine {
sm: self.sm2.sm,
program: self.sm2.program,
_phantom: core::marker::PhantomData,
},
sm3: StateMachine {
sm: self.sm3.sm,
program: self.sm3.program,
_phantom: core::marker::PhantomData,
},
}
}
/// Sync grouped state machines
pub fn sync(mut self) -> Self {
self.sm1.sm.set_ctrl_bits(self.mask() << 8);
self
}
}
impl<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
SM4Idx: StateMachineIndex,
> StateMachineGroup4<P, SM1Idx, SM2Idx, SM3Idx, SM4Idx, Stopped>
{
/// Start grouped state machines
pub fn start(mut self) -> StateMachineGroup4<P, SM1Idx, SM2Idx, SM3Idx, SM4Idx, Running> {
self.sm1.sm.set_ctrl_bits(self.mask());
StateMachineGroup4 {
sm1: StateMachine {
sm: self.sm1.sm,
program: self.sm1.program,
_phantom: core::marker::PhantomData,
},
sm2: StateMachine {
sm: self.sm2.sm,
program: self.sm2.program,
_phantom: core::marker::PhantomData,
},
sm3: StateMachine {
sm: self.sm3.sm,
program: self.sm3.program,
_phantom: core::marker::PhantomData,
},
sm4: StateMachine {
sm: self.sm4.sm,
program: self.sm4.program,
_phantom: core::marker::PhantomData,
},
}
}
/// Sync grouped state machines
pub fn sync(mut self) -> Self {
self.sm1.sm.set_ctrl_bits(self.mask() << 8);
self
}
}
impl<P: PIOExt, SM1Idx: StateMachineIndex, SM2Idx: StateMachineIndex>
StateMachineGroup2<P, SM1Idx, SM2Idx, Running>
{
/// Stop grouped state machines
pub fn stop(mut self) -> StateMachineGroup2<P, SM1Idx, SM2Idx, Stopped> {
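// Clearing the SM_ENABLE bits for the whole group in a single write stops all
// of its state machines in the same clock cycle.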
self.sm1.sm.clear_ctrl_bits(self.mask());
StateMachineGroup2 {
sm1: StateMachine {
sm: self.sm1.sm,
program: self.sm1.program,
_phantom: core::marker::PhantomData,
},
sm2: StateMachine {
sm: self.sm2.sm,
program: self.sm2.program,
_phantom: core::marker::PhantomData,
},
}
}
}
impl<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
> StateMachineGroup3<P, SM1Idx, SM2Idx, SM3Idx, Running>
{
/// Stop grouped state machines
pub fn stop(mut self) -> StateMachineGroup3<P, SM1Idx, SM2Idx, SM3Idx, Stopped> {
self.sm1.sm.clear_ctrl_bits(self.mask());
StateMachineGroup3 {
sm1: StateMachine {
sm: self.sm1.sm,
program: self.sm1.program,
_phantom: core::marker::PhantomData,
},
sm2: StateMachine {
sm: self.sm2.sm,
program: self.sm2.program,
_phantom: core::marker::PhantomData,
},
sm3: StateMachine {
sm: self.sm3.sm,
program: self.sm3.program,
_phantom: core::marker::PhantomData,
},
}
}
}
impl<
P: PIOExt,
SM1Idx: StateMachineIndex,
SM2Idx: StateMachineIndex,
SM3Idx: StateMachineIndex,
SM4Idx: StateMachineIndex,
> StateMachineGroup4<P, SM1Idx, SM2Idx, SM3Idx, SM4Idx, Running>
{
/// Stop grouped state machines
pub fn stop(mut self) -> StateMachineGroup4<P, SM1Idx, SM2Idx, SM3Idx, SM4Idx, Stopped> {
self.sm1.sm.clear_ctrl_bits(self.mask());
StateMachineGroup4 {
sm1: StateMachine {
sm: self.sm1.sm,
program: self.sm1.program,
_phantom: core::marker::PhantomData,
},
sm2: StateMachine {
sm: self.sm2.sm,
program: self.sm2.program,
_phantom: core::marker::PhantomData,
},
sm3: StateMachine {
sm: self.sm3.sm,
program: self.sm3.program,
_phantom: core::marker::PhantomData,
},
sm4: StateMachine {
sm: self.sm4.sm,
program: self.sm4.program,
_phantom: core::marker::PhantomData,
},
}
}
/// Sync grouped state machines
pub fn sync(mut self) -> Self {
self.sm1.sm.set_ctrl_bits(self.mask() << 8);
self
}
}
/// Type which, once destructed, restarts the clock dividers for all selected state machines,
/// effectively synchronizing them.
pub struct Synchronize<'sm, SM: ValidStateMachine> {