Merge pull request #161 from corwinkuiper/alloc-better

Improving allocator
This commit is contained in:
Corwin 2022-01-20 22:53:35 +00:00 committed by GitHub
commit 5a861e7d48
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 340 additions and 235 deletions

7
agb/Cargo.lock generated
View file

@ -16,6 +16,7 @@ dependencies = [
"agb_image_converter",
"agb_macros",
"agb_sound_converter",
"bare-metal",
"bitflags",
]
@ -64,6 +65,12 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "bare-metal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8fe8f5a8a398345e52358e18ff07cc17a568fbca5c6f73873d3a62056309603"
[[package]]
name = "bitflags"
version = "1.3.2"

View file

@ -25,6 +25,7 @@ agb_image_converter = { version = "0.6.0", path = "../agb-image-converter" }
agb_sound_converter = { version = "0.1.0", path = "../agb-sound-converter" }
agb_macros = { version = "0.1.0", path = "../agb-macros" }
agb_fixnum = { version = "0.1.0", path = "../agb-fixnum" }
bare-metal = "1.0"
[package.metadata.docs.rs]
default-target = "thumbv6m-none-eabi"

View file

@ -1,13 +1,20 @@
#![no_std]
#![no_main]
use core::cell::RefCell;
use bare_metal::{CriticalSection, Mutex};
#[agb::entry]
fn main(_gba: agb::Gba) -> ! {
let count = agb::interrupt::Mutex::new(0);
agb::add_interrupt_handler!(agb::interrupt::Interrupt::VBlank, |key| {
let mut count = count.lock_with_key(&key);
agb::println!("Hello, world, frame = {}", *count);
*count += 1;
});
let count = Mutex::new(RefCell::new(0));
agb::add_interrupt_handler!(
agb::interrupt::Interrupt::VBlank,
|key: &CriticalSection| {
let mut count = count.borrow(*key).borrow_mut();
agb::println!("Hello, world, frame = {}", *count);
*count += 1;
}
);
loop {}
}

View file

@ -1,11 +1,14 @@
#![no_std]
#![no_main]
use core::cell::RefCell;
use agb::{
display::example_logo,
fixnum::FixedNum,
interrupt::{Interrupt, Mutex},
interrupt::{free, Interrupt},
};
use bare_metal::{CriticalSection, Mutex};
struct BackCosines {
cosines: [u16; 32],
@ -21,10 +24,10 @@ fn main(mut gba: agb::Gba) -> ! {
let mut time = 0;
let cosines = [0_u16; 32];
let back = Mutex::new(BackCosines { cosines, row: 0 });
let back = Mutex::new(RefCell::new(BackCosines { cosines, row: 0 }));
agb::add_interrupt_handler!(Interrupt::HBlank, |_| {
let mut backc = back.lock();
agb::add_interrupt_handler!(Interrupt::HBlank, |key: &CriticalSection| {
let mut backc = back.borrow(*key).borrow_mut();
let deflection = backc.cosines[backc.row % 32];
unsafe { ((0x0400_0010) as *mut u16).write_volatile(deflection) }
backc.row += 1;
@ -34,14 +37,17 @@ fn main(mut gba: agb::Gba) -> ! {
loop {
vblank.wait_for_vblank();
let mut backc = back.lock();
backc.row = 0;
time += 1;
for (r, a) in backc.cosines.iter_mut().enumerate() {
let n: FixedNum<8> = (FixedNum::new(r as i32) / 32 + FixedNum::new(time) / 128).cos()
* (256 * 4 - 1)
/ 256;
*a = (n.trunc() % (32 * 8)) as u16;
}
free(|key| {
let mut backc = back.borrow(*key).borrow_mut();
backc.row = 0;
time += 1;
for (r, a) in backc.cosines.iter_mut().enumerate() {
let n: FixedNum<8> = (FixedNum::new(r as i32) / 32 + FixedNum::new(time) / 128)
.cos()
* (256 * 4 - 1)
/ 256;
*a = (n.trunc() % (32 * 8)) as u16;
}
})
}
}

View file

@ -1,16 +1,28 @@
//! The block allocator works by maintaining a linked list of unused blocks and
//! requesting new blocks using a bump allocator. Freed blocks are inserted into
//! the linked list in order of pointer. Blocks are then merged after every
//! free.
use core::alloc::{GlobalAlloc, Layout};
use core::cell::RefCell;
use core::convert::TryInto;
use core::ptr::NonNull;
use crate::interrupt::Mutex;
use crate::interrupt::free;
use bare_metal::{CriticalSection, Mutex};
use super::bump_allocator::BumpAllocator;
use super::SendNonNull;
struct Block {
size: usize,
next: Option<NonNull<Block>>,
next: Option<SendNonNull<Block>>,
}
impl Block {
/// Returns the layout of either the block or the wanted layout aligned to
/// the maximum alignment used (double word).
pub fn either_layout(layout: Layout) -> Layout {
let block_layout = Layout::new::<Block>();
let aligned_to = layout
@ -21,31 +33,83 @@ impl Block {
aligned_to.align(),
)
.expect("too large allocation")
.align_to(8)
.expect("too large allocation")
.pad_to_align()
}
}
struct BlockAllocatorState {
first_free_block: Option<NonNull<Block>>,
first_free_block: Option<SendNonNull<Block>>,
}
pub(crate) struct BlockAllocator {
inner_allocator: BumpAllocator,
state: Mutex<BlockAllocatorState>,
state: Mutex<RefCell<BlockAllocatorState>>,
}
impl BlockAllocator {
pub(super) const unsafe fn new() -> Self {
Self {
inner_allocator: BumpAllocator::new(),
state: Mutex::new(BlockAllocatorState {
state: Mutex::new(RefCell::new(BlockAllocatorState {
first_free_block: None,
}),
})),
}
}
unsafe fn new_block(&self, layout: Layout) -> *mut u8 {
#[cfg(test)]
pub unsafe fn number_of_blocks(&self) -> u32 {
    // Count the entries of the free list. Done inside a critical section so
    // an interrupt handler cannot modify the list mid-walk.
    free(|key| {
        let state = self.state.borrow(*key).borrow_mut();
        let mut total: u32 = 0;
        let mut current = state.first_free_block;
        while let Some(block) = current {
            total += 1;
            current = block.as_ref().next;
        }
        total
    })
}
/// Requests a brand new block from the inner bump allocator
fn new_block(&self, layout: Layout, cs: &CriticalSection) -> *mut u8 {
let overall_layout = Block::either_layout(layout);
self.inner_allocator.alloc(overall_layout)
self.inner_allocator.alloc_critical(overall_layout, cs)
}
/// Merges blocks together to create a normalised list
///
/// Walks the free list (which is kept in order of pointer, see the module
/// docs); whenever a block's recorded `size` equals the byte distance to the
/// next block, the two are physically contiguous and are fused into a single
/// larger block. The `continue` deliberately re-examines the merged block, so
/// a run of several adjacent free blocks collapses in one pass.
///
/// # Safety
/// Every pointer on the free list must point to a valid `Block` header
/// previously written by this allocator.
unsafe fn normalise(&self) {
    // Interrupts are disabled for the whole walk so a handler can never
    // observe (or mutate) a half-merged list.
    free(|key| {
        let mut state = self.state.borrow(*key).borrow_mut();
        let mut list_ptr = &mut state.first_free_block;
        while let Some(mut curr) = list_ptr {
            if let Some(next_elem) = curr.as_mut().next {
                // Byte distance from this block's header to the next one.
                let difference = next_elem
                    .as_ptr()
                    .cast::<u8>()
                    .offset_from(curr.as_ptr().cast::<u8>());
                let usize_difference: usize = difference
                    .try_into()
                    .expect("distances in alloc'd blocks must be positive");
                if usize_difference == curr.as_mut().size {
                    // Adjacent: absorb the next block's size and unlink it,
                    // then loop again without advancing to try to merge the
                    // (now larger) current block with its new successor.
                    let current = curr.as_mut();
                    let next = next_elem.as_ref();
                    current.size += next.size;
                    current.next = next.next;
                    continue;
                }
            }
            list_ptr = &mut curr.as_mut().next;
        }
    });
}
}
@ -54,13 +118,17 @@ unsafe impl GlobalAlloc for BlockAllocator {
// find a block that this current request fits in
let full_layout = Block::either_layout(layout);
let (block_after_layout, block_after_layout_offset) =
full_layout.extend(Layout::new::<Block>()).unwrap();
let (block_after_layout, block_after_layout_offset) = full_layout
.extend(Layout::new::<Block>().align_to(8).unwrap().pad_to_align())
.unwrap();
{
let mut state = self.state.lock();
free(|key| {
let mut state = self.state.borrow(*key).borrow_mut();
let mut current_block = state.first_free_block;
let mut list_ptr = &mut state.first_free_block;
// This iterates the free list until it either finds a block that
// is the exact size requested or a block that can be split into
// one with the desired size and another block header.
while let Some(mut curr) = current_block {
let curr_block = curr.as_mut();
if curr_block.size == full_layout.size() {
@ -78,26 +146,57 @@ unsafe impl GlobalAlloc for BlockAllocator {
.add(block_after_layout_offset)
.cast();
*split_ptr = split_block;
*list_ptr = NonNull::new(split_ptr);
*list_ptr = NonNull::new(split_ptr).map(SendNonNull);
return curr.as_ptr().cast();
}
current_block = curr_block.next;
list_ptr = &mut curr_block.next;
}
}
self.new_block(layout)
self.new_block(layout, key)
})
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let new_layout = Block::either_layout(layout);
let mut state = self.state.lock();
let new_block_content = Block {
size: new_layout.size(),
next: state.first_free_block,
};
*ptr.cast() = new_block_content;
state.first_free_block = NonNull::new(ptr.cast());
let new_layout = Block::either_layout(layout).pad_to_align();
free(|key| {
let mut state = self.state.borrow(*key).borrow_mut();
// note that this is a reference to a pointer
let mut list_ptr = &mut state.first_free_block;
// This searches the free list until it finds a block further along
// than the block that is being freed. The newly freed block is then
// inserted before this block. If the end of the list is reached
// then the block is placed at the end with no new block after it.
loop {
match list_ptr {
Some(mut current_block) => {
if current_block.as_ptr().cast() > ptr {
let new_block_content = Block {
size: new_layout.size(),
next: Some(current_block),
};
*ptr.cast() = new_block_content;
*list_ptr = NonNull::new(ptr.cast()).map(SendNonNull);
break;
}
list_ptr = &mut current_block.as_mut().next;
}
None => {
// reached the end of the list without finding a place to insert the value
let new_block_content = Block {
size: new_layout.size(),
next: None,
};
*ptr.cast() = new_block_content;
*list_ptr = NonNull::new(ptr.cast()).map(SendNonNull);
break;
}
}
}
});
self.normalise();
}
}

View file

@ -1,23 +1,26 @@
use core::alloc::{GlobalAlloc, Layout};
use core::cell::RefCell;
use core::ptr::NonNull;
use crate::interrupt::Mutex;
use super::SendNonNull;
use crate::interrupt::free;
use bare_metal::{CriticalSection, Mutex};
pub(crate) struct BumpAllocator {
current_ptr: Mutex<Option<NonNull<u8>>>,
current_ptr: Mutex<RefCell<Option<SendNonNull<u8>>>>,
}
impl BumpAllocator {
pub const fn new() -> Self {
Self {
current_ptr: Mutex::new(None),
current_ptr: Mutex::new(RefCell::new(None)),
}
}
}
impl BumpAllocator {
fn alloc_safe(&self, layout: Layout) -> *mut u8 {
let mut current_ptr = self.current_ptr.lock();
pub fn alloc_critical(&self, layout: Layout, cs: &CriticalSection) -> *mut u8 {
let mut current_ptr = self.current_ptr.borrow(*cs).borrow_mut();
let ptr = if let Some(c) = *current_ptr {
c.as_ptr() as usize
@ -28,7 +31,7 @@ impl BumpAllocator {
let alignment_bitmask = layout.align() - 1;
let fixup = ptr & alignment_bitmask;
let amount_to_add = layout.align() - fixup;
let amount_to_add = (layout.align() - fixup) & alignment_bitmask;
let resulting_ptr = ptr + amount_to_add;
let new_current_ptr = resulting_ptr + layout.size();
@ -37,10 +40,13 @@ impl BumpAllocator {
return core::ptr::null_mut();
}
*current_ptr = NonNull::new(new_current_ptr as *mut _);
*current_ptr = NonNull::new(new_current_ptr as *mut _).map(SendNonNull);
resulting_ptr as *mut _
}
/// Allocates after first entering a critical section, making this safe to
/// call from ordinary (non-interrupt) code.
pub fn alloc_safe(&self, layout: Layout) -> *mut u8 {
    free(|cs| self.alloc_critical(layout, cs))
}
}
unsafe impl GlobalAlloc for BumpAllocator {

View file

@ -1,15 +1,45 @@
use core::alloc::Layout;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
mod block_allocator;
mod bump_allocator;
use block_allocator::BlockAllocator;
/// A [`NonNull`] wrapper that additionally claims to be [`Send`], so it can
/// be stored inside the `bare_metal::Mutex` used by the allocators.
struct SendNonNull<T>(NonNull<T>);

// NOTE(review): asserting `Send` for a raw pointer is only sound because all
// accesses go through critical sections on a single-core target — confirm.
unsafe impl<T> Send for SendNonNull<T> {}

impl<T> Clone for SendNonNull<T> {
    fn clone(&self) -> Self {
        // A bitwise copy is sufficient: the wrapped `NonNull` is `Copy`.
        *self
    }
}

impl<T> Copy for SendNonNull<T> {}

// Deref straight through to the inner `NonNull` so callers can use its API
// (`as_ptr`, `as_ref`, ...) directly on the wrapper.
impl<T> Deref for SendNonNull<T> {
    type Target = NonNull<T>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for SendNonNull<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
const EWRAM_END: usize = 0x0204_0000;
#[global_allocator]
static GLOBAL_ALLOC: BlockAllocator = unsafe { BlockAllocator::new() };
#[cfg(test)]
pub unsafe fn number_of_blocks() -> u32 {
GLOBAL_ALLOC.number_of_blocks()
}
#[alloc_error_handler]
fn alloc_error(layout: Layout) -> ! {
panic!(

View file

@ -29,6 +29,6 @@ mod tests {
display_logo(&mut gfx);
crate::assert_image_output("gfx/test_logo.png");
crate::test_runner::assert_image_output("gfx/test_logo.png");
}
}

View file

@ -1,10 +1,11 @@
use core::{
cell::{Cell, UnsafeCell},
cell::Cell,
marker::{PhantomData, PhantomPinned},
ops::{Deref, DerefMut},
pin::Pin,
};
use bare_metal::CriticalSection;
use crate::{display::DISPLAY_STATUS, memory_mapped::MemoryMapped};
#[derive(Clone, Copy)]
@ -70,21 +71,22 @@ impl Interrupt {
const ENABLED_INTERRUPTS: MemoryMapped<u16> = unsafe { MemoryMapped::new(0x04000200) };
const INTERRUPTS_ENABLED: MemoryMapped<u16> = unsafe { MemoryMapped::new(0x04000208) };
struct Disable {}
struct Disable {
pre: u16,
}
impl Drop for Disable {
fn drop(&mut self) {
enable_interrupts();
INTERRUPTS_ENABLED.set(self.pre);
}
}
fn temporary_interrupt_disable() -> Disable {
let d = Disable {
pre: INTERRUPTS_ENABLED.get(),
};
disable_interrupts();
Disable {}
}
fn enable_interrupts() {
INTERRUPTS_ENABLED.set(1);
d
}
fn disable_interrupts() {
@ -158,7 +160,7 @@ pub struct InterruptClosureBounded<'a> {
}
struct InterruptClosure {
closure: *const (dyn Fn(Key)),
closure: *const (dyn Fn(&CriticalSection)),
next: Cell<*const InterruptClosure>,
root: *const InterruptRoot,
}
@ -169,7 +171,7 @@ impl InterruptRoot {
while !c.is_null() {
let closure_ptr = unsafe { &*c }.closure;
let closure_ref = unsafe { &*closure_ptr };
closure_ref(Key());
closure_ref(unsafe { &CriticalSection::new() });
c = unsafe { &*c }.next.get();
}
}
@ -201,7 +203,7 @@ fn interrupt_to_root(interrupt: Interrupt) -> &'static InterruptRoot {
}
fn get_interrupt_handle_root<'a>(
f: &'a dyn Fn(Key),
f: &'a dyn Fn(&CriticalSection),
root: &InterruptRoot,
) -> InterruptClosureBounded<'a> {
InterruptClosureBounded {
@ -218,7 +220,7 @@ fn get_interrupt_handle_root<'a>(
/// The [add_interrupt_handler!] macro should be used instead of this function.
/// Creates an interrupt handler from a closure.
pub fn get_interrupt_handle(
f: &(dyn Fn(Key) + Send + Sync),
f: &(dyn Fn(&CriticalSection) + Send + Sync),
interrupt: Interrupt,
) -> InterruptClosureBounded {
let root = interrupt_to_root(interrupt);
@ -230,22 +232,24 @@ pub fn get_interrupt_handle(
/// Adds an interrupt handler to the interrupt table such that when that
/// interrupt is triggered the closure is called.
pub fn add_interrupt<'a>(interrupt: Pin<&'a InterruptClosureBounded<'a>>) {
let root = unsafe { &*interrupt.c.root };
root.add();
let mut c = root.next.get();
if c.is_null() {
root.next.set((&interrupt.c) as *const _);
return;
}
loop {
let p = unsafe { &*c }.next.get();
if p.is_null() {
unsafe { &*c }.next.set((&interrupt.c) as *const _);
free(|_| {
let root = unsafe { &*interrupt.c.root };
root.add();
let mut c = root.next.get();
if c.is_null() {
root.next.set((&interrupt.c) as *const _);
return;
}
loop {
let p = unsafe { &*c }.next.get();
if p.is_null() {
unsafe { &*c }.next.set((&interrupt.c) as *const _);
return;
}
c = p;
}
c = p;
}
})
}
#[macro_export]
@ -270,90 +274,18 @@ macro_rules! add_interrupt_handler {
};
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum MutexState {
Unlocked,
Locked(bool),
}
pub fn free<F, R>(f: F) -> R
where
F: FnOnce(&CriticalSection) -> R,
{
let enabled = INTERRUPTS_ENABLED.get();
pub struct Mutex<T> {
internal: UnsafeCell<T>,
state: UnsafeCell<MutexState>,
}
disable_interrupts();
#[non_exhaustive]
pub struct Key();
let r = f(unsafe { &CriticalSection::new() });
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T> Sync for Mutex<T> {}
impl<T> Mutex<T> {
pub fn lock(&self) -> MutexRef<T> {
let state = INTERRUPTS_ENABLED.get();
INTERRUPTS_ENABLED.set(0);
assert_eq!(
unsafe { *self.state.get() },
MutexState::Unlocked,
"mutex must be unlocked to be able to lock it"
);
unsafe { *self.state.get() = MutexState::Locked(state != 0) };
MutexRef {
internal: &self.internal,
state: &self.state,
}
}
pub fn lock_with_key(&self, _key: &Key) -> MutexRef<T> {
assert_eq!(
unsafe { *self.state.get() },
MutexState::Unlocked,
"mutex must be unlocked to be able to lock it"
);
unsafe { *self.state.get() = MutexState::Locked(false) };
MutexRef {
internal: &self.internal,
state: &self.state,
}
}
pub const fn new(val: T) -> Self {
Mutex {
internal: UnsafeCell::new(val),
state: UnsafeCell::new(MutexState::Unlocked),
}
}
}
pub struct MutexRef<'a, T> {
internal: &'a UnsafeCell<T>,
state: &'a UnsafeCell<MutexState>,
}
impl<'a, T> Drop for MutexRef<'a, T> {
fn drop(&mut self) {
let state = unsafe { &mut *self.state.get() };
let prev_state = *state;
*state = MutexState::Unlocked;
match prev_state {
MutexState::Locked(b) => INTERRUPTS_ENABLED.set(b as u16),
MutexState::Unlocked => {}
}
}
}
impl<'a, T> Deref for MutexRef<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.internal.get() }
}
}
impl<'a, T> DerefMut for MutexRef<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.internal.get() }
}
INTERRUPTS_ENABLED.set(enabled);
r
}
#[non_exhaustive]
@ -382,18 +314,28 @@ impl Drop for VBlank {
#[cfg(test)]
mod tests {
use super::*;
use bare_metal::Mutex;
use core::cell::RefCell;
#[test_case]
fn test_vblank_interrupt_handler(_gba: &mut crate::Gba) {
{
let counter = Mutex::new(0);
let counter_2 = Mutex::new(0);
add_interrupt_handler!(Interrupt::VBlank, |key| *counter.lock_with_key(&key) += 1);
add_interrupt_handler!(Interrupt::VBlank, |_| *counter_2.lock() += 1);
let counter = Mutex::new(RefCell::new(0));
let counter_2 = Mutex::new(RefCell::new(0));
add_interrupt_handler!(Interrupt::VBlank, |key: &CriticalSection| *counter
.borrow(*key)
.borrow_mut() +=
1);
add_interrupt_handler!(Interrupt::VBlank, |key: &CriticalSection| *counter_2
.borrow(*key)
.borrow_mut() +=
1);
let vblank = VBlank::get();
while *counter.lock() < 100 || *counter_2.lock() < 100 {
while free(|key| {
*counter.borrow(*key).borrow() < 100 || *counter_2.borrow(*key).borrow() < 100
}) {
vblank.wait_for_vblank();
}
}

View file

@ -2,7 +2,7 @@
// This appears to be needed for testing to work
#![cfg_attr(test, no_main)]
#![cfg_attr(test, feature(custom_test_frameworks))]
#![cfg_attr(test, test_runner(crate::test_runner))]
#![cfg_attr(test, test_runner(crate::test_runner::test_runner))]
#![cfg_attr(test, reexport_test_harness_main = "test_main")]
#![deny(clippy::all)]
#![feature(alloc_error_handler)]
@ -224,89 +224,96 @@ impl Gba {
}
}
#[doc(hidden)]
pub trait Testable {
fn run(&self, gba: &mut Gba);
}
#[cfg(test)]
mod test_runner {
use super::*;
impl<T> Testable for T
where
T: Fn(&mut Gba),
{
fn run(&self, gba: &mut Gba) {
#[doc(hidden)]
pub trait Testable {
fn run(&self, gba: &mut Gba);
}
impl<T> Testable for T
where
T: Fn(&mut Gba),
{
fn run(&self, gba: &mut Gba) {
let mut mgba = mgba::Mgba::new().unwrap();
mgba.print(
format_args!("{}...", core::any::type_name::<T>()),
mgba::DebugLevel::Info,
)
.unwrap();
mgba::number_of_cycles_tagged(785);
self(gba);
mgba::number_of_cycles_tagged(785);
assert!(
unsafe { agb_alloc::number_of_blocks() } < 2,
"memory is being leaked, there are {} blocks",
unsafe { agb_alloc::number_of_blocks() }
);
mgba.print(format_args!("[ok]"), mgba::DebugLevel::Info)
.unwrap();
}
}
#[panic_handler]
fn panic_implementation(info: &core::panic::PanicInfo) -> ! {
if let Some(mut mgba) = mgba::Mgba::new() {
mgba.print(format_args!("[failed]"), mgba::DebugLevel::Error)
.unwrap();
mgba.print(format_args!("Error: {}", info), mgba::DebugLevel::Fatal)
.unwrap();
}
loop {}
}
static mut TEST_GBA: Option<Gba> = None;
#[doc(hidden)]
pub fn test_runner(tests: &[&dyn Testable]) {
let mut mgba = mgba::Mgba::new().unwrap();
mgba.print(
format_args!("{}...", core::any::type_name::<T>()),
format_args!("Running {} tests", tests.len()),
mgba::DebugLevel::Info,
)
.unwrap();
mgba::number_of_cycles_tagged(785);
self(gba);
mgba::number_of_cycles_tagged(785);
mgba.print(format_args!("[ok]"), mgba::DebugLevel::Info)
.unwrap();
}
}
#[panic_handler]
#[cfg(test)]
fn panic_implementation(info: &core::panic::PanicInfo) -> ! {
if let Some(mut mgba) = mgba::Mgba::new() {
mgba.print(format_args!("[failed]"), mgba::DebugLevel::Error)
.unwrap();
mgba.print(format_args!("Error: {}", info), mgba::DebugLevel::Fatal)
.unwrap();
let gba = unsafe { TEST_GBA.as_mut() }.unwrap();
for test in tests {
test.run(gba);
}
mgba.print(
format_args!("Tests finished successfully"),
mgba::DebugLevel::Info,
)
.unwrap();
}
loop {}
}
#[cfg(test)]
static mut TEST_GBA: Option<Gba> = None;
#[doc(hidden)]
#[cfg(test)]
pub fn test_runner(tests: &[&dyn Testable]) {
let mut mgba = mgba::Mgba::new().unwrap();
mgba.print(
format_args!("Running {} tests", tests.len()),
mgba::DebugLevel::Info,
)
.unwrap();
let mut gba = unsafe { TEST_GBA.as_mut() }.unwrap();
for test in tests {
test.run(&mut gba);
#[entry]
fn agb_test_main(gba: Gba) -> ! {
unsafe { TEST_GBA = Some(gba) };
test_main();
#[allow(clippy::empty_loop)]
loop {}
}
mgba.print(
format_args!("Tests finished successfully"),
mgba::DebugLevel::Info,
)
.unwrap();
}
#[cfg(test)]
#[entry]
fn agb_test_main(gba: Gba) -> ! {
unsafe { TEST_GBA = Some(gba) };
test_main();
#[allow(clippy::empty_loop)]
loop {}
}
#[cfg(test)]
fn assert_image_output(image: &str) {
display::busy_wait_for_vblank();
display::busy_wait_for_vblank();
let mut mgba = crate::mgba::Mgba::new().unwrap();
mgba.print(
format_args!("image:{}", image),
crate::mgba::DebugLevel::Info,
)
.unwrap();
display::busy_wait_for_vblank();
pub fn assert_image_output(image: &str) {
display::busy_wait_for_vblank();
display::busy_wait_for_vblank();
let mut mgba = crate::mgba::Mgba::new().unwrap();
mgba.print(
format_args!("image:{}", image),
crate::mgba::DebugLevel::Info,
)
.unwrap();
display::busy_wait_for_vblank();
}
}
#[cfg(test)]