add grow to block allocator

Corwin 2023-04-06 19:23:19 +01:00
parent 7030d65d9f
commit 46faff254f
3 changed files with 220 additions and 30 deletions


@@ -37,6 +37,10 @@ impl Block {
.expect("too large allocation")
.pad_to_align()
}
pub fn layout() -> Layout {
Layout::new::<Block>().align_to(8).unwrap().pad_to_align()
}
}
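
A quick sketch of what the new Block::layout guarantees — an 8-aligned header layout padded to a multiple of that alignment — using a stand-in Block (agb's real next field wraps the pointer in SendNonNull):

use core::alloc::Layout;
use core::ptr::NonNull;

// stand-in for agb's Block, which stores its size and an optional
// pointer to the next free block
#[allow(dead_code)]
struct Block {
    size: usize,
    next: Option<NonNull<Block>>,
}

fn main() {
    let layout = Layout::new::<Block>().align_to(8).unwrap().pad_to_align();
    // 8-aligned and padded to a multiple of 8, so any free region
    // carved out with this layout can host another 8-aligned header
    assert_eq!(layout.align(), 8);
    assert_eq!(layout.size() % 8, 0);
}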
struct BlockAllocatorState {
@@ -70,7 +74,7 @@ impl BlockAllocator {
}
pub unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
free(|key| self.inner.borrow(key).borrow_mut().dealloc(ptr, layout))
free(|key| self.inner.borrow(key).borrow_mut().dealloc(ptr, layout));
}
pub unsafe fn dealloc_no_normalise(&self, ptr: *mut u8, layout: Layout) {
@@ -78,7 +82,21 @@ impl BlockAllocator {
self.inner
.borrow(key)
.borrow_mut()
.dealloc_no_normalise(ptr, layout)
.dealloc_no_normalise(ptr, layout);
});
}
pub unsafe fn grow(
&self,
ptr: *mut u8,
layout: Layout,
new_layout: Layout,
) -> Option<NonNull<u8>> {
free(|key| {
self.inner
.borrow(key)
.borrow_mut()
.grow(ptr, layout, new_layout)
})
}
}
@@ -144,41 +162,119 @@ impl BlockAllocatorInner {
// find a block that this current request fits in
let full_layout = Block::either_layout(layout);
let (block_after_layout, block_after_layout_offset) = full_layout
.extend(Layout::new::<Block>().align_to(8).unwrap().pad_to_align())
.unwrap();
let mut current_block = self.state.first_free_block;
let mut list_ptr = &mut self.state.first_free_block;
// This iterates the free list until it either finds a block that
// is the exact size requested or a block that can be split into
// one with the desired size and another block header.
while let Some(mut current) = current_block {
let block_to_examine = current.as_mut();
if block_to_examine.size == full_layout.size() {
*list_ptr = block_to_examine.next;
return Some(current.cast());
} else if block_to_examine.size >= block_after_layout.size() {
// can split block
let split_block = Block {
size: block_to_examine.size - block_after_layout_offset,
next: block_to_examine.next,
};
let split_ptr = current
.as_ptr()
.cast::<u8>()
.add(block_after_layout_offset)
.cast();
*split_ptr = split_block;
*list_ptr = NonNull::new(split_ptr).map(SendNonNull);
return Some(current.cast());
loop {
match list_ptr {
Some(mut current_block) => {
if let Some(alloc) = Self::allocate_into_block(list_ptr, full_layout) {
return Some(alloc);
}
list_ptr = &mut current_block.as_mut().next;
}
None => return self.new_block(layout),
}
current_block = block_to_examine.next;
list_ptr = &mut block_to_examine.next;
}
}
/// Allocates into the given free block: takes it whole on an exact fit,
/// splits it in twain if it is larger, and returns None if it is too small.
unsafe fn allocate_into_block(
reference_to_block_pointer: &mut Option<SendNonNull<Block>>,
wanted_layout: Layout,
) -> Option<NonNull<u8>> {
let (extended_layout, offset) = wanted_layout.extend(Block::layout()).unwrap();
let mut examination_block_ptr = reference_to_block_pointer.unwrap().0;
let examination_block = examination_block_ptr.as_mut();
if examination_block.size == wanted_layout.size() {
*reference_to_block_pointer = examination_block.next;
Some(examination_block_ptr.cast())
} else if examination_block.size >= extended_layout.size() {
let split_block = Block {
size: examination_block.size - offset,
next: examination_block.next,
};
let split_block_ptr = examination_block_ptr
.as_ptr()
.cast::<u8>()
.add(offset)
.cast();
*split_block_ptr = split_block;
*reference_to_block_pointer = NonNull::new(split_block_ptr).map(SendNonNull);
Some(examination_block_ptr.cast())
} else {
None
}
}
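
The split arithmetic is easiest to see with concrete numbers. A worked example of the Layout::extend call above, with hypothetical sizes rather than anything taken from agb:

use core::alloc::Layout;

fn main() {
    // hypothetical request: 20 bytes, 4-aligned
    let wanted = Layout::from_size_align(20, 4).unwrap();
    // stand-in for Block::layout(): an 8-byte, 8-aligned header
    let header = Layout::from_size_align(8, 8).unwrap();

    let (extended, offset) = wanted.extend(header).unwrap();
    assert_eq!(offset, 24);          // allocation padded up to the header's alignment
    assert_eq!(extended.size(), 32); // the smallest block worth splitting

    // a free block of size 40 therefore splits into the 20-byte
    // allocation plus a remainder block of size 40 - 24 = 16, whose
    // header is written at ptr + 24
    let free_block_size = 40;
    assert_eq!(free_block_size - offset, 16);
}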
pub unsafe fn grow(
&mut self,
ptr: *mut u8,
initial_layout: Layout,
desired_layout: Layout,
) -> Option<NonNull<u8>> {
let either_layout_initial = Block::either_layout(initial_layout);
let either_layout_desired = Block::either_layout(desired_layout);
let difference = Layout::from_size_align(
either_layout_desired.size() - either_layout_initial.size(),
either_layout_initial.align(),
)
.expect("should be able to construct difference layout");
if self.is_block_at_end(ptr, either_layout_initial) {
let _additional_space = self.inner_allocator.alloc(difference);
return NonNull::new(ptr);
}
// cases
// * Our block has no free block after it.
// * Our block has a free block after it that we fit in.
// * Our block has a free block after it that is too small.
// * UNIMPLEMENTED: our block has a free block after it that is too small, but that block is at the end, so we could bump allocate some more space.
let next_block = self.find_first_block_after(ptr);
if let Some(list_to_block) = next_block {
let is_block_directly_after = {
if let Some(block) = list_to_block {
block.0.as_ptr() == ptr.add(either_layout_initial.size()).cast()
} else {
false
}
};
if is_block_directly_after {
if let Some(_split) = Self::allocate_into_block(list_to_block, difference) {
return NonNull::new(ptr);
}
}
}
self.grow_copy(ptr, either_layout_initial, either_layout_desired)
}
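
So grow tries the in-place paths first and falls back to a copying move only when both fail. A simplified model of that decision (the names here are illustrative, not agb's; the fourth, unimplemented case from the comment above is omitted):

#[derive(Debug, PartialEq)]
enum GrowPath {
    BumpExtend, // allocation is the last thing on the heap: extend in place
    MergeNext,  // a free block sits directly after and is big enough: absorb it
    CopyToNew,  // otherwise: allocate fresh, copy, free the old block (grow_copy)
}

fn choose_path(at_heap_end: bool, free_block_after_fits: bool) -> GrowPath {
    if at_heap_end {
        GrowPath::BumpExtend
    } else if free_block_after_fits {
        GrowPath::MergeNext
    } else {
        GrowPath::CopyToNew
    }
}

fn main() {
    assert_eq!(choose_path(true, false), GrowPath::BumpExtend);
    assert_eq!(choose_path(false, true), GrowPath::MergeNext);
    assert_eq!(choose_path(false, false), GrowPath::CopyToNew);
}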
unsafe fn grow_copy(
&mut self,
ptr: *mut u8,
initial_layout: Layout,
desired_layout: Layout,
) -> Option<NonNull<u8>> {
let new_ptr = self.alloc(desired_layout)?;
core::ptr::copy_nonoverlapping(ptr, new_ptr.as_ptr(), initial_layout.size());
self.dealloc(ptr, initial_layout);
Some(new_ptr)
}
unsafe fn is_block_at_end(&self, ptr: *mut u8, total_layout: Layout) -> bool {
self.inner_allocator.tip() == NonNull::new(ptr.add(total_layout.size()))
}
pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
@@ -186,6 +282,28 @@ impl BlockAllocatorInner {
self.normalise();
}
/// Returns a mutable reference to the pointer to the first free block after `ptr`.
/// Useful because you can both modify what points to the block and access the block itself.
unsafe fn find_first_block_after(
&mut self,
ptr: *mut u8,
) -> Option<&mut Option<SendNonNull<Block>>> {
let mut list_ptr = &mut self.state.first_free_block;
loop {
match list_ptr {
Some(mut current_block) => {
if current_block.as_ptr().cast() > ptr {
return Some(list_ptr);
}
list_ptr = &mut current_block.as_mut().next;
}
None => return None,
}
}
}
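
The shape of this helper is easier to see on a safe, Box-based list: returning the link itself, rather than the block it points at, lets the caller splice the list without walking it again. A hypothetical sketch, not agb's types:

struct Node {
    value: u32,
    next: Option<Box<Node>>,
}

/// returns the link that points at the first node whose value exceeds `target`
fn link_to_first_after(mut link: &mut Option<Box<Node>>, target: u32) -> Option<&mut Option<Box<Node>>> {
    loop {
        // peek through a shared borrow so `link` stays free to move below
        let past_target = match &*link {
            Some(node) => node.value > target,
            None => return None,
        };
        if past_target {
            return Some(link);
        }
        match link {
            Some(node) => link = &mut node.next,
            None => unreachable!("emptiness was handled above"),
        }
    }
}

fn main() {
    let mut list = Some(Box::new(Node {
        value: 1,
        next: Some(Box::new(Node { value: 5, next: None })),
    }));

    // splice out the node the returned link points at, without re-walking
    let link = link_to_first_after(&mut list, 3).unwrap();
    let mut removed = link.take().unwrap();
    *link = removed.next.take();

    assert_eq!(removed.value, 5);
    assert!(list.as_ref().unwrap().next.is_none());
}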
pub unsafe fn dealloc_no_normalise(&mut self, ptr: *mut u8, layout: Layout) {
let new_layout = Block::either_layout(layout).pad_to_align();
@@ -236,6 +354,24 @@ unsafe impl GlobalAlloc for BlockAllocator {
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.dealloc(ptr, layout);
}
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
if new_size > layout.size() {
return match self.grow(ptr, layout, new_layout) {
Some(p) => p.as_ptr(),
None => core::ptr::null_mut(),
};
}
let new_ptr = GlobalAlloc::alloc(self, new_layout);
if !new_ptr.is_null() {
core::ptr::copy_nonoverlapping(ptr, new_ptr, core::cmp::min(layout.size(), new_size));
self.dealloc(ptr, layout);
}
new_ptr
}
}
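
For reference, the contract this realloc follows — contents preserved up to the smaller of the two sizes, old pointer freed on success — demonstrated against std's System allocator rather than the GBA allocator:

use std::alloc::{GlobalAlloc, Layout, System};

fn main() {
    unsafe {
        let layout = Layout::from_size_align(16, 8).unwrap();
        let ptr = System.alloc(layout);
        assert!(!ptr.is_null());
        ptr.write_bytes(0xAB, 16);

        // growing preserves the old contents up to the old size...
        let grown = System.realloc(ptr, layout, 64);
        assert!(!grown.is_null());
        assert_eq!(*grown, 0xAB);

        // ...and on success the old pointer must not be used again;
        // the block is freed with the layout it now has
        System.dealloc(grown, Layout::from_size_align(64, 8).unwrap());
    }
}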
unsafe impl Allocator for BlockAllocator {
@@ -251,6 +387,42 @@ unsafe impl Allocator for BlockAllocator {
}
}
unsafe fn grow(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
match self.grow(ptr.as_ptr(), old_layout, new_layout) {
Some(p) => Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
p.as_ptr(),
new_layout.size(),
))),
None => Err(core::alloc::AllocError),
}
}
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
let new_ptr = self
.grow(ptr.as_ptr(), old_layout, new_layout)
.ok_or(core::alloc::AllocError)?;
new_ptr
.as_ptr()
.add(old_layout.size())
.write_bytes(0, new_layout.size() - old_layout.size());
Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
new_ptr.as_ptr(),
new_layout.size(),
)))
}
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
self.dealloc(ptr.as_ptr(), layout);
}
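
grow and grow_zeroed are what collections reach through the unstable allocator_api. A minimal nightly-only sketch of the calling convention, using the std Global allocator rather than agb's BlockAllocator:

#![feature(allocator_api)]

use std::alloc::{Allocator, Global, Layout};

fn main() {
    let old_layout = Layout::array::<u32>(4).unwrap();
    let new_layout = Layout::array::<u32>(16).unwrap();

    let block = Global.allocate(old_layout).unwrap();
    // the caller must guarantee new_layout.size() >= old_layout.size()
    let grown = unsafe { Global.grow(block.cast(), old_layout, new_layout) }.unwrap();
    // the returned slice may be bigger than requested, never smaller
    assert!(grown.len() >= new_layout.size());
    unsafe { Global.deallocate(grown.cast(), new_layout) };
}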


@@ -36,6 +36,10 @@ impl BumpAllocatorInner {
}
}
pub fn tip(&self) -> Option<NonNull<u8>> {
self.current_ptr.map(|x| x.0)
}
pub fn alloc(&mut self, layout: Layout) -> Option<NonNull<u8>> {
let current_ptr = &mut self.current_ptr;


@@ -292,4 +292,18 @@ mod test {
}
}
}
#[test_case]
fn growth_works(_gba: &mut crate::Gba) {
let mut growing_vector = Vec::with_capacity(1);
for i in 0..1000 {
growing_vector.push(i);
growing_vector.reserve_exact(i + 2);
for (idx, elem) in growing_vector.iter().enumerate() {
assert_eq!(idx, *elem);
}
}
}
}
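
Since Vec allocates through the registered global allocator, each reserve_exact call here should reach the new realloc, which routes growth through grow — and through grow_copy whenever the buffer can be extended neither at the heap tip nor into a following free block. That is presumably why the test interleaves pushes with reservations and re-verifies every element after each round.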