Prepend CLAP states with the length
To avoid having to read in chunks.
commit 41ee117bfa
parent c7d7a790c2
1 changed file with 27 additions and 32 deletions
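Note: the framing idea behind this commit, sketched against std::io's Read and Write traits instead of CLAP's raw stream callbacks. The helper names write_state and read_state are illustrative, not part of nih-plug. The state is prefixed with its length as a little-endian u64 so the loader can read the prefix and then exactly that many bytes, without probing for end-of-stream in chunks.

use std::io::{self, Read, Write};

/// Write `state` prefixed with its length as a little-endian u64.
fn write_state(mut writer: impl Write, state: &[u8]) -> io::Result<()> {
    writer.write_all(&(state.len() as u64).to_le_bytes())?;
    writer.write_all(state)
}

/// Read the length prefix first, then exactly that many bytes of state.
fn read_state(mut reader: impl Read) -> io::Result<Vec<u8>> {
    let mut length_bytes = [0u8; 8];
    reader.read_exact(&mut length_bytes)?;

    let mut state = vec![0u8; u64::from_le_bytes(length_bytes) as usize];
    reader.read_exact(&mut state)?;
    Ok(state)
}

fn main() -> io::Result<()> {
    // Round-trip through an in-memory stream standing in for the host's file.
    let state = br#"{"params":{"gain":0.5}}"#;
    let mut stream = Vec::new();
    write_state(&mut stream, state)?;
    assert_eq!(read_state(stream.as_slice())?, &state[..]);
    Ok(())
}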
@@ -1083,12 +1083,21 @@ impl<P: ClapPlugin> Wrapper<P> {
         );
         match serialized {
             Ok(serialized) => {
+                // CLAP does not provide a way to tell how much data there is left in a stream, so
+                // we need to prepend it to our actual state data.
+                let length_bytes = (serialized.len() as u64).to_le_bytes();
+                let num_length_bytes_written = ((*stream).write)(
+                    stream,
+                    length_bytes.as_ptr() as *const c_void,
+                    length_bytes.len() as u64,
+                );
                 let num_bytes_written = ((*stream).write)(
                     stream,
                     serialized.as_ptr() as *const c_void,
                     serialized.len() as u64,
                 );
 
+                nih_debug_assert_eq!(num_length_bytes_written as usize, length_bytes.len());
                 nih_debug_assert_eq!(num_bytes_written as usize, serialized.len());
                 true
             }
@@ -1106,39 +1115,25 @@ impl<P: ClapPlugin> Wrapper<P> {
         check_null_ptr!(false, plugin, stream);
         let wrapper = &*(plugin as *const Self);
 
-        // CLAP does not have a way to tell you about the size of a stream, so the workaround would
-        // be to keep reading 1 MiB chunks until we reach the end of file, reallocating the buffer
-        // each time as we go.
-        const CHUNK_SIZE: usize = 1 << 20;
-        let mut actual_read_buffer_size = 0usize;
-        let mut read_buffer: Vec<u8> = Vec::with_capacity(CHUNK_SIZE);
-        loop {
-            let num_bytes_read = ((*stream).read)(
-                stream,
-                // Make sure to start reading from where we left off if we're going through this
-                // loop multiple times
-                read_buffer.as_mut_ptr().add(actual_read_buffer_size) as *mut c_void,
-                CHUNK_SIZE as u64,
-            );
-            if num_bytes_read < 0 {
-                nih_debug_assert_failure!("Error while reading plugin state");
-                return false;
-            }
+        // CLAP does not have a way to tell how much data there is left in a stream, so we've
+        // prepended the size in front of our JSON state
+        let mut length_bytes = [0; 8];
+        let num_length_bytes_read = ((*stream).read)(
+            stream,
+            length_bytes.as_mut_ptr() as *mut c_void,
+            length_bytes.len() as u64,
+        );
+        nih_debug_assert_eq!(num_length_bytes_read as usize, length_bytes.len());
+        let length = u64::from_le_bytes(length_bytes);
 
-            actual_read_buffer_size += num_bytes_read as usize;
-            if num_bytes_read != CHUNK_SIZE as i64 {
-                // If we read anything below `CHUNK_SIZE` bytes, then we've reached the end of file
-                // on this read
-                break;
-            }
-
-            // Otherwise, reallocate the buffer with enough room for another chunk and try again
-            nih_debug_assert_eq!(num_bytes_read, CHUNK_SIZE as i64);
-            read_buffer.reserve(CHUNK_SIZE);
-        }
-
-        // After reading, trim the additional capacity near the end of the buffer
-        read_buffer.set_len(actual_read_buffer_size as usize);
+        let mut read_buffer: Vec<u8> = Vec::with_capacity(length as usize);
+        let num_bytes_read = ((*stream).read)(
+            stream,
+            read_buffer.as_mut_ptr() as *mut c_void,
+            length as u64,
+        );
+        nih_debug_assert_eq!(num_bytes_read as u64, length);
+        read_buffer.set_len(length as usize);
 
         let success = state::deserialize(
             &read_buffer,
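Note on the load path: the new code goes through Vec::with_capacity plus an unsafe set_len, which is only sound when set_len covers exactly the bytes the host actually wrote. A minimal standalone sketch of that pattern, where fill_from_host is a hypothetical stand-in for CLAP's (*stream).read callback:

/// Read `length` bytes through an FFI-style callback that fills raw capacity
/// and returns how many bytes it wrote, or a negative value on error.
fn read_exact_from_host(
    fill_from_host: impl Fn(*mut u8, u64) -> i64,
    length: usize,
) -> Option<Vec<u8>> {
    let mut buffer: Vec<u8> = Vec::with_capacity(length);
    let num_bytes_read = fill_from_host(buffer.as_mut_ptr(), length as u64);
    if num_bytes_read < 0 || num_bytes_read as usize != length {
        // Short or failed read; bail out rather than exposing uninitialized bytes.
        return None;
    }

    // SAFETY: the callback just initialized exactly `length` bytes.
    unsafe { buffer.set_len(length) };
    Some(buffer)
}

fn main() {
    let host_data = br#"{"params":{"gain":0.5}}"#;
    let state = read_exact_from_host(
        |ptr, len| {
            // Simulate the host copying from its stream into our buffer.
            let len = (len as usize).min(host_data.len());
            unsafe { std::ptr::copy_nonoverlapping(host_data.as_ptr(), ptr, len) };
            len as i64
        },
        host_data.len(),
    )
    .expect("host stream ended early");
    assert_eq!(state, &host_data[..]);
}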