gstreamer/buffer: Refactor buffer cursor implementation with macros to reduce code duplication

This commit is contained in:
Sebastian Dröge 2020-04-07 13:39:11 +03:00
parent 42d09c2834
commit 35f7c910a9

View file

@ -45,6 +45,183 @@ pub struct BufferRefCursor<T> {
map_info: gst_sys::GstMapInfo,
}
// Generates the body of an `io::Seek` implementation shared by `BufferCursor`
// and `BufferRefCursor`.
//
// `$get_buffer_ref` is a plain `fn(&Self) -> &BufferRef` (a fn pointer, not a
// closure — see the comment below) that extracts the underlying `BufferRef`
// from the concrete cursor type.
macro_rules! define_seek_impl(
    ($get_buffer_ref:expr) => {
        fn seek(&mut self, pos: io::SeekFrom) -> Result<u64, io::Error> {
            // Unmap any currently mapped memory first: after seeking, the
            // cursor may point into a different `GstMemory` of the buffer.
            if !self.map_info.memory.is_null() {
                unsafe {
                    gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
                    self.map_info.memory = ptr::null_mut();
                }
            }

            match pos {
                io::SeekFrom::Start(off) => {
                    // Clamp to the end of the buffer rather than erroring.
                    self.cur_offset = std::cmp::min(self.size, off);
                }
                // FIX: `SeekFrom::End(off)` means "size + off". A non-negative
                // `off` lands at or past the end, which we clamp to the buffer
                // size. The previous guard (`off <= 0`) had the semantics
                // inverted: it clamped backward seeks to the end and subtracted
                // forward offsets from the size.
                io::SeekFrom::End(off) if off >= 0 => {
                    self.cur_offset = self.size;
                }
                io::SeekFrom::End(std::i64::MIN) => {
                    // `-i64::MIN` would overflow in the arm below.
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "Seek before start of buffer",
                    ));
                }
                io::SeekFrom::End(off) => {
                    // Negative offset: seek backwards from the end; error if
                    // that would land before the start of the buffer.
                    self.cur_offset = self.size.checked_sub((-off) as u64).ok_or_else(|| {
                        io::Error::new(io::ErrorKind::InvalidInput, "Seek before start of buffer")
                    })?;
                }
                io::SeekFrom::Current(std::i64::MIN) => {
                    // Same overflow guard as above for `-off`.
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "Seek before start of buffer",
                    ));
                }
                io::SeekFrom::Current(off) => {
                    if off <= 0 {
                        self.cur_offset =
                            self.cur_offset.checked_sub((-off) as u64).ok_or_else(|| {
                                io::Error::new(
                                    io::ErrorKind::InvalidInput,
                                    "Seek before start of buffer",
                                )
                            })?;
                    } else {
                        // Forward seeks saturate at the end of the buffer.
                        self.cur_offset = std::cmp::min(
                            self.size,
                            self.cur_offset.checked_add(off as u64).unwrap_or(self.size),
                        );
                    }
                }
            }

            // Work around lifetime annotation issues with closures
            let get_buffer_ref: fn(&Self) -> &BufferRef = $get_buffer_ref;

            // Re-resolve which memory the new offset falls into so the next
            // read/write maps the right one.
            let (idx, _, skip) = get_buffer_ref(self)
                .find_memory(self.cur_offset as usize, None)
                .expect("Failed to find memory");
            self.cur_mem_idx = idx;
            self.cur_mem_offset = skip;

            Ok(self.cur_offset)
        }

        // Once stabilized
        // fn stream_len(&mut self) -> Result<u64, io::Error> {
        //     Ok(self.size)
        // }
        //
        // fn stream_position(&mut self) -> Result<u64, io::Error> {
        //     Ok(self.current_offset)
        // }
    }
);
// Generates the shared body of `io::Read::read` / `io::Write::write` for the
// buffer cursors. The direction-specific pieces are passed in:
//   $self           - the cursor (the `self` of the surrounding method)
//   $data           - the caller's slice; shrunk as bytes are transferred
//   $data_type      - `&mut [u8]` for read, `&[u8]` for write
//   $get_buffer_ref - `fn(&Self) -> &BufferRef` extracting the underlying buffer
//   $map_flags      - `GST_MAP_READ` or `GST_MAP_WRITE`
//   $copy           - copies `to_copy` bytes between the mapped memory and `$data`
//   $split          - drops the first `to_copy` bytes from `$data`
// Returns `Ok(total bytes transferred)`; stops early at the end of the buffer.
macro_rules! define_read_write_fn_impl(
    ($self:ident, $data:ident, $data_type:ty, $get_buffer_ref:expr, $map_flags:path, $copy:expr, $split:expr) => {{
        let mut copied = 0;

        while !$data.is_empty() && $self.cur_mem_idx < $self.num_mem {
            // Map memory if needed. cur_mem_idx, cur_mem_offset and cur_offset are required to be
            // set correctly here already (from constructor, seek and the bottom of the loop)
            if $self.map_info.memory.is_null() {
                unsafe {
                    // Work around lifetime annotation issues with closures
                    let get_buffer_ref: fn(&Self) -> &BufferRef = $get_buffer_ref;
                    let memory = gst_sys::gst_buffer_peek_memory(
                        get_buffer_ref($self).as_mut_ptr(),
                        $self.cur_mem_idx,
                    );
                    assert!(!memory.is_null());

                    if gst_sys::gst_memory_map(memory, &mut $self.map_info, $map_flags)
                        == glib_sys::GFALSE
                    {
                        // NOTE(review): this message says "readable" even when
                        // mapping with GST_MAP_WRITE — consider neutral wording.
                        return Err(io::Error::new(
                            io::ErrorKind::InvalidData,
                            "Failed to map memory readable",
                        ));
                    }
                }

                assert!($self.cur_mem_offset < $self.map_info.size);
            }

            assert!(!$self.map_info.memory.is_null());

            // Copy all data we can currently copy
            let data_left = $self.map_info.size - $self.cur_mem_offset;
            let to_copy = std::cmp::min($data.len(), data_left);
            $copy(&$self.map_info, $self.cur_mem_offset, $data, to_copy);
            copied += to_copy;
            $self.cur_offset += to_copy as u64;
            $self.cur_mem_offset += to_copy;

            // Work around lifetime annotation issues with closures
            let split: fn($data_type, usize) -> $data_type = $split;
            #[allow(clippy::redundant_closure_call)]
            {
                $data = split($data, to_copy);
            }

            // If we're at the end of the current memory, unmap and advance to the next memory
            if $self.cur_mem_offset == $self.map_info.size {
                unsafe {
                    gst_sys::gst_memory_unmap($self.map_info.memory, &mut $self.map_info);
                }
                $self.map_info.memory = ptr::null_mut();
                $self.cur_mem_idx += 1;
                $self.cur_mem_offset = 0;
            }
        }

        Ok(copied)
    }}
);
// Generates `fn read` (the `io::Read` contract) on top of
// `define_read_write_fn_impl`: maps each memory with `GST_MAP_READ` and copies
// out of the mapped region into the caller's slice.
macro_rules! define_read_impl(
    ($get_buffer_ref:expr) => {
        fn read(&mut self, mut data: &mut [u8]) -> Result<usize, io::Error> {
            define_read_write_fn_impl!(
                self,
                data,
                &mut [u8],
                $get_buffer_ref,
                gst_sys::GST_MAP_READ,
                // Copy `to_copy` bytes from the mapped memory (at `off`) into
                // the destination slice; regions never overlap.
                |map_info: &gst_sys::GstMapInfo, off, data: &mut [u8], to_copy| unsafe {
                    ptr::copy_nonoverlapping(
                        (map_info.data as *const u8).add(off),
                        data.as_mut_ptr(),
                        to_copy,
                    );
                },
                // Advance past the bytes just filled.
                |data, to_copy| &mut data[to_copy..]
            )
        }
    }
);
// Generates `fn write` and `fn flush` (the `io::Write` contract) on top of
// `define_read_write_fn_impl`: maps each memory with `GST_MAP_WRITE` and copies
// the caller's bytes into the mapped region.
macro_rules! define_write_impl(
    ($get_buffer_ref:expr) => {
        fn write(&mut self, mut data: &[u8]) -> Result<usize, io::Error> {
            define_read_write_fn_impl!(
                self,
                data,
                &[u8],
                $get_buffer_ref,
                gst_sys::GST_MAP_WRITE,
                // Copy `to_copy` bytes from the source slice into the mapped
                // memory (at `off`); regions never overlap.
                |map_info: &gst_sys::GstMapInfo, off, data: &[u8], to_copy| unsafe {
                    ptr::copy_nonoverlapping(
                        data.as_ptr(),
                        (map_info.data as *mut u8).add(off),
                        to_copy,
                    );
                },
                // Advance past the bytes just consumed.
                |data, to_copy| &data[to_copy..]
            )
        }

        // Writes go straight into the buffer's memories; there is nothing
        // buffered to flush.
        fn flush(&mut self) -> Result<(), io::Error> {
            Ok(())
        }
    }
);
impl<T> fmt::Debug for BufferCursor<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("BufferCursor")
@ -70,194 +247,15 @@ impl<T> Drop for BufferCursor<T> {
}
impl io::Read for BufferCursor<Readable> {
fn read(&mut self, mut data: &mut [u8]) -> Result<usize, io::Error> {
let mut copied = 0;
while !data.is_empty() && self.cur_mem_idx < self.num_mem {
// Map memory if needed. cur_mem_idx, cur_mem_offset and cur_offset are required to be
// set correctly here already (from constructor, seek and the bottom of the loop)
if self.map_info.memory.is_null() {
unsafe {
let memory = gst_sys::gst_buffer_peek_memory(
self.buffer.as_ref().unwrap().as_mut_ptr(),
self.cur_mem_idx,
);
assert!(!memory.is_null());
if gst_sys::gst_memory_map(memory, &mut self.map_info, gst_sys::GST_MAP_READ)
== glib_sys::GFALSE
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Failed to map memory readable",
));
}
}
assert!(self.cur_mem_offset < self.map_info.size);
}
assert!(!self.map_info.memory.is_null());
// Copy all data we can currently copy
let data_left = self.map_info.size - self.cur_mem_offset;
let to_copy = std::cmp::min(data.len(), data_left);
unsafe {
ptr::copy_nonoverlapping(
(self.map_info.data as *const u8).add(self.cur_mem_offset),
data.as_mut_ptr(),
to_copy,
);
}
copied += to_copy;
self.cur_offset += to_copy as u64;
self.cur_mem_offset += to_copy;
data = &mut data[to_copy..];
// If we're at the end of the current memory, unmap and advance to the next memory
if self.cur_mem_offset == self.map_info.size {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
}
self.map_info.memory = ptr::null_mut();
self.cur_mem_idx += 1;
self.cur_mem_offset = 0;
}
}
Ok(copied)
}
define_read_impl!(|s| s.buffer.as_ref().unwrap());
}
impl io::Write for BufferCursor<Writable> {
fn write(&mut self, mut data: &[u8]) -> Result<usize, io::Error> {
let mut copied = 0;
while !data.is_empty() && self.cur_mem_idx < self.num_mem {
// Map memory if needed. cur_mem_idx, cur_mem_offset and cur_offset are required to be
// set correctly here already (from constructor, seek and the bottom of the loop)
if self.map_info.memory.is_null() {
unsafe {
let memory = gst_sys::gst_buffer_peek_memory(
self.buffer.as_ref().unwrap().as_mut_ptr(),
self.cur_mem_idx,
);
assert!(!memory.is_null());
if gst_sys::gst_memory_map(memory, &mut self.map_info, gst_sys::GST_MAP_WRITE)
== glib_sys::GFALSE
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Failed to map memory writable",
));
}
}
assert!(self.cur_mem_offset < self.map_info.size);
}
assert!(!self.map_info.memory.is_null());
// Copy all data we can currently copy
let data_left = self.map_info.size - self.cur_mem_offset;
let to_copy = std::cmp::min(data.len(), data_left);
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr(),
(self.map_info.data as *mut u8).add(self.cur_mem_offset),
to_copy,
);
}
copied += to_copy;
self.cur_offset += to_copy as u64;
self.cur_mem_offset += to_copy;
data = &data[to_copy..];
// If we're at the end of the current memory, unmap and advance to the next memory
if self.cur_mem_offset == self.map_info.size {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
}
self.map_info.memory = ptr::null_mut();
self.cur_mem_idx += 1;
self.cur_mem_offset = 0;
}
}
Ok(copied)
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
define_write_impl!(|s| s.buffer.as_ref().unwrap());
}
impl<T> io::Seek for BufferCursor<T> {
fn seek(&mut self, pos: io::SeekFrom) -> Result<u64, io::Error> {
if !self.map_info.memory.is_null() {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
self.map_info.memory = ptr::null_mut();
}
}
match pos {
io::SeekFrom::Start(off) => {
self.cur_offset = std::cmp::min(self.size, off);
}
io::SeekFrom::End(off) if off <= 0 => {
self.cur_offset = self.size;
}
io::SeekFrom::End(off) => {
self.cur_offset = self.size.checked_sub(off as u64).ok_or_else(|| {
io::Error::new(io::ErrorKind::InvalidInput, "Seek before start of buffer")
})?;
}
io::SeekFrom::Current(std::i64::MIN) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Seek before start of buffer",
));
}
io::SeekFrom::Current(off) => {
if off <= 0 {
self.cur_offset =
self.cur_offset.checked_sub((-off) as u64).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Seek before start of buffer",
)
})?;
} else {
self.cur_offset = std::cmp::min(
self.size,
self.cur_offset.checked_add(off as u64).unwrap_or(self.size),
);
}
}
}
let (idx, _, skip) = self
.buffer
.as_ref()
.unwrap()
.find_memory(self.cur_offset as usize, None)
.expect("Failed to find memory");
self.cur_mem_idx = idx;
self.cur_mem_offset = skip;
Ok(self.cur_offset)
}
// Once stabilized
// fn stream_len(&mut self) -> Result<u64, io::Error> {
// Ok(self.size)
// }
//
// fn stream_position(&mut self) -> Result<u64, io::Error> {
// Ok(self.current_offset)
// }
define_seek_impl!(|s| s.buffer.as_ref().unwrap());
}
impl<T> BufferCursor<T> {
@ -346,253 +344,19 @@ impl<T> Drop for BufferRefCursor<T> {
}
impl<'a> io::Read for BufferRefCursor<&'a BufferRef> {
fn read(&mut self, mut data: &mut [u8]) -> Result<usize, io::Error> {
let mut copied = 0;
while !data.is_empty() && self.cur_mem_idx < self.num_mem {
// Map memory if needed. cur_mem_idx, cur_mem_offset and cur_offset are required to be
// set correctly here already (from constructor, seek and the bottom of the loop)
if self.map_info.memory.is_null() {
unsafe {
let memory =
gst_sys::gst_buffer_peek_memory(self.buffer.as_mut_ptr(), self.cur_mem_idx);
assert!(!memory.is_null());
if gst_sys::gst_memory_map(memory, &mut self.map_info, gst_sys::GST_MAP_READ)
== glib_sys::GFALSE
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Failed to map memory readable",
));
}
}
assert!(self.cur_mem_offset < self.map_info.size);
}
assert!(!self.map_info.memory.is_null());
// Copy all data we can currently copy
let data_left = self.map_info.size - self.cur_mem_offset;
let to_copy = std::cmp::min(data.len(), data_left);
unsafe {
ptr::copy_nonoverlapping(
(self.map_info.data as *const u8).add(self.cur_mem_offset),
data.as_mut_ptr(),
to_copy,
);
}
copied += to_copy;
self.cur_offset += to_copy as u64;
self.cur_mem_offset += to_copy;
data = &mut data[to_copy..];
// If we're at the end of the current memory, unmap and advance to the next memory
if self.cur_mem_offset == self.map_info.size {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
}
self.map_info.memory = ptr::null_mut();
self.cur_mem_idx += 1;
self.cur_mem_offset = 0;
}
}
Ok(copied)
}
define_read_impl!(|s| s.buffer);
}
impl<'a> io::Write for BufferRefCursor<&'a mut BufferRef> {
fn write(&mut self, mut data: &[u8]) -> Result<usize, io::Error> {
let mut copied = 0;
while !data.is_empty() && self.cur_mem_idx < self.num_mem {
// Map memory if needed. cur_mem_idx, cur_mem_offset and cur_offset are required to be
// set correctly here already (from constructor, seek and the bottom of the loop)
if self.map_info.memory.is_null() {
unsafe {
let memory =
gst_sys::gst_buffer_peek_memory(self.buffer.as_mut_ptr(), self.cur_mem_idx);
assert!(!memory.is_null());
if gst_sys::gst_memory_map(memory, &mut self.map_info, gst_sys::GST_MAP_WRITE)
== glib_sys::GFALSE
{
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Failed to map memory writable",
));
}
}
assert!(self.cur_mem_offset < self.map_info.size);
}
assert!(!self.map_info.memory.is_null());
// Copy all data we can currently copy
let data_left = self.map_info.size - self.cur_mem_offset;
let to_copy = std::cmp::min(data.len(), data_left);
unsafe {
ptr::copy_nonoverlapping(
data.as_ptr(),
(self.map_info.data as *mut u8).add(self.cur_mem_offset),
to_copy,
);
}
copied += to_copy;
self.cur_offset += to_copy as u64;
self.cur_mem_offset += to_copy;
data = &data[to_copy..];
// If we're at the end of the current memory, unmap and advance to the next memory
if self.cur_mem_offset == self.map_info.size {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
}
self.map_info.memory = ptr::null_mut();
self.cur_mem_idx += 1;
self.cur_mem_offset = 0;
}
}
Ok(copied)
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
define_write_impl!(|s| s.buffer);
}
impl<'a> io::Seek for BufferRefCursor<&'a BufferRef> {
fn seek(&mut self, pos: io::SeekFrom) -> Result<u64, io::Error> {
if !self.map_info.memory.is_null() {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
self.map_info.memory = ptr::null_mut();
}
}
match pos {
io::SeekFrom::Start(off) => {
self.cur_offset = std::cmp::min(self.size, off);
}
io::SeekFrom::End(off) if off <= 0 => {
self.cur_offset = self.size;
}
io::SeekFrom::End(off) => {
self.cur_offset = self.size.checked_sub(off as u64).ok_or_else(|| {
io::Error::new(io::ErrorKind::InvalidInput, "Seek before start of buffer")
})?;
}
io::SeekFrom::Current(std::i64::MIN) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Seek before start of buffer",
));
}
io::SeekFrom::Current(off) => {
if off <= 0 {
self.cur_offset =
self.cur_offset.checked_sub((-off) as u64).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Seek before start of buffer",
)
})?;
} else {
self.cur_offset = std::cmp::min(
self.size,
self.cur_offset.checked_add(off as u64).unwrap_or(self.size),
);
}
}
}
let (idx, _, skip) = self
.buffer
.find_memory(self.cur_offset as usize, None)
.expect("Failed to find memory");
self.cur_mem_idx = idx;
self.cur_mem_offset = skip;
Ok(self.cur_offset)
}
// Once stabilized
// fn stream_len(&mut self) -> Result<u64, io::Error> {
// Ok(self.size)
// }
//
// fn stream_position(&mut self) -> Result<u64, io::Error> {
// Ok(self.current_offset)
// }
define_seek_impl!(|s| s.buffer);
}
impl<'a> io::Seek for BufferRefCursor<&'a mut BufferRef> {
fn seek(&mut self, pos: io::SeekFrom) -> Result<u64, io::Error> {
if !self.map_info.memory.is_null() {
unsafe {
gst_sys::gst_memory_unmap(self.map_info.memory, &mut self.map_info);
self.map_info.memory = ptr::null_mut();
}
}
match pos {
io::SeekFrom::Start(off) => {
self.cur_offset = std::cmp::min(self.size, off);
}
io::SeekFrom::End(off) if off <= 0 => {
self.cur_offset = self.size;
}
io::SeekFrom::End(off) => {
self.cur_offset = self.size.checked_sub(off as u64).ok_or_else(|| {
io::Error::new(io::ErrorKind::InvalidInput, "Seek before start of buffer")
})?;
}
io::SeekFrom::Current(std::i64::MIN) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Seek before start of buffer",
));
}
io::SeekFrom::Current(off) => {
if off <= 0 {
self.cur_offset =
self.cur_offset.checked_sub((-off) as u64).ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
"Seek before start of buffer",
)
})?;
} else {
self.cur_offset = std::cmp::min(
self.size,
self.cur_offset.checked_add(off as u64).unwrap_or(self.size),
);
}
}
}
let (idx, _, skip) = self
.buffer
.find_memory(self.cur_offset as usize, None)
.expect("Failed to find memory");
self.cur_mem_idx = idx;
self.cur_mem_offset = skip;
Ok(self.cur_offset)
}
// Once stabilized
// fn stream_len(&mut self) -> Result<u64, io::Error> {
// Ok(self.size)
// }
//
// fn stream_position(&mut self) -> Result<u64, io::Error> {
// Ok(self.current_offset)
// }
define_seek_impl!(|s| s.buffer);
}
impl<T> BufferRefCursor<T> {