v4l2: bufferpool: Drop writable check on output pool process

Output buffers don't have to be writable. Accepting read-only buffers
from the V4L2 buffer pool allows upstream elements to write directly
into the V4L2 buffers without triggering a CPU copy into a new buffer
from the same V4L2 buffer pool every time.
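In GStreamer terms, "read-only" here means the buffer's memories fail the
gst_memory_is_writable() test, for example because the same GstMemory carries
more than one exclusive lock. A minimal standalone sketch of that situation
(illustration only, not part of the patch; plain core GStreamer API):

  #include <gst/gst.h>

  int
  main (int argc, char **argv)
  {
    GstBuffer *buf;
    GstMemory *mem;

    gst_init (&argc, &argv);

    buf = gst_buffer_new_allocate (NULL, 4096, NULL);
    mem = gst_buffer_peek_memory (buf, 0);

    /* The buffer holds the only exclusive lock: the memory is writable */
    g_print ("before: %s\n",
        gst_memory_is_writable (mem) ? "writable" : "read-only");

    /* An additional exclusive lock (e.g. the memory also being held by
     * another buffer) makes gst_memory_is_writable() return FALSE, so the
     * old, unconditional check in gst_v4l2_is_buffer_valid() would reject
     * the buffer and the pool would fall back to a CPU copy. */
    gst_memory_lock (mem, GST_LOCK_FLAG_EXCLUSIVE);
    g_print ("after:  %s\n",
        gst_memory_is_writable (mem) ? "writable" : "read-only");

    gst_memory_unlock (mem, GST_LOCK_FLAG_EXCLUSIVE);
    gst_buffer_unref (buf);
    return 0;
  }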

Tested with the vivid output device:

  GST_DEBUG=GST_PERFORMANCE:7 gst-launch-1.0 videotestsrc ! v4l2sink device=/dev/video5

With this change, gst_v4l2_buffer_pool_dqbuf() must be allowed to skip
resizing the read-only memories of output buffers, so the memory resize is
now restricted to capture-type buffers.
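Since gst_memory_resize() rewrites the memory's offset and size in place, it
should only be applied to memories that may be modified. As a minimal sketch
of that constraint (resize_if_writable is a hypothetical helper, not part of
the patch; the actual change instead limits the resize block to the V4L2
capture buffer types, as the gst_v4l2_buffer_pool_dqbuf() hunk below shows):

  /* Hypothetical helper (illustration only): resize a memory only when it
   * is writable, leaving read-only output memories untouched. */
  static void
  resize_if_writable (GstMemory * mem, gsize size)
  {
    if (gst_memory_is_writable (mem))
      gst_memory_resize (mem, 0, size);
  }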

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/6572>
Author:    Philipp Zabel
Date:      2024-04-05 14:09:18 +02:00
Committer: GStreamer Marge Bot
Parent:    a7fe79c4de
Commit:    e1f5bacf8d


@@ -84,7 +84,8 @@ static void gst_v4l2_buffer_pool_complete_release_buffer (GstBufferPool * bpool,
     GstBuffer * buffer, gboolean queued);
 
 static gboolean
-gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group)
+gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group,
+    gboolean check_writable)
 {
   GstMemory *mem = gst_buffer_peek_memory (buffer, 0);
   gboolean valid = FALSE;
@@ -108,7 +109,7 @@ gst_v4l2_is_buffer_valid (GstBuffer * buffer, GstV4l2MemoryGroup ** out_group)
       if (group->mem[i] != gst_buffer_peek_memory (buffer, i))
         goto done;
 
-      if (!gst_memory_is_writable (group->mem[i]))
+      if (check_writable && !gst_memory_is_writable (group->mem[i]))
         goto done;
     }
@@ -127,7 +128,7 @@ gst_v4l2_buffer_pool_resize_buffer (GstBufferPool * bpool, GstBuffer * buffer)
   GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL (bpool);
   GstV4l2MemoryGroup *group;
 
-  if (gst_v4l2_is_buffer_valid (buffer, &group)) {
+  if (gst_v4l2_is_buffer_valid (buffer, &group, TRUE)) {
     gst_v4l2_allocator_reset_group (pool->vallocator, group);
   } else {
     GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
@@ -238,7 +239,7 @@ gst_v4l2_buffer_pool_import_userptr (GstV4l2BufferPool * pool,
   GST_LOG_OBJECT (pool, "importing userptr");
 
   /* get the group */
-  if (!gst_v4l2_is_buffer_valid (dest, &group))
+  if (!gst_v4l2_is_buffer_valid (dest, &group, TRUE))
     goto not_our_buffer;
 
   if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
@@ -355,7 +356,7 @@ gst_v4l2_buffer_pool_import_dmabuf (GstV4l2BufferPool * pool,
   GST_LOG_OBJECT (pool, "importing dmabuf");
 
-  if (!gst_v4l2_is_buffer_valid (dest, &group))
+  if (!gst_v4l2_is_buffer_valid (dest, &group, TRUE))
     goto not_our_buffer;
 
   if (n_mem > GST_VIDEO_MAX_PLANES)
@@ -1312,15 +1313,18 @@ gst_v4l2_buffer_pool_dqbuf (GstV4l2BufferPool * pool, GstBuffer ** buffer,
       if (GST_VIDEO_INFO_FORMAT (&pool->caps_info) == GST_VIDEO_FORMAT_ENCODED)
         break;
 
-      /* Ensure our offset matches the expected plane size, or image size if
-       * there is only one memory */
-      if (group->n_mem == 1) {
-        gst_memory_resize (group->mem[0], 0, info->size + info->offset[0]);
-        break;
-      }
-
-      if (!GST_VIDEO_FORMAT_INFO_IS_TILED (finfo))
-        gst_memory_resize (group->mem[i], 0, obj->plane_size[i]);
+      if (obj->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+          obj->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+        /* Ensure our offset matches the expected plane size, or image size if
+         * there is only one memory */
+        if (group->n_mem == 1) {
+          gst_memory_resize (group->mem[0], 0, info->size + info->offset[0]);
+          break;
+        }
+
+        if (!GST_VIDEO_FORMAT_INFO_IS_TILED (finfo))
+          gst_memory_resize (group->mem[i], 0, obj->plane_size[i]);
+      }
     }
 
   /* Ignore timestamp and field for OUTPUT device */
@@ -1520,7 +1524,7 @@ done:
   /* Mark buffer as outstanding */
   if (ret == GST_FLOW_OK) {
     GstV4l2MemoryGroup *group;
-    if (gst_v4l2_is_buffer_valid (*buffer, &group)) {
+    if (gst_v4l2_is_buffer_valid (*buffer, &group, TRUE)) {
       GST_LOG_OBJECT (pool, "mark buffer %u outstanding", group->buffer.index);
       g_atomic_int_or (&pool->buffer_state[group->buffer.index],
           BUFFER_STATE_OUTSTANDING);
@@ -1571,7 +1575,7 @@ gst_v4l2_buffer_pool_complete_release_buffer (GstBufferPool * bpool,
     case GST_V4L2_IO_DMABUF_IMPORT:
     {
       GstV4l2MemoryGroup *group;
-      if (gst_v4l2_is_buffer_valid (buffer, &group)) {
+      if (gst_v4l2_is_buffer_valid (buffer, &group, TRUE)) {
         GstFlowReturn ret = GST_FLOW_OK;
 
         gst_v4l2_allocator_reset_group (pool->vallocator, group);
@@ -1612,7 +1616,7 @@ gst_v4l2_buffer_pool_complete_release_buffer (GstBufferPool * bpool,
       GstV4l2MemoryGroup *group;
       guint index;
 
-      if (!gst_v4l2_is_buffer_valid (buffer, &group)) {
+      if (!gst_v4l2_is_buffer_valid (buffer, &group, TRUE)) {
         /* Simply release invalid/modified buffer, the allocator will
          * give it back later */
         GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_TAG_MEMORY);
@@ -1663,7 +1667,7 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
     GstV4l2MemoryGroup *group;
     gboolean queued = FALSE;
 
-    if (gst_v4l2_is_buffer_valid (buffer, &group)) {
+    if (gst_v4l2_is_buffer_valid (buffer, &group, TRUE)) {
       gint old_buffer_state =
           g_atomic_int_and (&pool->buffer_state[group->buffer.index],
           ~BUFFER_STATE_OUTSTANDING);
@@ -2065,7 +2069,8 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf,
         if ((*buf)->pool != bpool)
           goto copying;
 
-        if (!gst_v4l2_is_buffer_valid (*buf, &group))
+        /* Output buffers don't have to be writable */
+        if (!gst_v4l2_is_buffer_valid (*buf, &group, FALSE))
           goto copying;
 
         index = group->buffer.index;
@@ -2102,7 +2107,7 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf,
           }
 
           /* retrieve the group */
-          gst_v4l2_is_buffer_valid (to_queue, &group);
+          gst_v4l2_is_buffer_valid (to_queue, &group, TRUE);
         }
 
         if ((ret =
@@ -2115,7 +2120,7 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf,
            * streaming now */
           if (!gst_v4l2_buffer_pool_streamon (pool)) {
             /* don't check return value because qbuf would have failed */
-            gst_v4l2_is_buffer_valid (to_queue, &group);
+            gst_v4l2_is_buffer_valid (to_queue, &group, TRUE);
 
             /* qbuf has stored to_queue buffer but we are not in
              * streaming state, so the flush logic won't be performed.