v4l2bufferpool: Copy already queued buffer

This is required because during preroll we pass the first buffer twice, so it
is already queued. It is also useful to allow filters to replay a previously
rendered buffer. This requires one more buffer in the sink if last-sample is
enabled, since the last sample will no longer be the same as the currently
queued buffer.

https://bugzilla.gnome.org/show_bug.cgi?id=722303
Author: Nicolas Dufresne
Date:   2014-05-26 12:34:42 -04:00
Parent: e2fd7e274e
Commit: b3bf4e33d0

2 changed files with 50 additions and 28 deletions
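
Note: in short, qbuf no longer dequeues to free an occupied slot; instead the
process path decides up front whether a buffer can be queued as-is or must be
copied. The following is only a rough sketch of that decision, not part of the
patch: the function name can_queue_directly() is made up for illustration,
while pool->buffers and gst_v4l2_is_buffer_valid() are the internal field and
helper that appear in the diff below, so the plugin's private headers are
assumed to be available.

    /* Sketch (assumes the v4l2 plugin's private headers): TRUE if `buf` can be
     * queued to the driver as-is, FALSE if it must be copied into a freshly
     * acquired pool buffer first. */
    static gboolean
    can_queue_directly (GstV4l2BufferPool * pool, GstBuffer * buf)
    {
      GstV4l2MemoryGroup *group;

      if (buf->pool != GST_BUFFER_POOL (pool))
        return FALSE;           /* buffer comes from a foreign pool */

      if (!gst_v4l2_is_buffer_valid (buf, &group))
        return FALSE;           /* not backed by one of our V4L2 slots */

      /* slot still queued to the driver, e.g. the preroll buffer being
       * rendered a second time */
      if (pool->buffers[group->buffer.index] != NULL)
        return FALSE;

      return TRUE;
    }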

--- a/sys/v4l2/gstv4l2bufferpool.c
+++ b/sys/v4l2/gstv4l2bufferpool.c

@@ -969,7 +969,6 @@ select_error:
 static GstFlowReturn
 gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf)
 {
-  GstFlowReturn ret;
   GstV4l2MemoryGroup *group = NULL;
   gint index;
@@ -981,22 +980,8 @@ gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf)
   index = group->buffer.index;
 
-  if (V4L2_TYPE_IS_OUTPUT (pool->obj->type)) {
-    /* If already queued, dequeue it, so we keep the render order */
-    while (pool->buffers[index]) {
-      GstBuffer *tmp;
-
-      ret = gst_v4l2_buffer_pool_dqbuf (pool, &tmp);
-      if (ret != GST_FLOW_OK)
-        goto already_queued;
-
-      gst_buffer_unref (tmp);
-    }
-  } else {
-    /* Should never happen, would mean a buffer got freed twice */
-    g_return_val_if_fail (pool->buffers[index] == NULL, GST_FLOW_ERROR);
-  }
+  if (pool->buffers[index] != NULL)
+    goto already_queued;
 
   GST_LOG_OBJECT (pool, "queuing buffer %i", index);
@@ -1010,10 +995,8 @@ gst_v4l2_buffer_pool_qbuf (GstV4l2BufferPool * pool, GstBuffer * buf)
 already_queued:
   {
-    if (ret != GST_FLOW_FLUSHING)
-      GST_ERROR_OBJECT (pool,
-          "buffer %i was already queued and we could not dequeue it", index);
-    return ret;
+    GST_ERROR_OBJECT (pool, "the buffer %i was already queued", index);
+    return GST_FLOW_ERROR;
   }
 queue_failed:
   {
@@ -1618,13 +1601,31 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer ** buf)
     case GST_V4L2_IO_DMABUF:
     case GST_V4L2_IO_MMAP:
     {
-      GstBuffer *to_queue;
+      GstBuffer *to_queue = NULL;
+      GstV4l2MemoryGroup *group;
+      gint index;
 
-      if ((*buf)->pool == bpool) {
-        /* nothing, we can queue directly */
-        to_queue = gst_buffer_ref (*buf);
-        GST_LOG_OBJECT (pool, "processing buffer from our pool");
-      } else {
+      if ((*buf)->pool != bpool)
+        goto copying;
+
+      if (!gst_v4l2_is_buffer_valid (*buf, &group))
+        goto copying;
+
+      index = group->buffer.index;
+
+      GST_LOG_OBJECT (pool, "processing buffer %i from our pool", index);
+
+      index = group->buffer.index;
+      if (pool->buffers[index] != NULL) {
+        GST_LOG_OBJECT (pool, "buffer %i already queued, copying", index);
+        goto copying;
+      }
+
+      /* we can queue directly */
+      to_queue = gst_buffer_ref (*buf);
+
+    copying:
+      if (to_queue == NULL) {
         GstBufferPoolAcquireParams params = { 0 };
 
         GST_LOG_OBJECT (pool, "alloc buffer from our pool");

--- a/sys/v4l2/gstv4l2sink.c
+++ b/sys/v4l2/gstv4l2sink.c

@@ -536,7 +536,28 @@ static gboolean
 gst_v4l2sink_propose_allocation (GstBaseSink * bsink, GstQuery * query)
 {
   GstV4l2Sink *v4l2sink = GST_V4L2SINK (bsink);
-  return gst_v4l2_object_propose_allocation (v4l2sink->v4l2object, query);
+  gboolean last_sample_enabled;
+
+  if (!gst_v4l2_object_propose_allocation (v4l2sink->v4l2object, query))
+    return FALSE;
+
+  g_object_get (bsink, "enable-last-sample", &last_sample_enabled, NULL);
+
+  if (last_sample_enabled) {
+    GstBufferPool *pool;
+    guint size, min, max;
+
+    gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
+
+    /* we need 1 more, otherwise we'll run out of buffers at preroll */
+    min++;
+    if (max < min)
+      max = min;
+
+    gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
+  }
+
+  return TRUE;
 }
 
 /* called after A/V sync to render frame */
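
For context, enable-last-sample and last-sample are standard GstBaseSink
properties, which is why the extra pool buffer is only requested when the
feature is left enabled. A minimal usage sketch with public API only; the
helper name get_last_rendered_sample() is hypothetical:

    #include <gst/gst.h>

    /* Sketch: last-sample keeps a reference to the most recently rendered
     * buffer, which is the reason the sink asks for one extra pool buffer. */
    static GstSample *
    get_last_rendered_sample (GstElement * v4l2sink)
    {
      GstSample *sample = NULL;

      /* both properties are provided by GstBaseSink */
      g_object_set (v4l2sink, "enable-last-sample", TRUE, NULL);
      g_object_get (v4l2sink, "last-sample", &sample, NULL);

      /* NULL until at least one buffer was rendered; caller owns the ref */
      return sample;
    }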