v4l2: Add DMABUF and USERPTR importation

This commit is contained in:
Nicolas Dufresne 2014-04-11 17:10:11 -04:00
parent a114a32d22
commit 92bdd596f2
6 changed files with 786 additions and 123 deletions

View file

@ -339,6 +339,18 @@ gst_v4l2_allocator_release (GstV4l2Allocator * allocator, GstV4l2Memory * mem)
GST_LOG_OBJECT (allocator, "plane %i of buffer %u released",
mem->plane, group->buffer.index);
switch (allocator->memory) {
case V4L2_MEMORY_DMABUF:
close (mem->dmafd);
mem->dmafd = -1;
break;
case V4L2_MEMORY_USERPTR:
mem->data = NULL;
break;
default:
break;
}
/* When all memory are back, put the group back in the free queue */
if (g_atomic_int_dec_and_test (&group->mems_allocated)) {
GST_LOG_OBJECT (allocator, "buffer %u released", group->buffer.index);
@ -709,6 +721,22 @@ gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
return group;
}
/* Undo a partially failed group allocation.  Either release the memories
 * that were successfully set up, or push the untouched group back onto the
 * free queue so that _stop() can still find it. */
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  gint n;

  /* Nothing was allocated: the group must go back on the free queue,
   * otherwise _stop() would not be able to dispose of it. */
  if (group->mems_allocated == 0) {
    gst_atomic_queue_push (allocator->free_queue, group);
    return;
  }

  /* One or more allocations succeeded: unref those memories so they don't
   * keep a reference on the allocator and leak it.  Releasing them puts
   * the group back into the free queue. */
  for (n = 0; n < group->n_mem; n++)
    gst_memory_unref (group->mem[n]);
}
GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
{
@ -758,17 +786,7 @@ mmap_failed:
{
GST_ERROR_OBJECT (allocator, "Failed to mmap buffer: %s",
g_strerror (errno));
if (group->mems_allocated > 0) {
/* If one or more mmap worked, we need to unref the memory, otherwise
* they will keep a ref on the allocator and leak it. This will put back
* the group into the free_queue */
for (i = 0; i < group->n_mem; i++)
gst_memory_unref (group->mem[i]);
} else {
/* Otherwise, group has to be on free queue for _stop() to work */
gst_atomic_queue_push (allocator->free_queue, group);
}
_cleanup_failed_alloc (allocator, group);
return NULL;
}
}
@ -846,34 +864,283 @@ dup_failed:
}
cleanup:
{
if (group->mems_allocated > 0) {
for (i = 0; i < group->n_mem; i++)
gst_memory_unref (group->mem[i]);
} else {
gst_atomic_queue_push (allocator->free_queue, group);
}
_cleanup_failed_alloc (allocator, group);
return NULL;
}
}
#if 0
GstV4l2MemoryGroup *
gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
gint dmabuf_fd[VIDEO_MAX_PLANES])
gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
{
/* TODO */
return NULL;
GstV4l2MemoryGroup *group;
gint i;
g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, NULL);
group = gst_v4l2_allocator_alloc (allocator);
if (group == NULL)
return NULL;
for (i = 0; i < group->n_mem; i++) {
GST_LOG_OBJECT (allocator, "allocation empty DMABUF import group");
if (group->mem[i] == NULL) {
group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
NULL, 0, 0, 0, 0, i, NULL, -1, group);
} else {
/* Take back the allocator reference */
gst_object_ref (allocator);
}
group->mems_allocated++;
}
gst_v4l2_allocator_clear_dmabufin (allocator, group);
return group;
}
GstV4l2MemoryGroup *
gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
gpointer data[VIDEO_MAX_PLANES], gint stride[VIDEO_MAX_PLANES],
gint offset[VIDEO_MAX_PLANES])
gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
{
/* TODO */
return NULL;
GstV4l2MemoryGroup *group;
gint i;
g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, NULL);
group = gst_v4l2_allocator_alloc (allocator);
if (group == NULL)
return NULL;
for (i = 0; i < group->n_mem; i++) {
GST_LOG_OBJECT (allocator, "allocating empty USERPTR group");
if (group->mem[i] == NULL) {
group->mem[i] = (GstMemory *) _v4l2mem_new (0, GST_ALLOCATOR (allocator),
NULL, 0, 0, 0, 0, i, NULL, -1, group);
} else {
/* Take back the allocator reference */
gst_object_ref (allocator);
}
group->mems_allocated++;
}
gst_v4l2_allocator_clear_userptr (allocator, group);
return group;
}
/**
 * gst_v4l2_allocator_import_dmabuf:
 * @allocator: a #GstV4l2Allocator configured for V4L2_MEMORY_DMABUF
 * @group: the memory group to import into
 * @n_mem: number of memories in @dma_mem, must match @group->n_mem
 * @dma_mem: array of DMABUF-backed #GstMemory to import
 *
 * Import the file descriptors of @dma_mem into @group, updating both the
 * GstMemory bookkeeping and the v4l2 plane/buffer descriptors.  Each fd is
 * dup()ed so it can be closed independently when the memory is released
 * (see the V4L2_MEMORY_DMABUF case in gst_v4l2_allocator_release()).
 *
 * Returns: %TRUE on success, %FALSE if the memory count mismatches, a
 * memory is not DMABUF-backed, or dup() fails.
 */
gboolean
gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group, gint n_mem, GstMemory ** dma_mem)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_DMABUF, FALSE);

  if (group->n_mem != n_mem)
    goto n_mem_mismatch;

  for (i = 0; i < group->n_mem; i++) {
    gint dmafd;
    gsize size, offset, maxsize;

    if (!gst_is_dmabuf_memory (dma_mem[i]))
      goto not_dmabuf;

    size = gst_memory_get_sizes (dma_mem[i], &offset, &maxsize);

    /* dup() the fd so the imported memory's lifetime is decoupled from
     * the source buffer; the dup'd fd is closed on release/clear */
    if ((dmafd = dup (gst_dmabuf_memory_get_fd (dma_mem[i]))) < 0)
      goto dup_failed;

    GST_LOG_OBJECT (allocator, "imported DMABUF as fd %i plane %d", dmafd, i);

    mem = (GstV4l2Memory *) group->mem[i];

    /* Update memory */
    mem->mem.maxsize = maxsize;
    mem->mem.offset = offset;
    mem->mem.size = size;
    mem->dmafd = dmafd;

    /* Update v4l2 structure */
    group->planes[i].length = maxsize;
    group->planes[i].bytesused = size;
    group->planes[i].m.fd = dmafd;
    group->planes[i].data_offset = offset;
  }

  /* Copy into buffer structure if not using planes */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = group->planes[0].bytesused;
    group->buffer.length = group->planes[0].length;
    /* Bug fix: this previously read planes[0].m.userptr — the wrong union
     * member — handing the driver a bogus fd in non-multiplanar mode */
    group->buffer.m.fd = group->planes[0].m.fd;
  } else {
    group->buffer.length = group->n_mem;
  }

  return TRUE;

n_mem_mismatch:
  {
    GST_ERROR_OBJECT (allocator, "Got %i dmabuf but needed %i", n_mem,
        group->n_mem);
    return FALSE;
  }
not_dmabuf:
  {
    GST_ERROR_OBJECT (allocator, "Memory %i is not of DMABUF", i);
    return FALSE;
  }
dup_failed:
  {
    GST_ERROR_OBJECT (allocator, "Failed to dup DMABUF descriptor: %s",
        g_strerror (errno));
    return FALSE;
  }
}
/* Reset a DMABUF import group: close any fd dup()ed by
 * gst_v4l2_allocator_import_dmabuf() and zero both the GstMemory sizes and
 * the v4l2 plane/buffer descriptors, so the group can be reused for the
 * next import. */
void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
        mem->dmafd, i);

    /* -1 means "no fd imported"; only close valid descriptors */
    if (mem->dmafd >= 0)
      close (mem->dmafd);

    /* Update memory */
    mem->mem.maxsize = 0;
    mem->mem.offset = 0;
    mem->mem.size = 0;
    mem->dmafd = -1;

    /* Update v4l2 structure */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.fd = -1;
    group->planes[i].data_offset = 0;
  }

  /* Mirror plane 0 into the buffer struct in non-multiplanar mode */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.fd = -1;
  }
}
/* Import user-space plane pointers into @group for V4L2_MEMORY_USERPTR.
 * @img_size is the total image size, @data/@offset give per-plane pointers
 * and byte offsets within the image.  Each plane's "length" is taken from
 * the negotiated format's sizeimage, while "bytesused" is derived from the
 * offsets.
 * NOTE(review): the size computation assumes offsets are in ascending order
 * (size = next offset - this offset, last plane = img_size - offset);
 * descending offsets would underflow gsize — confirm callers guarantee
 * ordering (GstVideoFrame offsets normally do). */
gboolean
gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
    gpointer * data, gsize * offset)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_val_if_fail (allocator->memory == V4L2_MEMORY_USERPTR, FALSE);

  /* TODO Support passing N plane from 1 memory to MPLANE v4l2 format */
  if (n_planes != group->n_mem)
    goto n_mem_missmatch;

  for (i = 0; i < group->n_mem; i++) {
    gsize size, maxsize;

    /* maxsize comes from the driver-negotiated format, not from the
     * imported memory */
    if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
      struct v4l2_pix_format_mplane *pix = &allocator->format.fmt.pix_mp;
      maxsize = pix->plane_fmt[i].sizeimage;
    } else {
      maxsize = allocator->format.fmt.pix.sizeimage;
    }

    /* plane size = distance to the next plane's offset (or to the end of
     * the image for the last plane) */
    if ((i + 1) == n_planes) {
      size = img_size - offset[i];
    } else {
      size = offset[i + 1] - offset[i];
    }

    g_assert (size <= img_size);

    GST_LOG_OBJECT (allocator, "imported USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, data[i], i, size);

    mem = (GstV4l2Memory *) group->mem[i];

    mem->mem.maxsize = maxsize;
    mem->mem.size = size;
    mem->data = data[i];

    group->planes[i].length = maxsize;
    group->planes[i].bytesused = size;
    group->planes[i].m.userptr = (unsigned long) data[i];
    group->planes[i].data_offset = 0;
  }

  /* Copy into buffer structure if not using planes */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = group->planes[0].bytesused;
    group->buffer.length = group->planes[0].length;
    group->buffer.m.userptr = group->planes[0].m.userptr;
  } else {
    group->buffer.length = group->n_mem;
  }

  return TRUE;

n_mem_missmatch:
  {
    GST_ERROR_OBJECT (allocator, "Got %i userptr plane while driver need %i",
        n_planes, group->n_mem);
    return FALSE;
  }
}
/* Reset a USERPTR import group: forget the borrowed user pointers and zero
 * the GstMemory sizes and v4l2 descriptors so the group can be reused. */
void
gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  gint plane;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

  for (plane = 0; plane < group->n_mem; plane++) {
    GstV4l2Memory *vmem = (GstV4l2Memory *) group->mem[plane];

    GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, vmem->data, plane, vmem->mem.size);

    /* Drop the borrowed pointer and reset the GstMemory bookkeeping */
    vmem->mem.maxsize = 0;
    vmem->mem.size = 0;
    vmem->data = NULL;

    /* Reset the matching v4l2 plane descriptor */
    group->planes[plane].length = 0;
    group->planes[plane].bytesused = 0;
    group->planes[plane].m.userptr = 0;
  }

  /* Non-multiplanar drivers read the buffer struct, not the planes array */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.userptr = 0;
  }
}
#endif
void
gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)

View file

@ -126,6 +126,26 @@ GstV4l2MemoryGroup* gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * alloc
GstV4l2MemoryGroup* gst_v4l2_allocator_alloc_dmabuf (GstV4l2Allocator * allocator,
GstAllocator * dmabuf_allocator);
GstV4l2MemoryGroup * gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator);
GstV4l2MemoryGroup * gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator);
gboolean gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group,
gint n_mem, GstMemory ** dma_mem);
void gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
gboolean gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group,
gsize img_size, int n_planes,
gpointer * data, gsize * offset);
void gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
void gst_v4l2_allocator_flush (GstV4l2Allocator * allocator);
gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,

View file

@ -47,8 +47,12 @@
#include <gst/glib-compat-private.h>
GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
#define GST_CAT_DEFAULT v4l2_debug
#define GST_V4L2_IMPORT_QUARK gst_v4l2_buffer_pool_import_quark ()
/*
* GstV4l2BufferPool:
*/
@ -87,6 +91,262 @@ done:
return valid;
}
/* Copy the content of @src into @dest the slow way.
 * For raw video formats a GstVideoFrame copy is used so that differing
 * strides between the two buffers are handled correctly; for everything
 * else the bytes are copied flat and @dest is resized to @src's size.
 * Returns GST_FLOW_ERROR if either buffer cannot be mapped. */
static GstFlowReturn
gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
    GstBuffer * src)
{
  const GstVideoFormatInfo *finfo = pool->obj->info.finfo;

  GST_LOG_OBJECT (pool, "copying buffer");

  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
    GstVideoFrame src_frame, dest_frame;

    GST_DEBUG_OBJECT (pool, "copy video frame");

    /* we have raw video, use videoframe copy to get strides right */
    if (!gst_video_frame_map (&src_frame, &pool->obj->info, src, GST_MAP_READ))
      goto invalid_buffer;

    if (!gst_video_frame_map (&dest_frame, &pool->obj->info, dest,
            GST_MAP_WRITE)) {
      gst_video_frame_unmap (&src_frame);
      goto invalid_buffer;
    }

    gst_video_frame_copy (&dest_frame, &src_frame);

    gst_video_frame_unmap (&src_frame);
    gst_video_frame_unmap (&dest_frame);
  } else {
    GstMapInfo map;

    GST_DEBUG_OBJECT (pool, "copy raw bytes");

    if (!gst_buffer_map (src, &map, GST_MAP_READ))
      goto invalid_buffer;

    /* NOTE(review): assumes @dest is at least as large as @src — confirm
     * callers guarantee this */
    gst_buffer_fill (dest, 0, map.data, gst_buffer_get_size (src));
    gst_buffer_unmap (src, &map);
    gst_buffer_resize (dest, 0, gst_buffer_get_size (src));
  }

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, pool, "slow copy into buffer %p",
      dest);

  return GST_FLOW_OK;

invalid_buffer:
  {
    GST_ERROR_OBJECT (pool, "could not map buffer");
    return GST_FLOW_ERROR;
  }
}
struct UserPtrData
{
GstBuffer *buffer;
gboolean is_frame;
GstVideoFrame frame;
GstMapInfo map;
};
/* Lazily intern the quark used to attach import bookkeeping data
 * (source buffer / mapping) to buffers of this pool. */
static GQuark
gst_v4l2_buffer_pool_import_quark (void)
{
  static GQuark quark;

  if (!quark)
    quark = g_quark_from_string ("GstV4l2BufferPoolUsePtrData");

  return quark;
}
/* GDestroyNotify for the UserPtrData attached as qdata by
 * gst_v4l2_buffer_pool_import_userptr(): undoes the video-frame or flat
 * buffer map, drops the reference on the imported source buffer and frees
 * the bookkeeping struct.
 * NOTE(review): on the import_failed path data->buffer is still NULL while
 * data->map was mapped; gst_buffer_unmap() is then called with a NULL
 * buffer — confirm and fix that call site's ordering. */
static void
_unmap_userptr_frame (struct UserPtrData *data)
{
  if (data->is_frame)
    gst_video_frame_unmap (&data->frame);
  else
    gst_buffer_unmap (data->buffer, &data->map);

  if (data->buffer)
    gst_buffer_unref (data->buffer);

  g_slice_free (struct UserPtrData, data);
}
/* Point the v4l2 memory group behind @dest at the mapped data of @src, so
 * the driver reads from / writes into @src's memory directly (USERPTR
 * mode).  The mapping and a reference on @src are attached as qdata on
 * @dest and torn down by _unmap_userptr_frame() when the qdata is cleared.
 * When @src is NULL, a buffer is acquired from pool->other_pool instead. */
static GstFlowReturn
gst_v4l2_buffer_pool_import_userptr (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstV4l2MemoryGroup *group = NULL;
  GstMapFlags flags;
  const GstVideoFormatInfo *finfo = pool->obj->info.finfo;
  struct UserPtrData *data = NULL;

  GST_LOG_OBJECT (pool, "importing userptr");

  /* get the group */
  if (!gst_v4l2_is_buffer_valid (dest, &group))
    goto not_our_buffer;

  /* ensure we have a src */
  if (src == NULL) {
    g_return_val_if_fail (pool->other_pool != NULL, GST_FLOW_ERROR);
    ret = gst_buffer_pool_acquire_buffer (pool->other_pool, &src, NULL);
    if (ret != GST_FLOW_OK)
      goto done;
  }

  /* Bug fix: the map direction was inverted.  On an OUTPUT queue the
   * driver reads from @src, so map it for reading; on a CAPTURE queue the
   * driver writes into @src, so it must be mapped writable. */
  if (V4L2_TYPE_IS_OUTPUT (pool->obj->type))
    flags = GST_MAP_READ;
  else
    flags = GST_MAP_WRITE;

  data = g_slice_new0 (struct UserPtrData);

  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
    /* raw video: map as a frame so per-plane pointers/offsets are exact */
    data->is_frame = TRUE;

    if (!gst_video_frame_map (&data->frame, &pool->obj->info, src, flags))
      goto invalid_buffer;

    if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
            data->frame.info.size, finfo->n_planes, data->frame.data,
            data->frame.info.offset))
      goto import_failed;
  } else {
    /* encoded/unknown: import the whole buffer as one flat plane */
    gsize offset[1] = { 0 };
    gpointer ptr[1];

    data->is_frame = FALSE;

    if (!gst_buffer_map (src, &data->map, flags))
      goto invalid_buffer;

    ptr[0] = data->map.data;

    if (!gst_v4l2_allocator_import_userptr (pool->vallocator, group,
            data->map.size, 1, ptr, offset))
      goto import_failed;
  }

  /* Keep @src (and its mapping) alive until the qdata is cleared.
   * NOTE(review): when src was acquired above, the acquire reference is
   * not released here — confirm intended ownership with callers. */
  data->buffer = gst_buffer_ref (src);

  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
      data, (GDestroyNotify) _unmap_userptr_frame);

done:
  return ret;

not_our_buffer:
  {
    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
    return GST_FLOW_ERROR;
  }
invalid_buffer:
  {
    GST_ERROR_OBJECT (pool, "could not map buffer");
    g_slice_free (struct UserPtrData, data);
    return GST_FLOW_ERROR;
  }
import_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to import data");
    _unmap_userptr_frame (data);
    return GST_FLOW_ERROR;
  }
}
/* Import the DMABUF memories of @src into the v4l2 memory group backing
 * @dest.  On success an extra reference on @src is kept as qdata on @dest
 * so the fds stay valid until the driver is done with them.
 * Returns GST_FLOW_ERROR when @dest is not from this pool, @src has more
 * memories than V4L2 supports planes, or the allocator refuses the import. */
static GstFlowReturn
gst_v4l2_buffer_pool_import_dmabuf (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstV4l2MemoryGroup *group = NULL;
  GstMemory *dma_mem[GST_VIDEO_MAX_PLANES] = { 0 };
  guint n_mem = gst_buffer_n_memory (src);
  guint i;                      /* unsigned to match n_mem */

  GST_LOG_OBJECT (pool, "importing dmabuf");

  if (!gst_v4l2_is_buffer_valid (dest, &group))
    goto not_our_buffer;

  if (n_mem > GST_VIDEO_MAX_PLANES)
    goto too_many_mems;

  for (i = 0; i < n_mem; i++)
    dma_mem[i] = gst_buffer_peek_memory (src, i);

  if (!gst_v4l2_allocator_import_dmabuf (pool->vallocator, group, n_mem,
          dma_mem))
    goto import_failed;

  /* keep the source buffer alive for as long as the import is in use */
  gst_mini_object_set_qdata (GST_MINI_OBJECT (dest), GST_V4L2_IMPORT_QUARK,
      gst_buffer_ref (src), (GDestroyNotify) gst_buffer_unref);

  return GST_FLOW_OK;

not_our_buffer:
  {
    GST_ERROR_OBJECT (pool, "destination buffer invalid or not from our pool");
    return GST_FLOW_ERROR;
  }
too_many_mems:
  {
    /* Bug fix: the previous message ("could not map buffer") was
     * copy-pasted and did not describe this failure */
    GST_ERROR_OBJECT (pool, "buffer has too many memory blocks (%u), "
        "maximum is %d", n_mem, GST_VIDEO_MAX_PLANES);
    return GST_FLOW_ERROR;
  }
import_failed:
  {
    GST_ERROR_OBJECT (pool, "failed to import dmabuf");
    return GST_FLOW_ERROR;
  }
}
/* Fill @dest (a buffer from this pool) from @src according to the pool's
 * I/O mode: MMAP/DMABUF copy the data, USERPTR and DMABUF_IMPORT import
 * pointers/fds without copying.  When @src is NULL, a buffer is acquired
 * from pool->other_pool instead.
 * NOTE(review): when @src is acquired here, this function never releases
 * that reference (the import paths take their own ref) — confirm the
 * intended ownership. */
static GstFlowReturn
gst_v4l2_buffer_pool_prepare_buffer (GstV4l2BufferPool * pool,
    GstBuffer * dest, GstBuffer * src)
{
  GstFlowReturn ret = GST_FLOW_OK;

  if (src == NULL) {
    if (pool->other_pool == NULL) {
      GST_ERROR_OBJECT (pool, "can't prepare buffer, source buffer missing");
      return GST_FLOW_ERROR;
    }

    ret = gst_buffer_pool_acquire_buffer (pool->other_pool, &src, NULL);
    if (ret != GST_FLOW_OK) {
      GST_ERROR_OBJECT (pool, "failed to acquire buffer from downstream pool");
      goto done;
    }
  }

  switch (pool->obj->mode) {
    case GST_V4L2_IO_MMAP:
    case GST_V4L2_IO_DMABUF:
      ret = gst_v4l2_buffer_pool_copy_buffer (pool, dest, src);
      break;
    case GST_V4L2_IO_USERPTR:
      ret = gst_v4l2_buffer_pool_import_userptr (pool, dest, src);
      break;
    case GST_V4L2_IO_DMABUF_IMPORT:
      ret = gst_v4l2_buffer_pool_import_dmabuf (pool, dest, src);
      break;
    default:
      /* other modes (e.g. RW) need no preparation */
      break;
  }

done:
  return ret;
}
static GstFlowReturn
gst_v4l2_buffer_pool_alloc_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
GstBufferPoolAcquireParams * params)
@ -115,7 +375,11 @@ gst_v4l2_buffer_pool_alloc_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
pool->allocator);
break;
case GST_V4L2_IO_USERPTR:
group = gst_v4l2_allocator_alloc_userptr (pool->vallocator);
break;
case GST_V4L2_IO_DMABUF_IMPORT:
group = gst_v4l2_allocator_alloc_dmabufin (pool->vallocator);
break;
default:
newbuf = NULL;
g_assert_not_reached ();
@ -207,8 +471,8 @@ gst_v4l2_buffer_pool_alloc_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
/* ERRORS */
allocation_failed:
{
GST_WARNING ("Failed to allocated buffer");
return GST_FLOW_EOS;
GST_ERROR_OBJECT (pool, "failed to allocate buffer");
return FALSE;
}
}
@ -265,13 +529,15 @@ gst_v4l2_buffer_pool_set_config (GstBufferPool * bpool, GstStructure * config)
can_allocate =
GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, USERPTR);
break;
case GST_V4L2_IO_DMABUF_IMPORT:
can_allocate = GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, DMABUF);
break;
case GST_V4L2_IO_RW:
pool->allocator = g_object_ref (allocator);
pool->params = params;
/* No need to change the configuration */
goto done;
break;
case GST_V4L2_IO_DMABUF_IMPORT:
default:
g_assert_not_reached ();
break;
@ -443,7 +709,51 @@ gst_v4l2_buffer_pool_start (GstBufferPool * bpool)
break;
}
case GST_V4L2_IO_USERPTR:
{
guint count;
if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, USERPTR)) {
num_buffers = min_buffers;
} else {
num_buffers = max_buffers;
}
GST_DEBUG_OBJECT (pool, "requesting %d USERPTR buffers", num_buffers);
count = gst_v4l2_allocator_start (pool->vallocator, num_buffers,
V4L2_MEMORY_USERPTR);
/* There is no rationale for not getting what we asked for */
if (count < num_buffers) {
num_buffers = count;
goto no_buffers;
}
break;
}
case GST_V4L2_IO_DMABUF_IMPORT:
{
guint count;
if (GST_V4L2_ALLOCATOR_CAN_ALLOCATE (pool->vallocator, DMABUF)) {
num_buffers = min_buffers;
} else {
num_buffers = max_buffers;
}
GST_DEBUG_OBJECT (pool, "requesting %d DMABUF buffers", num_buffers);
count = gst_v4l2_allocator_start (pool->vallocator, num_buffers,
V4L2_MEMORY_DMABUF);
/* There is no rationale for not getting what we asked for */
if (count < num_buffers) {
num_buffers = count;
goto no_buffers;
}
break;
}
default:
num_buffers = 0;
copy_threshold = 0;
@ -462,6 +772,10 @@ gst_v4l2_buffer_pool_start (GstBufferPool * bpool)
GST_BUFFER_POOL_CLASS (parent_class)->set_config (bpool, config);
gst_structure_free (config);
if (pool->other_pool)
if (!gst_buffer_pool_set_active (pool->other_pool, TRUE))
goto other_pool_failed;
/* now, allocate the buffers: */
if (!GST_BUFFER_POOL_CLASS (parent_class)->start (bpool))
goto start_failed;
@ -499,6 +813,12 @@ start_failed:
GST_ERROR_OBJECT (pool, "failed to start streaming");
return FALSE;
}
other_pool_failed:
{
GST_ERROR_OBJECT (pool, "failed to active the other pool %"
GST_PTR_FORMAT, pool->other_pool);
return FALSE;
}
}
@ -540,6 +860,10 @@ stop_streaming (GstV4l2BufferPool * pool)
pool->buffers[i] = NULL;
pool->num_queued--;
/* Remove qdata, this will unmap any map data in userptr */
gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
GST_V4L2_IMPORT_QUARK, NULL, NULL);
if (V4L2_TYPE_IS_OUTPUT (obj->type))
gst_buffer_unref (buffer);
else
@ -558,6 +882,9 @@ stop_streaming (GstV4l2BufferPool * pool)
pool->streaming = FALSE;
if (pool->other_pool)
gst_buffer_pool_set_active (pool->other_pool, FALSE);
return TRUE;
/* ERRORS */
@ -793,25 +1120,28 @@ gst_v4l2_buffer_pool_acquire_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
if (GST_BUFFER_POOL_IS_FLUSHING (bpool))
goto flushing;
/* If this is being called to resurrect a lost buffer */
if (params && params->flags & GST_V4L2_POOL_ACQUIRE_FLAG_RESURECT) {
ret = GST_BUFFER_POOL_CLASS (parent_class)->acquire_buffer (bpool, buffer,
params);
goto done;
}
switch (obj->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
/* capture, This function should return a buffer with new captured data */
switch (obj->mode) {
case GST_V4L2_IO_RW:
{
/* take empty buffer from the pool */
ret = GST_BUFFER_POOL_CLASS (parent_class)->acquire_buffer (bpool,
buffer, params);
break;
}
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_MMAP:
/* If this is being called to resurrect a lost buffer */
if (params && params->flags & GST_V4L2_POOL_ACQUIRE_FLAG_RESURECT) {
ret = GST_BUFFER_POOL_CLASS (parent_class)->acquire_buffer (bpool,
buffer, params);
break;
}
{
/* just dequeue a buffer, we basically use the queue of v4l2 as the
* storage for our buffers. This function does poll first so we can
* interrupt it fine. */
@ -841,9 +1171,46 @@ gst_v4l2_buffer_pool_acquire_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
*buffer = copy;
}
break;
}
case GST_V4L2_IO_USERPTR:
{
struct UserPtrData *data;
/* dequeue filled userptr */
ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto done;
data = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buffer),
GST_V4L2_IMPORT_QUARK);
/* and requeue so that we can continue capturing */
gst_v4l2_buffer_pool_prepare_buffer (pool, *buffer, NULL);
ret = gst_v4l2_buffer_pool_qbuf (pool, *buffer);
*buffer = data->buffer;
data->buffer = NULL;
_unmap_userptr_frame (data);
break;
}
case GST_V4L2_IO_DMABUF_IMPORT:
{
GstBuffer *tmp;
/* dequeue filled dmabuf */
ret = gst_v4l2_buffer_pool_dqbuf (pool, buffer);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto done;
tmp = gst_mini_object_steal_qdata (GST_MINI_OBJECT (*buffer),
GST_V4L2_IMPORT_QUARK);
/* and requeue so that we can continue capturing */
gst_v4l2_buffer_pool_prepare_buffer (pool, *buffer, NULL);
ret = gst_v4l2_buffer_pool_qbuf (pool, *buffer);
*buffer = tmp;
break;
}
default:
ret = GST_FLOW_ERROR;
g_assert_not_reached ();
@ -851,6 +1218,7 @@ gst_v4l2_buffer_pool_acquire_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
}
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
/* playback, This function should return an empty buffer */
@ -863,13 +1231,13 @@ gst_v4l2_buffer_pool_acquire_buffer (GstBufferPool * bpool, GstBuffer ** buffer,
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
/* get a free unqueued buffer */
ret = GST_BUFFER_POOL_CLASS (parent_class)->acquire_buffer (bpool,
buffer, params);
break;
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
default:
ret = GST_FLOW_ERROR;
g_assert_not_reached ();
@ -914,9 +1282,13 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
{
if (gst_v4l2_is_buffer_valid (buffer, NULL)) {
/* queue back in the device */
if (pool->other_pool)
gst_v4l2_buffer_pool_prepare_buffer (pool, buffer, NULL);
gst_v4l2_buffer_pool_qbuf (pool, buffer);
} else {
/* Simply release invalid/modified buffer, the allocator will
@ -927,8 +1299,6 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
}
break;
}
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
default:
g_assert_not_reached ();
break;
@ -945,6 +1315,8 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
{
GstV4l2MemoryGroup *group;
guint index;
@ -979,8 +1351,6 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
break;
}
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
default:
g_assert_not_reached ();
break;
@ -1006,11 +1376,16 @@ gst_v4l2_buffer_pool_finalize (GObject * object)
if (pool->video_fd >= 0)
v4l2_close (pool->video_fd);
if (pool->vallocator)
gst_object_unref (pool->vallocator);
if (pool->allocator)
gst_object_unref (pool->allocator);
if (pool->other_pool)
gst_object_unref (pool->other_pool);
/* FIXME Is this required to keep around ? */
gst_object_unref (pool->obj->element);
@ -1211,7 +1586,7 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer * buf)
goto eos;
}
if (!gst_v4l2_object_copy (obj, buf, tmp))
if (gst_v4l2_buffer_pool_copy_buffer (pool, buf, tmp) != GST_FLOW_OK)
goto copy_failed;
/* and queue the buffer again after the copy */
@ -1222,6 +1597,11 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer * buf)
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
{
/* Nothing to do buffer is from the other pool */
break;
}
default:
g_assert_not_reached ();
break;
@ -1236,6 +1616,9 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer * buf)
/* FIXME, do write() */
GST_WARNING_OBJECT (pool, "implement write()");
break;
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
case GST_V4L2_IO_DMABUF:
case GST_V4L2_IO_MMAP:
{
@ -1258,9 +1641,9 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer * buf)
if (ret != GST_FLOW_OK)
goto acquire_failed;
/* copy into it and queue */
if (!gst_v4l2_object_copy (obj, to_queue, buf))
goto copy_failed;
ret = gst_v4l2_buffer_pool_prepare_buffer (pool, to_queue, buf);
if (ret != GST_FLOW_OK)
goto prepare_failed;
}
if ((ret = gst_v4l2_buffer_pool_qbuf (pool, to_queue)) != GST_FLOW_OK)
@ -1291,9 +1674,6 @@ gst_v4l2_buffer_pool_process (GstV4l2BufferPool * pool, GstBuffer * buf)
gst_buffer_unref (to_queue);
break;
}
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
default:
g_assert_not_reached ();
break;
@ -1313,16 +1693,21 @@ acquire_failed:
gst_flow_get_name (ret));
return ret;
}
copy_failed:
prepare_failed:
{
GST_ERROR_OBJECT (obj->element, "failed to copy data");
return GST_FLOW_ERROR;
GST_ERROR_OBJECT (obj->element, "failed to prepare data");
return ret;
}
start_failed:
{
GST_ERROR_OBJECT (obj->element, "failed to start streaming");
return GST_FLOW_ERROR;
}
copy_failed:
{
GST_ERROR_OBJECT (obj->element, "failed to copy buffer");
return GST_FLOW_ERROR;
}
eos:
{
GST_DEBUG_OBJECT (obj->element, "end of stream reached");
@ -1367,3 +1752,14 @@ start_failed:
return FALSE;
}
}
/* Set the downstream pool this pool imports buffers from (USERPTR /
 * DMABUF_IMPORT modes).  Must be called before the pool is activated.
 * Takes its own reference on @other_pool. */
void
gst_v4l2_buffer_pool_set_other_pool (GstV4l2BufferPool * pool,
    GstBufferPool * other_pool)
{
  GstBufferPool *previous;

  g_return_if_fail (!gst_buffer_pool_is_active (GST_BUFFER_POOL (pool)));

  /* Robustness: take the new reference before dropping the old one, so
   * passing the already-set pool cannot transiently free it */
  previous = pool->other_pool;
  pool->other_pool = gst_object_ref (other_pool);

  if (previous)
    gst_object_unref (previous);
}

View file

@ -56,6 +56,7 @@ struct _GstV4l2BufferPool
GstV4l2Allocator *vallocator;
GstAllocator *allocator;
GstAllocationParams params;
GstBufferPool *other_pool;
guint size;
gboolean add_videometa;
@ -85,7 +86,10 @@ GstBufferPool * gst_v4l2_buffer_pool_new (GstV4l2Object *obj, GstCaps *c
GstFlowReturn gst_v4l2_buffer_pool_process (GstV4l2BufferPool * bpool, GstBuffer * buf);
gboolean gst_v4l2_buffer_pool_flush (GstV4l2BufferPool * pool);
gboolean gst_v4l2_buffer_pool_flush (GstV4l2BufferPool * pool);
void gst_v4l2_buffer_pool_set_other_pool (GstV4l2BufferPool * pool,
GstBufferPool * other_pool);
G_END_DECLS

View file

@ -45,7 +45,6 @@
#include <gst/video/video.h>
GST_DEBUG_CATEGORY_EXTERN (v4l2_debug);
GST_DEBUG_CATEGORY_EXTERN (GST_CAT_PERFORMANCE);
#define GST_CAT_DEFAULT v4l2_debug
#define DEFAULT_PROP_DEVICE_NAME NULL
@ -2937,57 +2936,6 @@ done:
return TRUE;
}
/* Copy the content of @src into @dest.  For raw video a GstVideoFrame copy
 * is used so strides are handled; otherwise the bytes are copied flat and
 * @dest is resized to @src's size.
 * Returns FALSE if either buffer cannot be mapped. */
gboolean
gst_v4l2_object_copy (GstV4l2Object * v4l2object, GstBuffer * dest,
    GstBuffer * src)
{
  const GstVideoFormatInfo *finfo = v4l2object->info.finfo;

  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN &&
          finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
    GstVideoFrame src_frame, dest_frame;

    GST_DEBUG_OBJECT (v4l2object->element, "copy video frame");

    /* FIXME This won't work if cropping apply */

    /* we have raw video, use videoframe copy to get strides right */
    if (!gst_video_frame_map (&src_frame, &v4l2object->info, src, GST_MAP_READ))
      goto invalid_buffer;

    if (!gst_video_frame_map (&dest_frame, &v4l2object->info, dest,
            GST_MAP_WRITE)) {
      gst_video_frame_unmap (&src_frame);
      goto invalid_buffer;
    }

    gst_video_frame_copy (&dest_frame, &src_frame);

    gst_video_frame_unmap (&src_frame);
    gst_video_frame_unmap (&dest_frame);
  } else {
    GstMapInfo map;

    GST_DEBUG_OBJECT (v4l2object->element, "copy raw bytes");

    /* Bug fix: the return value of gst_buffer_map() was previously ignored
     * here, unlike the raw-video branch above */
    if (!gst_buffer_map (src, &map, GST_MAP_READ))
      goto invalid_buffer;

    gst_buffer_fill (dest, 0, map.data, gst_buffer_get_size (src));
    gst_buffer_unmap (src, &map);
    gst_buffer_resize (dest, 0, gst_buffer_get_size (src));
  }

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, v4l2object->element,
      "slow copy into buffer %p", dest);

  return TRUE;

  /* ERRORS */
invalid_buffer:
  {
    /* No Window available to put our image into */
    GST_WARNING_OBJECT (v4l2object->element, "could not map image");
    return FALSE;
  }
}
GstCaps *
gst_v4l2_object_get_caps (GstV4l2Object * v4l2object, GstCaps * filter)
{
@ -3043,12 +2991,12 @@ gboolean
gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
{
GstCaps *caps;
GstBufferPool *pool;
GstBufferPool *pool, *other_pool = NULL;
GstStructure *config;
guint size, min, max, extra = 0;
gboolean update;
gboolean has_video_meta, has_crop_meta;
gboolean can_use_own_pool;
gboolean can_share_own_pool, pushing_from_our_pool = FALSE;
struct v4l2_control ctl = { 0, };
GST_DEBUG_OBJECT (obj->element, "decide allocation");
@ -3078,7 +3026,7 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
if (min != 0) {
/* if there is a min-buffers suggestion, use it. We add 1 because we need 1
* buffer extra to capture while the other two buffers are downstream */
* buffer extra to capture while the other buffers are downstream */
min += 1;
} else {
min = 2;
@ -3101,7 +3049,7 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE,
NULL);
can_use_own_pool = ((has_crop_meta || !obj->need_crop_meta) &&
can_share_own_pool = ((has_crop_meta || !obj->need_crop_meta) &&
(has_video_meta || !obj->need_video_meta));
/* select a pool */
@ -3116,28 +3064,42 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
* other size than what the hardware gives us but for downstream pools
* we can try */
size = MAX (size, obj->sizeimage);
} else if (can_use_own_pool) {
} else if (can_share_own_pool) {
/* no downstream pool, use our own then */
GST_DEBUG_OBJECT (obj->element,
"read/write mode: no downstream pool, using our own");
pool = gst_object_ref (obj->pool);
size = obj->sizeimage;
pushing_from_our_pool = TRUE;
}
break;
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_DMABUF:
/* FIXME in these case we actually prefer/need a downstream pool */
case GST_V4L2_IO_USERPTR:
case GST_V4L2_IO_DMABUF_IMPORT:
/* in importing mode, prefer our own pool, and pass the other pool to
* our own, so it can serve itself */
if (pool == NULL)
goto no_downstream_pool;
gst_v4l2_buffer_pool_set_other_pool (GST_V4L2_BUFFER_POOL (obj->pool),
pool);
other_pool = pool;
gst_object_unref (pool);
pool = gst_object_ref (obj->pool);
size = obj->sizeimage;
break;
case GST_V4L2_IO_MMAP:
case GST_V4L2_IO_DMABUF:
/* in streaming mode, prefer our own pool */
/* Check if we can use it ... */
if (can_use_own_pool) {
if (can_share_own_pool) {
if (pool)
gst_object_unref (pool);
pool = gst_object_ref (obj->pool);
size = obj->sizeimage;
GST_DEBUG_OBJECT (obj->element,
"streaming mode: using our own pool %" GST_PTR_FORMAT, pool);
pushing_from_our_pool = TRUE;
} else if (pool) {
GST_DEBUG_OBJECT (obj->element,
"streaming mode: copying to downstream pool %" GST_PTR_FORMAT,
@ -3182,7 +3144,14 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
GST_V4L2_BUFFER_POOL_OPTION_CROP_META);
}
gst_buffer_pool_config_set_params (config, caps, size, min + extra, 0);
/* If pushing from our own pool, configure it with queried minimum,
* otherwise use the minimum required */
if (pushing_from_our_pool)
extra += min;
else
extra += GST_V4L2_MIN_BUFFERS;
gst_buffer_pool_config_set_params (config, caps, size, extra, 0);
GST_DEBUG_OBJECT (pool, "setting config %" GST_PTR_FORMAT, config);
@ -3199,8 +3168,12 @@ gst_v4l2_object_decide_allocation (GstV4l2Object * obj, GstQuery * query)
}
setup_other_pool:
/* Now configure the other pool if different */
if (pool && obj->pool != pool) {
if (obj->pool != pool)
other_pool = pool;
if (other_pool) {
if (gst_buffer_pool_is_active (obj->pool))
goto done;
@ -3252,6 +3225,13 @@ cleanup:
gst_object_unref (pool);
return FALSE;
}
no_downstream_pool:
{
GST_ELEMENT_ERROR (obj->element, RESOURCE, SETTINGS,
(_("No downstream pool to import from.")),
("When importing DMABUF or USERPTR, we need a pool to import from"));
return FALSE;
}
}
gboolean

View file

@ -253,10 +253,6 @@ gboolean gst_v4l2_object_unlock_stop (GstV4l2Object * v4l2object);
gboolean gst_v4l2_object_stop (GstV4l2Object * v4l2object);
gboolean gst_v4l2_object_copy (GstV4l2Object * v4l2object,
GstBuffer * dest, GstBuffer * src);
GstCaps * gst_v4l2_object_get_caps (GstV4l2Object * v4l2object,
GstCaps * filter);