v4l2: Fixup USERPTR/DMABUF capture support

This commit is contained in:
Nicolas Dufresne 2014-04-18 17:02:50 -04:00
parent 7f8eff7845
commit 62605e11eb
3 changed files with 192 additions and 174 deletions

View file

@ -489,6 +489,107 @@ gst_v4l2_allocator_probe (GstV4l2Allocator * allocator, guint32 memory,
return flags; return flags;
} }
/* Allocates one extra buffer from the driver with VIDIOC_CREATE_BUFS and
 * wraps it into a new GstV4l2MemoryGroup.
 *
 * Holds the allocator object lock for the whole operation. Returns NULL
 * silently when the allocator is not active or the driver has been flagged
 * as unable to allocate; returns NULL with a warning when the ioctl fails.
 * On success the group is registered in allocator->groups and the buffer
 * count is incremented. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  if (!allocator->active)
    goto done;

  bcreate.memory = allocator->memory;
  bcreate.format = allocator->format;
  bcreate.count = 1;

  if (!allocator->can_allocate)
    goto done;

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
    goto create_bufs_failed;

  /* the driver fills bcreate.index with the index of the new buffer */
  group = gst_v4l2_memory_group_new (allocator, bcreate.index);

  if (group) {
    allocator->groups[bcreate.index] = group;
    allocator->count++;
  }

done:
  GST_OBJECT_UNLOCK (allocator);
  return group;

create_bufs_failed:
  {
    GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
        g_strerror (errno));
    goto done;
  }
}
/* Obtains a free memory group: first from the free queue, and failing
 * that by asking the driver for a new buffer (when supported).
 * Returns NULL if the allocator is inactive or no group is available. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;

  if (!g_atomic_int_get (&allocator->active))
    return NULL;

  group = gst_atomic_queue_pop (allocator->free_queue);
  if (group != NULL)
    return group;

  if (!allocator->can_allocate)
    return NULL;

  group = gst_v4l2_allocator_create_buf (allocator);

  /* Don't hammer on CREATE_BUFS */
  if (group == NULL)
    allocator->can_allocate = FALSE;

  return group;
}
/* Restores each memory of @group to the full per-plane image size from
 * the negotiated format, undoing any resize done while the buffer was
 * in use. */
static void
gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    /* single-planar: one memory, size comes from fmt.pix */
    gst_memory_resize (group->mem[0], 0, allocator->format.fmt.pix.sizeimage);
  } else {
    gint plane;

    /* multi-planar: each plane has its own sizeimage in fmt.pix_mp */
    for (plane = 0; plane < group->n_mem; plane++)
      gst_memory_resize (group->mem[plane], 0,
          allocator->format.fmt.pix_mp.plane_fmt[plane].sizeimage);
  }
}
/* Returns a partially-allocated group to a recoverable state after an
 * allocation failure. */
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  gint n;

  if (group->mems_allocated == 0) {
    /* Nothing was allocated: the group must sit on the free queue for
     * _stop() to work. */
    gst_atomic_queue_push (allocator->free_queue, group);
    return;
  }

  /* One or more mmap worked: unref the memories, otherwise they keep a
   * ref on the allocator and leak it. Unreffing puts the group back on
   * the free_queue. */
  for (n = 0; n < group->n_mem; n++)
    gst_memory_unref (group->mem[n]);
}
GstV4l2Allocator * GstV4l2Allocator *
gst_v4l2_allocator_new (GstObject * parent, gint video_fd, gst_v4l2_allocator_new (GstObject * parent, gint video_fd,
struct v4l2_format *format) struct v4l2_format *format)
@ -658,85 +759,6 @@ reqbufs_failed:
} }
} }
/* Allocates one extra buffer from the driver with VIDIOC_CREATE_BUFS and
 * wraps it into a new GstV4l2MemoryGroup.
 *
 * Holds the allocator object lock for the whole operation. Returns NULL
 * silently when the allocator is not active or the driver has been flagged
 * as unable to allocate; returns NULL with a warning when the ioctl fails. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_create_buf (GstV4l2Allocator * allocator)
{
  struct v4l2_create_buffers bcreate = { 0 };
  GstV4l2MemoryGroup *group = NULL;

  GST_OBJECT_LOCK (allocator);

  if (!allocator->active)
    goto done;

  bcreate.memory = allocator->memory;
  bcreate.format = allocator->format;
  bcreate.count = 1;

  if (!allocator->can_allocate)
    goto done;

  if (v4l2_ioctl (allocator->video_fd, VIDIOC_CREATE_BUFS, &bcreate) < 0)
    goto create_bufs_failed;

  /* the driver fills bcreate.index with the index of the new buffer */
  group = gst_v4l2_memory_group_new (allocator, bcreate.index);

  if (group) {
    allocator->groups[bcreate.index] = group;
    allocator->count++;
  }

done:
  GST_OBJECT_UNLOCK (allocator);
  return group;

create_bufs_failed:
  {
    GST_WARNING_OBJECT (allocator, "error creating a new buffer: %s",
        g_strerror (errno));
    goto done;
  }
}
/* Obtains a free memory group: first from the free queue, and failing
 * that by asking the driver for a new buffer (when supported).
 * Returns NULL if the allocator is inactive or no group is available. */
static GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc (GstV4l2Allocator * allocator)
{
  GstV4l2MemoryGroup *group;

  if (!g_atomic_int_get (&allocator->active))
    return NULL;

  group = gst_atomic_queue_pop (allocator->free_queue);
  if (group == NULL) {
    if (allocator->can_allocate) {
      group = gst_v4l2_allocator_create_buf (allocator);

      /* Don't hammer on CREATE_BUFS */
      if (group == NULL)
        allocator->can_allocate = FALSE;
    }
  }

  return group;
}
/* Returns a partially-allocated group to a recoverable state after an
 * allocation failure. */
static void
_cleanup_failed_alloc (GstV4l2Allocator * allocator, GstV4l2MemoryGroup * group)
{
  if (group->mems_allocated > 0) {
    gint i;
    /* If one or more mmap worked, we need to unref the memory, otherwise
     * they will keep a ref on the allocator and leak it. This will put back
     * the group into the free_queue */
    for (i = 0; i < group->n_mem; i++)
      gst_memory_unref (group->mem[i]);
  } else {
    /* Otherwise, group has to be on free queue for _stop() to work */
    gst_atomic_queue_push (allocator->free_queue, group);
  }
}
GstV4l2MemoryGroup * GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator) gst_v4l2_allocator_alloc_mmap (GstV4l2Allocator * allocator)
{ {
@ -869,6 +891,45 @@ cleanup:
} }
} }
/* Drops an imported DMABUF from every plane of @group: closes the file
 * descriptor, zeroes the GstMemory sizes and resets the v4l2 plane and
 * buffer fields so the group can be reused for a new import.
 * Only valid for V4L2_MEMORY_DMABUF allocators. */
static void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
        mem->dmafd, i);

    /* negative fd means nothing imported on this plane */
    if (mem->dmafd >= 0)
      close (mem->dmafd);

    /* Update memory */
    mem->mem.maxsize = 0;
    mem->mem.offset = 0;
    mem->mem.size = 0;
    mem->dmafd = -1;

    /* Update v4l2 structure */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.fd = -1;
    group->planes[i].data_offset = 0;
  }

  /* single-planar buffers keep the plane info inside v4l2_buffer itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.fd = -1;
  }
}
GstV4l2MemoryGroup * GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator) gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
{ {
@ -901,6 +962,37 @@ gst_v4l2_allocator_alloc_dmabufin (GstV4l2Allocator * allocator)
return group; return group;
} }
/* Detaches an imported user pointer from every plane of @group: zeroes the
 * GstMemory sizes, clears the cached data pointer and resets the v4l2
 * plane and buffer fields so the group can be reused for a new import.
 * Only valid for V4L2_MEMORY_USERPTR allocators. */
static void
gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, mem->data, i, mem->mem.size);

    mem->mem.maxsize = 0;
    mem->mem.size = 0;
    mem->data = NULL;

    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.userptr = 0;
  }

  /* single-planar buffers keep the plane info inside v4l2_buffer itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.userptr = 0;
  }
}
GstV4l2MemoryGroup * GstV4l2MemoryGroup *
gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator) gst_v4l2_allocator_alloc_userptr (GstV4l2Allocator * allocator)
{ {
@ -1005,46 +1097,6 @@ dup_failed:
} }
} }
/* Drops an imported DMABUF from every plane of @group: closes the file
 * descriptor, zeroes the GstMemory sizes and resets the v4l2 plane and
 * buffer fields so the group can be reused for a new import.
 * Only valid for V4L2_MEMORY_DMABUF allocators. */
void
gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_DMABUF);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing DMABUF import, fd %i plane %d",
        mem->dmafd, i);

    /* negative fd means nothing imported on this plane */
    if (mem->dmafd >= 0)
      close (mem->dmafd);

    /* Update memory */
    mem->mem.maxsize = 0;
    mem->mem.offset = 0;
    mem->mem.size = 0;
    mem->dmafd = -1;

    /* Update v4l2 structure */
    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.fd = -1;
    group->planes[i].data_offset = 0;
  }

  /* single-planar buffers keep the plane info inside v4l2_buffer itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.fd = -1;
  }
}
gboolean gboolean
gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator, gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group, gsize img_size, int n_planes, GstV4l2MemoryGroup * group, gsize img_size, int n_planes,
@ -1111,37 +1163,6 @@ n_mem_missmatch:
} }
} }
/* Detaches an imported user pointer from every plane of @group: zeroes the
 * GstMemory sizes, clears the cached data pointer and resets the v4l2
 * plane and buffer fields so the group can be reused for a new import.
 * Only valid for V4L2_MEMORY_USERPTR allocators. */
void
gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
    GstV4l2MemoryGroup * group)
{
  GstV4l2Memory *mem;
  gint i;

  g_return_if_fail (allocator->memory == V4L2_MEMORY_USERPTR);

  for (i = 0; i < group->n_mem; i++) {
    mem = (GstV4l2Memory *) group->mem[i];

    GST_LOG_OBJECT (allocator, "clearing USERPTR %p plane %d size %"
        G_GSIZE_FORMAT, mem->data, i, mem->mem.size);

    mem->mem.maxsize = 0;
    mem->mem.size = 0;
    mem->data = NULL;

    group->planes[i].length = 0;
    group->planes[i].bytesused = 0;
    group->planes[i].m.userptr = 0;
  }

  /* single-planar buffers keep the plane info inside v4l2_buffer itself */
  if (!V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) {
    group->buffer.bytesused = 0;
    group->buffer.length = 0;
    group->buffer.m.userptr = 0;
  }
}
void void
gst_v4l2_allocator_flush (GstV4l2Allocator * allocator) gst_v4l2_allocator_flush (GstV4l2Allocator * allocator)
{ {
@ -1322,21 +1343,21 @@ error:
} }
void void
gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator, gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group) GstV4l2MemoryGroup * group)
{ {
gsize size; switch (allocator->memory) {
case V4L2_MEMORY_USERPTR:
if (V4L2_TYPE_IS_MULTIPLANAR (allocator->type)) { gst_v4l2_allocator_clear_userptr (allocator, group);
gint i; break;
case V4L2_MEMORY_DMABUF:
for (i = 0; i < group->n_mem; i++) { gst_v4l2_allocator_clear_dmabufin (allocator, group);
size = allocator->format.fmt.pix_mp.plane_fmt[i].sizeimage; break;
gst_memory_resize (group->mem[i], 0, size); case V4L2_MEMORY_MMAP:
} gst_v4l2_allocator_reset_size (allocator, group);
break;
} else { default:
size = allocator->format.fmt.pix.sizeimage; g_assert_not_reached ();
gst_memory_resize (group->mem[0], 0, size); break;
} }
} }

View file

@ -134,18 +134,11 @@ gboolean gst_v4l2_allocator_import_dmabuf (GstV4l2Allocator * alloc
GstV4l2MemoryGroup *group, GstV4l2MemoryGroup *group,
gint n_mem, GstMemory ** dma_mem); gint n_mem, GstMemory ** dma_mem);
void gst_v4l2_allocator_clear_dmabufin (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
gboolean gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator, gboolean gst_v4l2_allocator_import_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group, GstV4l2MemoryGroup *group,
gsize img_size, int n_planes, gsize img_size, int n_planes,
gpointer * data, gsize * offset); gpointer * data, gsize * offset);
void gst_v4l2_allocator_clear_userptr (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup *group);
void gst_v4l2_allocator_flush (GstV4l2Allocator * allocator); void gst_v4l2_allocator_flush (GstV4l2Allocator * allocator);
gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator, gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * allocator,
@ -153,7 +146,7 @@ gboolean gst_v4l2_allocator_qbuf (GstV4l2Allocator * alloc
GstV4l2MemoryGroup* gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator); GstV4l2MemoryGroup* gst_v4l2_allocator_dqbuf (GstV4l2Allocator * allocator);
void gst_v4l2_allocator_reset_size (GstV4l2Allocator * allocator, void gst_v4l2_allocator_reset_group (GstV4l2Allocator * allocator,
GstV4l2MemoryGroup * group); GstV4l2MemoryGroup * group);
G_END_DECLS G_END_DECLS

View file

@ -1334,8 +1334,12 @@ gst_v4l2_buffer_pool_release_buffer (GstBufferPool * bpool, GstBuffer * buffer)
GST_LOG_OBJECT (pool, "buffer %u not queued, putting on free list", GST_LOG_OBJECT (pool, "buffer %u not queued, putting on free list",
index); index);
/* Remove qdata, this will unmap any map data in userptr */
gst_mini_object_set_qdata (GST_MINI_OBJECT (buffer),
GST_V4L2_IMPORT_QUARK, NULL, NULL);
/* reset to default size */ /* reset to default size */
gst_v4l2_allocator_reset_size (pool->vallocator, group); gst_v4l2_allocator_reset_group (pool->vallocator, group);
/* playback, put the buffer back in the queue to refill later. */ /* playback, put the buffer back in the queue to refill later. */
GST_BUFFER_POOL_CLASS (parent_class)->release_buffer (bpool, GST_BUFFER_POOL_CLASS (parent_class)->release_buffer (bpool,