glupload: Add dmabuf upload method.

This upload method detects and optimizes uploads of DMABuf memory. This is
done by creating and caching EGLImage wrappers around the DMABufs. The
EGLImages are then bound to textures, which get converted using the
standard shaders.

Example pipeline:

GST_GL_PLATFORM=egl \
gst-launch-1.0 v4l2src device=/dev/video1 io-mode=4 ! \
               video/x-raw,format=NV12 ! glimagesink
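
For reference, a rough sketch (not code from this commit) of the EGL/GLES
calls such an import boils down to, assuming the EGL_EXT_image_dma_buf_import
and GL_OES_EGL_image extensions are available; the function and all of its
parameters (fd, fourcc, width, height, offset, stride, tex_id) are
illustrative placeholders:

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

/* Illustrative only: wrap one dmabuf plane in an EGLImage and bind it as the
 * storage of an existing 2D texture.  A real implementation must check the
 * extensions, keep the EGLImage around and destroy it when done. */
static void
import_dmabuf_plane (EGLDisplay dpy, GLuint tex_id, int fd, int fourcc,
    int width, int height, int offset, int stride)
{
  PFNEGLCREATEIMAGEKHRPROC create_image = (PFNEGLCREATEIMAGEKHRPROC)
      eglGetProcAddress ("eglCreateImageKHR");
  PFNGLEGLIMAGETARGETTEXTURE2DOESPROC target_texture =
      (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC)
      eglGetProcAddress ("glEGLImageTargetTexture2DOES");
  EGLint attribs[] = {
    EGL_WIDTH, width,
    EGL_HEIGHT, height,
    EGL_LINUX_DRM_FOURCC_EXT, fourcc,
    EGL_DMA_BUF_PLANE0_FD_EXT, fd,
    EGL_DMA_BUF_PLANE0_OFFSET_EXT, offset,
    EGL_DMA_BUF_PLANE0_PITCH_EXT, stride,
    EGL_NONE
  };
  /* No copy happens here: the EGLImage merely references the dmabuf */
  EGLImageKHR image = create_image (dpy, EGL_NO_CONTEXT,
      EGL_LINUX_DMA_BUF_EXT, (EGLClientBuffer) NULL, attribs);

  glBindTexture (GL_TEXTURE_2D, tex_id);
  target_texture (GL_TEXTURE_2D, image);
}

In the patch below, gst_egl_image_memory_from_dmabuf() does the wrapping and
_dma_buf_upload_perform_gl_thread() does the binding on the GL thread.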

https://bugzilla.gnome.org/show_bug.cgi?id=743345
Nicolas Dufresne, 2015-11-13 17:24:30 +01:00 (committed by Tim-Philipp Müller)
parent f91b4eb4ff
commit 8492c045a4

@@ -31,6 +31,10 @@
#include "egl/gsteglimagememory.h"
#endif
#if GST_GL_HAVE_DMABUF
#include <gst/allocators/gstdmabuf.h>
#endif
/**
* SECTION:gstglupload
* @short_description: an object that uploads to GL textures
@@ -544,7 +548,265 @@ static const UploadMethod _egl_image_upload = {
&_egl_image_upload_perform,
&_egl_image_upload_free
};
#endif /* GST_GL_HAVE_PLATFORM_EGL */
#if GST_GL_HAVE_DMABUF
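/* Per-method state: one cached EGLImage wrapper per plane of the current
 * input buffer, the GL output buffer being assembled, and the allocation
 * parameters used to create its GstGLMemory planes. */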
struct DmabufUpload
{
GstGLUpload *upload;
GstMemory *eglimage[GST_VIDEO_MAX_PLANES];
GstBuffer *outbuf;
GstGLVideoAllocationParams *params;
};
static GstStaticCaps _dma_buf_upload_caps =
GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE (GST_GL_MEMORY_VIDEO_FORMATS_STR));
static gpointer
_dma_buf_upload_new (GstGLUpload * upload)
{
struct DmabufUpload *dmabuf = g_new0 (struct DmabufUpload, 1);
dmabuf->upload = upload;
return dmabuf;
}
static GstCaps *
_dma_buf_upload_transform_caps (GstGLContext * context,
GstPadDirection direction, GstCaps * caps)
{
GstCaps *ret;
if (direction == GST_PAD_SINK) {
ret = _set_caps_features (caps, GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
} else {
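/* Transforming src (GLMemory) caps back towards the sink: plain system
 * memory, so drop the texture-target field, which only applies to GL caps. */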
gint i, n;
ret = _set_caps_features (caps, GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY);
n = gst_caps_get_size (ret);
for (i = 0; i < n; i++) {
GstStructure *s = gst_caps_get_structure (ret, i);
gst_structure_remove_fields (s, "texture-target", NULL);
}
}
return ret;
}
static GQuark
_eglimage_quark (gint plane)
{
static GQuark quark[4] = { 0 };
static const gchar *quark_str[] = {
"GstGLDMABufEGLImage0",
"GstGLDMABufEGLImage1",
"GstGLDMABufEGLImage2",
"GstGLDMABufEGLImage3",
};
if (!quark[plane])
quark[plane] = g_quark_from_static_string (quark_str[plane]);
return quark[plane];
}
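/* EGLImages are cached as qdata on the dmabuf GstMemory itself, one quark
 * per plane, so re-uploading the same buffer reuses the wrapper instead of
 * importing the dmabuf again. */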
static GstMemory *
_get_cached_eglimage (GstMemory * mem, gint plane)
{
return gst_mini_object_get_qdata (GST_MINI_OBJECT (mem),
_eglimage_quark (plane));
}
static void
_set_cached_eglimage (GstMemory * mem, GstMemory * eglimage, gint plane)
{
gst_mini_object_set_qdata (GST_MINI_OBJECT (mem),
_eglimage_quark (plane), eglimage, (GDestroyNotify) gst_memory_unref);
}
static gboolean
_dma_buf_upload_accept (gpointer impl, GstBuffer * buffer, GstCaps * in_caps,
GstCaps * out_caps)
{
struct DmabufUpload *dmabuf = impl;
GstVideoInfo *in_info = &dmabuf->upload->priv->in_info;
guint n_planes = GST_VIDEO_INFO_N_PLANES (in_info);
GstVideoMeta *meta;
guint n_mem;
guint mems_idx[GST_VIDEO_MAX_PLANES];
gsize mems_skip[GST_VIDEO_MAX_PLANES];
GstMemory *mems[GST_VIDEO_MAX_PLANES];
guint i;
n_mem = gst_buffer_n_memory (buffer);
meta = gst_buffer_get_video_meta (buffer);
/* dmabuf upload is only supported with EGL contexts. */
if (!GST_IS_GL_CONTEXT_EGL (dmabuf->upload->context))
return FALSE;
if (!gst_gl_context_check_feature (dmabuf->upload->context,
"EGL_KHR_image_base"))
return FALSE;
/* This will eliminate most non-dmabuf buffers out there */
if (!gst_is_dmabuf_memory (gst_buffer_peek_memory (buffer, 0)))
return FALSE;
/* We cannot have multiple dmabufs per plane */
if (n_mem > n_planes)
return FALSE;
/* Update video info based on video meta */
if (meta) {
in_info->width = meta->width;
in_info->height = meta->height;
for (i = 0; i < meta->n_planes; i++) {
in_info->offset[i] = meta->offset[i];
in_info->stride[i] = meta->stride[i];
}
}
if (dmabuf->params)
gst_gl_allocation_params_free ((GstGLAllocationParams *) dmabuf->params);
if (!(dmabuf->params =
gst_gl_video_allocation_params_new (dmabuf->upload->context, NULL,
&dmabuf->upload->priv->in_info, -1, NULL,
GST_GL_TEXTURE_TARGET_2D)))
return FALSE;
/* Find and validate all memories */
for (i = 0; i < n_planes; i++) {
guint plane_size;
guint length;
plane_size = gst_gl_get_plane_data_size (in_info, NULL, i);
if (!gst_buffer_find_memory (buffer, in_info->offset[i], plane_size,
&mems_idx[i], &length, &mems_skip[i]))
return FALSE;
/* We can't have more than one dmabuf per plane */
if (length != 1)
return FALSE;
mems[i] = gst_buffer_peek_memory (buffer, mems_idx[i]);
/* And all memories found must be dmabufs */
if (!gst_is_dmabuf_memory (mems[i]))
return FALSE;
}
/* Now create an EGLImage for each dmabuf */
for (i = 0; i < n_planes; i++) {
/* check if one is cached */
dmabuf->eglimage[i] = _get_cached_eglimage (mems[i], i);
if (dmabuf->eglimage[i])
continue;
/* otherwise create one and cache it */
dmabuf->eglimage[i] =
gst_egl_image_memory_from_dmabuf (dmabuf->upload->context,
gst_dmabuf_memory_get_fd (mems[i]), in_info, i, mems_skip[i]);
if (!dmabuf->eglimage[i])
return FALSE;
_set_cached_eglimage (mems[i], dmabuf->eglimage[i], i);
}
return TRUE;
}
static void
_dma_buf_upload_propose_allocation (gpointer impl, GstQuery * decide_query,
GstQuery * query)
{
/* nothing to do for now. */
}
static void
_dma_buf_upload_perform_gl_thread (GstGLContext * context,
struct DmabufUpload *dmabuf)
{
GstGLMemoryAllocator *allocator;
guint i, n;
allocator =
GST_GL_MEMORY_ALLOCATOR (gst_allocator_find
(GST_GL_MEMORY_PBO_ALLOCATOR_NAME));
/* FIXME: buffer pool */
dmabuf->outbuf = gst_buffer_new ();
gst_gl_memory_setup_buffer (allocator, dmabuf->outbuf, dmabuf->params);
gst_object_unref (allocator);
n = gst_buffer_n_memory (dmabuf->outbuf);
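/* Bind each cached EGLImage to the texture of the matching GstGLMemory
 * plane in the output buffer; this must run on the GL thread. */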
for (i = 0; i < n; i++) {
const GstGLFuncs *gl = NULL;
GstGLMemory *gl_mem =
(GstGLMemory *) gst_buffer_peek_memory (dmabuf->outbuf, i);
if (!dmabuf->eglimage[i]) {
g_clear_pointer (&dmabuf->outbuf, gst_buffer_unref);
return;
}
gl = GST_GL_CONTEXT (((GstEGLImageMemory *) gl_mem)->context)->gl_vtable;
gl->ActiveTexture (GL_TEXTURE0 + i);
gl->BindTexture (GL_TEXTURE_2D, gl_mem->tex_id);
gl->EGLImageTargetTexture2D (GL_TEXTURE_2D,
gst_egl_image_memory_get_image (dmabuf->eglimage[i]));
}
}
static GstGLUploadReturn
_dma_buf_upload_perform (gpointer impl, GstBuffer * buffer, GstBuffer ** outbuf)
{
struct DmabufUpload *dmabuf = impl;
gst_gl_context_thread_add (dmabuf->upload->context,
(GstGLContextThreadFunc) _dma_buf_upload_perform_gl_thread, dmabuf);
if (!dmabuf->outbuf)
return GST_GL_UPLOAD_ERROR;
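/* Keep the input buffer (and thus the dmabufs backing the EGLImages)
 * alive for as long as the output buffer is in use. */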
gst_buffer_add_parent_buffer_meta (dmabuf->outbuf, buffer);
*outbuf = dmabuf->outbuf;
dmabuf->outbuf = NULL;
return GST_GL_UPLOAD_DONE;
}
static void
_dma_buf_upload_free (gpointer impl)
{
struct DmabufUpload *dmabuf = impl;
if (dmabuf->params)
gst_gl_allocation_params_free ((GstGLAllocationParams *) dmabuf->params);
g_free (impl);
}
static const UploadMethod _dma_buf_upload = {
"Dmabuf",
0,
&_dma_buf_upload_caps,
&_dma_buf_upload_new,
&_dma_buf_upload_transform_caps,
&_dma_buf_upload_accept,
&_dma_buf_upload_propose_allocation,
&_dma_buf_upload_perform,
&_dma_buf_upload_free
};
#endif /* GST_GL_HAVE_DMABUF */
struct GLUploadMeta
{
@@ -977,6 +1239,9 @@ static const UploadMethod _raw_data_upload = {
static const UploadMethod *upload_methods[] = { &_gl_memory_upload,
#if GST_GL_HAVE_PLATFORM_EGL
&_egl_image_upload,
#endif
#if GST_GL_HAVE_DMABUF
&_dma_buf_upload,
#endif
&_upload_meta_upload, &_raw_data_upload
};