va: basedec: add gstvabasedec helper

This is a helper for all decoders.

It is not an abstract subclass, just merely a helper that avoids code
duplication among the decoders.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1673>
This commit is contained in:
Víctor Manuel Jáquez Leal 2020-10-08 19:39:56 +02:00 committed by GStreamer Merge Bot
parent a5dcb35ad6
commit a6398eca17
5 changed files with 793 additions and 1146 deletions

555
sys/va/gstvabasedec.c Normal file
View file

@ -0,0 +1,555 @@
/* GStreamer
* Copyright (C) 2020 Igalia, S.L.
* Author: Víctor Jáquez <vjaquez@igalia.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "gstvabasedec.h"
#include "gstvaallocator.h"
#include "gstvacaps.h"
#include "gstvapool.h"
#include "gstvautils.h"
#include "gstvavideoformat.h"
#define GST_CAT_DEFAULT (base->debug_category)
#define parent_class gst_va_base_dec_parent_class
gpointer gst_va_base_dec_parent_class = NULL;
/* GstVideoDecoder::open() vmethod.
 *
 * Acquires the shared VA display for the class' render device and,
 * lazily, creates the codec-specific VA decoder object.
 */
static gboolean
gst_va_base_dec_open (GstVideoDecoder * decoder)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstVaBaseDecClass *klass = GST_VA_BASE_DEC_GET_CLASS (decoder);
  gboolean has_display;

  has_display = gst_va_ensure_element_data (decoder,
      klass->render_device_path, &base->display);
  if (!has_display)
    return FALSE;

  if (base->decoder == NULL)
    base->decoder = gst_va_decoder_new (base->display, klass->codec);

  return base->decoder != NULL;
}
/* Counterpart of open(): drops the VA decoder and display references.
 * Public so subclasses can install it as their close() vmethod. */
gboolean
gst_va_base_dec_close (GstVideoDecoder * decoder)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);

  /* unref-and-NULL both objects; safe if they were never created */
  gst_clear_object (&base->decoder);
  gst_clear_object (&base->display);

  return TRUE;
}
/* GstVideoDecoder::stop() vmethod.
 *
 * Closes the VA decoder context, drops the cached output state and
 * deactivates/releases the auxiliary copy pool, then chains up.
 */
static gboolean
gst_va_base_dec_stop (GstVideoDecoder * decoder)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);

  if (!gst_va_decoder_close (base->decoder))
    return FALSE;

  g_clear_pointer (&base->output_state, gst_video_codec_state_unref);

  if (base->other_pool != NULL)
    gst_buffer_pool_set_active (base->other_pool, FALSE);
  gst_clear_object (&base->other_pool);

  return GST_VIDEO_DECODER_CLASS (parent_class)->stop (decoder);
}
/* GstVideoDecoder::getcaps() vmethod.
 *
 * Returns the driver-reported sinkpad caps (intersected with @filter
 * when given); falls back to the pad-template proxy caps while no VA
 * decoder exists yet.
 */
static GstCaps *
gst_va_base_dec_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  GstCaps *caps = NULL;

  if (base->decoder)
    caps = gst_va_decoder_get_sinkpad_caps (base->decoder);

  if (caps == NULL)
    return gst_video_decoder_proxy_getcaps (decoder, NULL, filter);

  if (filter != NULL) {
    GstCaps *filtered =
        gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (caps);
    caps = filtered;
  }

  GST_LOG_OBJECT (base, "Returning caps %" GST_PTR_FORMAT, caps);
  return caps;
}
/* GstVideoDecoder::src_query() vmethod.
 *
 * Answers CONTEXT queries with the element's VA display and CAPS
 * queries with the driver's srcpad caps (when the pad caps are not
 * fixed); everything else — including CAPS queries that cannot be
 * answered yet — is forwarded to the parent implementation.
 */
static gboolean
gst_va_base_dec_src_query (GstVideoDecoder * decoder, GstQuery * query)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);

  if (GST_QUERY_TYPE (query) == GST_QUERY_CONTEXT)
    return gst_va_handle_context_query (GST_ELEMENT_CAST (decoder), query,
        base->display);

  if (GST_QUERY_TYPE (query) == GST_QUERY_CAPS) {
    GstCaps *caps = NULL, *filter = NULL;
    gboolean fixed;

    gst_query_parse_caps (query, &filter);
    fixed = GST_PAD_IS_FIXED_CAPS (GST_VIDEO_DECODER_SRC_PAD (decoder));
    if (!fixed && base->decoder)
      caps = gst_va_decoder_get_srcpad_caps (base->decoder);

    if (caps != NULL) {
      if (filter != NULL) {
        GstCaps *intersection =
            gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
        gst_caps_unref (caps);
        caps = intersection;
      }

      GST_LOG_OBJECT (base, "Returning caps %" GST_PTR_FORMAT, caps);
      gst_query_set_caps_result (query, caps);
      gst_caps_unref (caps);
      return TRUE;
    }
    /* no decoder caps available: defer to the parent class below */
  }

  return GST_VIDEO_DECODER_CLASS (parent_class)->src_query (decoder, query);
}
/* GstVideoDecoder::sink_query() vmethod: handles CONTEXT queries with
 * the element's VA display, chains up for everything else. */
static gboolean
gst_va_base_dec_sink_query (GstVideoDecoder * decoder, GstQuery * query)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONTEXT:
      return gst_va_handle_context_query (GST_ELEMENT_CAST (decoder), query,
          base->display);
    default:
      return GST_VIDEO_DECODER_CLASS (parent_class)->sink_query (decoder,
          query);
  }
}
/* Creates the allocator matching @caps: a dmabuf-backed one for
 * memory:DMABuf caps, otherwise a plain VA allocator fed with the
 * decoder's supported surface formats. */
static GstAllocator *
_create_allocator (GstVaBaseDec * base, GstCaps * caps)
{
  GArray *surface_formats;

  if (gst_caps_is_dmabuf (caps))
    return gst_va_dmabuf_allocator_new (base->display);

  surface_formats = gst_va_decoder_get_surface_formats (base->decoder);
  return gst_va_allocator_new (base->display, surface_formats);
}
/* 1. get allocator in query
* 1.1 if allocator is not ours and downstream doesn't handle
* videometa, keep it for other_pool
* 2. get pool in query
* 2.1 if pool is not va, keep it as other_pool if downstream
* doesn't handle videometa or (it doesn't handle alignment and
* the stream needs cropping)
* 2.2 if there's no pool in query and downstream doesn't handle
* videometa, create other_pool as GstVideoPool with the non-va
* from query and query's params
* 3. create our allocator and pool if they aren't in query
* 4. add or update pool and allocator in query
* 5. set our custom pool configuration
*/
/* GstVideoDecoder::decide_allocation() vmethod.
 *
 * Implements the algorithm described in the comment above: a non-VA
 * allocator/pool proposed downstream may be kept aside ("other") to
 * copy frames into when downstream can't handle GstVideoMeta (or the
 * required cropping alignment), while the query is always answered
 * with our own VA pool and allocator.
 *
 * Fixes over the original: the saved non-VA allocator reference was
 * leaked on the update-pool path (and after being stored in the other
 * pool's config, which keeps its own reference); the freshly created
 * allocator and pool were leaked when gst_buffer_pool_set_config()
 * failed.
 */
static gboolean
gst_va_base_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstAllocator *allocator = NULL, *other_allocator = NULL;
  GstAllocationParams other_params, params;
  GstBufferPool *pool = NULL;
  GstCaps *caps = NULL;
  GstStructure *config;
  GstVideoInfo info;
  GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
  guint size = 0, min, max;
  gboolean update_pool = FALSE, update_allocator = FALSE, has_videoalignment;

  /* subclasses must have set min_buffers in new_sequence() */
  g_assert (base->min_buffers > 0);

  gst_query_parse_allocation (query, &caps, NULL);

  if (!(caps && gst_video_info_from_caps (&info, caps)))
    goto wrong_caps;

  base->has_videometa = gst_query_find_allocation_meta (query,
      GST_VIDEO_META_API_TYPE, NULL);

  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &other_params);
    if (allocator && !(GST_IS_VA_DMABUF_ALLOCATOR (allocator)
            || GST_IS_VA_ALLOCATOR (allocator))) {
      /* save the allocator for the other pool */
      other_allocator = allocator;
      allocator = NULL;
    }
    update_allocator = TRUE;
  } else {
    gst_allocation_params_init (&other_params);
  }

  gst_allocation_params_init (&params);

  if (gst_query_get_n_allocation_pools (query) > 0) {
    gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
    if (pool) {
      if (!GST_IS_VA_POOL (pool)) {
        has_videoalignment = gst_buffer_pool_has_option (pool,
            GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
        if (!base->has_videometa
            || (!has_videoalignment && base->need_valign)) {
          GST_DEBUG_OBJECT (base,
              "keeping other pool for copy %" GST_PTR_FORMAT, pool);
          gst_object_replace ((GstObject **) & base->other_pool,
              (GstObject *) pool);
          gst_object_unref (pool);      /* decrease previous increase */
        }
        gst_clear_object (&pool);
      }
    }

    min += base->min_buffers;
    size = MAX (size, GST_VIDEO_INFO_SIZE (&info));

    update_pool = TRUE;
  } else {
    size = GST_VIDEO_INFO_SIZE (&info);

    if (!base->has_videometa && !gst_caps_is_vamemory (caps)) {
      GST_DEBUG_OBJECT (base, "making new other pool for copy");
      base->other_pool = gst_video_buffer_pool_new ();
      config = gst_buffer_pool_get_config (base->other_pool);
      gst_buffer_pool_config_set_params (config, caps, size, 0, 0);
      gst_buffer_pool_config_set_allocator (config, other_allocator,
          &other_params);
      if (!gst_buffer_pool_set_config (base->other_pool, config)) {
        GST_ERROR_OBJECT (base, "couldn't configure other pool for copy");
        gst_clear_object (&base->other_pool);
      }
    }

    min = base->min_buffers;
    max = 0;
  }

  /* fix: release our reference on the downstream allocator; the other
   * pool's config (if any) keeps its own */
  gst_clear_object (&other_allocator);

  if (!allocator) {
    if (!(allocator = _create_allocator (base, caps)))
      return FALSE;
  }

  if (!pool)
    pool = gst_va_pool_new ();

  config = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (config, caps, size, min, max);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);
  gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META);
  if (base->need_valign) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
    gst_buffer_pool_config_set_video_alignment (config, &base->valign);
  }
  gst_buffer_pool_config_set_va_allocation_params (config,
      VA_SURFACE_ATTRIB_USAGE_HINT_DECODER);
  if (!gst_buffer_pool_set_config (pool, config)) {
    /* fix: don't leak the allocator and pool on configuration failure */
    gst_object_unref (allocator);
    gst_object_unref (pool);
    return FALSE;
  }

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);

  if (update_pool)
    gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
  else
    gst_query_add_allocation_pool (query, pool, size, min, max);

  gst_object_unref (allocator);
  gst_object_unref (pool);

  return GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
      query);

wrong_caps:
  {
    GST_WARNING_OBJECT (base, "No valid caps");
    return FALSE;
  }
}
/* GstElement::set_context() vmethod.
 *
 * Applies an incoming VA display context and warns when the context
 * either failed to apply or would swap the display while a decoder is
 * already operating.
 */
static void
gst_va_base_dec_set_context (GstElement * element, GstContext * context)
{
  GstVaBaseDec *base = GST_VA_BASE_DEC (element);
  GstVaBaseDecClass *klass = GST_VA_BASE_DEC_GET_CLASS (base);
  GstVaDisplay *prev_display, *next_display;
  gboolean ok;

  prev_display = base->display ? gst_object_ref (base->display) : NULL;
  ok = gst_va_handle_set_context (element, context, klass->render_device_path,
      &base->display);
  next_display = base->display ? gst_object_ref (base->display) : NULL;

  if (!ok
      || (prev_display && next_display && prev_display != next_display
          && base->decoder)) {
    GST_ELEMENT_WARNING (base, RESOURCE, BUSY,
        ("Can't replace VA display while operating"), (NULL));
  }

  gst_clear_object (&prev_display);
  gst_clear_object (&next_display);

  GST_ELEMENT_CLASS (parent_class)->set_context (element, context);
}
/* Called from the subclass' instance_init: stores the per-subclass
 * debug category so the GST_CAT_DEFAULT macro above resolves to it
 * inside this file. */
void
gst_va_base_dec_init (GstVaBaseDec * base, GstDebugCategory * cat)
{
  base->debug_category = cat;
}
/* Called from the subclass' class_init: records codec and render
 * device, installs the sink/src pad templates (with optional
 * documentation caps, whose references are consumed here) and hooks
 * the shared GstVideoDecoder/GstElement vmethods. */
void
gst_va_base_dec_class_init (GstVaBaseDecClass * klass, GstVaCodecs codec,
    const gchar * render_device_path, GstCaps * sink_caps, GstCaps * src_caps,
    GstCaps * doc_src_caps, GstCaps * doc_sink_caps)
{
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (klass);
  GstPadTemplate *templ;

  gst_va_base_dec_parent_class = g_type_class_peek_parent (klass);

  klass->codec = codec;
  klass->render_device_path = g_strdup (render_device_path);

  /* sink pad template */
  templ = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
      sink_caps);
  gst_element_class_add_pad_template (element_class, templ);
  if (doc_sink_caps) {
    gst_pad_template_set_documentation_caps (templ, doc_sink_caps);
    gst_caps_unref (doc_sink_caps);
  }

  /* src pad template */
  templ = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, src_caps);
  gst_element_class_add_pad_template (element_class, templ);
  if (doc_src_caps) {
    gst_pad_template_set_documentation_caps (templ, doc_src_caps);
    gst_caps_unref (doc_src_caps);
  }

  element_class->set_context = GST_DEBUG_FUNCPTR (gst_va_base_dec_set_context);

  decoder_class->open = GST_DEBUG_FUNCPTR (gst_va_base_dec_open);
  decoder_class->close = GST_DEBUG_FUNCPTR (gst_va_base_dec_close);
  decoder_class->stop = GST_DEBUG_FUNCPTR (gst_va_base_dec_stop);
  decoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_va_base_dec_getcaps);
  decoder_class->src_query = GST_DEBUG_FUNCPTR (gst_va_base_dec_src_query);
  decoder_class->sink_query = GST_DEBUG_FUNCPTR (gst_va_base_dec_sink_query);
  decoder_class->decide_allocation =
      GST_DEBUG_FUNCPTR (gst_va_base_dec_decide_allocation);
}
/* Maps a VA chroma/render-target format to a fallback video format:
 * NV12 for 8-bit, P010 for 10-bit, UNKNOWN otherwise. */
static GstVideoFormat
_default_video_format_from_chroma (guint chroma_type)
{
  if (chroma_type == VA_RT_FORMAT_YUV420
      || chroma_type == VA_RT_FORMAT_YUV422
      || chroma_type == VA_RT_FORMAT_YUV444)
    return GST_VIDEO_FORMAT_NV12;

  if (chroma_type == VA_RT_FORMAT_YUV420_10
      || chroma_type == VA_RT_FORMAT_YUV422_10
      || chroma_type == VA_RT_FORMAT_YUV444_10)
    return GST_VIDEO_FORMAT_P010_10LE;

  return GST_VIDEO_FORMAT_UNKNOWN;
}
/* Picks the output video format and caps features to negotiate,
 * preferring a memory:VAMemory caps structure from the downstream
 * allowed caps and matching the format to the stream's rt_format.
 *
 * @format / @capsfeatures may each be NULL when the caller doesn't
 * need that value; *capsfeatures is set to NULL (system memory) when
 * nothing specific can be chosen.
 *
 * Fixes over the original: gst_pad_get_allowed_caps() returns NULL
 * when the src pad has no peer and was dereferenced anyway; *format
 * was left uninitialized when the "format" field held an unexpected
 * value type; non-string list entries are now skipped instead of
 * being passed to g_value_get_string().
 */
void
gst_va_base_dec_get_preferred_format_and_caps_features (GstVaBaseDec * base,
    GstVideoFormat * format, GstCapsFeatures ** capsfeatures)
{
  GstCaps *peer_caps, *preferred_caps = NULL;
  GstCapsFeatures *features;
  GstStructure *structure;
  const GValue *v_format;
  guint num_structures, i;

  g_return_if_fail (base);

  /* NULL when the src pad is not linked yet */
  peer_caps = gst_pad_get_allowed_caps (GST_VIDEO_DECODER_SRC_PAD (base));
  GST_DEBUG_OBJECT (base, "Allowed caps %" GST_PTR_FORMAT, peer_caps);

  if (!peer_caps)
    goto system_defaults;

  /* prefer memory:VAMemory over other caps features */
  num_structures = gst_caps_get_size (peer_caps);
  for (i = 0; i < num_structures; i++) {
    features = gst_caps_get_features (peer_caps, i);
    structure = gst_caps_get_structure (peer_caps, i);

    if (gst_caps_features_is_any (features))
      continue;

    if (gst_caps_features_contains (features, "memory:VAMemory")) {
      preferred_caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
      gst_caps_set_features_simple (preferred_caps,
          gst_caps_features_copy (features));
      break;
    }
  }

  if (!preferred_caps) {
    /* take over the peer caps reference */
    preferred_caps = peer_caps;
    peer_caps = NULL;
  } else {
    gst_clear_caps (&peer_caps);
  }

  if (gst_caps_is_empty (preferred_caps) || gst_caps_is_any (preferred_caps))
    goto system_defaults;

  features = gst_caps_get_features (preferred_caps, 0);
  if (features && capsfeatures)
    *capsfeatures = gst_caps_features_copy (features);

  if (!format)
    goto bail;

  structure = gst_caps_get_structure (preferred_caps, 0);
  v_format = gst_structure_get_value (structure, "format");
  if (!v_format) {
    *format = _default_video_format_from_chroma (base->rt_format);
  } else if (G_VALUE_HOLDS_STRING (v_format)) {
    *format = gst_video_format_from_string (g_value_get_string (v_format));
  } else if (GST_VALUE_HOLDS_LIST (v_format)) {
    guint num_values = gst_value_list_get_size (v_format);

    /* pick the first listed format whose chroma matches the stream */
    for (i = 0; i < num_values; i++) {
      GstVideoFormat fmt;
      const GValue *v_fmt = gst_value_list_get_value (v_format, i);

      if (!v_fmt || !G_VALUE_HOLDS_STRING (v_fmt))
        continue;
      fmt = gst_video_format_from_string (g_value_get_string (v_fmt));
      if (gst_va_chroma_from_video_format (fmt) == base->rt_format) {
        *format = fmt;
        break;
      }
    }
    if (i == num_values)
      *format = _default_video_format_from_chroma (base->rt_format);
  } else {
    /* fix: unexpected value type left *format uninitialized */
    *format = _default_video_format_from_chroma (base->rt_format);
  }

  goto bail;

system_defaults:
  /* not linked, empty or ANY: system memory and the chroma default */
  if (capsfeatures)
    *capsfeatures = NULL;
  if (format)
    *format = _default_video_format_from_chroma (base->rt_format);

bail:
  gst_clear_caps (&preferred_caps);
  gst_clear_caps (&peer_caps);
}
/* Copies the decoded frame into a buffer from the auxiliary pool,
 * cropping to base->width x base->height, and replaces the codec
 * frame's output buffer with the copy.  Used when downstream can't
 * handle GstVideoMeta / alignment.  Returns FALSE when there is no
 * other pool or any step fails.
 *
 * Fixes over the original: on dest-map failure the *unmapped* dest
 * frame was unmapped while the mapped src frame leaked; the acquired
 * pool buffer was leaked on every failure path.
 */
gboolean
gst_va_base_dec_copy_output_buffer (GstVaBaseDec * base,
    GstVideoCodecFrame * codec_frame)
{
  GstVideoFrame src_frame;
  GstVideoFrame dest_frame;
  GstVideoInfo dest_vinfo;
  GstVideoInfo *src_vinfo;
  GstBuffer *buffer = NULL;
  GstFlowReturn ret;

  g_return_val_if_fail (base && base->output_state, FALSE);

  if (!base->other_pool)
    return FALSE;

  if (!gst_buffer_pool_set_active (base->other_pool, TRUE))
    return FALSE;

  src_vinfo = &base->output_state->info;
  gst_video_info_set_format (&dest_vinfo, GST_VIDEO_INFO_FORMAT (src_vinfo),
      base->width, base->height);

  ret = gst_buffer_pool_acquire_buffer (base->other_pool, &buffer, NULL);
  if (ret != GST_FLOW_OK)
    goto fail;

  if (!gst_video_frame_map (&src_frame, src_vinfo, codec_frame->output_buffer,
          GST_MAP_READ))
    goto fail;

  if (!gst_video_frame_map (&dest_frame, &dest_vinfo, buffer, GST_MAP_WRITE)) {
    /* fix: unmap the frame that was actually mapped */
    gst_video_frame_unmap (&src_frame);
    goto fail;
  }

  /* gst_video_frame_copy can crop this, but does not know, so let
   * make it think it's all right */
  GST_VIDEO_INFO_WIDTH (&src_frame.info) = base->width;
  GST_VIDEO_INFO_HEIGHT (&src_frame.info) = base->height;

  if (!gst_video_frame_copy (&dest_frame, &src_frame)) {
    gst_video_frame_unmap (&src_frame);
    gst_video_frame_unmap (&dest_frame);
    goto fail;
  }

  gst_video_frame_unmap (&src_frame);
  gst_video_frame_unmap (&dest_frame);

  gst_buffer_replace (&codec_frame->output_buffer, buffer);
  gst_buffer_unref (buffer);

  return TRUE;

fail:
  /* fix: release the acquired buffer on error */
  gst_clear_buffer (&buffer);
  GST_ERROR_OBJECT (base, "Failed copy output buffer.");
  return FALSE;
}

108
sys/va/gstvabasedec.h Normal file
View file

@ -0,0 +1,108 @@
/* GStreamer
* Copyright (C) 2020 Igalia, S.L.
* Author: Víctor Jáquez <vjaquez@igalia.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#pragma once
#include <gst/codecs/gsth264decoder.h>
#include <gst/codecs/gstvp8decoder.h>
#include "gstvadevice.h"
#include "gstvadecoder.h"
#include "gstvaprofile.h"
G_BEGIN_DECLS
#define GST_VA_BASE_DEC(obj) ((GstVaBaseDec *)(obj))
#define GST_VA_BASE_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaBaseDecClass))
#define GST_VA_BASE_DEC_CLASS(klass) ((GstVaBaseDecClass *)(klass))
typedef struct _GstVaBaseDec GstVaBaseDec;
typedef struct _GstVaBaseDecClass GstVaBaseDecClass;
/* Common state shared by all VA decoder elements.  Not a GObject
 * subclass: subclasses embed it as their first member via the union
 * below, so a subclass instance can be cast to GstVaBaseDec. */
struct _GstVaBaseDec
{
  /* <private> */
  /* storage for the codec-specific parent instance; exactly one union
   * member is in use, chosen by the subclass */
  union
  {
    GstH264Decoder h264;
    GstVp8Decoder vp8;
  } parent;

  GstDebugCategory *debug_category;     /* set by gst_va_base_dec_init() */

  GstVaDisplay *display;        /* VA display, shared via GstContext */
  GstVaDecoder *decoder;        /* codec-specific VA decoder object */

  VAProfile profile;            /* negotiated VA profile */
  guint rt_format;              /* VA chroma (render target) format */
  /* presumably the display (cropped) dimensions — used as the copy
   * target size in gst_va_base_dec_copy_output_buffer(); coded size is
   * kept by the subclass.  TODO confirm */
  gint width;
  gint height;

  guint min_buffers;            /* minimum pool buffers; set by subclass */

  GstVideoCodecState *output_state;     /* negotiated output state */
  GstBufferPool *other_pool;    /* non-VA pool used for frame copies */

  gboolean need_valign;         /* TRUE when output needs cropping */
  GstVideoAlignment valign;     /* padding when need_valign is TRUE */

  gboolean has_videometa;       /* downstream supports GstVideoMeta */
};
/* Class-side counterpart: overlays the codec-specific parent class and
 * records which codec/device this subclass was registered for. */
struct _GstVaBaseDecClass
{
  /* <private> */
  /* storage for the codec-specific parent class structure */
  union
  {
    GstH264DecoderClass h264;
    GstVp8DecoderClass vp8;
  } parent_class;

  GstVaCodecs codec;            /* codec handled by this subclass */
  gchar *render_device_path;    /* DRM render node, owned (g_strdup) */
};
/* Per-device registration data; presumably passed as class data when
 * dynamically registering one element per render device — used by the
 * per-codec plugin registration code, verify against callers. */
struct CData
{
  gchar *render_device_path;    /* DRM render node for this element */
  gchar *description;           /* device description for metadata */
  GstCaps *sink_caps;
  GstCaps *src_caps;
};
/* Stores the subclass' debug category; call from instance_init. */
void gst_va_base_dec_init (GstVaBaseDec * base,
    GstDebugCategory * cat);
/* Installs pad templates and shared vmethods; call from class_init.
 * Takes ownership of the doc_*_caps references when non-NULL. */
void gst_va_base_dec_class_init (GstVaBaseDecClass * klass,
    GstVaCodecs codec,
    const gchar * render_device_path,
    GstCaps * sink_caps,
    GstCaps * src_caps,
    GstCaps * doc_src_caps,
    GstCaps * doc_sink_caps);
/* Releases the VA decoder and display; usable as close() vmethod. */
gboolean gst_va_base_dec_close (GstVideoDecoder * decoder);
/* Chooses output format/caps-features from downstream allowed caps;
 * either out parameter may be NULL. */
void gst_va_base_dec_get_preferred_format_and_caps_features (GstVaBaseDec * base,
    GstVideoFormat * format,
    GstCapsFeatures ** capsfeatures);
/* Copies the decoded frame into the auxiliary pool, cropping to the
 * display size; returns FALSE on failure or when no other pool. */
gboolean gst_va_base_dec_copy_output_buffer (GstVaBaseDec * base,
    GstVideoCodecFrame * codec_frame);
G_END_DECLS

View file

@ -51,22 +51,15 @@
#include "gstvah264dec.h" #include "gstvah264dec.h"
#include <gst/codecs/gsth264decoder.h> #include "gstvabasedec.h"
#include <va/va_drmcommon.h>
#include "gstvaallocator.h"
#include "gstvacaps.h"
#include "gstvadecoder.h"
#include "gstvadevice.h"
#include "gstvadisplay_drm.h"
#include "gstvapool.h" #include "gstvapool.h"
#include "gstvaprofile.h"
#include "gstvautils.h"
#include "gstvavideoformat.h"
GST_DEBUG_CATEGORY_STATIC (gst_va_h264dec_debug); GST_DEBUG_CATEGORY_STATIC (gst_va_h264dec_debug);
#ifndef GST_DISABLE_GST_DEBUG
#define GST_CAT_DEFAULT gst_va_h264dec_debug #define GST_CAT_DEFAULT gst_va_h264dec_debug
#else
#define GST_CAT_DEFAULT NULL
#endif
#define GST_VA_H264_DEC(obj) ((GstVaH264Dec *) obj) #define GST_VA_H264_DEC(obj) ((GstVaH264Dec *) obj)
#define GST_VA_H264_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaH264DecClass)) #define GST_VA_H264_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaH264DecClass))
@ -77,48 +70,25 @@ typedef struct _GstVaH264DecClass GstVaH264DecClass;
struct _GstVaH264DecClass struct _GstVaH264DecClass
{ {
GstH264DecoderClass parent_class; GstVaBaseDecClass parent_class;
gchar *render_device_path;
}; };
struct _GstVaH264Dec struct _GstVaH264Dec
{ {
GstH264Decoder parent; GstVaBaseDec parent;
GstVaDisplay *display;
GstVaDecoder *decoder;
GstBufferPool *other_pool;
GstFlowReturn last_ret; GstFlowReturn last_ret;
GstVideoCodecState *output_state;
VAProfile profile;
gint display_width;
gint display_height;
gint coded_width; gint coded_width;
gint coded_height; gint coded_height;
guint rt_format;
gint dpb_size; gint dpb_size;
gboolean need_videoalign;
GstVideoAlignment valign;
gboolean need_negotiation; gboolean need_negotiation;
gboolean has_videometa;
gboolean copy_frames; gboolean copy_frames;
}; };
static GstElementClass *parent_class = NULL; #define parent_class gst_va_base_dec_parent_class
extern gpointer gst_va_base_dec_parent_class;
struct CData
{
gchar *render_device_path;
gchar *description;
GstCaps *sink_caps;
GstCaps *src_caps;
};
/* *INDENT-OFF* */ /* *INDENT-OFF* */
static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VAMemory", static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VAMemory",
@ -130,73 +100,17 @@ static const gchar *sink_caps_str = "video/x-h264";
static gboolean static gboolean
gst_va_h264_dec_end_picture (GstH264Decoder * decoder, GstH264Picture * picture) gst_va_h264_dec_end_picture (GstH264Decoder * decoder, GstH264Picture * picture)
{ {
GstVaH264Dec *self = GST_VA_H264_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
GST_LOG_OBJECT (self, "end picture %p, (poc %d)", GST_LOG_OBJECT (base, "end picture %p, (poc %d)",
picture, picture->pic_order_cnt); picture, picture->pic_order_cnt);
va_pic = gst_h264_picture_get_user_data (picture); va_pic = gst_h264_picture_get_user_data (picture);
return gst_va_decoder_decode (self->decoder, va_pic); return gst_va_decoder_decode (base->decoder, va_pic);
} }
static gboolean
_copy_output_buffer (GstVaH264Dec * self, GstVideoCodecFrame * codec_frame)
{
GstVideoFrame src_frame;
GstVideoFrame dest_frame;
GstVideoInfo dest_vinfo;
GstBuffer *buffer;
GstFlowReturn ret;
if (!self->other_pool)
return FALSE;
if (!gst_buffer_pool_set_active (self->other_pool, TRUE))
return FALSE;
gst_video_info_set_format (&dest_vinfo,
GST_VIDEO_INFO_FORMAT (&self->output_state->info), self->display_width,
self->display_height);
ret = gst_buffer_pool_acquire_buffer (self->other_pool, &buffer, NULL);
if (ret != GST_FLOW_OK)
goto fail;
if (!gst_video_frame_map (&src_frame, &self->output_state->info,
codec_frame->output_buffer, GST_MAP_READ))
goto fail;
if (!gst_video_frame_map (&dest_frame, &dest_vinfo, buffer, GST_MAP_WRITE)) {
gst_video_frame_unmap (&dest_frame);
goto fail;
}
/* gst_video_frame_copy can crop this, but does not know, so let
* make it think it's all right */
GST_VIDEO_INFO_WIDTH (&src_frame.info) = self->display_width;
GST_VIDEO_INFO_HEIGHT (&src_frame.info) = self->display_height;
if (!gst_video_frame_copy (&dest_frame, &src_frame)) {
gst_video_frame_unmap (&src_frame);
gst_video_frame_unmap (&dest_frame);
goto fail;
}
gst_video_frame_unmap (&src_frame);
gst_video_frame_unmap (&dest_frame);
gst_buffer_replace (&codec_frame->output_buffer, buffer);
gst_buffer_unref (buffer);
return TRUE;
fail:
GST_ERROR_OBJECT (self, "Failed copy output buffer.");
return FALSE;
}
static GstFlowReturn static GstFlowReturn
gst_va_h264_dec_output_picture (GstH264Decoder * decoder, gst_va_h264_dec_output_picture (GstH264Decoder * decoder,
GstVideoCodecFrame * frame, GstH264Picture * picture) GstVideoCodecFrame * frame, GstH264Picture * picture)
@ -213,7 +127,7 @@ gst_va_h264_dec_output_picture (GstH264Decoder * decoder,
} }
if (self->copy_frames) if (self->copy_frames)
_copy_output_buffer (self, frame); gst_va_base_dec_copy_output_buffer (GST_VA_BASE_DEC (self), frame);
gst_h264_picture_unref (picture); gst_h264_picture_unref (picture);
@ -381,13 +295,11 @@ gst_va_h264_dec_decode_slice (GstH264Decoder * decoder,
{ {
GstH264SliceHdr *header = &slice->header; GstH264SliceHdr *header = &slice->header;
GstH264NalUnit *nalu = &slice->nalu; GstH264NalUnit *nalu = &slice->nalu;
GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
VASliceParameterBufferH264 slice_param; VASliceParameterBufferH264 slice_param;
gboolean ret; gboolean ret;
GST_TRACE_OBJECT (self, "-");
/* *INDENT-OFF* */ /* *INDENT-OFF* */
slice_param = (VASliceParameterBufferH264) { slice_param = (VASliceParameterBufferH264) {
.slice_data_size = nalu->size, .slice_data_size = nalu->size,
@ -415,11 +327,11 @@ gst_va_h264_dec_decode_slice (GstH264Decoder * decoder,
va_pic = gst_h264_picture_get_user_data (picture); va_pic = gst_h264_picture_get_user_data (picture);
ret = gst_va_decoder_add_slice_buffer (self->decoder, va_pic, &slice_param, ret = gst_va_decoder_add_slice_buffer (base->decoder, va_pic, &slice_param,
sizeof (slice_param), slice->nalu.data + slice->nalu.offset, sizeof (slice_param), slice->nalu.data + slice->nalu.offset,
slice->nalu.size); slice->nalu.size);
if (!ret) { if (!ret) {
gst_va_decoder_destroy_buffers (self->decoder, va_pic); gst_va_decoder_destroy_buffers (base->decoder, va_pic);
return FALSE; return FALSE;
} }
@ -432,14 +344,12 @@ gst_va_h264_dec_start_picture (GstH264Decoder * decoder,
{ {
GstH264PPS *pps; GstH264PPS *pps;
GstH264SPS *sps; GstH264SPS *sps;
GstVaH264Dec *self = GST_VA_H264_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
VAIQMatrixBufferH264 iq_matrix = { 0, }; VAIQMatrixBufferH264 iq_matrix = { 0, };
VAPictureParameterBufferH264 pic_param; VAPictureParameterBufferH264 pic_param;
guint i, n; guint i, n;
GST_TRACE_OBJECT (self, "-");
va_pic = gst_h264_picture_get_user_data (picture); va_pic = gst_h264_picture_get_user_data (picture);
pps = slice->header.pps; pps = slice->header.pps;
@ -516,7 +426,7 @@ gst_va_h264_dec_start_picture (GstH264Decoder * decoder,
_init_vaapi_pic (&pic_param.ReferenceFrames[i]); _init_vaapi_pic (&pic_param.ReferenceFrames[i]);
} }
if (!gst_va_decoder_add_param_buffer (self->decoder, va_pic, if (!gst_va_decoder_add_param_buffer (base->decoder, va_pic,
VAPictureParameterBufferType, &pic_param, sizeof (pic_param))) VAPictureParameterBufferType, &pic_param, sizeof (pic_param)))
goto fail; goto fail;
@ -535,7 +445,7 @@ gst_va_h264_dec_start_picture (GstH264Decoder * decoder,
[i], pps->scaling_lists_8x8[i]); [i], pps->scaling_lists_8x8[i]);
} }
if (!gst_va_decoder_add_param_buffer (self->decoder, va_pic, if (!gst_va_decoder_add_param_buffer (base->decoder, va_pic,
VAIQMatrixBufferType, &iq_matrix, sizeof (iq_matrix))) VAIQMatrixBufferType, &iq_matrix, sizeof (iq_matrix)))
goto fail; goto fail;
@ -543,7 +453,7 @@ gst_va_h264_dec_start_picture (GstH264Decoder * decoder,
fail: fail:
{ {
gst_va_decoder_destroy_buffers (self->decoder, va_pic); gst_va_decoder_destroy_buffers (base->decoder, va_pic);
return FALSE; return FALSE;
} }
} }
@ -639,6 +549,7 @@ static const struct
static VAProfile static VAProfile
_get_profile (GstVaH264Dec * self, const GstH264SPS * sps, gint max_dpb_size) _get_profile (GstVaH264Dec * self, const GstH264SPS * sps, gint max_dpb_size)
{ {
GstVaBaseDec *base = GST_VA_BASE_DEC (self);
VAProfile profiles[4]; VAProfile profiles[4];
gint i = 0, j; gint i = 0, j;
@ -673,7 +584,7 @@ _get_profile (GstVaH264Dec * self, const GstH264SPS * sps, gint max_dpb_size)
} }
for (j = 0; j < i && j < G_N_ELEMENTS (profiles); j++) { for (j = 0; j < i && j < G_N_ELEMENTS (profiles); j++) {
if (gst_va_decoder_has_profile (self->decoder, profiles[j])) if (gst_va_decoder_has_profile (base->decoder, profiles[j]))
return profiles[j]; return profiles[j];
} }
@ -686,6 +597,7 @@ static gboolean
gst_va_h264_dec_new_sequence (GstH264Decoder * decoder, const GstH264SPS * sps, gst_va_h264_dec_new_sequence (GstH264Decoder * decoder, const GstH264SPS * sps,
gint max_dpb_size) gint max_dpb_size)
{ {
GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaH264Dec *self = GST_VA_H264_DEC (decoder); GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
VAProfile profile; VAProfile profile;
gint display_width; gint display_width;
@ -713,10 +625,10 @@ gst_va_h264_dec_new_sequence (GstH264Decoder * decoder, const GstH264SPS * sps,
if (rt_format == 0) if (rt_format == 0)
return FALSE; return FALSE;
if (gst_va_decoder_format_changed (self->decoder, profile, if (gst_va_decoder_format_changed (base->decoder, profile,
rt_format, sps->width, sps->height)) { rt_format, sps->width, sps->height)) {
self->profile = profile; base->profile = profile;
self->rt_format = rt_format; base->rt_format = rt_format;
self->coded_width = sps->width; self->coded_width = sps->width;
self->coded_height = sps->height; self->coded_height = sps->height;
@ -726,27 +638,28 @@ gst_va_h264_dec_new_sequence (GstH264Decoder * decoder, const GstH264SPS * sps,
self->coded_height); self->coded_height);
} }
if (self->display_width != display_width if (base->width != display_width || base->height != display_height) {
|| self->display_height != display_height) { base->width = display_width;
self->display_width = display_width; base->height = display_height;
self->display_height = display_height;
negotiation_needed = TRUE; negotiation_needed = TRUE;
GST_INFO_OBJECT (self, "Resolution changed to %dx%d", self->display_width, GST_INFO_OBJECT (self, "Resolution changed to %dx%d", base->width,
self->display_height); base->height);
} }
self->need_videoalign = self->display_width < self->coded_width base->need_valign = base->width < self->coded_width
|| self->display_height < self->coded_height; || base->height < self->coded_height;
if (self->need_videoalign) { if (base->need_valign) {
/* *INDENT-OFF* */ /* *INDENT-OFF* */
self->valign = (GstVideoAlignment) { base->valign = (GstVideoAlignment) {
.padding_bottom = self->coded_height - self->display_height, .padding_bottom = self->coded_height - base->height,
.padding_left = self->coded_width - self->display_width, .padding_left = self->coded_width - base->width,
}; };
/* *INDENT-ON* */ /* *INDENT-ON* */
} }
base->min_buffers = self->dpb_size + 4; /* dpb size + scratch surfaces */
if (negotiation_needed) { if (negotiation_needed) {
self->need_negotiation = TRUE; self->need_negotiation = TRUE;
if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) { if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
@ -755,7 +668,7 @@ gst_va_h264_dec_new_sequence (GstH264Decoder * decoder, const GstH264SPS * sps,
} }
} }
if (!self->has_videometa) { if (!base->has_videometa) {
GstBufferPool *pool; GstBufferPool *pool;
pool = gst_video_decoder_get_buffer_pool (GST_VIDEO_DECODER (self)); pool = gst_video_decoder_get_buffer_pool (GST_VIDEO_DECODER (self));
@ -769,33 +682,6 @@ gst_va_h264_dec_new_sequence (GstH264Decoder * decoder, const GstH264SPS * sps,
return TRUE; return TRUE;
} }
static gboolean
gst_va_h264_dec_open (GstVideoDecoder * decoder)
{
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
GstVaH264DecClass *klass = GST_VA_H264_DEC_GET_CLASS (decoder);
if (!gst_va_ensure_element_data (decoder, klass->render_device_path,
&self->display))
return FALSE;
if (!self->decoder)
self->decoder = gst_va_decoder_new (self->display, H264);
return (self->decoder != NULL);
}
static gboolean
gst_va_h264_dec_close (GstVideoDecoder * decoder)
{
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
gst_clear_object (&self->decoder);
gst_clear_object (&self->display);
return TRUE;
}
static GstCaps * static GstCaps *
_complete_sink_caps (GstCaps * sinkcaps) _complete_sink_caps (GstCaps * sinkcaps)
{ {
@ -825,13 +711,13 @@ _complete_sink_caps (GstCaps * sinkcaps)
} }
static GstCaps * static GstCaps *
gst_va_h264_dec_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter) gst_va_h264_dec_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
{ {
GstCaps *sinkcaps, *caps = NULL, *tmp; GstCaps *sinkcaps, *caps = NULL, *tmp;
GstVaH264Dec *self = GST_VA_H264_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
if (self->decoder) if (base->decoder)
caps = gst_va_decoder_get_sinkpad_caps (self->decoder); caps = gst_va_decoder_get_sinkpad_caps (base->decoder);
if (caps) { if (caps) {
sinkcaps = _complete_sink_caps (caps); sinkcaps = _complete_sink_caps (caps);
@ -844,7 +730,7 @@ gst_va_h264_dec_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
} else { } else {
caps = sinkcaps; caps = sinkcaps;
} }
GST_LOG_OBJECT (self, "Returning caps %" GST_PTR_FORMAT, caps); GST_LOG_OBJECT (base, "Returning caps %" GST_PTR_FORMAT, caps);
} else if (!caps) { } else if (!caps) {
caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter); caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
} }
@ -852,182 +738,10 @@ gst_va_h264_dec_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
return caps; return caps;
} }
static gboolean
gst_va_h264_dec_src_query (GstVideoDecoder * decoder, GstQuery * query)
{
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
gboolean ret = FALSE;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CONTEXT:{
return gst_va_handle_context_query (GST_ELEMENT_CAST (self), query,
self->display);
}
case GST_QUERY_CAPS:{
GstCaps *caps = NULL, *tmp, *filter = NULL;
gboolean fixed_caps;
gst_query_parse_caps (query, &filter);
fixed_caps = GST_PAD_IS_FIXED_CAPS (GST_VIDEO_DECODER_SRC_PAD (decoder));
if (!fixed_caps && self->decoder)
caps = gst_va_decoder_get_srcpad_caps (self->decoder);
if (caps) {
if (filter) {
tmp =
gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (caps);
caps = tmp;
}
GST_LOG_OBJECT (self, "Returning caps %" GST_PTR_FORMAT, caps);
gst_query_set_caps_result (query, caps);
gst_caps_unref (caps);
ret = TRUE;
break;
}
/* else jump to default */
}
default:
ret = GST_VIDEO_DECODER_CLASS (parent_class)->src_query (decoder, query);
break;
}
return ret;
}
static gboolean
gst_va_h264_dec_sink_query (GstVideoDecoder * decoder, GstQuery * query)
{
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
if (GST_QUERY_TYPE (query) == GST_QUERY_CONTEXT) {
return gst_va_handle_context_query (GST_ELEMENT_CAST (self), query,
self->display);
}
return GST_VIDEO_DECODER_CLASS (parent_class)->sink_query (decoder, query);
}
static gboolean
gst_va_h264_dec_stop (GstVideoDecoder * decoder)
{
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
if (!gst_va_decoder_close (self->decoder))
return FALSE;
if (self->output_state)
gst_video_codec_state_unref (self->output_state);
self->output_state = NULL;
if (self->other_pool)
gst_buffer_pool_set_active (self->other_pool, FALSE);
gst_clear_object (&self->other_pool);
return GST_VIDEO_DECODER_CLASS (parent_class)->stop (decoder);
}
static GstVideoFormat
_default_video_format_from_chroma (guint chroma_type)
{
switch (chroma_type) {
case VA_RT_FORMAT_YUV420:
case VA_RT_FORMAT_YUV422:
case VA_RT_FORMAT_YUV444:
return GST_VIDEO_FORMAT_NV12;
case VA_RT_FORMAT_YUV420_10:
case VA_RT_FORMAT_YUV422_10:
case VA_RT_FORMAT_YUV444_10:
return GST_VIDEO_FORMAT_P010_10LE;
default:
return GST_VIDEO_FORMAT_UNKNOWN;
}
}
static void
_get_preferred_format_and_caps_features (GstVaH264Dec * self,
GstVideoFormat * format, GstCapsFeatures ** capsfeatures)
{
GstCaps *peer_caps, *preferred_caps = NULL;
GstCapsFeatures *features;
GstStructure *structure;
const GValue *v_format;
guint num_structures, i;
peer_caps = gst_pad_get_allowed_caps (GST_VIDEO_DECODER_SRC_PAD (self));
GST_DEBUG_OBJECT (self, "Allowed caps %" GST_PTR_FORMAT, peer_caps);
/* prefer memory:VASurface over other caps features */
num_structures = gst_caps_get_size (peer_caps);
for (i = 0; i < num_structures; i++) {
features = gst_caps_get_features (peer_caps, i);
structure = gst_caps_get_structure (peer_caps, i);
if (gst_caps_features_is_any (features))
continue;
if (gst_caps_features_contains (features, "memory:VAMemory")) {
preferred_caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
gst_caps_set_features_simple (preferred_caps,
gst_caps_features_copy (features));
break;
}
}
if (!preferred_caps)
preferred_caps = peer_caps;
else
gst_clear_caps (&peer_caps);
if (gst_caps_is_empty (preferred_caps)
|| gst_caps_is_any (preferred_caps)) {
/* if any or not linked yet then system memory and nv12 */
if (capsfeatures)
*capsfeatures = NULL;
if (format)
*format = _default_video_format_from_chroma (self->rt_format);
goto bail;
}
features = gst_caps_get_features (preferred_caps, 0);
if (features && capsfeatures)
*capsfeatures = gst_caps_features_copy (features);
if (!format)
goto bail;
structure = gst_caps_get_structure (preferred_caps, 0);
v_format = gst_structure_get_value (structure, "format");
if (!v_format)
*format = _default_video_format_from_chroma (self->rt_format);
else if (G_VALUE_HOLDS_STRING (v_format))
*format = gst_video_format_from_string (g_value_get_string (v_format));
else if (GST_VALUE_HOLDS_LIST (v_format)) {
guint num_values = gst_value_list_get_size (v_format);
for (i = 0; i < num_values; i++) {
GstVideoFormat fmt;
const GValue *v_fmt = gst_value_list_get_value (v_format, i);
if (!v_fmt)
continue;
fmt = gst_video_format_from_string (g_value_get_string (v_fmt));
if (gst_va_chroma_from_video_format (fmt) == self->rt_format) {
*format = fmt;
break;
}
}
if (i == num_values)
*format = _default_video_format_from_chroma (self->rt_format);
}
bail:
gst_clear_caps (&preferred_caps);
}
static gboolean static gboolean
gst_va_h264_dec_negotiate (GstVideoDecoder * decoder) gst_va_h264_dec_negotiate (GstVideoDecoder * decoder)
{ {
GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaH264Dec *self = GST_VA_H264_DEC (decoder); GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN; GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
GstCapsFeatures *capsfeatures = NULL; GstCapsFeatures *capsfeatures = NULL;
@ -1039,228 +753,41 @@ gst_va_h264_dec_negotiate (GstVideoDecoder * decoder)
self->need_negotiation = FALSE; self->need_negotiation = FALSE;
if (gst_va_decoder_is_open (self->decoder) if (gst_va_decoder_is_open (base->decoder)
&& !gst_va_decoder_close (self->decoder)) && !gst_va_decoder_close (base->decoder))
return FALSE; return FALSE;
if (!gst_va_decoder_open (self->decoder, self->profile, self->rt_format)) if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format))
return FALSE; return FALSE;
if (!gst_va_decoder_set_format (self->decoder, self->coded_width, if (!gst_va_decoder_set_format (base->decoder, self->coded_width,
self->coded_height, NULL)) self->coded_height, NULL))
return FALSE; return FALSE;
if (self->output_state) if (base->output_state)
gst_video_codec_state_unref (self->output_state); gst_video_codec_state_unref (base->output_state);
_get_preferred_format_and_caps_features (self, &format, &capsfeatures); gst_va_base_dec_get_preferred_format_and_caps_features (base, &format,
&capsfeatures);
self->output_state = base->output_state =
gst_video_decoder_set_output_state (decoder, format, gst_video_decoder_set_output_state (decoder, format,
self->display_width, self->display_height, h264dec->input_state); base->width, base->height, h264dec->input_state);
self->output_state->caps = gst_video_info_to_caps (&self->output_state->info); base->output_state->caps = gst_video_info_to_caps (&base->output_state->info);
if (capsfeatures) if (capsfeatures)
gst_caps_set_features_simple (self->output_state->caps, capsfeatures); gst_caps_set_features_simple (base->output_state->caps, capsfeatures);
GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT, GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT,
self->output_state->caps); base->output_state->caps);
return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder); return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
} }
static GstAllocator *
_create_allocator (GstVaH264Dec * self, GstCaps * caps)
{
GstAllocator *allocator = NULL;
if (gst_caps_is_dmabuf (caps))
allocator = gst_va_dmabuf_allocator_new (self->display);
else {
GArray *surface_formats =
gst_va_decoder_get_surface_formats (self->decoder);
allocator = gst_va_allocator_new (self->display, surface_formats);
}
return allocator;
}
/* 1. get allocator in query
* 1.1 if allocator is not ours and downstream doesn't handle
* videometa, keep it for other_pool
* 2. get pool in query
* 2.1 if pool is not va, keep it as other_pool if downstream
* doesn't handle videometa or (it doesn't handle alignment and
* the stream needs cropping)
* 2.2 if there's no pool in query and downstream doesn't handle
* videometa, create other_pool as GstVideoPool with the non-va
* from query and query's params
* 3. create our allocator and pool if they aren't in query
* 4. add or update pool and allocator in query
* 5. set our custom pool configuration
*/
static gboolean
gst_va_h264_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
GstAllocator *allocator = NULL, *other_allocator = NULL;
GstAllocationParams other_params, params;
GstBufferPool *pool = NULL;
GstCaps *caps = NULL;
GstStructure *config;
GstVideoInfo info;
GstVaH264Dec *self = GST_VA_H264_DEC (decoder);
guint size = 0, min, max;
gboolean update_pool = FALSE, update_allocator = FALSE, has_videoalignment;
gst_query_parse_allocation (query, &caps, NULL);
if (!(caps && gst_video_info_from_caps (&info, caps)))
goto wrong_caps;
self->has_videometa = gst_query_find_allocation_meta (query,
GST_VIDEO_META_API_TYPE, NULL);
if (gst_query_get_n_allocation_params (query) > 0) {
gst_query_parse_nth_allocation_param (query, 0, &allocator, &other_params);
if (allocator && !(GST_IS_VA_DMABUF_ALLOCATOR (allocator)
|| GST_IS_VA_ALLOCATOR (allocator))) {
/* save the allocator for the other pool */
other_allocator = allocator;
allocator = NULL;
}
update_allocator = TRUE;
} else {
gst_allocation_params_init (&other_params);
}
gst_allocation_params_init (&params);
if (gst_query_get_n_allocation_pools (query) > 0) {
gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
if (pool) {
if (!GST_IS_VA_POOL (pool)) {
has_videoalignment = gst_buffer_pool_has_option (pool,
GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
if (!self->has_videometa || (!has_videoalignment
&& self->need_videoalign)) {
GST_DEBUG_OBJECT (self,
"keeping other pool for copy %" GST_PTR_FORMAT, pool);
gst_object_replace ((GstObject **) & self->other_pool,
(GstObject *) pool);
gst_object_unref (pool); /* decrease previous increase */
}
gst_clear_object (&pool);
}
}
min += self->dpb_size + 4; /* min + dbp size + scratch surfaces */
size = MAX (size, GST_VIDEO_INFO_SIZE (&info));
update_pool = TRUE;
} else {
size = GST_VIDEO_INFO_SIZE (&info);
if (!self->has_videometa && !gst_caps_is_vamemory (caps)) {
GST_DEBUG_OBJECT (self, "making new other pool for copy");
self->other_pool = gst_video_buffer_pool_new ();
config = gst_buffer_pool_get_config (self->other_pool);
gst_buffer_pool_config_set_params (config, caps, size, 0, 0);
gst_buffer_pool_config_set_allocator (config, other_allocator,
&other_params);
if (!gst_buffer_pool_set_config (self->other_pool, config)) {
GST_ERROR_OBJECT (self, "couldn't configure other pool for copy");
gst_clear_object (&self->other_pool);
}
} else {
gst_clear_object (&other_allocator);
}
min = self->dpb_size + 4; /* dpb size + scratch surfaces */
max = 0;
}
if (!allocator) {
if (!(allocator = _create_allocator (self, caps)))
return FALSE;
}
if (!pool)
pool = gst_va_pool_new ();
{
GstStructure *config = gst_buffer_pool_get_config (pool);
gst_buffer_pool_config_set_params (config, caps, size, min, max);
gst_buffer_pool_config_set_allocator (config, allocator, &params);
gst_buffer_pool_config_add_option (config,
GST_BUFFER_POOL_OPTION_VIDEO_META);
if (self->need_videoalign) {
gst_buffer_pool_config_add_option (config,
GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
gst_buffer_pool_config_set_video_alignment (config, &self->valign);
}
gst_buffer_pool_config_set_va_allocation_params (config,
VA_SURFACE_ATTRIB_USAGE_HINT_DECODER);
if (!gst_buffer_pool_set_config (pool, config))
return FALSE;
}
if (update_allocator)
gst_query_set_nth_allocation_param (query, 0, allocator, &params);
else
gst_query_add_allocation_param (query, allocator, &params);
if (update_pool)
gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
else
gst_query_add_allocation_pool (query, pool, size, min, max);
gst_object_unref (allocator);
gst_object_unref (pool);
return GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
query);
wrong_caps:
{
GST_WARNING_OBJECT (self, "No valid caps");
return FALSE;
}
}
static void
gst_va_h264_dec_set_context (GstElement * element, GstContext * context)
{
GstVaDisplay *old_display, *new_display;
GstVaH264Dec *self = GST_VA_H264_DEC (element);
GstVaH264DecClass *klass = GST_VA_H264_DEC_GET_CLASS (self);
gboolean ret;
old_display = self->display ? gst_object_ref (self->display) : NULL;
ret = gst_va_handle_set_context (element, context, klass->render_device_path,
&self->display);
new_display = self->display ? gst_object_ref (self->display) : NULL;
if (!ret
|| (old_display && new_display && old_display != new_display
&& self->decoder)) {
GST_ELEMENT_WARNING (element, RESOURCE, BUSY,
("Can't replace VA display while operating"), (NULL));
}
gst_clear_object (&old_display);
gst_clear_object (&new_display);
GST_ELEMENT_CLASS (parent_class)->set_context (element, context);
}
static void static void
gst_va_h264_dec_dispose (GObject * object) gst_va_h264_dec_dispose (GObject * object)
{ {
gst_va_h264_dec_close (GST_VIDEO_DECODER (object)); gst_va_base_dec_close (GST_VIDEO_DECODER (object));
G_OBJECT_CLASS (parent_class)->dispose (object); G_OBJECT_CLASS (parent_class)->dispose (object);
} }
@ -1268,19 +795,13 @@ static void
gst_va_h264_dec_class_init (gpointer g_class, gpointer class_data) gst_va_h264_dec_class_init (gpointer g_class, gpointer class_data)
{ {
GstCaps *src_doc_caps, *sink_doc_caps; GstCaps *src_doc_caps, *sink_doc_caps;
GstPadTemplate *sink_pad_templ, *src_pad_templ;
GObjectClass *gobject_class = G_OBJECT_CLASS (g_class); GObjectClass *gobject_class = G_OBJECT_CLASS (g_class);
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class); GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
GstH264DecoderClass *h264decoder_class = GST_H264_DECODER_CLASS (g_class); GstH264DecoderClass *h264decoder_class = GST_H264_DECODER_CLASS (g_class);
GstVaH264DecClass *klass = GST_VA_H264_DEC_CLASS (g_class);
GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class); GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class);
struct CData *cdata = class_data; struct CData *cdata = class_data;
gchar *long_name; gchar *long_name;
parent_class = g_type_class_peek_parent (g_class);
klass->render_device_path = g_strdup (cdata->render_device_path);
if (cdata->description) { if (cdata->description) {
long_name = g_strdup_printf ("VA-API H.264 Decoder in %s", long_name = g_strdup_printf ("VA-API H.264 Decoder in %s",
cdata->description); cdata->description);
@ -1293,33 +814,17 @@ gst_va_h264_dec_class_init (gpointer g_class, gpointer class_data)
"VA-API based H.264 video decoder", "VA-API based H.264 video decoder",
"Víctor Jáquez <vjaquez@igalia.com>"); "Víctor Jáquez <vjaquez@igalia.com>");
sink_pad_templ = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
cdata->sink_caps);
gst_element_class_add_pad_template (element_class, sink_pad_templ);
sink_doc_caps = gst_caps_from_string (sink_caps_str); sink_doc_caps = gst_caps_from_string (sink_caps_str);
gst_pad_template_set_documentation_caps (sink_pad_templ, sink_doc_caps);
gst_caps_unref (sink_doc_caps);
src_pad_templ = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
cdata->src_caps);
gst_element_class_add_pad_template (element_class, src_pad_templ);
src_doc_caps = gst_caps_from_string (src_caps_str); src_doc_caps = gst_caps_from_string (src_caps_str);
gst_pad_template_set_documentation_caps (src_pad_templ, src_doc_caps);
gst_caps_unref (src_doc_caps); gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), H264,
cdata->render_device_path, cdata->sink_caps, cdata->src_caps,
src_doc_caps, sink_doc_caps);
gobject_class->dispose = gst_va_h264_dec_dispose; gobject_class->dispose = gst_va_h264_dec_dispose;
element_class->set_context = GST_DEBUG_FUNCPTR (gst_va_h264_dec_set_context); decoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_va_h264_dec_getcaps);
decoder_class->open = GST_DEBUG_FUNCPTR (gst_va_h264_dec_open);
decoder_class->close = GST_DEBUG_FUNCPTR (gst_va_h264_dec_close);
decoder_class->stop = GST_DEBUG_FUNCPTR (gst_va_h264_dec_stop);
decoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_va_h264_dec_sink_getcaps);
decoder_class->src_query = GST_DEBUG_FUNCPTR (gst_va_h264_dec_src_query);
decoder_class->sink_query = GST_DEBUG_FUNCPTR (gst_va_h264_dec_sink_query);
decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_h264_dec_negotiate); decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_h264_dec_negotiate);
decoder_class->decide_allocation =
GST_DEBUG_FUNCPTR (gst_va_h264_dec_decide_allocation);
h264decoder_class->new_sequence = h264decoder_class->new_sequence =
GST_DEBUG_FUNCPTR (gst_va_h264_dec_new_sequence); GST_DEBUG_FUNCPTR (gst_va_h264_dec_new_sequence);
@ -1346,6 +851,7 @@ gst_va_h264_dec_class_init (gpointer g_class, gpointer class_data)
static void static void
gst_va_h264_dec_init (GTypeInstance * instance, gpointer g_class) gst_va_h264_dec_init (GTypeInstance * instance, gpointer g_class)
{ {
gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT);
gst_h264_decoder_set_process_ref_pic_lists (GST_H264_DECODER (instance), gst_h264_decoder_set_process_ref_pic_lists (GST_H264_DECODER (instance),
TRUE); TRUE);
} }

View file

@ -45,22 +45,15 @@
#include "gstvavp8dec.h" #include "gstvavp8dec.h"
#include <gst/codecs/gstvp8decoder.h> #include "gstvabasedec.h"
#include <va/va_drmcommon.h>
#include "gstvaallocator.h"
#include "gstvacaps.h"
#include "gstvadecoder.h"
#include "gstvadevice.h"
#include "gstvadisplay_drm.h"
#include "gstvapool.h" #include "gstvapool.h"
#include "gstvaprofile.h"
#include "gstvautils.h"
#include "gstvavideoformat.h"
GST_DEBUG_CATEGORY_STATIC (gst_va_vp8dec_debug); GST_DEBUG_CATEGORY_STATIC (gst_va_vp8dec_debug);
#ifndef GST_DISABLE_GST_DEBUG
#define GST_CAT_DEFAULT gst_va_vp8dec_debug #define GST_CAT_DEFAULT gst_va_vp8dec_debug
#else
#define GST_CAT_DEFAULT NULL
#endif
#define GST_VA_VP8_DEC(obj) ((GstVaVp8Dec *) obj) #define GST_VA_VP8_DEC(obj) ((GstVaVp8Dec *) obj)
#define GST_VA_VP8_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaVp8DecClass)) #define GST_VA_VP8_DEC_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj), G_TYPE_FROM_INSTANCE (obj), GstVaVp8DecClass))
@ -71,42 +64,21 @@ typedef struct _GstVaVp8DecClass GstVaVp8DecClass;
struct _GstVaVp8DecClass struct _GstVaVp8DecClass
{ {
GstVp8DecoderClass parent_class; GstVaBaseDecClass parent_class;
gchar *render_device_path;
}; };
struct _GstVaVp8Dec struct _GstVaVp8Dec
{ {
GstVp8Decoder parent; GstVaBaseDec parent;
GstVaDisplay *display;
GstVaDecoder *decoder;
GstBufferPool *other_pool;
GstFlowReturn last_ret; GstFlowReturn last_ret;
GstVideoCodecState *output_state;
VAProfile profile;
gint width;
gint height;
gboolean need_negotiation; gboolean need_negotiation;
guint rt_format;
gboolean has_videometa;
gboolean copy_frames; gboolean copy_frames;
}; };
struct CData #define parent_class gst_va_base_dec_parent_class
{ extern gpointer gst_va_base_dec_parent_class;
gchar *render_device_path;
gchar *description;
GstCaps *sink_caps;
GstCaps *src_caps;
};
static GstElementClass *parent_class = NULL;
/* *INDENT-OFF* */ /* *INDENT-OFF* */
static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VAMemory", static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VAMemory",
@ -115,241 +87,13 @@ static const gchar *src_caps_str = GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("memory:VA
static const gchar *sink_caps_str = "video/x-vp8"; static const gchar *sink_caps_str = "video/x-vp8";
static gboolean
gst_va_vp8_dec_open (GstVideoDecoder * decoder)
{
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
GstVaVp8DecClass *klass = GST_VA_VP8_DEC_GET_CLASS (decoder);
if (!gst_va_ensure_element_data (decoder, klass->render_device_path,
&self->display))
return FALSE;
if (!self->decoder)
self->decoder = gst_va_decoder_new (self->display, VP8);
return (self->decoder != NULL);
}
static gboolean
gst_va_vp8_dec_close (GstVideoDecoder * decoder)
{
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
gst_clear_object (&self->decoder);
gst_clear_object (&self->display);
return TRUE;
}
static GstCaps *
gst_va_vp8_dec_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
{
GstCaps *sinkcaps, *caps = NULL, *tmp;
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
if (self->decoder)
caps = gst_va_decoder_get_sinkpad_caps (self->decoder);
if (caps) {
sinkcaps = gst_caps_copy (caps);
gst_caps_unref (caps);
if (filter) {
tmp = gst_caps_intersect_full (filter, sinkcaps,
GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (sinkcaps);
caps = tmp;
} else {
caps = sinkcaps;
}
GST_LOG_OBJECT (self, "Returning caps %" GST_PTR_FORMAT, caps);
} else if (!caps) {
caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
}
return caps;
}
static gboolean
gst_va_vp8_dec_src_query (GstVideoDecoder * decoder, GstQuery * query)
{
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
gboolean ret = FALSE;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CONTEXT:{
return gst_va_handle_context_query (GST_ELEMENT_CAST (self), query,
self->display);
}
case GST_QUERY_CAPS:{
GstCaps *caps = NULL, *tmp, *filter = NULL;
gboolean fixed_caps;
gst_query_parse_caps (query, &filter);
fixed_caps = GST_PAD_IS_FIXED_CAPS (GST_VIDEO_DECODER_SRC_PAD (decoder));
if (!fixed_caps && self->decoder)
caps = gst_va_decoder_get_srcpad_caps (self->decoder);
if (caps) {
if (filter) {
tmp =
gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (caps);
caps = tmp;
}
GST_LOG_OBJECT (self, "Returning caps %" GST_PTR_FORMAT, caps);
gst_query_set_caps_result (query, caps);
gst_caps_unref (caps);
ret = TRUE;
break;
}
/* else jump to default */
}
default:
ret = GST_VIDEO_DECODER_CLASS (parent_class)->src_query (decoder, query);
break;
}
return ret;
}
static gboolean
gst_va_vp8_dec_sink_query (GstVideoDecoder * decoder, GstQuery * query)
{
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
if (GST_QUERY_TYPE (query) == GST_QUERY_CONTEXT) {
return gst_va_handle_context_query (GST_ELEMENT_CAST (self), query,
self->display);
}
return GST_VIDEO_DECODER_CLASS (parent_class)->sink_query (decoder, query);
}
static gboolean
gst_va_vp8_dec_stop (GstVideoDecoder * decoder)
{
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
if (!gst_va_decoder_close (self->decoder))
return FALSE;
if (self->output_state)
gst_video_codec_state_unref (self->output_state);
self->output_state = NULL;
if (self->other_pool)
gst_buffer_pool_set_active (self->other_pool, FALSE);
gst_clear_object (&self->other_pool);
return GST_VIDEO_DECODER_CLASS (parent_class)->stop (decoder);
}
static GstVideoFormat
_default_video_format_from_chroma (guint chroma_type)
{
switch (chroma_type) {
case VA_RT_FORMAT_YUV420:
case VA_RT_FORMAT_YUV422:
case VA_RT_FORMAT_YUV444:
return GST_VIDEO_FORMAT_NV12;
case VA_RT_FORMAT_YUV420_10:
case VA_RT_FORMAT_YUV422_10:
case VA_RT_FORMAT_YUV444_10:
return GST_VIDEO_FORMAT_P010_10LE;
default:
return GST_VIDEO_FORMAT_UNKNOWN;
}
}
static void
_get_preferred_format_and_caps_features (GstVaVp8Dec * self,
GstVideoFormat * format, GstCapsFeatures ** capsfeatures)
{
GstCaps *peer_caps, *preferred_caps = NULL;
GstCapsFeatures *features;
GstStructure *structure;
const GValue *v_format;
guint num_structures, i;
peer_caps = gst_pad_get_allowed_caps (GST_VIDEO_DECODER_SRC_PAD (self));
GST_DEBUG_OBJECT (self, "Allowed caps %" GST_PTR_FORMAT, peer_caps);
/* prefer memory:VASurface over other caps features */
num_structures = gst_caps_get_size (peer_caps);
for (i = 0; i < num_structures; i++) {
features = gst_caps_get_features (peer_caps, i);
structure = gst_caps_get_structure (peer_caps, i);
if (gst_caps_features_is_any (features))
continue;
if (gst_caps_features_contains (features, "memory:VAMemory")) {
preferred_caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
gst_caps_set_features_simple (preferred_caps,
gst_caps_features_copy (features));
break;
}
}
if (!preferred_caps)
preferred_caps = peer_caps;
else
gst_clear_caps (&peer_caps);
if (gst_caps_is_empty (preferred_caps)
|| gst_caps_is_any (preferred_caps)) {
/* if any or not linked yet then system memory and nv12 */
if (capsfeatures)
*capsfeatures = NULL;
if (format)
*format = _default_video_format_from_chroma (self->rt_format);
goto bail;
}
features = gst_caps_get_features (preferred_caps, 0);
if (features && capsfeatures)
*capsfeatures = gst_caps_features_copy (features);
if (!format)
goto bail;
structure = gst_caps_get_structure (preferred_caps, 0);
v_format = gst_structure_get_value (structure, "format");
if (!v_format)
*format = _default_video_format_from_chroma (self->rt_format);
else if (G_VALUE_HOLDS_STRING (v_format))
*format = gst_video_format_from_string (g_value_get_string (v_format));
else if (GST_VALUE_HOLDS_LIST (v_format)) {
guint num_values = gst_value_list_get_size (v_format);
for (i = 0; i < num_values; i++) {
GstVideoFormat fmt;
const GValue *v_fmt = gst_value_list_get_value (v_format, i);
if (!v_fmt)
continue;
fmt = gst_video_format_from_string (g_value_get_string (v_fmt));
if (gst_va_chroma_from_video_format (fmt) == self->rt_format) {
*format = fmt;
break;
}
}
if (i == num_values)
*format = _default_video_format_from_chroma (self->rt_format);
}
bail:
gst_clear_caps (&preferred_caps);
}
static gboolean static gboolean
gst_va_vp8_dec_negotiate (GstVideoDecoder * decoder) gst_va_vp8_dec_negotiate (GstVideoDecoder * decoder)
{ {
GstCapsFeatures *capsfeatures = NULL;
GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN; GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
GstCapsFeatures *capsfeatures = NULL;
GstVp8Decoder *vp8dec = GST_VP8_DECODER (decoder); GstVp8Decoder *vp8dec = GST_VP8_DECODER (decoder);
/* Ignore downstream renegotiation request. */ /* Ignore downstream renegotiation request. */
@ -358,215 +102,37 @@ gst_va_vp8_dec_negotiate (GstVideoDecoder * decoder)
self->need_negotiation = FALSE; self->need_negotiation = FALSE;
if (gst_va_decoder_is_open (self->decoder) if (gst_va_decoder_is_open (base->decoder)
&& !gst_va_decoder_close (self->decoder)) && !gst_va_decoder_close (base->decoder))
return FALSE; return FALSE;
if (!gst_va_decoder_open (self->decoder, self->profile, self->rt_format)) if (!gst_va_decoder_open (base->decoder, base->profile, base->rt_format))
return FALSE; return FALSE;
if (!gst_va_decoder_set_format (self->decoder, self->width, self->height, if (!gst_va_decoder_set_format (base->decoder, base->width, base->height,
NULL)) NULL))
return FALSE; return FALSE;
if (self->output_state) if (base->output_state)
gst_video_codec_state_unref (self->output_state); gst_video_codec_state_unref (base->output_state);
_get_preferred_format_and_caps_features (self, &format, &capsfeatures); gst_va_base_dec_get_preferred_format_and_caps_features (base, &format,
&capsfeatures);
self->output_state = base->output_state =
gst_video_decoder_set_output_state (decoder, format, gst_video_decoder_set_output_state (decoder, format,
self->width, self->height, vp8dec->input_state); base->width, base->height, vp8dec->input_state);
self->output_state->caps = gst_video_info_to_caps (&self->output_state->info); base->output_state->caps = gst_video_info_to_caps (&base->output_state->info);
if (capsfeatures) if (capsfeatures)
gst_caps_set_features_simple (self->output_state->caps, capsfeatures); gst_caps_set_features_simple (base->output_state->caps, capsfeatures);
GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT, GST_INFO_OBJECT (self, "Negotiated caps %" GST_PTR_FORMAT,
self->output_state->caps); base->output_state->caps);
return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder); return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
} }
static GstAllocator *
_create_allocator (GstVaVp8Dec * self, GstCaps * caps)
{
GstAllocator *allocator = NULL;
if (gst_caps_is_dmabuf (caps)) {
allocator = gst_va_dmabuf_allocator_new (self->display);
} else {
GArray *surface_formats =
gst_va_decoder_get_surface_formats (self->decoder);
allocator = gst_va_allocator_new (self->display, surface_formats);
}
return allocator;
}
/* 1. get allocator in query
* 1.1 if allocator is not ours and downstream doesn't handle
* videometa, keep it for other_pool
* 2. get pool in query
* 2.1 if pool is not va, keep it as other_pool if downstream
* doesn't handle videometa or (it doesn't handle alignment and
* the stream needs cropping)
* 2.2 if there's no pool in query and downstream doesn't handle
* videometa, create other_pool as GstVideoPool with the non-va
* from query and query's params
* 3. create our allocator and pool if they aren't in query
* 4. add or update pool and allocator in query
* 5. set our custom pool configuration
*/
static gboolean
gst_va_vp8_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
GstAllocator *allocator = NULL, *other_allocator = NULL;
GstAllocationParams other_params, params;
GstBufferPool *pool = NULL;
GstCaps *caps = NULL;
GstStructure *config;
GstVideoInfo info;
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
guint size = 0, min, max;
gboolean update_pool = FALSE, update_allocator = FALSE;
gst_query_parse_allocation (query, &caps, NULL);
if (!(caps && gst_video_info_from_caps (&info, caps)))
goto wrong_caps;
self->has_videometa = gst_query_find_allocation_meta (query,
GST_VIDEO_META_API_TYPE, NULL);
if (gst_query_get_n_allocation_params (query) > 0) {
gst_query_parse_nth_allocation_param (query, 0, &allocator, &other_params);
if (allocator && !(GST_IS_VA_DMABUF_ALLOCATOR (allocator)
|| GST_IS_VA_ALLOCATOR (allocator))) {
/* save the allocator for the other pool */
other_allocator = allocator;
allocator = NULL;
}
update_allocator = TRUE;
} else {
gst_allocation_params_init (&other_params);
}
gst_allocation_params_init (&params);
if (gst_query_get_n_allocation_pools (query) > 0) {
gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
if (pool) {
if (!GST_IS_VA_POOL (pool)) {
if (!self->has_videometa) {
GST_DEBUG_OBJECT (self,
"keeping other pool for copy %" GST_PTR_FORMAT, pool);
gst_object_replace ((GstObject **) & self->other_pool,
(GstObject *) pool);
gst_object_unref (pool); /* decrease previous increase */
}
gst_clear_object (&pool);
}
}
min = MAX (3 + 4, min); /* max num pic references + scratch surfaces */
size = MAX (size, GST_VIDEO_INFO_SIZE (&info));
update_pool = TRUE;
} else {
size = GST_VIDEO_INFO_SIZE (&info);
if (!self->has_videometa && !gst_caps_is_vamemory (caps)) {
GST_DEBUG_OBJECT (self, "making new other pool for copy");
self->other_pool = gst_video_buffer_pool_new ();
config = gst_buffer_pool_get_config (self->other_pool);
gst_buffer_pool_config_set_params (config, caps, size, 0, 0);
gst_buffer_pool_config_set_allocator (config, other_allocator,
&other_params);
if (!gst_buffer_pool_set_config (self->other_pool, config)) {
GST_ERROR_OBJECT (self, "couldn't configure other pool for copy");
gst_clear_object (&self->other_pool);
}
} else {
gst_clear_object (&other_allocator);
}
min = 3 + 4; /* max num pic references + scratch surfaces */
max = 0;
}
if (!allocator) {
if (!(allocator = _create_allocator (self, caps)))
return FALSE;
}
if (!pool)
pool = gst_va_pool_new ();
{
GstStructure *config = gst_buffer_pool_get_config (pool);
gst_buffer_pool_config_set_params (config, caps, size, min, max);
gst_buffer_pool_config_set_allocator (config, allocator, &params);
gst_buffer_pool_config_add_option (config,
GST_BUFFER_POOL_OPTION_VIDEO_META);
gst_buffer_pool_config_set_va_allocation_params (config,
VA_SURFACE_ATTRIB_USAGE_HINT_DECODER);
if (!gst_buffer_pool_set_config (pool, config))
return FALSE;
}
if (update_allocator)
gst_query_set_nth_allocation_param (query, 0, allocator, &params);
else
gst_query_add_allocation_param (query, allocator, &params);
if (update_pool)
gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
else
gst_query_add_allocation_pool (query, pool, size, min, max);
gst_object_unref (allocator);
gst_object_unref (pool);
return GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
query);
wrong_caps:
{
GST_WARNING_OBJECT (self, "No valid caps");
return FALSE;
}
}
static void
gst_va_vp8_dec_set_context (GstElement * element, GstContext * context)
{
  GstVaVp8Dec *self = GST_VA_VP8_DEC (element);
  GstVaVp8DecClass *klass = GST_VA_VP8_DEC_GET_CLASS (self);
  GstVaDisplay *prev_display, *next_display;
  gboolean success;

  /* Snapshot the display before and after delegating to the context
   * handler, so a replacement attempt can be detected afterwards. */
  prev_display = self->display ? gst_object_ref (self->display) : NULL;

  success = gst_va_handle_set_context (element, context,
      klass->render_device_path, &self->display);

  next_display = self->display ? gst_object_ref (self->display) : NULL;

  /* Refuse (with a warning) to swap the VA display once a decoder is
   * already operating on the old one. */
  if (!success || (self->decoder && prev_display && next_display
          && prev_display != next_display)) {
    GST_ELEMENT_WARNING (element, RESOURCE, BUSY,
        ("Can't replace VA display while operating"), (NULL));
  }

  gst_clear_object (&prev_display);
  gst_clear_object (&next_display);

  GST_ELEMENT_CLASS (parent_class)->set_context (element, context);
}
static VAProfile static VAProfile
_get_profile (GstVaVp8Dec * self, const GstVp8FrameHdr * frame_hdr) _get_profile (GstVaVp8Dec * self, const GstVp8FrameHdr * frame_hdr)
{ {
@ -583,6 +149,7 @@ static gboolean
gst_va_vp8_dec_new_sequence (GstVp8Decoder * decoder, gst_va_vp8_dec_new_sequence (GstVp8Decoder * decoder,
const GstVp8FrameHdr * frame_hdr) const GstVp8FrameHdr * frame_hdr)
{ {
GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder);
VAProfile profile; VAProfile profile;
guint rt_format; guint rt_format;
@ -594,7 +161,7 @@ gst_va_vp8_dec_new_sequence (GstVp8Decoder * decoder,
if (profile == VAProfileNone) if (profile == VAProfileNone)
return FALSE; return FALSE;
if (!gst_va_decoder_has_profile (self->decoder, profile)) { if (!gst_va_decoder_has_profile (base->decoder, profile)) {
GST_ERROR_OBJECT (self, "Profile %s is not supported", GST_ERROR_OBJECT (self, "Profile %s is not supported",
gst_va_profile_name (profile)); gst_va_profile_name (profile));
return FALSE; return FALSE;
@ -603,15 +170,17 @@ gst_va_vp8_dec_new_sequence (GstVp8Decoder * decoder,
/* VP8 always use 8 bits 4:2:0 */ /* VP8 always use 8 bits 4:2:0 */
rt_format = VA_RT_FORMAT_YUV420; rt_format = VA_RT_FORMAT_YUV420;
if (gst_va_decoder_format_changed (self->decoder, profile, if (gst_va_decoder_format_changed (base->decoder, profile,
rt_format, frame_hdr->width, frame_hdr->height)) { rt_format, frame_hdr->width, frame_hdr->height)) {
self->profile = profile; base->profile = profile;
self->width = frame_hdr->width; base->width = frame_hdr->width;
self->height = frame_hdr->height; base->height = frame_hdr->height;
self->rt_format = rt_format; base->rt_format = rt_format;
negotiation_needed = TRUE; negotiation_needed = TRUE;
} }
base->min_buffers = 3 + 4; /* max num pic references + scratch surfaces */
if (negotiation_needed) { if (negotiation_needed) {
self->need_negotiation = TRUE; self->need_negotiation = TRUE;
if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) { if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self))) {
@ -620,7 +189,7 @@ gst_va_vp8_dec_new_sequence (GstVp8Decoder * decoder,
} }
} }
if (!self->has_videometa) { if (!base->has_videometa) {
GstBufferPool *pool; GstBufferPool *pool;
pool = gst_video_decoder_get_buffer_pool (GST_VIDEO_DECODER (self)); pool = gst_video_decoder_get_buffer_pool (GST_VIDEO_DECODER (self));
@ -665,7 +234,7 @@ static gboolean
_fill_quant_matrix (GstVp8Decoder * decoder, GstVp8Picture * picture, _fill_quant_matrix (GstVp8Decoder * decoder, GstVp8Picture * picture,
GstVp8Parser * parser) GstVp8Parser * parser)
{ {
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr; GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
GstVp8Segmentation *const seg = &parser->segmentation; GstVp8Segmentation *const seg = &parser->segmentation;
VAIQMatrixBufferVP8 iq_matrix = { }; VAIQMatrixBufferVP8 iq_matrix = { };
@ -677,7 +246,7 @@ _fill_quant_matrix (GstVp8Decoder * decoder, GstVp8Picture * picture,
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
if (seg->segmentation_enabled) { if (seg->segmentation_enabled) {
qi_base = seg->quantizer_update_value[i]; qi_base = seg->quantizer_update_value[i];
if (!seg->segment_feature_mode) // 0 means delta update if (!seg->segment_feature_mode) /* 0 means delta update */
qi_base += frame_hdr->quant_indices.y_ac_qi; qi_base += frame_hdr->quant_indices.y_ac_qi;
} else } else
qi_base = frame_hdr->quant_indices.y_ac_qi; qi_base = frame_hdr->quant_indices.y_ac_qi;
@ -696,20 +265,15 @@ _fill_quant_matrix (GstVp8Decoder * decoder, GstVp8Picture * picture,
iq_matrix.quantization_index[i][5] = CLAMP (qi, 0, QI_MAX); iq_matrix.quantization_index[i][5] = CLAMP (qi, 0, QI_MAX);
} }
if (!gst_va_decoder_add_param_buffer (self->decoder, return gst_va_decoder_add_param_buffer (base->decoder,
gst_vp8_picture_get_user_data (picture), gst_vp8_picture_get_user_data (picture), VAIQMatrixBufferType, &iq_matrix,
VAIQMatrixBufferType, &iq_matrix, sizeof (iq_matrix))) { sizeof (iq_matrix));
GST_WARNING ("fill Inverse Quantization Matrix Buffer error");
return FALSE;
}
return TRUE;
} }
static gboolean static gboolean
_fill_probability_table (GstVp8Decoder * decoder, GstVp8Picture * picture) _fill_probability_table (GstVp8Decoder * decoder, GstVp8Picture * picture)
{ {
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr; GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
VAProbabilityDataBufferVP8 prob_table = { }; VAProbabilityDataBufferVP8 prob_table = { };
@ -717,21 +281,16 @@ _fill_probability_table (GstVp8Decoder * decoder, GstVp8Picture * picture)
memcpy (prob_table.dct_coeff_probs, frame_hdr->token_probs.prob, memcpy (prob_table.dct_coeff_probs, frame_hdr->token_probs.prob,
sizeof (frame_hdr->token_probs.prob)); sizeof (frame_hdr->token_probs.prob));
if (!gst_va_decoder_add_param_buffer (self->decoder, return gst_va_decoder_add_param_buffer (base->decoder,
gst_vp8_picture_get_user_data (picture), gst_vp8_picture_get_user_data (picture), VAProbabilityBufferType,
VAProbabilityBufferType, &prob_table, sizeof (prob_table))) { &prob_table, sizeof (prob_table));
GST_WARNING ("fill Coefficient Probability Data Buffer error");
return FALSE;
}
return TRUE;
} }
static gboolean static gboolean
_fill_picture (GstVp8Decoder * decoder, GstVp8Picture * picture, _fill_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
GstVp8Parser * parser) GstVp8Parser * parser)
{ {
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
VAPictureParameterBufferVP8 pic_param; VAPictureParameterBufferVP8 pic_param;
GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr; GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
@ -746,8 +305,8 @@ _fill_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
/* *INDENT-OFF* */ /* *INDENT-OFF* */
pic_param = (VAPictureParameterBufferVP8) { pic_param = (VAPictureParameterBufferVP8) {
.frame_width = self->width, .frame_width = base->width,
.frame_height = self->height, .frame_height = base->height,
.last_ref_frame = VA_INVALID_SURFACE, .last_ref_frame = VA_INVALID_SURFACE,
.golden_ref_frame = VA_INVALID_SURFACE, .golden_ref_frame = VA_INVALID_SURFACE,
.alt_ref_frame = VA_INVALID_SURFACE, .alt_ref_frame = VA_INVALID_SURFACE,
@ -824,18 +383,15 @@ _fill_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
sizeof (frame_hdr->mv_probs)); sizeof (frame_hdr->mv_probs));
va_pic = gst_vp8_picture_get_user_data (picture); va_pic = gst_vp8_picture_get_user_data (picture);
if (!gst_va_decoder_add_param_buffer (self->decoder, va_pic, return gst_va_decoder_add_param_buffer (base->decoder, va_pic,
VAPictureParameterBufferType, &pic_param, sizeof (pic_param))) VAPictureParameterBufferType, &pic_param, sizeof (pic_param));
return FALSE;
return TRUE;
} }
static gboolean static gboolean
_add_slice (GstVp8Decoder * decoder, GstVp8Picture * picture, _add_slice (GstVp8Decoder * decoder, GstVp8Picture * picture,
GstVp8Parser * parser) GstVp8Parser * parser)
{ {
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr; GstVp8FrameHdr const *frame_hdr = &picture->frame_hdr;
VASliceParameterBufferVP8 slice_param; VASliceParameterBufferVP8 slice_param;
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
@ -858,7 +414,7 @@ _add_slice (GstVp8Decoder * decoder, GstVp8Picture * picture,
slice_param.partition_size[i] = 0; slice_param.partition_size[i] = 0;
va_pic = gst_vp8_picture_get_user_data (picture); va_pic = gst_vp8_picture_get_user_data (picture);
return gst_va_decoder_add_slice_buffer (self->decoder, va_pic, &slice_param, return gst_va_decoder_add_slice_buffer (base->decoder, va_pic, &slice_param,
sizeof (slice_param), (gpointer) picture->data, picture->size); sizeof (slice_param), (gpointer) picture->data, picture->size);
} }
@ -866,11 +422,9 @@ static gboolean
gst_va_vp8_dec_decode_picture (GstVp8Decoder * decoder, GstVp8Picture * picture, gst_va_vp8_dec_decode_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
GstVp8Parser * parser) GstVp8Parser * parser)
{ {
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
GST_TRACE_OBJECT (self, "-");
if (!_fill_picture (decoder, picture, parser)) if (!_fill_picture (decoder, picture, parser))
goto error; goto error;
@ -881,9 +435,9 @@ gst_va_vp8_dec_decode_picture (GstVp8Decoder * decoder, GstVp8Picture * picture,
error: error:
{ {
GST_WARNING_OBJECT (self, "Decode the picture error"); GST_WARNING_OBJECT (base, "Decode the picture error");
va_pic = gst_vp8_picture_get_user_data (picture); va_pic = gst_vp8_picture_get_user_data (picture);
gst_va_decoder_destroy_buffers (self->decoder, va_pic); gst_va_decoder_destroy_buffers (base->decoder, va_pic);
return FALSE; return FALSE;
} }
} }
@ -891,70 +445,15 @@ error:
static gboolean static gboolean
gst_va_vp8_dec_end_picture (GstVp8Decoder * decoder, GstVp8Picture * picture) gst_va_vp8_dec_end_picture (GstVp8Decoder * decoder, GstVp8Picture * picture)
{ {
GstVaVp8Dec *self = GST_VA_VP8_DEC (decoder); GstVaBaseDec *base = GST_VA_BASE_DEC (decoder);
GstVaDecodePicture *va_pic; GstVaDecodePicture *va_pic;
GST_LOG_OBJECT (self, "end picture %p, (system_frame_number %d)", GST_LOG_OBJECT (base, "end picture %p, (system_frame_number %d)",
picture, picture->system_frame_number); picture, picture->system_frame_number);
va_pic = gst_vp8_picture_get_user_data (picture); va_pic = gst_vp8_picture_get_user_data (picture);
return gst_va_decoder_decode (self->decoder, va_pic); return gst_va_decoder_decode (base->decoder, va_pic);
}
/* Copies the decoded frame from the VA memory buffer into a buffer from
 * @self->other_pool (system memory), cropping to the coded width/height.
 * Replaces @codec_frame->output_buffer on success.
 *
 * Returns: %TRUE on success; %FALSE if there is no copy pool, the pool
 * can't be activated, or any acquire/map/copy step fails. */
static gboolean
_copy_output_buffer (GstVaVp8Dec * self, GstVideoCodecFrame * codec_frame)
{
  GstVideoFrame src_frame;
  GstVideoFrame dest_frame;
  GstVideoInfo dest_vinfo;
  GstBuffer *buffer = NULL;
  GstFlowReturn ret;

  if (!self->other_pool)
    return FALSE;

  if (!gst_buffer_pool_set_active (self->other_pool, TRUE))
    return FALSE;

  /* Destination info uses the negotiated format but the frame's coded
   * dimensions, since the source buffer may be padded/aligned larger. */
  gst_video_info_set_format (&dest_vinfo,
      GST_VIDEO_INFO_FORMAT (&self->output_state->info), self->width,
      self->height);

  ret = gst_buffer_pool_acquire_buffer (self->other_pool, &buffer, NULL);
  if (ret != GST_FLOW_OK)
    goto fail;

  if (!gst_video_frame_map (&src_frame, &self->output_state->info,
          codec_frame->output_buffer, GST_MAP_READ))
    goto fail;

  if (!gst_video_frame_map (&dest_frame, &dest_vinfo, buffer, GST_MAP_WRITE)) {
    /* Fix: unmap the frame that actually mapped (src), not the one that
     * just failed to map (dest). */
    gst_video_frame_unmap (&src_frame);
    goto fail;
  }

  /* gst_video_frame_copy can crop this, but does not know, so let
   * make it think it's all right */
  GST_VIDEO_INFO_WIDTH (&src_frame.info) = self->width;
  GST_VIDEO_INFO_HEIGHT (&src_frame.info) = self->height;

  if (!gst_video_frame_copy (&dest_frame, &src_frame)) {
    gst_video_frame_unmap (&src_frame);
    gst_video_frame_unmap (&dest_frame);
    goto fail;
  }

  gst_video_frame_unmap (&src_frame);
  gst_video_frame_unmap (&dest_frame);

  gst_buffer_replace (&codec_frame->output_buffer, buffer);
  gst_buffer_unref (buffer);

  return TRUE;

fail:
  /* Fix: release the acquired buffer on error paths instead of leaking it
   * (gst_clear_buffer is a no-op when @buffer is still NULL). */
  gst_clear_buffer (&buffer);
  GST_ERROR_OBJECT (self, "Failed copy output buffer.");
  return FALSE;
}
static GstFlowReturn static GstFlowReturn
@ -974,7 +473,7 @@ gst_va_vp8_dec_output_picture (GstVp8Decoder * decoder,
} }
if (self->copy_frames) if (self->copy_frames)
_copy_output_buffer (self, frame); gst_va_base_dec_copy_output_buffer (GST_VA_BASE_DEC (self), frame);
gst_vp8_picture_unref (picture); gst_vp8_picture_unref (picture);
@ -984,12 +483,13 @@ gst_va_vp8_dec_output_picture (GstVp8Decoder * decoder,
static void static void
gst_va_vp8_dec_init (GTypeInstance * instance, gpointer g_class) gst_va_vp8_dec_init (GTypeInstance * instance, gpointer g_class)
{ {
gst_va_base_dec_init (GST_VA_BASE_DEC (instance), GST_CAT_DEFAULT);
} }
static void static void
gst_va_vp8_dec_dispose (GObject * object) gst_va_vp8_dec_dispose (GObject * object)
{ {
gst_va_vp8_dec_close (GST_VIDEO_DECODER (object)); gst_va_base_dec_close (GST_VIDEO_DECODER (object));
G_OBJECT_CLASS (parent_class)->dispose (object); G_OBJECT_CLASS (parent_class)->dispose (object);
} }
@ -997,19 +497,13 @@ static void
gst_va_vp8_dec_class_init (gpointer g_class, gpointer class_data) gst_va_vp8_dec_class_init (gpointer g_class, gpointer class_data)
{ {
GstCaps *src_doc_caps, *sink_doc_caps; GstCaps *src_doc_caps, *sink_doc_caps;
GstPadTemplate *sink_pad_templ, *src_pad_templ;
GObjectClass *gobject_class = G_OBJECT_CLASS (g_class); GObjectClass *gobject_class = G_OBJECT_CLASS (g_class);
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class); GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
GstVp8DecoderClass *vp8decoder_class = GST_VP8_DECODER_CLASS (g_class); GstVp8DecoderClass *vp8decoder_class = GST_VP8_DECODER_CLASS (g_class);
GstVaVp8DecClass *klass = GST_VA_VP8_DEC_CLASS (g_class);
GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class); GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (g_class);
struct CData *cdata = class_data; struct CData *cdata = class_data;
gchar *long_name; gchar *long_name;
parent_class = g_type_class_peek_parent (g_class);
klass->render_device_path = g_strdup (cdata->render_device_path);
if (cdata->description) { if (cdata->description) {
long_name = g_strdup_printf ("VA-API VP8 Decoder in %s", long_name = g_strdup_printf ("VA-API VP8 Decoder in %s",
cdata->description); cdata->description);
@ -1021,33 +515,16 @@ gst_va_vp8_dec_class_init (gpointer g_class, gpointer class_data)
"Codec/Decoder/Video/Hardware", "Codec/Decoder/Video/Hardware",
"VA-API based VP8 video decoder", "He Junyan <junyan.he@intel.com>"); "VA-API based VP8 video decoder", "He Junyan <junyan.he@intel.com>");
sink_pad_templ = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
cdata->sink_caps);
gst_element_class_add_pad_template (element_class, sink_pad_templ);
sink_doc_caps = gst_caps_from_string (sink_caps_str); sink_doc_caps = gst_caps_from_string (sink_caps_str);
gst_pad_template_set_documentation_caps (sink_pad_templ, sink_doc_caps);
gst_caps_unref (sink_doc_caps);
src_pad_templ = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
cdata->src_caps);
gst_element_class_add_pad_template (element_class, src_pad_templ);
src_doc_caps = gst_caps_from_string (src_caps_str); src_doc_caps = gst_caps_from_string (src_caps_str);
gst_pad_template_set_documentation_caps (src_pad_templ, src_doc_caps);
gst_caps_unref (src_doc_caps); gst_va_base_dec_class_init (GST_VA_BASE_DEC_CLASS (g_class), VP8,
cdata->render_device_path, cdata->sink_caps, cdata->src_caps,
src_doc_caps, sink_doc_caps);
gobject_class->dispose = gst_va_vp8_dec_dispose; gobject_class->dispose = gst_va_vp8_dec_dispose;
element_class->set_context = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_set_context);
decoder_class->open = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_open);
decoder_class->close = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_close);
decoder_class->stop = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_stop);
decoder_class->src_query = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_src_query);
decoder_class->sink_query = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_sink_query);
decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_negotiate); decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_negotiate);
decoder_class->decide_allocation =
GST_DEBUG_FUNCPTR (gst_va_vp8_dec_decide_allocation);
decoder_class->getcaps = GST_DEBUG_FUNCPTR (gst_va_vp8_dec_sink_getcaps);
vp8decoder_class->new_sequence = vp8decoder_class->new_sequence =
GST_DEBUG_FUNCPTR (gst_va_vp8_dec_new_sequence); GST_DEBUG_FUNCPTR (gst_va_vp8_dec_new_sequence);

View file

@ -1,6 +1,7 @@
va_sources = [ va_sources = [
'plugin.c', 'plugin.c',
'gstvaallocator.c', 'gstvaallocator.c',
'gstvabasedec.c',
'gstvacaps.c', 'gstvacaps.c',
'gstvadecoder.c', 'gstvadecoder.c',
'gstvadisplay.c', 'gstvadisplay.c',