codecs: vp9decoder: Sync up with h264decoder implementation

* Pass GstVideoCodecFrame to new_picture() and output_picture()
* Pass the last reference of GstVp9Picture to subclass if possible

Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1455>
Seungha Yang 2020-07-21 23:08:15 +09:00 committed by GStreamer Merge Bot
parent 8821165648
commit e754d5a5b2
3 changed files with 69 additions and 42 deletions
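In short, both vfuncs gain a GstVideoCodecFrame argument, matching the GstH264Decoder interface. As declared in the header diff below, the updated signatures are (note that @frame is nullable as long as superframes are not split by vp9parse):

gboolean      (*new_picture)    (GstVp9Decoder * decoder,
                                 GstVideoCodecFrame * frame,
                                 GstVp9Picture * picture);

GstFlowReturn (*output_picture) (GstVp9Decoder * decoder,
                                 GstVideoCodecFrame * frame,
                                 GstVp9Picture * picture);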


@@ -362,6 +362,7 @@ gst_vp9_decoder_handle_frame (GstVideoDecoder * decoder,
offset = 0;
for (i = 0; i < superframe_info.frames_in_superframe; i++) {
GstVideoCodecFrame *cur_frame = NULL;
cur_hdr = &frame_hdr[i];
if (cur_hdr->show_existing_frame) {
@@ -386,16 +387,13 @@ gst_vp9_decoder_handle_frame (GstVideoDecoder * decoder,
picture->pts = GST_BUFFER_PTS (in_buf);
picture->size = 0;
if (i == frame_idx_to_consume) {
gst_video_codec_frame_set_user_data (frame,
gst_vp9_picture_ref (picture),
(GDestroyNotify) gst_vp9_picture_unref);
}
if (i == frame_idx_to_consume)
cur_frame = gst_video_codec_frame_ref (frame);
g_assert (klass->output_picture);
ret = klass->output_picture (self, picture);
gst_vp9_picture_unref (picture);
/* transfer ownership of picture */
ret = klass->output_picture (self, cur_frame, picture);
picture = NULL;
} else {
picture = gst_vp9_picture_new ();
@@ -409,17 +407,11 @@ gst_vp9_decoder_handle_frame (GstVideoDecoder * decoder,
picture->subsampling_y = priv->parser->subsampling_y;
picture->bit_depth = priv->parser->bit_depth;
if (i == frame_idx_to_consume) {
/* This allows accessing the frame from the picture. */
picture->system_frame_number = frame->system_frame_number;
gst_video_codec_frame_set_user_data (frame,
gst_vp9_picture_ref (picture),
(GDestroyNotify) gst_vp9_picture_unref);
}
if (i == frame_idx_to_consume)
cur_frame = gst_video_codec_frame_ref (frame);
if (klass->new_picture) {
if (!klass->new_picture (self, picture)) {
if (!klass->new_picture (self, cur_frame, picture)) {
GST_ERROR_OBJECT (self, "new picture error");
goto unmap_and_error;
}
@@ -446,11 +438,16 @@ gst_vp9_decoder_handle_frame (GstVideoDecoder * decoder,
}
}
/* Just pass our picture to dpb object.
* Even if this picture does not need to be added to dpb
* (i.e., not a reference frame), gst_vp9_dpb_add() will take care of
* the case as well */
gst_vp9_dpb_add (priv->dpb, gst_vp9_picture_ref (picture));
g_assert (klass->output_picture);
ret = klass->output_picture (self, picture);
/* transfer ownership of picture */
gst_vp9_dpb_add (priv->dpb, picture);
ret = klass->output_picture (self, cur_frame, picture);
picture = NULL;
}


@@ -87,7 +87,17 @@ struct _GstVp9DecoderClass
gboolean (*new_sequence) (GstVp9Decoder * decoder,
const GstVp9FrameHdr * frame_hdr);
/**
* GstVp9DecoderClass::new_picture:
* @decoder: a #GstVp9Decoder
* @frame: (nullable) (transfer none): a #GstVideoCodecFrame
* @picture: (transfer none): a #GstVp9Picture
*
* FIXME 1.20: once the vp9parse element can split superframes,
* we can ensure a non-NULL @frame
*/
gboolean (*new_picture) (GstVp9Decoder * decoder,
GstVideoCodecFrame * frame,
GstVp9Picture * picture);
GstVp9Picture * (*duplicate_picture) (GstVp9Decoder * decoder,
@@ -103,7 +113,17 @@ struct _GstVp9DecoderClass
gboolean (*end_picture) (GstVp9Decoder * decoder,
GstVp9Picture * picture);
/**
* GstVp9DecoderClass::output_picture:
* @decoder: a #GstVp9Decoder
* @frame: (nullable) (transfer full): a #GstVideoCodecFrame
* @picture: (transfer full): a #GstVp9Picture
*
* FIXME 1.20: once the vp9parse element can split superframes,
* we can ensure a non-NULL @frame
*/
GstFlowReturn (*output_picture) (GstVp9Decoder * decoder,
GstVideoCodecFrame * frame,
GstVp9Picture * picture);
/*< private >*/
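To make the new contract concrete, a minimal sketch of a subclass implementation follows. The GstMyVp9Dec naming is hypothetical (not part of this commit); the points it illustrates are that @frame may be NULL for unsplit superframes and that output_picture() now receives ownership of both @frame and @picture.

static GstFlowReturn
gst_my_vp9_dec_output_picture (GstVp9Decoder * decoder,
    GstVideoCodecFrame * frame, GstVp9Picture * picture)
{
  GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder);

  if (!frame) {
    /* Possible while vp9parse cannot yet split superframes (see FIXME);
     * we own the picture reference, so drop it and carry on */
    gst_vp9_picture_unref (picture);
    return GST_FLOW_OK;
  }

  /* ... fill frame->output_buffer from the decoded picture here ... */

  gst_vp9_picture_unref (picture);
  /* gst_video_decoder_finish_frame() consumes the codec frame reference */
  return gst_video_decoder_finish_frame (vdec, frame);
}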


@@ -135,11 +135,11 @@ static gboolean gst_d3d11_vp9_dec_src_query (GstVideoDecoder * decoder,
static gboolean gst_d3d11_vp9_dec_new_sequence (GstVp9Decoder * decoder,
const GstVp9FrameHdr * frame_hdr);
static gboolean gst_d3d11_vp9_dec_new_picture (GstVp9Decoder * decoder,
GstVp9Picture * picture);
GstVideoCodecFrame * frame, GstVp9Picture * picture);
static GstVp9Picture *gst_d3d11_vp9_dec_duplicate_picture (GstVp9Decoder *
decoder, GstVp9Picture * picture);
static GstFlowReturn gst_d3d11_vp9_dec_output_picture (GstVp9Decoder *
decoder, GstVp9Picture * picture);
decoder, GstVideoCodecFrame * frame, GstVp9Picture * picture);
static gboolean gst_d3d11_vp9_dec_start_picture (GstVp9Decoder * decoder,
GstVp9Picture * picture);
static gboolean gst_d3d11_vp9_dec_decode_picture (GstVp9Decoder * decoder,
@@ -407,7 +407,8 @@ gst_d3d11_vp9_dec_new_sequence (GstVp9Decoder * decoder,
}
static gboolean
gst_d3d11_vp9_dec_new_picture (GstVp9Decoder * decoder, GstVp9Picture * picture)
gst_d3d11_vp9_dec_new_picture (GstVp9Decoder * decoder,
GstVideoCodecFrame * frame, GstVp9Picture * picture)
{
GstD3D11Vp9Dec *self = GST_D3D11_VP9_DEC (decoder);
GstBuffer *view_buffer;
@@ -464,10 +465,10 @@ gst_d3d11_vp9_dec_duplicate_picture (GstVp9Decoder * decoder,
static GstFlowReturn
gst_d3d11_vp9_dec_output_picture (GstVp9Decoder * decoder,
GstVp9Picture * picture)
GstVideoCodecFrame * frame, GstVp9Picture * picture)
{
GstD3D11Vp9Dec *self = GST_D3D11_VP9_DEC (decoder);
GstVideoCodecFrame *frame = NULL;
GstVideoDecoder *vdec = GST_VIDEO_DECODER (decoder);
GstBuffer *output_buffer = NULL;
GstFlowReturn ret;
GstBuffer *view_buffer;
@@ -478,21 +479,20 @@ gst_d3d11_vp9_dec_output_picture (GstVp9Decoder * decoder,
if (!view_buffer) {
GST_ERROR_OBJECT (self, "Could not get output view");
return GST_FLOW_ERROR;
goto error;
}
frame = gst_video_decoder_get_frame (GST_VIDEO_DECODER (self),
picture->system_frame_number);
if (!picture->frame_hdr.show_frame) {
GST_LOG_OBJECT (self, "Decode only picture %p", picture);
if (frame) {
GST_VIDEO_CODEC_FRAME_SET_DECODE_ONLY (frame);
gst_vp9_picture_unref (picture);
return gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
return gst_video_decoder_finish_frame (vdec, frame);
} else {
GST_WARNING_OBJECT (self,
"Failed to find codec frame for picture %p", picture);
/* expected case if we are decoding super frame */
gst_vp9_picture_unref (picture);
return GST_FLOW_OK;
}
}
@@ -514,18 +514,19 @@ gst_d3d11_vp9_dec_output_picture (GstVp9Decoder * decoder,
mem = gst_buffer_peek_memory (output_buffer, 0);
GST_MINI_OBJECT_FLAG_SET (mem, GST_D3D11_MEMORY_TRANSFER_NEED_DOWNLOAD);
} else {
output_buffer =
gst_video_decoder_allocate_output_buffer (GST_VIDEO_DECODER (self));
output_buffer = gst_video_decoder_allocate_output_buffer (vdec);
}
if (!output_buffer) {
GST_ERROR_OBJECT (self, "Couldn't allocate output buffer");
return GST_FLOW_ERROR;
goto error;
}
if (!frame) {
GST_WARNING_OBJECT (self,
"Failed to find codec frame for picture %p", picture);
/* this is the case where a super frame has multiple displayable
* (non-decode-only) subframes. Should be a rare case, but it's possible
* in theory */
GST_WARNING_OBJECT (self, "No codec frame for picture %p", picture);
GST_BUFFER_PTS (output_buffer) = picture->pts;
GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
@@ -543,24 +544,33 @@ gst_d3d11_vp9_dec_output_picture (GstVp9Decoder * decoder,
picture->frame_hdr.width, picture->frame_hdr.height,
view_buffer, output_buffer)) {
GST_ERROR_OBJECT (self, "Failed to copy buffer");
if (frame)
gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
else
gst_buffer_unref (output_buffer);
return GST_FLOW_ERROR;
goto error;
}
GST_LOG_OBJECT (self, "Finish frame %" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_PTS (output_buffer)));
gst_vp9_picture_unref (picture);
if (frame) {
ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
ret = gst_video_decoder_finish_frame (vdec, frame);
} else {
ret = gst_pad_push (GST_VIDEO_DECODER_SRC_PAD (self), output_buffer);
}
return ret;
error:
if (frame) {
/* normal case */
gst_video_decoder_drop_frame (vdec, frame);
} else if (output_buffer) {
/* in case of super frame with multiple displayable subframes */
gst_buffer_unref (output_buffer);
}
gst_vp9_picture_unref (picture);
return GST_FLOW_ERROR;
}
static GstD3D11DecoderOutputView *