Merge branch 'set_preferred-output-delay' into 'main'

va: encoder: Add preferred-output-delay property to increase performance

See merge request gstreamer/gstreamer!4359
He Junyan 2024-05-16 13:58:44 +00:00
commit 752925f316
9 changed files with 344 additions and 125 deletions
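As context for the latency handling this merge request adds to each encoder's reconfig function, here is a minimal, self-contained sketch (not part of the patch; the function name and parameters are illustrative) of how the reported latency is derived from the preferred output delay plus the reordering window, expressed in frames and scaled by the negotiated frame rate:

#include <gst/gst.h>

/* Illustrative only: the number of frames the encoder may hold back is
 * the preferred output delay plus the GOP period (ip_period or
 * gf_group_size) minus one; convert that frame count to time at
 * fps_n/fps_d. */
static GstClockTime
example_encoder_latency (guint preferred_output_delay, guint gop_period,
    gint fps_n, gint fps_d)
{
  guint latency_num = preferred_output_delay + gop_period - 1;

  return gst_util_uint64_scale (latency_num, fps_d * GST_SECOND, fps_n);
}

With the non-live default delay of 4 frames, an ip_period of 1 and 30 fps input, this reports a latency of 4/30 s, roughly 133 ms.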


@ -368,6 +368,13 @@ va_ensure_image (GstVaDisplay * display, VASurfaceID surface,
gboolean
va_check_surface (GstVaDisplay * display, VASurfaceID surface)
{
return va_check_surface_has_status (display, surface, 0);
}
gboolean
va_check_surface_has_status (GstVaDisplay * display, VASurfaceID surface,
VASurfaceStatus surface_status)
{
VADisplay dpy = gst_va_display_get_va_dpy (display);
VAStatus status;
@ -375,12 +382,18 @@ va_check_surface (GstVaDisplay * display, VASurfaceID surface)
status = vaQuerySurfaceStatus (dpy, surface, &state);
if (status != VA_STATUS_SUCCESS)
if (status != VA_STATUS_SUCCESS) {
GST_ERROR ("vaQuerySurfaceStatus: %s", vaErrorStr (status));
return FALSE;
}
GST_LOG ("surface %#x status %d", surface, state);
return (status == VA_STATUS_SUCCESS);
/* No status flag to compare; a successful query is enough. */
if (!surface_status)
return TRUE;
return ((state & surface_status) == surface_status);
}
gboolean

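A hedged usage sketch for the new helper (the wrapper below is illustrative): passing VASurfaceReady asks, without blocking, whether the driver has finished with the surface, while passing 0 keeps the old va_check_surface() behaviour of only checking that vaQuerySurfaceStatus() succeeds.

/* Illustrative only: non-blocking check that encoding to this surface
 * has completed; returns FALSE if the status query fails. */
static gboolean
example_surface_ready (GstVaDisplay * display, VASurfaceID surface)
{
  return va_check_surface_has_status (display, surface, VASurfaceReady);
}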

@ -51,6 +51,10 @@ gboolean va_sync_surface (GstVaDisplay * display,
GST_VA_API
gboolean va_check_surface (GstVaDisplay * display,
VASurfaceID surface);
GST_VA_API
gboolean va_check_surface_has_status (GstVaDisplay * display,
VASurfaceID surface,
VASurfaceStatus surface_status);
gboolean va_copy_surface (GstVaDisplay * display,
VASurfaceID dst,


@ -161,7 +161,7 @@ struct _GstVaAV1Ref
struct _GstVaAV1EncFrame
{
GstVaEncodePicture *picture;
GstVaEncFrame base;
GstAV1FrameType type;
guint8 temporal_id;
guint8 spatial_id;
@ -507,7 +507,7 @@ gst_va_av1_enc_frame_new (void)
frame->type = FRAME_TYPE_INVALID;
frame->temporal_id = 0;
frame->spatial_id = 0;
frame->picture = NULL;
frame->base.picture = NULL;
frame->pyramid_level = 0;
frame->flags = 0;
frame->bidir_ref = FALSE;
@ -524,7 +524,7 @@ gst_va_av1_enc_frame_free (gpointer pframe)
{
GstVaAV1EncFrame *frame = pframe;
g_clear_pointer (&frame->picture, gst_va_encode_picture_free);
g_clear_pointer (&frame->base.picture, gst_va_encode_picture_free);
g_free (frame);
}
@ -534,7 +534,7 @@ gst_va_av1_enc_new_frame (GstVaBaseEnc * base, GstVideoCodecFrame * frame)
GstVaAV1EncFrame *frame_in;
frame_in = gst_va_av1_enc_frame_new ();
gst_video_codec_frame_set_user_data (frame, frame_in,
gst_va_set_enc_frame (frame, (GstVaEncFrame *) frame_in,
gst_va_av1_enc_frame_free);
return TRUE;
@ -2761,13 +2761,15 @@ gst_va_av1_enc_reconfig (GstVaBaseEnc * base)
VAProfile profile;
gboolean do_renegotiation = TRUE, do_reopen, need_negotiation;
guint max_ref_frames, max_surfaces = 0,
rt_format, depth = 0, chrome = 0, codedbuf_size;
rt_format, depth = 0, chrome = 0, codedbuf_size, latency_num;
gint width, height;
GstClockTime latency;
width = GST_VIDEO_INFO_WIDTH (&base->in_info);
height = GST_VIDEO_INFO_HEIGHT (&base->in_info);
format = GST_VIDEO_INFO_FORMAT (&base->in_info);
codedbuf_size = base->codedbuf_size;
latency_num = base->preferred_output_delay + self->gop.gf_group_size - 1;
need_negotiation =
!gst_va_encoder_get_reconstruct_pool_config (base->encoder, &reconf_caps,
@ -2800,6 +2802,13 @@ gst_va_av1_enc_reconfig (GstVaBaseEnc * base)
gst_va_base_enc_reset_state (base);
if (base->is_live) {
base->preferred_output_delay = 0;
} else {
/* FIXME: An empirical value that works well on most platforms. */
base->preferred_output_delay = 4;
}
base->profile = profile;
base->rt_format = rt_format;
self->depth = depth;
@ -2843,7 +2852,22 @@ gst_va_av1_enc_reconfig (GstVaBaseEnc * base)
_av1_calculate_coded_size (self);
max_ref_frames = GST_AV1_NUM_REF_FRAMES + 3 /* scratch frames */ ;
/* Let the downstream know the new latency. */
if (latency_num != base->preferred_output_delay + self->gop.gf_group_size - 1) {
need_negotiation = TRUE;
latency_num = base->preferred_output_delay + self->gop.gf_group_size - 1;
}
/* Set the latency */
latency = gst_util_uint64_scale (latency_num,
GST_VIDEO_INFO_FPS_D (&base->input_state->info) * GST_SECOND,
GST_VIDEO_INFO_FPS_N (&base->input_state->info));
gst_video_encoder_set_latency (venc, latency, latency);
max_ref_frames = GST_AV1_NUM_REF_FRAMES;
max_ref_frames += base->preferred_output_delay;
base->min_buffers = max_ref_frames;
max_ref_frames += 3 /* scratch frames */ ;
/* second check after calculations */
do_reopen |=
@ -3052,7 +3076,7 @@ _av1_add_sequence_header (GstVaAV1Enc * self, GstVaAV1EncFrame * frame,
*size_offset += size;
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderAV1_SPS, packed_sps, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add packed sequence header.");
return FALSE;
@ -3234,8 +3258,8 @@ _av1_fill_frame_param (GstVaAV1Enc * self, GstVaAV1EncFrame * va_frame,
.frame_width_minus_1 = base->width - 1,
.frame_height_minus_1 = base->height - 1,
.reconstructed_frame =
gst_va_encode_picture_get_reconstruct_surface (va_frame->picture),
.coded_buf = va_frame->picture->coded_buffer,
gst_va_encode_picture_get_reconstruct_surface (va_frame->base.picture),
.coded_buf = va_frame->base.picture->coded_buffer,
.primary_ref_frame = primary_ref_frame,
.order_hint = va_frame->order_hint,
.refresh_frame_flags = refresh_frame_flags,
@ -3357,7 +3381,7 @@ _av1_fill_frame_param (GstVaAV1Enc * self, GstVaAV1EncFrame * va_frame,
pic_param->reference_frames[i] =
gst_va_encode_picture_get_reconstruct_surface
(_enc_frame (self->gop.ref_list[i])->picture);
(_enc_frame (self->gop.ref_list[i])->base.picture);
}
for (i = 0; i < 7; i++) {
@ -3663,7 +3687,7 @@ _av1_add_tile_group_param (GstVaAV1Enc * self, GstVaAV1EncFrame * va_frame,
tile_group_param.tg_end = (index + 1) * div - 1;
}
if (!gst_va_encoder_add_param (base->encoder, va_frame->picture,
if (!gst_va_encoder_add_param (base->encoder, va_frame->base.picture,
VAEncSliceParameterBufferType, &tile_group_param,
sizeof (VAEncTileGroupBufferAV1))) {
GST_ERROR_OBJECT (self, "Failed to add one tile group parameter");
@ -3738,14 +3762,14 @@ _av1_encode_one_frame (GstVaAV1Enc * self, GstVaAV1EncFrame * va_frame,
}
}
if (!gst_va_encoder_add_param (base->encoder, va_frame->picture,
if (!gst_va_encoder_add_param (base->encoder, va_frame->base.picture,
VAEncPictureParameterBufferType, &pic_param, sizeof (pic_param))) {
GST_ERROR_OBJECT (self, "Failed to create the frame parameter");
return FALSE;
}
if ((self->packed_headers & VA_ENC_PACKED_HEADER_PICTURE) &&
!gst_va_encoder_add_packed_header (base->encoder, va_frame->picture,
!gst_va_encoder_add_packed_header (base->encoder, va_frame->base.picture,
VAEncPackedHeaderAV1_PPS, packed_frame_hdr, frame_hdr_size * 8,
FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the packed frame header");
@ -3759,7 +3783,7 @@ _av1_encode_one_frame (GstVaAV1Enc * self, GstVaAV1EncFrame * va_frame,
}
}
if (!gst_va_encoder_encode (base->encoder, va_frame->picture)) {
if (!gst_va_encoder_encode (base->encoder, va_frame->base.picture)) {
GST_ERROR_OBJECT (self, "Encode frame error");
return FALSE;
}
@ -3840,8 +3864,8 @@ gst_va_av1_enc_encode_frame (GstVaBaseEnc * base,
} else {
guint size_offset = 0;
g_assert (va_frame->picture == NULL);
va_frame->picture = gst_va_encode_picture_new (base->encoder,
g_assert (va_frame->base.picture == NULL);
va_frame->base.picture = gst_va_encode_picture_new (base->encoder,
gst_frame->input_buffer);
_av1_find_ref_to_update (base, gst_frame);
@ -3860,25 +3884,27 @@ gst_va_av1_enc_encode_frame (GstVaBaseEnc * base,
/* Repeat the sequence for each key. */
if (va_frame->frame_num == 0) {
if (!gst_va_base_enc_add_rate_control_parameter (base, va_frame->picture,
if (!gst_va_base_enc_add_rate_control_parameter (base,
va_frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.max_bitrate_bits,
self->rc.target_percentage, self->rc.base_qindex,
self->rc.min_qindex, self->rc.max_qindex, self->rc.mbbrc))
return FALSE;
if (!gst_va_base_enc_add_quality_level_parameter (base, va_frame->picture,
self->rc.target_usage))
if (!gst_va_base_enc_add_quality_level_parameter (base,
va_frame->base.picture, self->rc.target_usage))
return FALSE;
if (!gst_va_base_enc_add_frame_rate_parameter (base, va_frame->picture))
if (!gst_va_base_enc_add_frame_rate_parameter (base,
va_frame->base.picture))
return FALSE;
if (!gst_va_base_enc_add_hrd_parameter (base, va_frame->picture,
if (!gst_va_base_enc_add_hrd_parameter (base, va_frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.cpb_length_bits))
return FALSE;
_av1_fill_sequence_param (self, &seq_param);
if (!_av1_add_sequence_param (self, va_frame->picture, &seq_param))
if (!_av1_add_sequence_param (self, va_frame->base.picture, &seq_param))
return FALSE;
_av1_fill_sequence_header (self, &seq_param);
@ -3936,7 +3962,7 @@ _av1_create_tu_output_buffer (GstVaAV1Enc * self,
}
frame_size = gst_va_base_enc_copy_output_data (base,
frame_enc->picture, data + offset, total_sz - offset);
frame_enc->base.picture, data + offset, total_sz - offset);
if (frame_size <= 0) {
GST_ERROR_OBJECT (self, "Fails to copy the output data of "
"system_frame_number %d, frame_num: %d",
@ -3956,7 +3982,7 @@ _av1_create_tu_output_buffer (GstVaAV1Enc * self,
}
frame_size = gst_va_base_enc_copy_output_data (base,
frame_enc->picture, data + offset, total_sz - offset);
frame_enc->base.picture, data + offset, total_sz - offset);
if (frame_size <= 0) {
GST_ERROR_OBJECT (self, "Fails to copy the output data of "
"system_frame_number %d, frame_num: %d",
@ -4050,7 +4076,7 @@ gst_va_av1_enc_prepare_output (GstVaBaseEnc * base,
if (self->frames_in_tu_num > 0) {
buf = _av1_create_tu_output_buffer (self, frame);
} else {
buf = gst_va_base_enc_create_output_buffer (base, frame_enc->picture,
buf = gst_va_base_enc_create_output_buffer (base, frame_enc->base.picture,
(frame_enc->cached_frame_header_size > 0 ?
frame_enc->cached_frame_header : NULL),
frame_enc->cached_frame_header_size);


@ -32,6 +32,8 @@
#define GST_CAT_DEFAULT gst_va_base_enc_debug
GST_DEBUG_CATEGORY_STATIC (GST_CAT_DEFAULT);
#define GST_FLOW_OUTPUT_NOT_READY GST_FLOW_CUSTOM_SUCCESS_2
struct _GstVaBaseEncPrivate
{
GstVideoInfo sinkpad_info;
@ -73,6 +75,8 @@ gst_va_base_enc_reset_state_default (GstVaBaseEnc * base)
base->profile = VAProfileNone;
base->rt_format = 0;
base->codedbuf_size = 0;
base->preferred_output_delay = 0;
base->min_buffers = 1;
g_atomic_int_set (&base->reconf, FALSE);
}
@ -414,7 +418,7 @@ gst_va_base_enc_propose_allocation (GstVideoEncoder * venc, GstQuery * query)
if (!(allocator = _allocator_from_caps (base, caps)))
return FALSE;
pool = gst_va_pool_new_with_config (caps, 1, 0, usage_hint,
pool = gst_va_pool_new_with_config (caps, base->min_buffers, 0, usage_hint,
GST_VA_FEATURE_AUTO, allocator, &params);
if (!pool) {
gst_object_unref (allocator);
@ -425,7 +429,7 @@ gst_va_base_enc_propose_allocation (GstVideoEncoder * venc, GstQuery * query)
goto config_failed;
gst_query_add_allocation_param (query, allocator, &params);
gst_query_add_allocation_pool (query, pool, size, 1, 0);
gst_query_add_allocation_pool (query, pool, size, base->min_buffers, 0);
GST_DEBUG_OBJECT (base,
"proposing %" GST_PTR_FORMAT " with allocator %" GST_PTR_FORMAT,
@ -505,6 +509,33 @@ _push_out_one_buffer (GstVaBaseEnc * base)
return ret;
}
static GstFlowReturn
_try_to_push_out_one_buffer (GstVaBaseEnc * base)
{
GstVideoCodecFrame *frame_out;
GstVaEncFrame *frame_enc;
VASurfaceID surface;
gboolean ready;
frame_out = g_queue_peek_head (&base->output_list);
if (frame_out == NULL)
return GST_FLOW_OUTPUT_NOT_READY;
frame_enc = gst_va_get_enc_frame (frame_out);
surface = gst_va_encode_picture_get_reconstruct_surface (frame_enc->picture);
ready = va_check_surface_has_status (base->display, surface, VASurfaceReady);
GST_LOG_OBJECT (base, "Output of system_frame_number %d is %s",
frame_out->system_frame_number, ready ? "ready" : "not ready");
if (!ready)
return GST_FLOW_OUTPUT_NOT_READY;
return _push_out_one_buffer (base);
}
static GstFlowReturn
gst_va_base_enc_drain (GstVideoEncoder * venc)
{
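The handle_frame() changes further down use this helper in a small drain loop; the distilled pattern looks like the sketch below (the function name is illustrative). The custom GST_FLOW_OUTPUT_NOT_READY return is translated back to GST_FLOW_OK when the oldest pending surface is still busy, so the encoder simply tries again on the next frame instead of blocking.

/* Illustrative only: pop finished frames in order until the oldest
 * queued output is still being encoded. */
static GstFlowReturn
example_push_ready_outputs (GstVaBaseEnc * base)
{
  GstFlowReturn ret;

  do {
    ret = _try_to_push_out_one_buffer (base);
  } while (ret == GST_FLOW_OK);

  if (ret == GST_FLOW_OUTPUT_NOT_READY)
    ret = GST_FLOW_OK;

  return ret;
}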
@ -655,17 +686,41 @@ gst_va_base_enc_handle_frame (GstVideoEncoder * venc,
/* pass it to reorder list and we should not use it again. */
frame = NULL;
while (frame_encode) {
ret = base_class->encode_frame (base, frame_encode, FALSE);
if (frame_encode) {
while (frame_encode) {
ret = base_class->encode_frame (base, frame_encode, FALSE);
if (ret != GST_FLOW_OK)
goto error_encode;
while (ret == GST_FLOW_OK && g_queue_get_length (&base->output_list) >
base->preferred_output_delay)
ret = _push_out_one_buffer (base);
if (ret != GST_FLOW_OK)
goto error_push_buffer;
/* Try to push out all ready frames. */
do {
ret = _try_to_push_out_one_buffer (base);
} while (ret == GST_FLOW_OK);
if (ret == GST_FLOW_OUTPUT_NOT_READY)
ret = GST_FLOW_OK;
if (ret != GST_FLOW_OK)
goto error_push_buffer;
frame_encode = NULL;
if (!base_class->reorder_frame (base, NULL, FALSE, &frame_encode))
goto error_reorder;
}
} else {
/* Try to push out all ready frames. */
do {
ret = _try_to_push_out_one_buffer (base);
} while (ret == GST_FLOW_OK);
if (ret == GST_FLOW_OUTPUT_NOT_READY)
ret = GST_FLOW_OK;
if (ret != GST_FLOW_OK)
goto error_encode;
while (g_queue_get_length (&base->output_list) > 0)
ret = _push_out_one_buffer (base);
frame_encode = NULL;
if (!base_class->reorder_frame (base, NULL, FALSE, &frame_encode))
goto error_reorder;
goto error_push_buffer;
}
return ret;
@ -706,6 +761,12 @@ error_encode:
gst_video_encoder_finish_frame (venc, frame_encode);
return ret;
}
error_push_buffer:
{
GST_ELEMENT_ERROR (venc, STREAM, ENCODE,
("Failed to push one frame."), (NULL));
return ret;
}
}
static GstFlowReturn
@ -718,6 +779,7 @@ static gboolean
gst_va_base_enc_set_format (GstVideoEncoder * venc, GstVideoCodecState * state)
{
GstVaBaseEnc *base = GST_VA_BASE_ENC (venc);
GstQuery *query;
g_return_val_if_fail (state->caps != NULL, FALSE);
@ -738,6 +800,13 @@ gst_va_base_enc_set_format (GstVideoEncoder * venc, GstVideoCodecState * state)
gst_video_codec_state_unref (base->input_state);
base->input_state = gst_video_codec_state_ref (state);
/* In case of live streaming, we should run in low-latency mode. */
base->is_live = FALSE;
query = gst_query_new_latency ();
if (gst_pad_peer_query (GST_VIDEO_ENCODER_SINK_PAD (venc), query))
gst_query_parse_latency (query, &base->is_live, NULL, NULL);
gst_query_unref (query);
if (!gst_va_base_enc_reset (base))
return FALSE;
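Condensed into a standalone helper (the name is illustrative), the liveness detection added above amounts to a peer latency query on the sink pad; when upstream reports live, the reconfig functions force preferred_output_delay to 0 so no extra output delay is introduced.

static gboolean
example_upstream_is_live (GstVideoEncoder * venc)
{
  GstQuery *query = gst_query_new_latency ();
  gboolean live = FALSE;

  if (gst_pad_peer_query (GST_VIDEO_ENCODER_SINK_PAD (venc), query))
    gst_query_parse_latency (query, &live, NULL, NULL);
  gst_query_unref (query);

  return live;
}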
@ -890,6 +959,7 @@ gst_va_base_enc_init (GstVaBaseEnc * self)
g_queue_init (&self->ref_list);
g_queue_init (&self->output_list);
gst_video_info_init (&self->in_info);
self->min_buffers = 1;
self->dts_queue = gst_vec_deque_new_for_struct (sizeof (GstClockTime), 8);


@ -35,10 +35,16 @@ G_BEGIN_DECLS
#define GST_VA_BASE_ENC_ENTRYPOINT(obj) (GST_VA_BASE_ENC_GET_CLASS(obj)->entrypoint)
typedef struct _GstVaEncFrame GstVaEncFrame;
typedef struct _GstVaBaseEnc GstVaBaseEnc;
typedef struct _GstVaBaseEncClass GstVaBaseEncClass;
typedef struct _GstVaBaseEncPrivate GstVaBaseEncPrivate;
struct _GstVaEncFrame
{
GstVaEncodePicture *picture;
};
struct _GstVaBaseEnc
{
GstVideoEncoder parent_instance;
@ -48,11 +54,15 @@ struct _GstVaBaseEnc
gboolean reconf;
gboolean is_live;
VAProfile profile;
gint width;
gint height;
guint rt_format;
guint codedbuf_size;
/* The min buffer number required for reorder and output delay. */
guint min_buffers;
GstClockTime start_pts;
GstClockTime frame_duration;
@ -61,6 +71,7 @@ struct _GstVaBaseEnc
GQueue ref_list;
GQueue output_list;
GstVecDeque *dts_queue;
guint preferred_output_delay;
GstVideoCodecState *input_state;
union {
@ -156,6 +167,22 @@ void gst_va_base_enc_update_property_bool (GstVaBaseEnc * base,
gboolean new_val,
GParamSpec * pspec);
static inline gpointer
gst_va_get_enc_frame (GstVideoCodecFrame * frame)
{
GstVaEncFrame *enc_frame = gst_video_codec_frame_get_user_data (frame);
g_assert (enc_frame);
return enc_frame;
}
static inline void
gst_va_set_enc_frame (GstVideoCodecFrame * frame,
GstVaEncFrame * frame_in, GDestroyNotify notify)
{
gst_video_codec_frame_set_user_data (frame, frame_in, notify);
}
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstVaBaseEnc, gst_object_unref)
G_END_DECLS
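Each codec encoder below embeds the new GstVaEncFrame as the first member of its own frame structure, so the (GstVaEncFrame *) cast passed to gst_va_set_enc_frame() stays valid and the base class can reach the GstVaEncodePicture through gst_va_get_enc_frame(). A minimal sketch under assumed names (MyCodecEncFrame and its helpers are hypothetical; assumes gstvabaseenc.h and gstvaencoder.h are included):

typedef struct
{
  GstVaEncFrame base;           /* must stay the first member */
  guint pyramid_level;          /* codec-specific state */
} MyCodecEncFrame;

static void
my_codec_enc_frame_free (gpointer pframe)
{
  MyCodecEncFrame *frame = pframe;

  g_clear_pointer (&frame->base.picture, gst_va_encode_picture_free);
  g_free (frame);
}

static gboolean
my_codec_new_frame (GstVaBaseEnc * base, GstVideoCodecFrame * frame)
{
  MyCodecEncFrame *frame_in = g_new0 (MyCodecEncFrame, 1);

  gst_va_set_enc_frame (frame, (GstVaEncFrame *) frame_in,
      my_codec_enc_frame_free);
  return TRUE;
}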


@ -262,7 +262,7 @@ struct _GstVaH264Enc
struct _GstVaH264EncFrame
{
GstVaEncodePicture *picture;
GstVaEncFrame base;
GstH264SliceType type;
gboolean is_ref;
guint pyramid_level;
@ -378,7 +378,7 @@ gst_va_enc_frame_new (void)
frame = g_new (GstVaH264EncFrame, 1);
frame->frame_num = 0;
frame->unused_for_reference_pic_num = -1;
frame->picture = NULL;
frame->base.picture = NULL;
frame->last_frame = FALSE;
return frame;
@ -388,16 +388,14 @@ static void
gst_va_enc_frame_free (gpointer pframe)
{
GstVaH264EncFrame *frame = pframe;
g_clear_pointer (&frame->picture, gst_va_encode_picture_free);
g_clear_pointer (&frame->base.picture, gst_va_encode_picture_free);
g_free (frame);
}
static inline GstVaH264EncFrame *
_enc_frame (GstVideoCodecFrame * frame)
{
GstVaH264EncFrame *enc_frame = gst_video_codec_frame_get_user_data (frame);
g_assert (enc_frame);
return enc_frame;
return gst_va_get_enc_frame (frame);
}
/* Normalizes bitrate (and CPB size) for HRD conformance */
@ -1555,13 +1553,16 @@ gst_va_h264_enc_reconfig (GstVaBaseEnc * base)
GstVideoFormat format, reconf_format = GST_VIDEO_FORMAT_UNKNOWN;
VAProfile profile = VAProfileNone;
gboolean do_renegotiation = TRUE, do_reopen, need_negotiation;
guint max_ref_frames, max_surfaces = 0, rt_format = 0, codedbuf_size;
guint max_ref_frames, max_surfaces = 0, rt_format = 0,
codedbuf_size, latency_num;
gint width, height;
GstClockTime latency;
width = GST_VIDEO_INFO_WIDTH (&base->in_info);
height = GST_VIDEO_INFO_HEIGHT (&base->in_info);
format = GST_VIDEO_INFO_FORMAT (&base->in_info);
codedbuf_size = base->codedbuf_size;
latency_num = base->preferred_output_delay + self->gop.ip_period - 1;
need_negotiation =
!gst_va_encoder_get_reconstruct_pool_config (base->encoder, &reconf_caps,
@ -1586,6 +1587,13 @@ gst_va_h264_enc_reconfig (GstVaBaseEnc * base)
gst_va_base_enc_reset_state (base);
if (base->is_live) {
base->preferred_output_delay = 0;
} else {
/* FIXME: An empirical value that works well on most platforms. */
base->preferred_output_delay = 4;
}
base->profile = profile;
base->rt_format = rt_format;
base->width = width;
@ -1638,7 +1646,22 @@ gst_va_h264_enc_reconfig (GstVaBaseEnc * base)
self->cc = self->cc && self->packed_headers & VA_ENC_PACKED_HEADER_RAW_DATA;
update_property_bool (base, &self->prop.cc, self->cc, PROP_CC);
max_ref_frames = self->gop.num_ref_frames + 3 /* scratch frames */ ;
/* Let the downstream know the new latency. */
if (latency_num != base->preferred_output_delay + self->gop.ip_period - 1) {
need_negotiation = TRUE;
latency_num = base->preferred_output_delay + self->gop.ip_period - 1;
}
/* Set the latency */
latency = gst_util_uint64_scale (latency_num,
GST_VIDEO_INFO_FPS_D (&base->input_state->info) * GST_SECOND,
GST_VIDEO_INFO_FPS_N (&base->input_state->info));
gst_video_encoder_set_latency (venc, latency, latency);
max_ref_frames = self->gop.num_ref_frames;
max_ref_frames += base->preferred_output_delay;
base->min_buffers = max_ref_frames;
max_ref_frames += 3 /* scratch frames */ ;
/* second check after calculations */
do_reopen |=
@ -2177,7 +2200,7 @@ _add_sequence_header (GstVaH264Enc * self, GstVaH264EncFrame * frame)
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderSequence, packed_sps, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the packed sequence header");
return FALSE;
@ -2304,10 +2327,10 @@ _fill_picture_parameter (GstVaH264Enc * self, GstVaH264EncFrame * frame,
*pic_param = (VAEncPictureParameterBufferH264) {
.CurrPic = {
.picture_id =
gst_va_encode_picture_get_reconstruct_surface (frame->picture),
gst_va_encode_picture_get_reconstruct_surface (frame->base.picture),
.TopFieldOrderCnt = frame->poc,
},
.coded_buf = frame->picture->coded_buffer,
.coded_buf = frame->base.picture->coded_buffer,
/* Only support one sps and pps now. */
.pic_parameter_set_id = 0,
.seq_parameter_set_id = 0,
@ -2356,7 +2379,7 @@ _fill_picture_parameter (GstVaH264Enc * self, GstVaH264EncFrame * frame,
f = _enc_frame (g_queue_peek_nth (&base->ref_list, i));
pic_param->ReferenceFrames[i].picture_id =
gst_va_encode_picture_get_reconstruct_surface (f->picture);
gst_va_encode_picture_get_reconstruct_surface (f->base.picture);
pic_param->ReferenceFrames[i].TopFieldOrderCnt = f->poc;
pic_param->ReferenceFrames[i].flags =
VA_PICTURE_H264_SHORT_TERM_REFERENCE;
@ -2375,7 +2398,7 @@ _add_picture_parameter (GstVaH264Enc * self, GstVaH264EncFrame * frame,
{
GstVaBaseEnc *base = GST_VA_BASE_ENC (self);
if (!gst_va_encoder_add_param (base->encoder, frame->picture,
if (!gst_va_encoder_add_param (base->encoder, frame->base.picture,
VAEncPictureParameterBufferType, pic_param,
sizeof (VAEncPictureParameterBufferH264))) {
GST_ERROR_OBJECT (self, "Failed to create the picture parameter");
@ -2439,7 +2462,7 @@ _add_picture_header (GstVaH264Enc * self, GstVaH264EncFrame * frame,
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderPicture, packed_pps, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the packed picture header");
return FALSE;
@ -2517,7 +2540,8 @@ _add_one_slice (GstVaH264Enc * self, GstVaH264EncFrame * frame,
if (frame->type != GST_H264_I_SLICE) {
for (; i < list0_num; i++) {
slice->RefPicList0[i].picture_id =
gst_va_encode_picture_get_reconstruct_surface (list0[i]->picture);
gst_va_encode_picture_get_reconstruct_surface
(list0[i]->base.picture);
slice->RefPicList0[i].TopFieldOrderCnt = list0[i]->poc;
slice->RefPicList0[i].flags |= VA_PICTURE_H264_SHORT_TERM_REFERENCE;
slice->RefPicList0[i].frame_idx = list0[i]->frame_num;
@ -2532,7 +2556,8 @@ _add_one_slice (GstVaH264Enc * self, GstVaH264EncFrame * frame,
if (frame->type == GST_H264_B_SLICE) {
for (; i < list1_num; i++) {
slice->RefPicList1[i].picture_id =
gst_va_encode_picture_get_reconstruct_surface (list1[i]->picture);
gst_va_encode_picture_get_reconstruct_surface
(list1[i]->base.picture);
slice->RefPicList1[i].TopFieldOrderCnt = list1[i]->poc;
slice->RefPicList1[i].flags |= VA_PICTURE_H264_SHORT_TERM_REFERENCE;
slice->RefPicList1[i].frame_idx = list1[i]->frame_num;
@ -2543,7 +2568,7 @@ _add_one_slice (GstVaH264Enc * self, GstVaH264EncFrame * frame,
slice->RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
}
if (!gst_va_encoder_add_param (base->encoder, frame->picture,
if (!gst_va_encoder_add_param (base->encoder, frame->base.picture,
VAEncSliceParameterBufferType, slice,
sizeof (VAEncSliceParameterBufferH264))) {
GST_ERROR_OBJECT (self, "Failed to create the slice parameter");
@ -2774,7 +2799,7 @@ _add_slice_header (GstVaH264Enc * self, GstVaH264EncFrame * frame,
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderSlice, packed_slice_hdr, size * 8 + trail_bits,
FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the packed slice header");
@ -2814,7 +2839,7 @@ _add_aud (GstVaH264Enc * self, GstVaH264EncFrame * frame)
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderRawData, aud_data, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the AUD");
return FALSE;
@ -2929,7 +2954,7 @@ _add_sei_cc (GstVaH264Enc * self, GstVideoCodecFrame * gst_frame)
goto out;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderRawData, packed_sei, sei_size * 8, FALSE)) {
GST_WARNING_OBJECT (self, "Failed to add SEI CC data");
goto out;
@ -2966,24 +2991,24 @@ _encode_one_frame (GstVaH264Enc * self, GstVideoCodecFrame * gst_frame)
if (frame->poc == 0) {
VAEncSequenceParameterBufferH264 sequence;
if (!gst_va_base_enc_add_rate_control_parameter (base, frame->picture,
if (!gst_va_base_enc_add_rate_control_parameter (base, frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.max_bitrate_bits,
self->rc.target_percentage, self->rc.qp_i, self->rc.min_qp,
self->rc.max_qp, self->rc.mbbrc))
return FALSE;
if (!gst_va_base_enc_add_quality_level_parameter (base, frame->picture,
if (!gst_va_base_enc_add_quality_level_parameter (base, frame->base.picture,
self->rc.target_usage))
return FALSE;
if (!gst_va_base_enc_add_frame_rate_parameter (base, frame->picture))
if (!gst_va_base_enc_add_frame_rate_parameter (base, frame->base.picture))
return FALSE;
if (!gst_va_base_enc_add_hrd_parameter (base, frame->picture,
if (!gst_va_base_enc_add_hrd_parameter (base, frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.cpb_length_bits))
return FALSE;
if (!gst_va_base_enc_add_trellis_parameter (base, frame->picture,
if (!gst_va_base_enc_add_trellis_parameter (base, frame->base.picture,
self->use_trellis))
return FALSE;
@ -2991,7 +3016,7 @@ _encode_one_frame (GstVaH264Enc * self, GstVideoCodecFrame * gst_frame)
if (!_fill_sps (self, &sequence))
return FALSE;
if (!_add_sequence_parameter (self, frame->picture, &sequence))
if (!_add_sequence_parameter (self, frame->base.picture, &sequence))
return FALSE;
if ((self->packed_headers & VA_ENC_PACKED_HEADER_SEQUENCE)
@ -3088,7 +3113,7 @@ _encode_one_frame (GstVaH264Enc * self, GstVideoCodecFrame * gst_frame)
slice_start_mb += slice_mbs;
}
if (!gst_va_encoder_encode (base->encoder, frame->picture)) {
if (!gst_va_encoder_encode (base->encoder, frame->base.picture)) {
GST_ERROR_OBJECT (self, "Encode frame error");
return FALSE;
}
@ -3129,7 +3154,7 @@ gst_va_h264_enc_prepare_output (GstVaBaseEnc * base,
}
buf = gst_va_base_enc_create_output_buffer (base,
frame_enc->picture, NULL, 0);
frame_enc->base.picture, NULL, 0);
if (!buf) {
GST_ERROR_OBJECT (base, "Failed to create output buffer");
return FALSE;
@ -3236,11 +3261,11 @@ gst_va_h264_enc_encode_frame (GstVaBaseEnc * base,
frame = _enc_frame (gst_frame);
frame->last_frame = is_last;
g_assert (frame->picture == NULL);
frame->picture = gst_va_encode_picture_new (base->encoder,
g_assert (frame->base.picture == NULL);
frame->base.picture = gst_va_encode_picture_new (base->encoder,
gst_frame->input_buffer);
if (!frame->picture) {
if (!frame->base.picture) {
GST_ERROR_OBJECT (self, "Failed to create the encode picture");
return GST_FLOW_ERROR;
}
@ -3280,7 +3305,8 @@ gst_va_h264_enc_new_frame (GstVaBaseEnc * base, GstVideoCodecFrame * frame)
GstVaH264EncFrame *frame_in;
frame_in = gst_va_enc_frame_new ();
gst_video_codec_frame_set_user_data (frame, frame_in, gst_va_enc_frame_free);
gst_va_set_enc_frame (frame, (GstVaEncFrame *) frame_in,
gst_va_enc_frame_free);
gst_va_base_enc_push_dts (base, frame, self->gop.num_reorder_frames);


@ -341,7 +341,7 @@ struct _GstVaH265Enc
struct _GstVaH265EncFrame
{
GstVaEncodePicture *picture;
GstVaEncFrame base;
GstH265SliceType type;
gboolean is_ref;
guint pyramid_level;
@ -451,7 +451,7 @@ gst_va_h265_enc_frame_new (void)
frame = g_new (GstVaH265EncFrame, 1);
frame->last_frame = FALSE;
frame->picture = NULL;
frame->base.picture = NULL;
return frame;
}
@ -460,16 +460,14 @@ static void
gst_va_h265_enc_frame_free (gpointer pframe)
{
GstVaH265EncFrame *frame = pframe;
g_clear_pointer (&frame->picture, gst_va_encode_picture_free);
g_clear_pointer (&frame->base.picture, gst_va_encode_picture_free);
g_free (frame);
}
static inline GstVaH265EncFrame *
_enc_frame (GstVideoCodecFrame * frame)
{
GstVaH265EncFrame *enc_frame = gst_video_codec_frame_get_user_data (frame);
g_assert (enc_frame);
return enc_frame;
return gst_va_get_enc_frame (frame);
}
static inline gboolean
@ -1155,7 +1153,7 @@ _h265_add_vps_header (GstVaH265Enc * self, GstVaH265EncFrame * frame)
/* VPS does not have its own packed header define, just reuse
VAEncPackedHeaderSequence */
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderSequence, packed_vps, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add packed VPS header.");
return FALSE;
@ -1182,7 +1180,7 @@ _h265_add_sps_header (GstVaH265Enc * self, GstVaH265EncFrame * frame)
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderSequence, packed_sps, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add packed SPS header.");
return FALSE;
@ -1208,7 +1206,7 @@ _h265_add_pps_header (GstVaH265Enc * self, GstVaH265EncFrame * frame,
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderPicture, packed_pps, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the packed picture header");
return FALSE;
@ -1235,7 +1233,7 @@ _h265_add_slice_header (GstVaH265Enc * self, GstVaH265EncFrame * frame,
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderSlice, packed_slice_hdr, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the packed slice header");
return FALSE;
@ -1274,7 +1272,7 @@ _h265_add_aud (GstVaH265Enc * self, GstVaH265EncFrame * frame)
return FALSE;
}
if (!gst_va_encoder_add_packed_header (base->encoder, frame->picture,
if (!gst_va_encoder_add_packed_header (base->encoder, frame->base.picture,
VAEncPackedHeaderRawData, aud_data, size * 8, FALSE)) {
GST_ERROR_OBJECT (self, "Failed to add the AUD");
return FALSE;
@ -1485,11 +1483,11 @@ _h265_fill_picture_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
*pic_param = (VAEncPictureParameterBufferHEVC) {
.decoded_curr_pic.picture_id =
gst_va_encode_picture_get_reconstruct_surface (frame->picture),
gst_va_encode_picture_get_reconstruct_surface (frame->base.picture),
.decoded_curr_pic.pic_order_cnt = frame->poc,
.decoded_curr_pic.flags = 0,
.coded_buf = frame->picture->coded_buffer,
.coded_buf = frame->base.picture->coded_buffer,
.last_picture = frame->last_frame,
.pic_init_qp = self->rc.qp_i,
.diff_cu_qp_delta_depth = self->features.diff_cu_qp_delta_depth,
@ -1559,7 +1557,7 @@ _h265_fill_picture_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
f = _enc_frame (g_queue_peek_nth (&base->ref_list, i));
pic_param->reference_frames[i].picture_id =
gst_va_encode_picture_get_reconstruct_surface (f->picture);
gst_va_encode_picture_get_reconstruct_surface (f->base.picture);
pic_param->reference_frames[i].pic_order_cnt = f->poc;
pic_param->reference_frames[i].flags = 0;
}
@ -1727,7 +1725,8 @@ _h265_fill_slice_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
if (frame_type != GST_H265_I_SLICE) {
for (; i < list0_num; i++) {
slice->ref_pic_list0[i].picture_id =
gst_va_encode_picture_get_reconstruct_surface (list0[i]->picture);
gst_va_encode_picture_get_reconstruct_surface
(list0[i]->base.picture);
slice->ref_pic_list0[i].pic_order_cnt = list0[i]->poc;
}
}
@ -1740,7 +1739,8 @@ _h265_fill_slice_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
if (frame_type == GST_H265_B_SLICE) {
for (; i < list1_num; i++) {
slice->ref_pic_list1[i].picture_id =
gst_va_encode_picture_get_reconstruct_surface (list1[i]->picture);
gst_va_encode_picture_get_reconstruct_surface
(list1[i]->base.picture);
slice->ref_pic_list1[i].pic_order_cnt = list1[i]->poc;
}
}
@ -1758,7 +1758,7 @@ _h265_add_sequence_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
{
GstVaBaseEnc *base = GST_VA_BASE_ENC (self);
if (!gst_va_encoder_add_param (base->encoder, frame->picture,
if (!gst_va_encoder_add_param (base->encoder, frame->base.picture,
VAEncSequenceParameterBufferType, sequence, sizeof (*sequence))) {
GST_ERROR_OBJECT (self, "Failed to create the sequence parameter");
return FALSE;
@ -1773,7 +1773,7 @@ _h265_add_picture_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
{
GstVaBaseEnc *base = GST_VA_BASE_ENC (self);
if (!gst_va_encoder_add_param (base->encoder, frame->picture,
if (!gst_va_encoder_add_param (base->encoder, frame->base.picture,
VAEncPictureParameterBufferType, pic_param,
sizeof (VAEncPictureParameterBufferHEVC))) {
GST_ERROR_OBJECT (self, "Failed to create the picture parameter");
@ -1789,7 +1789,7 @@ _h265_add_slice_parameter (GstVaH265Enc * self, GstVaH265EncFrame * frame,
{
GstVaBaseEnc *base = GST_VA_BASE_ENC (self);
if (!gst_va_encoder_add_param (base->encoder, frame->picture,
if (!gst_va_encoder_add_param (base->encoder, frame->base.picture,
VAEncSliceParameterBufferType, slice,
sizeof (VAEncSliceParameterBufferHEVC))) {
GST_ERROR_OBJECT (self, "Failed to add the slice parameter");
@ -1877,24 +1877,24 @@ _h265_encode_one_frame (GstVaH265Enc * self, GstVideoCodecFrame * gst_frame)
if (frame->poc == 0) {
VAEncSequenceParameterBufferHEVC sequence;
if (!gst_va_base_enc_add_rate_control_parameter (base, frame->picture,
if (!gst_va_base_enc_add_rate_control_parameter (base, frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.max_bitrate_bits,
self->rc.target_percentage, self->rc.qp_i, self->rc.min_qp,
self->rc.max_qp, self->rc.mbbrc))
return FALSE;
if (!gst_va_base_enc_add_quality_level_parameter (base, frame->picture,
if (!gst_va_base_enc_add_quality_level_parameter (base, frame->base.picture,
self->rc.target_usage))
return FALSE;
if (!gst_va_base_enc_add_frame_rate_parameter (base, frame->picture))
if (!gst_va_base_enc_add_frame_rate_parameter (base, frame->base.picture))
return FALSE;
if (!gst_va_base_enc_add_hrd_parameter (base, frame->picture,
if (!gst_va_base_enc_add_hrd_parameter (base, frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.cpb_length_bits))
return FALSE;
if (!gst_va_base_enc_add_trellis_parameter (base, frame->picture,
if (!gst_va_base_enc_add_trellis_parameter (base, frame->base.picture,
self->features.use_trellis))
return FALSE;
@ -2013,7 +2013,7 @@ _h265_encode_one_frame (GstVaH265Enc * self, GstVideoCodecFrame * gst_frame)
negative_pocs, num_negative_pics, positive_pocs, num_positive_pics))
return FALSE;
if (!gst_va_encoder_encode (base->encoder, frame->picture)) {
if (!gst_va_encoder_encode (base->encoder, frame->base.picture)) {
GST_ERROR_OBJECT (self, "Encode frame error");
return FALSE;
}
@ -2431,11 +2431,11 @@ gst_va_h265_enc_encode_frame (GstVaBaseEnc * base,
frame = _enc_frame (gst_frame);
frame->last_frame = is_last;
g_assert (frame->picture == NULL);
frame->picture = gst_va_encode_picture_new (base->encoder,
g_assert (frame->base.picture == NULL);
frame->base.picture = gst_va_encode_picture_new (base->encoder,
gst_frame->input_buffer);
if (!frame->picture) {
if (!frame->base.picture) {
GST_ERROR_OBJECT (base, "Failed to create the encode picture");
return GST_FLOW_ERROR;
}
@ -4498,14 +4498,17 @@ gst_va_h265_enc_reconfig (GstVaBaseEnc * base)
GstVideoFormat format, reconf_format = GST_VIDEO_FORMAT_UNKNOWN;
VAProfile profile = VAProfileNone;
gboolean do_renegotiation = TRUE, do_reopen, need_negotiation;
guint max_ref_frames, max_surfaces = 0, rt_format = 0, codedbuf_size;
guint max_ref_frames, max_surfaces = 0, rt_format = 0,
codedbuf_size, latency_num;
gint width, height;
guint alignment;
GstClockTime latency;
width = GST_VIDEO_INFO_WIDTH (&base->in_info);
height = GST_VIDEO_INFO_HEIGHT (&base->in_info);
format = GST_VIDEO_INFO_FORMAT (&base->in_info);
codedbuf_size = base->codedbuf_size;
latency_num = base->preferred_output_delay + self->gop.ip_period - 1;
need_negotiation =
!gst_va_encoder_get_reconstruct_pool_config (base->encoder, &reconf_caps,
@ -4530,6 +4533,13 @@ gst_va_h265_enc_reconfig (GstVaBaseEnc * base)
gst_va_base_enc_reset_state (base);
if (base->is_live) {
base->preferred_output_delay = 0;
} else {
/* FIXME: An empirical value that works well on most platforms. */
base->preferred_output_delay = 4;
}
base->profile = profile;
base->rt_format = rt_format;
base->width = width;
@ -4625,8 +4635,22 @@ gst_va_h265_enc_reconfig (GstVaBaseEnc * base)
self->aud = self->aud && self->packed_headers & VA_ENC_PACKED_HEADER_RAW_DATA;
update_property_bool (base, &self->prop.aud, self->aud, PROP_AUD);
/* Let the downstream know the new latency. */
if (latency_num != base->preferred_output_delay + self->gop.ip_period - 1) {
need_negotiation = TRUE;
latency_num = base->preferred_output_delay + self->gop.ip_period - 1;
}
/* Set the latency */
latency = gst_util_uint64_scale (latency_num,
GST_VIDEO_INFO_FPS_D (&base->input_state->info) * GST_SECOND,
GST_VIDEO_INFO_FPS_N (&base->input_state->info));
gst_video_encoder_set_latency (venc, latency, latency);
max_ref_frames = self->gop.b_pyramid ?
self->gop.highest_pyramid_level + 2 : self->gop.num_ref_frames;
max_ref_frames += base->preferred_output_delay;
base->min_buffers = max_ref_frames;
max_ref_frames += 3 /* scratch frames */ ;
/* second check after calculations */
@ -4717,7 +4741,7 @@ gst_va_h265_enc_new_frame (GstVaBaseEnc * base, GstVideoCodecFrame * frame)
GstVaH265EncFrame *frame_in;
frame_in = gst_va_h265_enc_frame_new ();
gst_video_codec_frame_set_user_data (frame, frame_in,
gst_va_set_enc_frame (frame, (GstVaEncFrame *) frame_in,
gst_va_h265_enc_frame_free);
gst_va_base_enc_push_dts (base, frame, self->gop.num_reorder_frames);
@ -4745,7 +4769,7 @@ gst_va_h265_enc_prepare_output (GstVaBaseEnc * base,
}
buf = gst_va_base_enc_create_output_buffer (base,
frame_enc->picture, NULL, 0);
frame_enc->base.picture, NULL, 0);
if (!buf) {
GST_ERROR_OBJECT (base, "Failed to create output buffer");
return FALSE;


@ -153,7 +153,7 @@ struct _GstVaVp9GFGroup
struct _GstVaVp9EncFrame
{
GstVaEncodePicture *picture;
GstVaEncFrame base;
GstVp9FrameType type;
/* VP9 does not define a frame number.
This is a virtual number after the key frame. */
@ -264,7 +264,7 @@ gst_va_vp9_enc_frame_new (void)
frame = g_new (GstVaVp9EncFrame, 1);
frame->frame_num = -1;
frame->type = FRAME_TYPE_INVALID;
frame->picture = NULL;
frame->base.picture = NULL;
frame->pyramid_level = 0;
frame->flags = 0;
frame->bidir_ref = FALSE;
@ -281,7 +281,7 @@ gst_va_vp9_enc_frame_free (gpointer pframe)
{
GstVaVp9EncFrame *frame = pframe;
g_clear_pointer (&frame->picture, gst_va_encode_picture_free);
g_clear_pointer (&frame->base.picture, gst_va_encode_picture_free);
g_free (frame);
}
@ -291,7 +291,7 @@ gst_va_vp9_enc_new_frame (GstVaBaseEnc * base, GstVideoCodecFrame * frame)
GstVaVp9EncFrame *frame_in;
frame_in = gst_va_vp9_enc_frame_new ();
gst_video_codec_frame_set_user_data (frame, frame_in,
gst_va_set_enc_frame (frame, (GstVaEncFrame *) frame_in,
gst_va_vp9_enc_frame_free);
return TRUE;
@ -2096,13 +2096,15 @@ gst_va_vp9_enc_reconfig (GstVaBaseEnc * base)
VAProfile profile;
gboolean do_renegotiation = TRUE, do_reopen, need_negotiation;
guint max_ref_frames, max_surfaces = 0,
rt_format, depth = 0, chrome = 0, codedbuf_size;
rt_format, depth = 0, chrome = 0, codedbuf_size, latency_num;
gint width, height;
GstClockTime latency;
width = GST_VIDEO_INFO_WIDTH (&base->in_info);
height = GST_VIDEO_INFO_HEIGHT (&base->in_info);
format = GST_VIDEO_INFO_FORMAT (&base->in_info);
codedbuf_size = base->codedbuf_size;
latency_num = base->preferred_output_delay + self->gop.gf_group_size - 1;
need_negotiation =
!gst_va_encoder_get_reconstruct_pool_config (base->encoder, &reconf_caps,
@ -2135,6 +2137,13 @@ gst_va_vp9_enc_reconfig (GstVaBaseEnc * base)
gst_va_base_enc_reset_state (base);
if (base->is_live) {
base->preferred_output_delay = 0;
} else {
/* FIXME: An empirical value that works well on most platforms. */
base->preferred_output_delay = 4;
}
base->profile = profile;
base->rt_format = rt_format;
self->depth = depth;
@ -2168,7 +2177,22 @@ gst_va_vp9_enc_reconfig (GstVaBaseEnc * base)
if (!_vp9_init_packed_headers (self))
return FALSE;
max_ref_frames = GST_VP9_REF_FRAMES + 3 /* scratch frames */ ;
/* Let the downstream know the new latency. */
if (latency_num != base->preferred_output_delay + self->gop.gf_group_size - 1) {
need_negotiation = TRUE;
latency_num = base->preferred_output_delay + self->gop.gf_group_size - 1;
}
/* Set the latency */
latency = gst_util_uint64_scale (latency_num,
GST_VIDEO_INFO_FPS_D (&base->input_state->info) * GST_SECOND,
GST_VIDEO_INFO_FPS_N (&base->input_state->info));
gst_video_encoder_set_latency (venc, latency, latency);
max_ref_frames = GST_VP9_REF_FRAMES;
max_ref_frames += base->preferred_output_delay;
base->min_buffers = max_ref_frames;
max_ref_frames += 3 /* scratch frames */ ;
/* second check after calculations */
do_reopen |=
@ -2322,10 +2346,10 @@ _vp9_fill_frame_param (GstVaVp9Enc * self, GstVaVp9EncFrame * va_frame,
.frame_width_dst = base->width,
.frame_height_dst = base->height,
.reconstructed_frame =
gst_va_encode_picture_get_reconstruct_surface (va_frame->picture),
gst_va_encode_picture_get_reconstruct_surface (va_frame->base.picture),
/* Set it later. */
.reference_frames = { 0, },
.coded_buf = va_frame->picture->coded_buffer,
.coded_buf = va_frame->base.picture->coded_buffer,
.ref_flags.bits = {
.force_kf = 0,
/* Set all the refs later if inter frame. */
@ -2392,7 +2416,7 @@ _vp9_fill_frame_param (GstVaVp9Enc * self, GstVaVp9EncFrame * va_frame,
pic_param->reference_frames[i] =
gst_va_encode_picture_get_reconstruct_surface
(_enc_frame (self->gop.ref_list[i])->picture);
(_enc_frame (self->gop.ref_list[i])->base.picture);
}
@ -2428,13 +2452,13 @@ _vp9_encode_one_frame (GstVaVp9Enc * self, GstVaVp9EncFrame * va_frame)
return FALSE;
}
if (!gst_va_encoder_add_param (base->encoder, va_frame->picture,
if (!gst_va_encoder_add_param (base->encoder, va_frame->base.picture,
VAEncPictureParameterBufferType, &pic_param, sizeof (pic_param))) {
GST_ERROR_OBJECT (self, "Failed to create the frame parameter");
return FALSE;
}
if (!gst_va_encoder_encode (base->encoder, va_frame->picture)) {
if (!gst_va_encoder_encode (base->encoder, va_frame->base.picture)) {
GST_ERROR_OBJECT (self, "Encode frame error");
return FALSE;
}
@ -2504,33 +2528,35 @@ gst_va_vp9_enc_encode_frame (GstVaBaseEnc * base,
g_assert (va_frame->flags & FRAME_FLAG_ALREADY_ENCODED);
_vp9_add_repeat_frame_header (self, va_frame);
} else {
g_assert (va_frame->picture == NULL);
va_frame->picture = gst_va_encode_picture_new (base->encoder,
g_assert (va_frame->base.picture == NULL);
va_frame->base.picture = gst_va_encode_picture_new (base->encoder,
gst_frame->input_buffer);
_vp9_find_ref_to_update (base, gst_frame);
/* Repeat the sequence for each key. */
if (va_frame->frame_num == 0) {
if (!gst_va_base_enc_add_rate_control_parameter (base, va_frame->picture,
if (!gst_va_base_enc_add_rate_control_parameter (base,
va_frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.max_bitrate_bits,
self->rc.target_percentage, self->rc.base_qindex,
self->rc.min_qindex, self->rc.max_qindex, self->rc.mbbrc))
return FALSE;
if (!gst_va_base_enc_add_quality_level_parameter (base, va_frame->picture,
self->rc.target_usage))
if (!gst_va_base_enc_add_quality_level_parameter (base,
va_frame->base.picture, self->rc.target_usage))
return FALSE;
if (!gst_va_base_enc_add_frame_rate_parameter (base, va_frame->picture))
if (!gst_va_base_enc_add_frame_rate_parameter (base,
va_frame->base.picture))
return FALSE;
if (!gst_va_base_enc_add_hrd_parameter (base, va_frame->picture,
if (!gst_va_base_enc_add_hrd_parameter (base, va_frame->base.picture,
self->rc.rc_ctrl_mode, self->rc.cpb_length_bits))
return FALSE;
_vp9_fill_sequence_param (self, &seq_param);
if (!_vp9_add_sequence_param (self, va_frame->picture, &seq_param))
if (!_vp9_add_sequence_param (self, va_frame->base.picture, &seq_param))
return FALSE;
}
@ -2576,7 +2602,7 @@ _vp9_create_super_frame_output_buffer (GstVaVp9Enc * self,
frame_enc = _enc_frame (self->frames_in_super[num]);
frame_size[num] = gst_va_base_enc_copy_output_data (base,
frame_enc->picture, data + offset, total_sz - offset);
frame_enc->base.picture, data + offset, total_sz - offset);
if (frame_size[num] <= 0) {
GST_ERROR_OBJECT (self, "Fails to copy the output data of "
"system_frame_number %d, frame_num: %d",
@ -2590,7 +2616,7 @@ _vp9_create_super_frame_output_buffer (GstVaVp9Enc * self,
frame_enc = _enc_frame (last_frame);
frame_size[num] = gst_va_base_enc_copy_output_data (base,
frame_enc->picture, data + offset, total_sz - offset);
frame_enc->base.picture, data + offset, total_sz - offset);
if (frame_size[num] <= 0) {
GST_ERROR_OBJECT (self, "Fails to copy the output data of "
"system_frame_number %d, frame_num: %d",
@ -2699,7 +2725,7 @@ gst_va_vp9_enc_prepare_output (GstVaBaseEnc * base,
buf = _vp9_create_super_frame_output_buffer (self, frame);
} else {
buf = gst_va_base_enc_create_output_buffer (base,
frame_enc->picture, NULL, 0);
frame_enc->base.picture, NULL, 0);
}
if (!buf) {
GST_ERROR_OBJECT (base, "Failed to create output buffer%s",


@ -31,6 +31,7 @@ static GMainLoop *loop = NULL;
static gint width = 640;
static gint height = 480;
static guint rc_ctrl = 0;
static gboolean alive = FALSE;
G_LOCK_DEFINE_STATIC (input_lock);
@ -455,6 +456,8 @@ main (gint argc, gchar ** argv)
{"codec", 'c', 0, G_OPTION_ARG_STRING, &codec,
"Codec to test: "
"[ *h264, h265, vp9, av1, h264lp, h265lp, vp9lp, av1lp ]"},
{"alive", 'a', 0, G_OPTION_ARG_NONE, &alive,
"Set test source as a live stream"},
{NULL}
};
const struct {
@ -516,7 +519,7 @@ main (gint argc, gchar ** argv)
pipeline = gst_pipeline_new (NULL);
MAKE_ELEMENT_AND_ADD (src, "videotestsrc");
g_object_set (src, "pattern", 1, NULL);
g_object_set (src, "pattern", 1, "is-live", alive, NULL);
MAKE_ELEMENT_AND_ADD (capsfilter, "capsfilter");
MAKE_ELEMENT_AND_ADD (convert, "videoconvert");