cccombiner: implement samples selection API

Call gst_aggregator_selected_samples() after identifying the
caption buffers that will be added as a meta on the next video
buffer.

Implement GstAggregator.peek_next_sample.

Add an example that demonstrates usage of the new API in
combination with the existing buffer-consumed signal.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1390>
Authored by Mathieu Duponchelle on 2020-07-01 03:59:56 +02:00, committed by GStreamer Merge Bot
parent 480ede1aa7
commit 265128e7f7
2 changed files with 97 additions and 0 deletions
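For reference, here is a minimal application-side sketch (not part of this patch) of how the new API combines with the existing buffer-consumed signal: samples-selected is enabled through the aggregator's emit-signals property and answered with gst_aggregator_peek_next_sample(), while buffer-consumed is enabled through the caption pad's own emit-signals property. The setup_combiner() and callback names are hypothetical, the samples-selected callback signature follows the unit test below, and the GstAggregatorPad buffer-consumed/emit-signals behaviour is assumed as documented since GStreamer 1.16.

#include <gst/gst.h>
#include <gst/base/gstaggregator.h>

/* Called for every caption buffer cccombiner consumes, i.e. attaches
 * as a GstVideoCaptionMeta on an output video buffer. */
static void
buffer_consumed_cb (GstAggregatorPad * pad, GstBuffer * buffer,
    gpointer user_data)
{
  GST_DEBUG_OBJECT (pad, "caption buffer %" GST_PTR_FORMAT " consumed", buffer);
}

/* Called right after cccombiner has selected the caption buffers that
 * will decorate the next video buffer; peek both pending samples. */
static void
samples_selected_cb (GstAggregator * agg, gpointer user_data)
{
  GstPad *video_pad = gst_element_get_static_pad (GST_ELEMENT (agg), "sink");
  GstPad *caption_pad =
      gst_element_get_static_pad (GST_ELEMENT (agg), "caption");
  GstSample *video_sample =
      gst_aggregator_peek_next_sample (agg, GST_AGGREGATOR_PAD (video_pad));
  GstSample *caption_sample =
      gst_aggregator_peek_next_sample (agg, GST_AGGREGATOR_PAD (caption_pad));

  if (video_sample) {
    GST_DEBUG_OBJECT (agg, "next video buffer: %" GST_PTR_FORMAT,
        gst_sample_get_buffer (video_sample));
    gst_sample_unref (video_sample);
  }

  if (caption_sample) {
    /* The caption sample carries a buffer list, one entry per caption
     * buffer selected for the upcoming video frame. */
    GstBufferList *captions = gst_sample_get_buffer_list (caption_sample);

    GST_DEBUG_OBJECT (agg, "%u caption buffer(s) selected",
        gst_buffer_list_length (captions));
    gst_sample_unref (caption_sample);
  }

  gst_object_unref (video_pad);
  gst_object_unref (caption_pad);
}

/* Hypothetical helper wiring both signals up on a cccombiner instance. */
static void
setup_combiner (GstElement * combiner)
{
  GstPad *caption_pad;

  /* emit-signals on the aggregator gates samples-selected */
  g_object_set (combiner, "emit-signals", TRUE, NULL);
  g_signal_connect (combiner, "samples-selected",
      G_CALLBACK (samples_selected_cb), NULL);

  /* emit-signals on the pad gates buffer-consumed */
  caption_pad = gst_element_get_static_pad (combiner, "caption");
  g_object_set (caption_pad, "emit-signals", TRUE, NULL);
  g_signal_connect (caption_pad, "buffer-consumed",
      G_CALLBACK (buffer_consumed_cb), NULL);
  gst_object_unref (caption_pad);
}

The unit test added below exercises the same pattern through GstHarness.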


@@ -98,6 +98,7 @@ gst_cc_combiner_collect_captions (GstCCCombiner * self, gboolean timeout)
GST_LOG_OBJECT (self, "No caption pad, passing through video");
video_buf = self->current_video_buffer;
self->current_video_buffer = NULL;
gst_aggregator_selected_samples (GST_AGGREGATOR_CAST (self));
goto done;
}
@@ -203,6 +204,8 @@ gst_cc_combiner_collect_captions (GstCCCombiner * self, gboolean timeout)
gst_aggregator_pad_drop_buffer (caption_pad);
} while (TRUE);
gst_aggregator_selected_samples (GST_AGGREGATOR_CAST (self));
if (self->current_frame_captions->len > 0) {
guint i;
@@ -592,6 +595,57 @@ gst_cc_combiner_sink_query (GstAggregator * aggregator,
return ret;
}

static GstSample *
gst_cc_combiner_peek_next_sample (GstAggregator * agg,
GstAggregatorPad * aggpad)
{
GstAggregatorPad *caption_pad, *video_pad;
GstCCCombiner *self = GST_CCCOMBINER (agg);
GstSample *res = NULL;
caption_pad =
GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
(self), "caption"));
video_pad =
GST_AGGREGATOR_PAD_CAST (gst_element_get_static_pad (GST_ELEMENT_CAST
(self), "sink"));
if (aggpad == caption_pad) {
if (self->current_frame_captions->len > 0) {
GstCaps *caps = gst_pad_get_current_caps (GST_PAD (aggpad));
GstBufferList *buflist = gst_buffer_list_new ();
guint i;
for (i = 0; i < self->current_frame_captions->len; i++) {
CaptionData *caption_data =
&g_array_index (self->current_frame_captions, CaptionData, i);
gst_buffer_list_add (buflist, gst_buffer_ref (caption_data->buffer));
}
res = gst_sample_new (NULL, caps, &aggpad->segment, NULL);
gst_caps_unref (caps);
gst_sample_set_buffer_list (res, buflist);
gst_buffer_list_unref (buflist);
}
} else if (aggpad == video_pad) {
if (self->current_video_buffer) {
GstCaps *caps = gst_pad_get_current_caps (GST_PAD (aggpad));
res = gst_sample_new (self->current_video_buffer,
caps, &aggpad->segment, NULL);
gst_caps_unref (caps);
}
}
if (caption_pad)
gst_object_unref (caption_pad);
if (video_pad)
gst_object_unref (video_pad);
return res;
}

static void
gst_cc_combiner_class_init (GstCCCombinerClass * klass)
{
@@ -627,6 +681,7 @@ gst_cc_combiner_class_init (GstCCCombinerClass * klass)
aggregator_class->get_next_time = gst_aggregator_simple_get_next_time;
aggregator_class->src_query = gst_cc_combiner_src_query;
aggregator_class->sink_query = gst_cc_combiner_sink_query;
aggregator_class->peek_next_sample = gst_cc_combiner_peek_next_sample;
GST_DEBUG_CATEGORY_INIT (gst_cc_combiner_debug, "cccombiner",
0, "Closed Caption combiner");


@@ -66,6 +66,36 @@ GST_START_TEST (no_captions)
GST_END_TEST;

GstBuffer *expected_video_buffer = NULL;
GstBuffer *expected_caption_buffer = NULL;

static void
samples_selected_cb (GstAggregator * agg)
{
GstBufferList *buflist;
GstPad *caption_pad =
gst_element_get_static_pad (GST_ELEMENT (agg), "caption");
GstPad *video_pad = gst_element_get_static_pad (GST_ELEMENT (agg), "sink");
GstSample *video_sample =
gst_aggregator_peek_next_sample (agg, GST_AGGREGATOR_PAD (video_pad));
GstSample *captions_sample =
gst_aggregator_peek_next_sample (agg, GST_AGGREGATOR_PAD (caption_pad));
fail_unless (video_sample != NULL);
fail_unless (captions_sample != NULL);
fail_unless (gst_sample_get_buffer (video_sample) == expected_video_buffer);
gst_sample_unref (video_sample);
buflist = gst_sample_get_buffer_list (captions_sample);
fail_unless_equals_int (gst_buffer_list_length (buflist), 1);
fail_unless (gst_buffer_list_get (buflist, 0) == expected_caption_buffer);
gst_sample_unref (captions_sample);
gst_object_unref (caption_pad);
gst_object_unref (video_pad);
}

GST_START_TEST (captions_and_eos)
{
GstHarness *h, *h2;
@@ -73,6 +103,7 @@ GST_START_TEST (captions_and_eos)
GstPad *caption_pad;
GstCaps *caps;
GstVideoCaptionMeta *meta;
GstBuffer *second_video_buf, *second_caption_buf;
h = gst_harness_new_with_padnames ("cccombiner", "sink", "src");
h2 = gst_harness_new_with_element (h->element, NULL, NULL);
@@ -80,6 +111,10 @@ GST_START_TEST (captions_and_eos)
gst_harness_add_element_sink_pad (h2, caption_pad);
gst_object_unref (caption_pad);
g_object_set (h->element, "emit-signals", TRUE, NULL);
g_signal_connect (h->element, "samples-selected",
G_CALLBACK (samples_selected_cb), NULL);
gst_harness_set_src_caps_str (h, foo_bar_caps.string);
gst_harness_set_src_caps_str (h2, cea708_cc_data_caps.string);
@@ -87,11 +122,13 @@ GST_START_TEST (captions_and_eos)
buf = gst_buffer_new_and_alloc (128);
GST_BUFFER_PTS (buf) = 0;
GST_BUFFER_DURATION (buf) = 40 * GST_MSECOND;
expected_video_buffer = buf;
gst_harness_push (h, buf);
buf = gst_buffer_new_and_alloc (128);
GST_BUFFER_PTS (buf) = 0;
GST_BUFFER_DURATION (buf) = 40 * GST_MSECOND;
expected_caption_buffer = buf;
gst_harness_push (h2, buf);
/* And another one: the first video buffer should be retrievable
@@ -99,17 +136,22 @@ GST_START_TEST (captions_and_eos)
buf = gst_buffer_new_and_alloc (128);
GST_BUFFER_PTS (buf) = 40 * GST_MSECOND;
GST_BUFFER_DURATION (buf) = 40 * GST_MSECOND;
second_video_buf = buf;
gst_harness_push (h, buf);
buf = gst_buffer_new_and_alloc (128);
GST_BUFFER_PTS (buf) = 40 * GST_MSECOND;
GST_BUFFER_DURATION (buf) = 40 * GST_MSECOND;
second_caption_buf = buf;
gst_harness_push (h2, buf);
/* Pull the first output buffer */
outbuf = gst_harness_pull (h);
fail_unless (outbuf != NULL);
expected_video_buffer = second_video_buf;
expected_caption_buffer = second_caption_buf;
meta = gst_buffer_get_video_caption_meta (outbuf);
fail_unless (meta != NULL);
fail_unless_equals_int (meta->caption_type,