From 105814e2c78f9867c61531b9e8166e4ae994296f Mon Sep 17 00:00:00 2001 From: Jan Schmidt Date: Tue, 30 Aug 2011 18:21:31 +1000 Subject: [PATCH 01/21] playsink: Try include 'pitch', if no other sink is provided As a default, try the pipeline 'pitch ! audioconvert ! autoaudiosink' before trying plain autoaudiosink --- gst/playback/gstplaysink.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/gst/playback/gstplaysink.c b/gst/playback/gstplaysink.c index 01f00a5cd3..cd92c332ab 100644 --- a/gst/playback/gstplaysink.c +++ b/gst/playback/gstplaysink.c @@ -1664,6 +1664,14 @@ gen_audio_chain (GstPlaySink * playsink, gboolean raw) chain->sink = try_element (playsink, playsink->audio_sink, FALSE); } else { /* only try fallback if no specific sink was chosen */ + if (chain->sink == NULL) { + GST_DEBUG_OBJECT (playsink, + "trying pitch ! audioconvert ! autoaudiosink"); + elem = + gst_parse_bin_from_description + ("pitch ! audioconvert ! autoaudiosink", TRUE, NULL); + chain->sink = try_element (playsink, elem, TRUE); + } if (chain->sink == NULL) { GST_DEBUG_OBJECT (playsink, "trying autoaudiosink"); elem = gst_element_factory_make ("autoaudiosink", "audiosink"); From 33d491a04f45045a3efc3d5c24136b8a229f5cf3 Mon Sep 17 00:00:00 2001 From: Jan Schmidt Date: Tue, 30 Aug 2011 18:21:31 +1000 Subject: [PATCH 02/21] seek: Accept pipeline descriptions for audiosink/videosink Make the element_factory_make_or_warn utility function try parsing the input string as a bin if element_factory_make() fails. This makes the --audiosink/--videosink commandline options accept a pipeline string. --- tests/examples/seek/seek.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/examples/seek/seek.c b/tests/examples/seek/seek.c index d79209ccbd..216a00a317 100644 --- a/tests/examples/seek/seek.c +++ b/tests/examples/seek/seek.c @@ -164,6 +164,16 @@ gst_element_factory_make_or_warn (const gchar * type, const gchar * name) { GstElement *element = gst_element_factory_make (type, name); +#ifndef GST_DISABLE_PARSE + if (!element) { + /* Try parsing it as a pipeline description */ + element = gst_parse_bin_from_description (type, TRUE, NULL); + if (element) { + gst_element_set_name (element, name); + } + } +#endif + if (!element) { g_warning ("Failed to create element %s of type %s", name, type); } From 425d3ae7bcba493ef88f8b37fb50bfc042ead730 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= Date: Wed, 31 Aug 2011 12:39:18 +0200 Subject: [PATCH 03/21] decodebin2: Keep the chain mutex locked while connecting to the notify::caps signal --- gst/playback/gstdecodebin2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gst/playback/gstdecodebin2.c b/gst/playback/gstdecodebin2.c index 77942609d0..c8f88c3cb3 100644 --- a/gst/playback/gstdecodebin2.c +++ b/gst/playback/gstdecodebin2.c @@ -1616,9 +1616,9 @@ setup_caps_delay: ppad->event_probe_id = gst_pad_add_event_probe (pad, (GCallback) pad_event_cb, ppad); chain->pending_pads = g_list_prepend (chain->pending_pads, ppad); - CHAIN_MUTEX_UNLOCK (chain); g_signal_connect (G_OBJECT (pad), "notify::caps", G_CALLBACK (caps_notify_cb), chain); + CHAIN_MUTEX_UNLOCK (chain); return; } } From 49b301bcd6df7a210582db836c0574a40acc27ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= Date: Wed, 31 Aug 2011 14:45:08 +0200 Subject: [PATCH 04/21] playsink: Only unref ts_offset elements if they're not NULL --- gst/playback/gstplaysink.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git 
a/gst/playback/gstplaysink.c b/gst/playback/gstplaysink.c index cd92c332ab..35e31d6428 100644 --- a/gst/playback/gstplaysink.c +++ b/gst/playback/gstplaysink.c @@ -2311,7 +2311,8 @@ gst_play_sink_reconfigure (GstPlaySink * playsink) add_chain (GST_PLAY_CHAIN (playsink->videochain), FALSE); activate_chain (GST_PLAY_CHAIN (playsink->videochain), FALSE); - g_object_unref (playsink->videochain->ts_offset); + if (playsink->videochain->ts_offset) + g_object_unref (playsink->videochain->ts_offset); playsink->videochain->ts_offset = NULL; } @@ -2365,7 +2366,8 @@ gst_play_sink_reconfigure (GstPlaySink * playsink) disconnect_chain (playsink->audiochain, playsink); playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; - g_object_unref (playsink->audiochain->ts_offset); + if (playsink->audiochain->ts_offset) + g_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; free_chain ((GstPlayChain *) playsink->audiochain); playsink->audiochain = NULL; @@ -2434,7 +2436,8 @@ gst_play_sink_reconfigure (GstPlaySink * playsink) disconnect_chain (playsink->audiochain, playsink); playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; - g_object_unref (playsink->audiochain->ts_offset); + if (playsink->audiochain->ts_offset) + g_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; } add_chain (GST_PLAY_CHAIN (playsink->audiochain), FALSE); @@ -3441,7 +3444,8 @@ gst_play_sink_change_state (GstElement * element, GstStateChange transition) disconnect_chain (playsink->audiochain, playsink); playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; - g_object_unref (playsink->audiochain->ts_offset); + if (playsink->audiochain->ts_offset) + g_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; } ret = GST_STATE_CHANGE_SUCCESS; From f2438913f907407690d6fcf84ce7621056017f69 Mon Sep 17 00:00:00 2001 From: Sjoerd Simons Date: Mon, 22 Aug 2011 15:52:57 +0200 Subject: [PATCH 05/21] Correct added versions --- gst/videorate/gstvideorate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gst/videorate/gstvideorate.c b/gst/videorate/gstvideorate.c index 8045b12640..6060bd520b 100644 --- a/gst/videorate/gstvideorate.c +++ b/gst/videorate/gstvideorate.c @@ -208,7 +208,7 @@ gst_video_rate_class_init (GstVideoRateClass * klass) * * Only drop frames, no duplicates are produced. * - * Since: 0.10.34 + * Since: 0.10.36 */ g_object_class_install_property (object_class, ARG_DROP_ONLY, g_param_spec_boolean ("drop-only", "Only Drop", @@ -222,7 +222,7 @@ gst_video_rate_class_init (GstVideoRateClass * klass) * where the framerate is calculated using a moving average over the * configured. 
* - * Since: 0.10.34 + * Since: 0.10.36 */ g_object_class_install_property (object_class, ARG_AVERAGE_PERIOD, g_param_spec_uint64 ("average-period", "Period over which to average", From ea46b3c706ea99257d2b31cb5c7fb281f43b3178 Mon Sep 17 00:00:00 2001 From: Sjoerd Simons Date: Tue, 23 Aug 2011 10:11:52 +0200 Subject: [PATCH 06/21] videorate: Port to basetransform --- gst/videorate/Makefile.am | 4 +- gst/videorate/gstvideorate.c | 410 +++++++++++++---------------------- gst/videorate/gstvideorate.h | 7 +- 3 files changed, 153 insertions(+), 268 deletions(-) diff --git a/gst/videorate/Makefile.am b/gst/videorate/Makefile.am index fe9b9961c8..b275abab99 100644 --- a/gst/videorate/Makefile.am +++ b/gst/videorate/Makefile.am @@ -3,9 +3,9 @@ noinst_HEADERS = gstvideorate.h plugin_LTLIBRARIES = libgstvideorate.la libgstvideorate_la_SOURCES = gstvideorate.c -libgstvideorate_la_CFLAGS = $(GST_CFLAGS) +libgstvideorate_la_CFLAGS = $(GST_CFLAGS) $(GST_BASE_CFLAGS) libgstvideorate_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) -libgstvideorate_la_LIBADD = $(GST_LIBS) +libgstvideorate_la_LIBADD = $(GST_LIBS) $(GST_BASE_LIBS) libgstvideorate_la_LIBTOOLFLAGS = --tag=disable-static Android.mk: Makefile.am $(BUILT_SOURCES) diff --git a/gst/videorate/gstvideorate.c b/gst/videorate/gstvideorate.c index 6060bd520b..9af4f8d31e 100644 --- a/gst/videorate/gstvideorate.c +++ b/gst/videorate/gstvideorate.c @@ -122,24 +122,38 @@ static GstStaticPadTemplate gst_video_rate_sink_template = static void gst_video_rate_swap_prev (GstVideoRate * videorate, GstBuffer * buffer, gint64 time); -static gboolean gst_video_rate_event (GstPad * pad, GstEvent * event); -static gboolean gst_video_rate_query (GstPad * pad, GstQuery * query); -static GstFlowReturn gst_video_rate_chain (GstPad * pad, GstBuffer * buffer); +static gboolean gst_video_rate_event (GstBaseTransform * trans, + GstEvent * event); +static gboolean gst_video_rate_query (GstBaseTransform * trans, + GstPadDirection direction, GstQuery * query); + +static gboolean gst_video_rate_setcaps (GstBaseTransform * trans, + GstCaps * in_caps, GstCaps * out_caps); + +static GstCaps *gst_video_rate_transform_caps (GstBaseTransform * trans, + GstPadDirection direction, GstCaps * caps); + +static void gst_video_rate_fixate_caps (GstBaseTransform * trans, + GstPadDirection direction, GstCaps * caps, GstCaps * othercaps); + +static GstFlowReturn gst_video_rate_prepare_output_buffer (GstBaseTransform * + trans, GstBuffer * input, gint size, GstCaps * caps, GstBuffer ** buf); +static GstFlowReturn gst_video_rate_transform_ip (GstBaseTransform * trans, + GstBuffer * buf); + +static gboolean gst_video_rate_start (GstBaseTransform * trans); + static void gst_video_rate_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_video_rate_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); -static GstStateChangeReturn gst_video_rate_change_state (GstElement * element, - GstStateChange transition); - -/*static guint gst_video_rate_signals[LAST_SIGNAL] = { 0 }; */ - static GParamSpec *pspec_drop = NULL; static GParamSpec *pspec_duplicate = NULL; -GST_BOILERPLATE (GstVideoRate, gst_video_rate, GstElement, GST_TYPE_ELEMENT); +GST_BOILERPLATE (GstVideoRate, gst_video_rate, + GstBaseTransform, GST_TYPE_BASE_TRANSFORM); static void gst_video_rate_base_init (gpointer g_class) @@ -161,13 +175,24 @@ static void gst_video_rate_class_init (GstVideoRateClass * klass) { GObjectClass *object_class = G_OBJECT_CLASS (klass); - 
GstElementClass *element_class = GST_ELEMENT_CLASS (klass); + GstBaseTransformClass *base_class = GST_BASE_TRANSFORM_CLASS (klass); parent_class = g_type_class_peek_parent (klass); object_class->set_property = gst_video_rate_set_property; object_class->get_property = gst_video_rate_get_property; + base_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_rate_setcaps); + base_class->transform_caps = + GST_DEBUG_FUNCPTR (gst_video_rate_transform_caps); + base_class->transform_ip = GST_DEBUG_FUNCPTR (gst_video_rate_transform_ip); + base_class->prepare_output_buffer = + GST_DEBUG_FUNCPTR (gst_video_rate_prepare_output_buffer); + base_class->event = GST_DEBUG_FUNCPTR (gst_video_rate_event); + base_class->start = GST_DEBUG_FUNCPTR (gst_video_rate_start); + base_class->fixate_caps = GST_DEBUG_FUNCPTR (gst_video_rate_fixate_caps); + base_class->query = GST_DEBUG_FUNCPTR (gst_video_rate_query); + g_object_class_install_property (object_class, ARG_IN, g_param_spec_uint64 ("in", "In", "Number of input frames", 0, G_MAXUINT64, 0, @@ -229,208 +254,105 @@ gst_video_rate_class_init (GstVideoRateClass * klass) "Period over which to average the framerate (in ns) (0 = disabled)", 0, G_MAXINT64, DEFAULT_AVERAGE_PERIOD, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - - element_class->change_state = GST_DEBUG_FUNCPTR (gst_video_rate_change_state); -} - -/* return the caps that can be used on out_pad given in_caps on in_pad */ -static gboolean -gst_video_rate_transformcaps (GstPad * in_pad, GstCaps * in_caps, - GstPad * out_pad, GstCaps ** out_caps) -{ - GstCaps *intersect; - const GstCaps *in_templ; - gint i; - GSList *extra_structures = NULL; - GSList *iter; - - in_templ = gst_pad_get_pad_template_caps (in_pad); - intersect = gst_caps_intersect (in_caps, in_templ); - - /* all possible framerates are allowed */ - for (i = 0; i < gst_caps_get_size (intersect); i++) { - GstStructure *structure; - - structure = gst_caps_get_structure (intersect, i); - - if (gst_structure_has_field (structure, "framerate")) { - GstStructure *copy_structure; - - copy_structure = gst_structure_copy (structure); - gst_structure_set (copy_structure, - "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL); - extra_structures = g_slist_append (extra_structures, copy_structure); - } - } - - /* append the extra structures */ - for (iter = extra_structures; iter != NULL; iter = g_slist_next (iter)) { - gst_caps_append_structure (intersect, (GstStructure *) iter->data); - } - g_slist_free (extra_structures); - - *out_caps = intersect; - - return TRUE; } static GstCaps * -gst_video_rate_getcaps (GstPad * pad) +gst_video_rate_transform_caps (GstBaseTransform * trans, + GstPadDirection direction, GstCaps * caps) { - GstVideoRate *videorate; - GstPad *otherpad; - GstCaps *caps; + GstCaps *ret; + GstStructure *s; - videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad)); + /* Should always be called with simple caps */ + g_return_val_if_fail (GST_CAPS_IS_SIMPLE (caps), NULL); - otherpad = (pad == videorate->srcpad) ? 
videorate->sinkpad : - videorate->srcpad; + ret = gst_caps_copy (caps); - /* we can do what the peer can */ - caps = gst_pad_peer_get_caps (otherpad); - if (caps) { - GstCaps *transform; + s = gst_structure_copy (gst_caps_get_structure (caps, 0)); - gst_video_rate_transformcaps (otherpad, caps, pad, &transform); - gst_caps_unref (caps); - caps = transform; - } else { - /* no peer, our padtemplate is enough then */ - caps = gst_caps_copy (gst_pad_get_pad_template_caps (pad)); - } + /* set the framerate as a range */ + gst_structure_set (s, "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, + G_MAXINT, 1, NULL); - return caps; + gst_caps_append_structure (ret, s); + + return ret; +} + +static void +gst_video_rate_fixate_caps (GstBaseTransform * trans, + GstPadDirection direction, GstCaps * caps, GstCaps * othercaps) +{ + GstStructure *s; + gint num, denom; + + s = gst_caps_get_structure (caps, 0); + if (G_UNLIKELY (!gst_structure_get_fraction (s, "framerate", &num, &denom))) + return; + + s = gst_caps_get_structure (othercaps, 0); + gst_structure_fixate_field_nearest_fraction (s, "framerate", num, denom); } static gboolean -gst_video_rate_setcaps (GstPad * pad, GstCaps * caps) +gst_video_rate_setcaps (GstBaseTransform * trans, GstCaps * in_caps, + GstCaps * out_caps) { GstVideoRate *videorate; GstStructure *structure; gboolean ret = TRUE; - GstPad *otherpad, *opeer; gint rate_numerator, rate_denominator; - videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad)); + videorate = GST_VIDEO_RATE (trans); - GST_DEBUG_OBJECT (pad, "setcaps called %" GST_PTR_FORMAT, caps); + GST_DEBUG_OBJECT (trans, "setcaps called in: %" GST_PTR_FORMAT + " out: %" GST_PTR_FORMAT, in_caps, out_caps); - structure = gst_caps_get_structure (caps, 0); + structure = gst_caps_get_structure (in_caps, 0); if (!gst_structure_get_fraction (structure, "framerate", &rate_numerator, &rate_denominator)) goto no_framerate; - if (pad == videorate->srcpad) { - /* out_frame_count is scaled by the frame rate caps when calculating next_ts. - * when the frame rate caps change, we must update base_ts and reset - * out_frame_count */ - if (videorate->to_rate_numerator) { - videorate->base_ts += - gst_util_uint64_scale (videorate->out_frame_count, - videorate->to_rate_denominator * GST_SECOND, - videorate->to_rate_numerator); - } - videorate->out_frame_count = 0; - videorate->to_rate_numerator = rate_numerator; - videorate->to_rate_denominator = rate_denominator; + videorate->from_rate_numerator = rate_numerator; + videorate->from_rate_denominator = rate_denominator; + + structure = gst_caps_get_structure (out_caps, 0); + if (!gst_structure_get_fraction (structure, "framerate", + &rate_numerator, &rate_denominator)) + goto no_framerate; + + /* out_frame_count is scaled by the frame rate caps when calculating next_ts. 
+ * when the frame rate caps change, we must update base_ts and reset + * out_frame_count */ + if (videorate->to_rate_numerator) { + videorate->base_ts += + gst_util_uint64_scale (videorate->out_frame_count, + videorate->to_rate_denominator * GST_SECOND, + videorate->to_rate_numerator); + } + videorate->out_frame_count = 0; + videorate->to_rate_numerator = rate_numerator; + videorate->to_rate_denominator = rate_denominator; + + if (rate_numerator) videorate->wanted_diff = gst_util_uint64_scale_int (GST_SECOND, rate_denominator, rate_numerator); - otherpad = videorate->sinkpad; - } else { - videorate->from_rate_numerator = rate_numerator; - videorate->from_rate_denominator = rate_denominator; - otherpad = videorate->srcpad; - } + else + videorate->wanted_diff = 0; - /* now try to find something for the peer */ - opeer = gst_pad_get_peer (otherpad); - if (opeer) { - if (gst_pad_accept_caps (opeer, caps)) { - /* the peer accepts the caps as they are */ - gst_pad_set_caps (otherpad, caps); - - ret = TRUE; - } else { - GstCaps *peercaps; - GstCaps *transform = NULL; - - ret = FALSE; - - /* see how we can transform the input caps */ - if (!gst_video_rate_transformcaps (pad, caps, otherpad, &transform)) - goto no_transform; - - /* see what the peer can do */ - peercaps = gst_pad_get_caps (opeer); - - GST_DEBUG_OBJECT (opeer, "icaps %" GST_PTR_FORMAT, peercaps); - GST_DEBUG_OBJECT (videorate, "transform %" GST_PTR_FORMAT, transform); - - /* filter against our possibilities */ - caps = gst_caps_intersect (peercaps, transform); - gst_caps_unref (peercaps); - gst_caps_unref (transform); - - GST_DEBUG_OBJECT (videorate, "intersect %" GST_PTR_FORMAT, caps); - - /* could turn up empty, due to e.g. colorspace etc */ - if (gst_caps_get_size (caps) == 0) { - gst_caps_unref (caps); - goto no_transform; - } - - /* take first possibility */ - gst_caps_truncate (caps); - structure = gst_caps_get_structure (caps, 0); - - /* and fixate */ - gst_structure_fixate_field_nearest_fraction (structure, "framerate", - rate_numerator, rate_denominator); - - gst_structure_get_fraction (structure, "framerate", - &rate_numerator, &rate_denominator); - - if (otherpad == videorate->srcpad) { - videorate->to_rate_numerator = rate_numerator; - videorate->to_rate_denominator = rate_denominator; - } else { - videorate->from_rate_numerator = rate_numerator; - videorate->from_rate_denominator = rate_denominator; - } - - if (gst_structure_has_field (structure, "interlaced")) - gst_structure_fixate_field_boolean (structure, "interlaced", FALSE); - if (gst_structure_has_field (structure, "color-matrix")) - gst_structure_fixate_field_string (structure, "color-matrix", "sdtv"); - if (gst_structure_has_field (structure, "chroma-site")) - gst_structure_fixate_field_string (structure, "chroma-site", "mpeg2"); - if (gst_structure_has_field (structure, "pixel-aspect-ratio")) - gst_structure_fixate_field_nearest_fraction (structure, - "pixel-aspect-ratio", 1, 1); - - gst_pad_set_caps (otherpad, caps); - gst_caps_unref (caps); - ret = TRUE; - } - gst_object_unref (opeer); - } done: /* After a setcaps, our caps may have changed. 
In that case, we can't use * the old buffer, if there was one (it might have different dimensions) */ GST_DEBUG_OBJECT (videorate, "swapping old buffers"); gst_video_rate_swap_prev (videorate, NULL, GST_CLOCK_TIME_NONE); + videorate->last_ts = GST_CLOCK_TIME_NONE; + videorate->average = 0; - gst_object_unref (videorate); return ret; no_framerate: { GST_DEBUG_OBJECT (videorate, "no framerate specified"); - goto done; - } -no_transform: - { - GST_DEBUG_OBJECT (videorate, "no framerate transform possible"); ret = FALSE; goto done; } @@ -459,38 +381,19 @@ gst_video_rate_reset (GstVideoRate * videorate) static void gst_video_rate_init (GstVideoRate * videorate, GstVideoRateClass * klass) { - videorate->sinkpad = - gst_pad_new_from_static_template (&gst_video_rate_sink_template, "sink"); - gst_pad_set_event_function (videorate->sinkpad, - GST_DEBUG_FUNCPTR (gst_video_rate_event)); - gst_pad_set_chain_function (videorate->sinkpad, - GST_DEBUG_FUNCPTR (gst_video_rate_chain)); - gst_pad_set_getcaps_function (videorate->sinkpad, - GST_DEBUG_FUNCPTR (gst_video_rate_getcaps)); - gst_pad_set_setcaps_function (videorate->sinkpad, - GST_DEBUG_FUNCPTR (gst_video_rate_setcaps)); - gst_element_add_pad (GST_ELEMENT (videorate), videorate->sinkpad); - - videorate->srcpad = - gst_pad_new_from_static_template (&gst_video_rate_src_template, "src"); - gst_pad_set_query_function (videorate->srcpad, - GST_DEBUG_FUNCPTR (gst_video_rate_query)); - gst_pad_set_getcaps_function (videorate->srcpad, - GST_DEBUG_FUNCPTR (gst_video_rate_getcaps)); - gst_pad_set_setcaps_function (videorate->srcpad, - GST_DEBUG_FUNCPTR (gst_video_rate_setcaps)); - gst_element_add_pad (GST_ELEMENT (videorate), videorate->srcpad); - gst_video_rate_reset (videorate); videorate->silent = DEFAULT_SILENT; videorate->new_pref = DEFAULT_NEW_PREF; videorate->drop_only = DEFAULT_DROP_ONLY; videorate->average_period = DEFAULT_AVERAGE_PERIOD; + videorate->average_period_set = DEFAULT_AVERAGE_PERIOD; videorate->from_rate_numerator = 0; videorate->from_rate_denominator = 0; videorate->to_rate_numerator = 0; videorate->to_rate_denominator = 0; + + gst_base_transform_set_gap_aware (GST_BASE_TRANSFORM (videorate), TRUE); } /* flush the oldest buffer */ @@ -542,13 +445,12 @@ gst_video_rate_flush_prev (GstVideoRate * videorate, gboolean duplicate) /* adapt for looping, bring back to time in current segment. */ GST_BUFFER_TIMESTAMP (outbuf) = push_ts - videorate->segment.accum; } - gst_buffer_set_caps (outbuf, GST_PAD_CAPS (videorate->srcpad)); GST_LOG_OBJECT (videorate, "old is best, dup, pushing buffer outgoing ts %" GST_TIME_FORMAT, GST_TIME_ARGS (push_ts)); - res = gst_pad_push (videorate->srcpad, outbuf); + res = gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (videorate), outbuf); return res; @@ -567,7 +469,7 @@ gst_video_rate_swap_prev (GstVideoRate * videorate, GstBuffer * buffer, GST_LOG_OBJECT (videorate, "swap_prev: storing buffer %p in prev", buffer); if (videorate->prevbuf) gst_buffer_unref (videorate->prevbuf); - videorate->prevbuf = buffer; + videorate->prevbuf = buffer != NULL ? 
gst_buffer_ref (buffer) : NULL; videorate->prev_ts = time; } @@ -593,12 +495,11 @@ gst_video_rate_notify_duplicate (GstVideoRate * videorate) #define MAGIC_LIMIT 25 static gboolean -gst_video_rate_event (GstPad * pad, GstEvent * event) +gst_video_rate_event (GstBaseTransform * trans, GstEvent * event) { GstVideoRate *videorate; - gboolean ret; - videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad)); + videorate = GST_VIDEO_RATE (trans); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_NEWSEGMENT: @@ -715,12 +616,7 @@ gst_video_rate_event (GstPad * pad, GstEvent * event) break; } - ret = gst_pad_push_event (videorate->srcpad, event); - -done: - gst_object_unref (videorate); - - return ret; + return TRUE; /* ERRORS */ format_error: @@ -728,18 +624,20 @@ format_error: GST_WARNING_OBJECT (videorate, "Got segment but doesn't have GST_FORMAT_TIME value"); gst_event_unref (event); - ret = FALSE; - goto done; + return FALSE; } } static gboolean -gst_video_rate_query (GstPad * pad, GstQuery * query) +gst_video_rate_query (GstBaseTransform * trans, GstPadDirection direction, + GstQuery * query) { - GstVideoRate *videorate; + GstVideoRate *videorate = GST_VIDEO_RATE (trans); gboolean res = FALSE; + GstPad *otherpad; - videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad)); + otherpad = (direction == GST_PAD_SRC) ? + GST_BASE_TRANSFORM_SINK_PAD (trans) : GST_BASE_TRANSFORM_SRC_PAD (trans); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_LATENCY: @@ -749,7 +647,8 @@ gst_video_rate_query (GstPad * pad, GstQuery * query) guint64 latency; GstPad *peer; - if ((peer = gst_pad_get_peer (videorate->sinkpad))) { + if (videorate->average_period == 0 + && (peer = gst_pad_get_peer (otherpad))) { if ((res = gst_pad_query (peer, query))) { gst_query_parse_latency (query, &live, &min, &max); @@ -787,16 +686,15 @@ gst_video_rate_query (GstPad * pad, GstQuery * query) break; } default: - res = gst_pad_query_default (pad, query); + res = parent_class->query (trans, direction, query); break; } - gst_object_unref (videorate); return res; } static GstFlowReturn -gst_video_rate_chain_max_avg (GstVideoRate * videorate, GstBuffer * buf) +gst_video_rate_trans_ip_max_avg (GstVideoRate * videorate, GstBuffer * buf) { GstClockTime ts = GST_BUFFER_TIMESTAMP (buf); @@ -840,26 +738,39 @@ gst_video_rate_chain_max_avg (GstVideoRate * videorate, GstBuffer * buf) push: videorate->out++; - - return gst_pad_push (videorate->srcpad, buf); + return GST_FLOW_OK; drop: - gst_buffer_unref (buf); if (!videorate->silent) gst_video_rate_notify_drop (videorate); + return GST_BASE_TRANSFORM_FLOW_DROPPED; +} + +static GstFlowReturn +gst_video_rate_prepare_output_buffer (GstBaseTransform * trans, + GstBuffer * input, gint size, GstCaps * caps, GstBuffer ** buf) +{ + if (gst_buffer_is_metadata_writable (input)) { + gst_buffer_set_caps (input, caps); + *buf = gst_buffer_ref (input); + } else { + *buf = gst_buffer_create_sub (input, 0, GST_BUFFER_SIZE (input)); + gst_buffer_set_caps (*buf, caps); + } + return GST_FLOW_OK; } static GstFlowReturn -gst_video_rate_chain (GstPad * pad, GstBuffer * buffer) +gst_video_rate_transform_ip (GstBaseTransform * trans, GstBuffer * buffer) { GstVideoRate *videorate; - GstFlowReturn res = GST_FLOW_OK; + GstFlowReturn res = GST_BASE_TRANSFORM_FLOW_DROPPED; GstClockTime intime, in_ts, in_dur; GstClockTime avg_period; gboolean skip = FALSE; - videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad)); + videorate = GST_VIDEO_RATE (trans); /* make sure the denominators are not 0 */ if (videorate->from_rate_denominator == 0 || @@ 
-887,7 +798,7 @@ gst_video_rate_chain (GstPad * pad, GstBuffer * buffer) } if (videorate->average_period > 0) - return gst_video_rate_chain_max_avg (videorate, buffer); + return gst_video_rate_trans_ip_max_avg (videorate, buffer); in_ts = GST_BUFFER_TIMESTAMP (buffer); in_dur = GST_BUFFER_DURATION (buffer); @@ -951,7 +862,6 @@ gst_video_rate_chain (GstPad * pad, GstBuffer * buffer) videorate->drop++; if (!videorate->silent) gst_video_rate_notify_drop (videorate); - gst_buffer_unref (buffer); goto done; } @@ -975,13 +885,13 @@ gst_video_rate_chain (GstPad * pad, GstBuffer * buffer) /* output first one when its the best */ if (diff1 <= diff2) { + GstFlowReturn r; count++; /* on error the _flush function posted a warning already */ - if ((res = - gst_video_rate_flush_prev (videorate, + if ((r = gst_video_rate_flush_prev (videorate, count > 1)) != GST_FLOW_OK) { - gst_buffer_unref (buffer); + res = r; goto done; } } @@ -1029,7 +939,6 @@ done: not_negotiated: { GST_WARNING_OBJECT (videorate, "no framerate negotiated"); - gst_buffer_unref (buffer); res = GST_FLOW_NOT_NEGOTIATED; goto done; } @@ -1038,11 +947,18 @@ invalid_buffer: { GST_WARNING_OBJECT (videorate, "Got buffer with GST_CLOCK_TIME_NONE timestamp, discarding it"); - gst_buffer_unref (buffer); + res = GST_BASE_TRANSFORM_FLOW_DROPPED; goto done; } } +static gboolean +gst_video_rate_start (GstBaseTransform * trans) +{ + gst_video_rate_reset (GST_VIDEO_RATE (trans)); + return TRUE; +} + static void gst_video_rate_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) @@ -1064,7 +980,7 @@ gst_video_rate_set_property (GObject * object, videorate->drop_only = g_value_get_boolean (value); break; case ARG_AVERAGE_PERIOD: - videorate->average_period = g_value_get_uint64 (value); + videorate->average_period_set = g_value_get_uint64 (value); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -1106,7 +1022,7 @@ gst_video_rate_get_property (GObject * object, g_value_set_boolean (value, videorate->drop_only); break; case ARG_AVERAGE_PERIOD: - g_value_set_uint64 (value, videorate->average_period); + g_value_set_uint64 (value, videorate->average_period_set); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -1115,36 +1031,6 @@ gst_video_rate_get_property (GObject * object, GST_OBJECT_UNLOCK (videorate); } -static GstStateChangeReturn -gst_video_rate_change_state (GstElement * element, GstStateChange transition) -{ - GstStateChangeReturn ret; - GstVideoRate *videorate; - - videorate = GST_VIDEO_RATE (element); - - switch (transition) { - case GST_STATE_CHANGE_READY_TO_PAUSED: - videorate->discont = TRUE; - videorate->last_ts = -1; - break; - default: - break; - } - - ret = parent_class->change_state (element, transition); - - switch (transition) { - case GST_STATE_CHANGE_PAUSED_TO_READY: - gst_video_rate_reset (videorate); - break; - default: - break; - } - - return ret; -} - static gboolean plugin_init (GstPlugin * plugin) { diff --git a/gst/videorate/gstvideorate.h b/gst/videorate/gstvideorate.h index 037633dae6..a8e1de1db5 100644 --- a/gst/videorate/gstvideorate.h +++ b/gst/videorate/gstvideorate.h @@ -21,6 +21,7 @@ #define __GST_VIDEO_RATE_H__ #include +#include G_BEGIN_DECLS @@ -45,9 +46,7 @@ typedef struct _GstVideoRateClass GstVideoRateClass; */ struct _GstVideoRate { - GstElement element; - - GstPad *sinkpad, *srcpad; + GstBaseTransform parent; /* video state */ gint from_rate_numerator, from_rate_denominator; @@ -81,7 +80,7 @@ struct _GstVideoRate 
struct _GstVideoRateClass { - GstElementClass parent_class; + GstBaseTransformClass parent_class; }; GType gst_video_rate_get_type (void); From 08ac05a06c30cc8a5608d85130209db5c731d3eb Mon Sep 17 00:00:00 2001 From: Sjoerd Simons Date: Thu, 25 Aug 2011 15:14:58 +0100 Subject: [PATCH 07/21] videorate: fix dynamically changing average period The average_period_set variable can be accessed in different threads, so always lock it when reading. Furthermore when switching to averaging mode we should make sure we don't have cached buffers that aren't used in that mode. And any modeswitch will cause the latency to change, so we should post a NewLatency message --- gst/videorate/gstvideorate.c | 42 +++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/gst/videorate/gstvideorate.c b/gst/videorate/gstvideorate.c index 9af4f8d31e..32f15fe6c7 100644 --- a/gst/videorate/gstvideorate.c +++ b/gst/videorate/gstvideorate.c @@ -645,10 +645,14 @@ gst_video_rate_query (GstBaseTransform * trans, GstPadDirection direction, GstClockTime min, max; gboolean live; guint64 latency; + guint64 avg_period; GstPad *peer; - if (videorate->average_period == 0 - && (peer = gst_pad_get_peer (otherpad))) { + GST_OBJECT_LOCK (videorate); + avg_period = videorate->average_period_set; + GST_OBJECT_UNLOCK (videorate); + + if (avg_period == 0 && (peer = gst_pad_get_peer (otherpad))) { if ((res = gst_pad_query (peer, query))) { gst_query_parse_latency (query, &live, &min, &max); @@ -682,8 +686,10 @@ gst_video_rate_query (GstBaseTransform * trans, GstPadDirection direction, gst_query_set_latency (query, live, min, max); } gst_object_unref (peer); + break; } - break; + /* Simple fallthrough if we don't have a latency or not a peer that we + * can't ask about its latency yet.. 
*/ } default: res = parent_class->query (trans, direction, query); @@ -783,17 +789,25 @@ gst_video_rate_transform_ip (GstBaseTransform * trans, GstBuffer * buffer) /* MT-safe switching between modes */ if (G_UNLIKELY (avg_period != videorate->average_period)) { + gboolean switch_mode = (avg_period == 0 || videorate->average_period == 0); videorate->average_period = avg_period; videorate->last_ts = GST_CLOCK_TIME_NONE; - if (avg_period && !videorate->average) { - /* enabling average mode */ - videorate->average = 0; - } else { - /* enable regular mode */ - gst_video_rate_swap_prev (videorate, NULL, 0); - /* arrange for skip-to-first behaviour */ - videorate->next_ts = GST_CLOCK_TIME_NONE; - skip = TRUE; + + if (switch_mode) { + if (avg_period) { + /* enabling average mode */ + videorate->average = 0; + /* make sure no cached buffers from regular mode are left */ + gst_video_rate_swap_prev (videorate, NULL, 0); + } else { + /* enable regular mode */ + videorate->next_ts = GST_CLOCK_TIME_NONE; + skip = TRUE; + } + + /* max averaging mode has a no latency, normal mode does */ + gst_element_post_message (GST_ELEMENT (videorate), + gst_message_new_latency (GST_OBJECT (videorate))); } } @@ -980,7 +994,9 @@ gst_video_rate_set_property (GObject * object, videorate->drop_only = g_value_get_boolean (value); break; case ARG_AVERAGE_PERIOD: + GST_OBJECT_LOCK (videorate); videorate->average_period_set = g_value_get_uint64 (value); + GST_OBJECT_UNLOCK (videorate); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -1022,7 +1038,9 @@ gst_video_rate_get_property (GObject * object, g_value_set_boolean (value, videorate->drop_only); break; case ARG_AVERAGE_PERIOD: + GST_OBJECT_LOCK (videorate); g_value_set_uint64 (value, videorate->average_period_set); + GST_OBJECT_UNLOCK (videorate); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); From 924f74398192e34e18d312ea9af9ff2a81480a71 Mon Sep 17 00:00:00 2001 From: David Schleef Date: Mon, 16 May 2011 14:46:52 -0700 Subject: [PATCH 08/21] playback: Add define for colorspace element Single point of change if you want to switch from ffmpegcolorspace to colorspace. 
--- gst/playback/Makefile.am | 14 +++++----- gst/playback/gstplaybin.c | 10 +++---- gst/playback/gstplaysink.c | 8 +++--- gst/playback/gstplaysinkvideoconvert.c | 6 ++--- gst/playback/gstsubtitleoverlay.c | 36 +++++++++++++------------- 5 files changed, 38 insertions(+), 36 deletions(-) diff --git a/gst/playback/Makefile.am b/gst/playback/Makefile.am index 2fe90e6e85..9c8273f15c 100644 --- a/gst/playback/Makefile.am +++ b/gst/playback/Makefile.am @@ -8,6 +8,8 @@ built_headers = gstplay-marshal.h plugin_LTLIBRARIES = libgstplaybin.la libgstdecodebin.la libgstdecodebin2.la +csp_cflags = -DCOLORSPACE=\"ffmpegcolorspace\" + libgstplaybin_la_SOURCES = \ gstplayback.c \ gstplaybin.c \ @@ -23,7 +25,7 @@ libgstplaybin_la_SOURCES = \ gststreamsynchronizer.c nodist_libgstplaybin_la_SOURCES = $(built_sources) -libgstplaybin_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) +libgstplaybin_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) $(csp_cflags) libgstplaybin_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) libgstplaybin_la_LIBADD = \ $(top_builddir)/gst-libs/gst/pbutils/libgstpbutils-@GST_MAJORMINOR@.la \ @@ -34,7 +36,7 @@ libgstplaybin_la_LIBTOOLFLAGS = --tag=disable-static libgstdecodebin_la_SOURCES = gstdecodebin.c nodist_libgstdecodebin_la_SOURCES = $(built_sources) -libgstdecodebin_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) +libgstdecodebin_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) $(csp_cflags) libgstdecodebin_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) libgstdecodebin_la_LIBADD = \ $(top_builddir)/gst-libs/gst/pbutils/libgstpbutils-@GST_MAJORMINOR@.la \ @@ -43,7 +45,7 @@ libgstdecodebin_la_LIBTOOLFLAGS = --tag=disable-static libgstdecodebin2_la_SOURCES = gstdecodebin2.c gsturidecodebin.c gstplay-enum.c nodist_libgstdecodebin2_la_SOURCES = $(built_sources) -libgstdecodebin2_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) +libgstdecodebin2_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_CFLAGS) $(csp_cflags) libgstdecodebin2_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS) libgstdecodebin2_la_LIBADD = \ $(top_builddir)/gst-libs/gst/pbutils/libgstpbutils-@GST_MAJORMINOR@.la \ @@ -78,7 +80,7 @@ Android.mk: Makefile.am $(BUILT_SOURCES) -:REL_TOP $(top_srcdir) -:ABS_TOP $(abs_top_srcdir) \ -:SOURCES $(libgstdecodebin_la_SOURCES) \ $(nodist_libgstdecodebin_la_SOURCES) \ - -:CFLAGS $(DEFS) $(DEFAULT_INCLUDES) $(libgstdecodebin_la_CFLAGS) \ + -:CFLAGS $(DEFS) $(DEFAULT_INCLUDES) $(libgstdecodebin_la_CFLAGS) $(csp_cflags) \ -:LDFLAGS $(libgstdecodebin_la_LDFLAGS) \ $(libgstdecodebin_la_LIBADD) \ -ldl \ @@ -90,7 +92,7 @@ Android.mk: Makefile.am $(BUILT_SOURCES) -:REL_TOP $(top_srcdir) -:ABS_TOP $(abs_top_srcdir) \ -:SOURCES $(libgstdecodebin2_la_SOURCES) \ $(nodist_libgstdecodebin2_la_SOURCES) \ - -:CFLAGS $(DEFS) $(DEFAULT_INCLUDES) $(libgstdecodebin2_la_CFLAGS) \ + -:CFLAGS $(DEFS) $(DEFAULT_INCLUDES) $(libgstdecodebin2_la_CFLAGS) $(csp_cflags) \ -:LDFLAGS $(libgstdecodebin2_la_LDFLAGS) \ $(libgstdecodebin2_la_LIBADD) \ -ldl \ @@ -102,7 +104,7 @@ Android.mk: Makefile.am $(BUILT_SOURCES) -:REL_TOP $(top_srcdir) -:ABS_TOP $(abs_top_srcdir) \ -:SOURCES $(libgstplaybin_la_SOURCES) \ $(nodist_libgstplaybin_la_SOURCES) \ - -:CFLAGS $(DEFS) $(DEFAULT_INCLUDES) $(libgstplaybin_la_CFLAGS) \ + -:CFLAGS $(DEFS) $(DEFAULT_INCLUDES) $(libgstplaybin_la_CFLAGS) $(csp_cflags) \ -:LDFLAGS $(libgstplaybin_la_LDFLAGS) \ $(libgstplaybin_la_LIBADD) \ -ldl \ diff --git a/gst/playback/gstplaybin.c b/gst/playback/gstplaybin.c index 57b5dbe1f8..847246f0f8 100644 --- a/gst/playback/gstplaybin.c +++ b/gst/playback/gstplaybin.c @@ 
-859,7 +859,7 @@ gen_video_element (GstPlayBin * play_bin) element = gst_bin_new ("vbin"); gst_bin_add (GST_BIN_CAST (element), sink); - conv = gst_element_factory_make ("ffmpegcolorspace", "vconv"); + conv = gst_element_factory_make (COLORSPACE, "vconv"); if (conv == NULL) goto no_colorspace; gst_bin_add (GST_BIN_CAST (element), conv); @@ -905,10 +905,10 @@ no_sinks: } no_colorspace: { - post_missing_element_message (play_bin, "ffmpegcolorspace"); + post_missing_element_message (play_bin, COLORSPACE); GST_ELEMENT_ERROR (play_bin, CORE, MISSING_PLUGIN, (_("Missing element '%s' - check your GStreamer installation."), - "ffmpegcolorspace"), (NULL)); + COLORSPACE), (NULL)); gst_object_unref (element); return NULL; } @@ -972,7 +972,7 @@ add_text_element (GstPlayBin * play_bin, GstElement * vbin) play_bin->textoverlay_element = GST_ELEMENT_CAST (gst_object_ref (overlay)); /* we know this will succeed, as the video bin already created one before */ - csp = gst_element_factory_make ("ffmpegcolorspace", "subtitlecsp"); + csp = gst_element_factory_make (COLORSPACE, "subtitlecsp"); /* Add our elements */ gst_bin_add_many (GST_BIN_CAST (element), csp, overlay, vbin, NULL); @@ -1046,7 +1046,7 @@ add_spu_element (GstPlayBin * play_bin, GstElement * vbin) play_bin->spu_element = GST_ELEMENT_CAST (gst_object_ref (overlay)); /* we know this will succeed, as the video bin already created one before */ - csp = gst_element_factory_make ("ffmpegcolorspace", "spucsp"); + csp = gst_element_factory_make (COLORSPACE, "spucsp"); /* Add our elements */ gst_bin_add_many (GST_BIN_CAST (element), csp, overlay, vbin, NULL); diff --git a/gst/playback/gstplaysink.c b/gst/playback/gstplaysink.c index 35e31d6428..0e70ce75a9 100644 --- a/gst/playback/gstplaysink.c +++ b/gst/playback/gstplaysink.c @@ -1133,13 +1133,13 @@ gen_video_deinterlace_chain (GstPlaySink * playsink) bin = GST_BIN_CAST (chain->chain.bin); gst_object_ref_sink (bin); - GST_DEBUG_OBJECT (playsink, "creating ffmpegcolorspace"); - chain->conv = gst_element_factory_make ("ffmpegcolorspace", "vdconv"); + GST_DEBUG_OBJECT (playsink, "creating " COLORSPACE); + chain->conv = gst_element_factory_make (COLORSPACE, "vdconv"); if (chain->conv == NULL) { - post_missing_element_message (playsink, "ffmpegcolorspace"); + post_missing_element_message (playsink, COLORSPACE); GST_ELEMENT_WARNING (playsink, CORE, MISSING_PLUGIN, (_("Missing element '%s' - check your GStreamer installation."), - "ffmpegcolorspace"), ("video rendering might fail")); + COLORSPACE), ("video rendering might fail")); } else { gst_bin_add (bin, chain->conv); head = chain->conv; diff --git a/gst/playback/gstplaysinkvideoconvert.c b/gst/playback/gstplaysinkvideoconvert.c index 19986fee3c..cf765ce969 100644 --- a/gst/playback/gstplaysinkvideoconvert.c +++ b/gst/playback/gstplaysinkvideoconvert.c @@ -132,12 +132,12 @@ pad_blocked_cb (GstPad * pad, gboolean blocked, GstPlaySinkVideoConvert * self) gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (self->sinkpad), NULL); gst_ghost_pad_set_target (GST_GHOST_PAD_CAST (self->srcpad), NULL); - self->conv = gst_element_factory_make ("ffmpegcolorspace", "conv"); + self->conv = gst_element_factory_make (COLORSPACE, "conv"); if (self->conv == NULL) { - post_missing_element_message (self, "ffmpegcolorspace"); + post_missing_element_message (self, COLORSPACE); GST_ELEMENT_WARNING (self, CORE, MISSING_PLUGIN, (_("Missing element '%s' - check your GStreamer installation."), - "ffmpegcolorspace"), ("video rendering might fail")); + COLORSPACE), ("video rendering might 
fail")); } else { gst_bin_add (bin, self->conv); gst_element_sync_state_with_parent (self->conv); diff --git a/gst/playback/gstsubtitleoverlay.c b/gst/playback/gstsubtitleoverlay.c index 0ec7ee5fdd..9a090e81ed 100644 --- a/gst/playback/gstsubtitleoverlay.c +++ b/gst/playback/gstsubtitleoverlay.c @@ -919,7 +919,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) gst_object_unref (src); if (G_UNLIKELY (!_create_element (self, &self->post_colorspace, - "ffmpegcolorspace", NULL, "post-colorspace", FALSE))) { + COLORSPACE, NULL, "post-colorspace", FALSE))) { continue; } @@ -931,13 +931,13 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) sink = gst_element_get_static_pad (self->post_colorspace, "sink"); if (G_UNLIKELY (!sink)) { - GST_WARNING_OBJECT (self, "Can't get sink pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE); gst_object_unref (src); continue; } if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) { - GST_WARNING_OBJECT (self, "Can't link overlay with ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't link overlay with " COLORSPACE); gst_object_unref (src); gst_object_unref (sink); continue; @@ -946,7 +946,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) gst_object_unref (sink); if (G_UNLIKELY (!_create_element (self, &self->pre_colorspace, - "ffmpegcolorspace", NULL, "pre-colorspace", FALSE))) { + COLORSPACE, NULL, "pre-colorspace", FALSE))) { continue; } @@ -958,13 +958,13 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) src = gst_element_get_static_pad (self->pre_colorspace, "src"); if (G_UNLIKELY (!src)) { - GST_WARNING_OBJECT (self, "Can't get srcpad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get srcpad from " COLORSPACE); gst_object_unref (sink); continue; } if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) { - GST_WARNING_OBJECT (self, "Can't link ffmpegcolorspace to textoverlay"); + GST_WARNING_OBJECT (self, "Can't link " COLORSPACE " to textoverlay"); gst_object_unref (src); gst_object_unref (sink); continue; @@ -975,7 +975,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) /* Set src ghostpad target */ src = gst_element_get_static_pad (self->post_colorspace, "src"); if (G_UNLIKELY (!src)) { - GST_WARNING_OBJECT (self, "Can't get src pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get src pad from " COLORSPACE); continue; } @@ -994,7 +994,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) sink = gst_element_get_static_pad (self->pre_colorspace, "sink"); if (G_UNLIKELY (!sink)) { - GST_WARNING_OBJECT (self, "Can't get sink pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE); continue; } @@ -1038,7 +1038,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) /* Set the sink ghostpad targets */ sink = gst_element_get_static_pad (self->pre_colorspace, "sink"); if (G_UNLIKELY (!sink)) { - GST_WARNING_OBJECT (self, "Can't get sink pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE); continue; } @@ -1088,7 +1088,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) /* First link everything internally */ if (G_UNLIKELY (!_create_element (self, &self->post_colorspace, - "ffmpegcolorspace", NULL, "post-colorspace", FALSE))) { + COLORSPACE, NULL, "post-colorspace", FALSE))) { continue; } @@ -1100,13 +1100,13 @@ 
_pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) sink = gst_element_get_static_pad (self->post_colorspace, "sink"); if (G_UNLIKELY (!sink)) { - GST_WARNING_OBJECT (self, "Can't get sink pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE); gst_object_unref (src); continue; } if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) { - GST_WARNING_OBJECT (self, "Can't link renderer with ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't link renderer with " COLORSPACE); gst_object_unref (src); gst_object_unref (sink); continue; @@ -1115,7 +1115,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) gst_object_unref (sink); if (G_UNLIKELY (!_create_element (self, &self->pre_colorspace, - "ffmpegcolorspace", NULL, "pre-colorspace", FALSE))) { + COLORSPACE, NULL, "pre-colorspace", FALSE))) { continue; } @@ -1127,13 +1127,13 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) src = gst_element_get_static_pad (self->pre_colorspace, "src"); if (G_UNLIKELY (!src)) { - GST_WARNING_OBJECT (self, "Can't get srcpad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get srcpad from " COLORSPACE); gst_object_unref (sink); continue; } if (G_UNLIKELY (gst_pad_link (src, sink) != GST_PAD_LINK_OK)) { - GST_WARNING_OBJECT (self, "Can't link ffmpegcolorspace to renderer"); + GST_WARNING_OBJECT (self, "Can't link " COLORSPACE " to renderer"); gst_object_unref (src); gst_object_unref (sink); continue; @@ -1144,7 +1144,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) /* Set src ghostpad target */ src = gst_element_get_static_pad (self->post_colorspace, "src"); if (G_UNLIKELY (!src)) { - GST_WARNING_OBJECT (self, "Can't get src pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get src pad from " COLORSPACE); continue; } @@ -1163,7 +1163,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) sink = gst_element_get_static_pad (self->pre_colorspace, "sink"); if (G_UNLIKELY (!sink)) { - GST_WARNING_OBJECT (self, "Can't get sink pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE); continue; } @@ -1205,7 +1205,7 @@ _pad_blocked_cb (GstPad * pad, gboolean blocked, gpointer user_data) /* Set the sink ghostpad targets */ sink = gst_element_get_static_pad (self->pre_colorspace, "sink"); if (G_UNLIKELY (!sink)) { - GST_WARNING_OBJECT (self, "Can't get sink pad from ffmpegcolorspace"); + GST_WARNING_OBJECT (self, "Can't get sink pad from " COLORSPACE); continue; } From 4e38577b30bda1ccf43c15a4211e957eee078897 Mon Sep 17 00:00:00 2001 From: David Schleef Date: Thu, 17 Mar 2011 19:13:58 -0700 Subject: [PATCH 09/21] videoscale: Add modified Lanczos scaling method Adds a Lanczos-derived scaling method, which is rather slow, but very high quality. Adds a few properties that can be used to tune various scaling properties: sharpness, sharpen, envelope, dither. Not currently Orcified, but was designed with that in mind. 
--- gst/videoscale/Makefile.am | 3 +- gst/videoscale/gstvideoscale.c | 115 ++- gst/videoscale/gstvideoscale.h | 10 +- gst/videoscale/vs_image.h | 7 + gst/videoscale/vs_lanczos.c | 1558 ++++++++++++++++++++++++++++++++ 5 files changed, 1689 insertions(+), 4 deletions(-) create mode 100644 gst/videoscale/vs_lanczos.c diff --git a/gst/videoscale/Makefile.am b/gst/videoscale/Makefile.am index 79d2cb4536..1d68f80d45 100644 --- a/gst/videoscale/Makefile.am +++ b/gst/videoscale/Makefile.am @@ -8,7 +8,8 @@ libgstvideoscale_la_SOURCES = \ vs_image.c \ vs_scanline.c \ vs_4tap.c \ - vs_fill_borders.c + vs_fill_borders.c \ + vs_lanczos.c nodist_libgstvideoscale_la_SOURCES = $(ORC_NODIST_SOURCES) diff --git a/gst/videoscale/gstvideoscale.c b/gst/videoscale/gstvideoscale.c index b941ed001c..f452e622a9 100644 --- a/gst/videoscale/gstvideoscale.c +++ b/gst/videoscale/gstvideoscale.c @@ -89,13 +89,22 @@ GST_DEBUG_CATEGORY (video_scale_debug); #define DEFAULT_PROP_METHOD GST_VIDEO_SCALE_BILINEAR #define DEFAULT_PROP_ADD_BORDERS FALSE +#define DEFAULT_PROP_SHARPNESS 1.0 +#define DEFAULT_PROP_SHARPEN 0.0 +#define DEFAULT_PROP_DITHER FALSE +#define DEFAULT_PROP_SUBMETHOD 1 +#define DEFAULT_PROP_ENVELOPE 2.0 enum { PROP_0, PROP_METHOD, - PROP_ADD_BORDERS - /* FILL ME */ + PROP_ADD_BORDERS, + PROP_SHARPNESS, + PROP_SHARPEN, + PROP_DITHER, + PROP_SUBMETHOD, + PROP_ENVELOPE }; #undef GST_VIDEO_SIZE_RANGE @@ -144,6 +153,7 @@ gst_video_scale_method_get_type (void) {GST_VIDEO_SCALE_NEAREST, "Nearest Neighbour", "nearest-neighbour"}, {GST_VIDEO_SCALE_BILINEAR, "Bilinear", "bilinear"}, {GST_VIDEO_SCALE_4TAP, "4-tap", "4-tap"}, + {GST_VIDEO_SCALE_LANCZOS, "Lanczos", "lanczos"}, {0, NULL, NULL}, }; @@ -251,6 +261,36 @@ gst_video_scale_class_init (GstVideoScaleClass * klass) DEFAULT_PROP_ADD_BORDERS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + g_object_class_install_property (gobject_class, PROP_SHARPNESS, + g_param_spec_double ("sharpness", "Sharpness", + "Sharpness of filter", 0.0, 2.0, DEFAULT_PROP_SHARPNESS, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_SHARPEN, + g_param_spec_double ("sharpen", "Sharpen", + "Sharpening", 0.0, 1.0, DEFAULT_PROP_SHARPEN, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + + g_object_class_install_property (gobject_class, PROP_DITHER, + g_param_spec_boolean ("dither", "Dither", + "Add dither (only used for Lanczos method)", + DEFAULT_PROP_DITHER, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + +#if 0 + /* I am hiding submethod for now, since it's poorly named, poorly + * documented, and will probably just get people into trouble. 
*/ + g_object_class_install_property (gobject_class, PROP_SUBMETHOD, + g_param_spec_int ("submethod", "submethod", + "submethod", 0, 3, DEFAULT_PROP_SUBMETHOD, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); +#endif + + g_object_class_install_property (gobject_class, PROP_ENVELOPE, + g_param_spec_double ("envelope", "Envelope", + "Size of filter envelope", 0.0, 5.0, DEFAULT_PROP_ENVELOPE, + G_PARAM_CONSTRUCT | G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); + trans_class->transform_caps = GST_DEBUG_FUNCPTR (gst_video_scale_transform_caps); trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_video_scale_set_caps); @@ -267,6 +307,11 @@ gst_video_scale_init (GstVideoScale * videoscale, GstVideoScaleClass * klass) videoscale->tmp_buf = NULL; videoscale->method = DEFAULT_PROP_METHOD; videoscale->add_borders = DEFAULT_PROP_ADD_BORDERS; + videoscale->submethod = DEFAULT_PROP_SUBMETHOD; + videoscale->sharpness = DEFAULT_PROP_SHARPNESS; + videoscale->sharpen = DEFAULT_PROP_SHARPEN; + videoscale->dither = DEFAULT_PROP_DITHER; + videoscale->envelope = DEFAULT_PROP_ENVELOPE; } static void @@ -296,6 +341,31 @@ gst_video_scale_set_property (GObject * object, guint prop_id, GST_OBJECT_UNLOCK (vscale); gst_base_transform_reconfigure (GST_BASE_TRANSFORM_CAST (vscale)); break; + case PROP_SHARPNESS: + GST_OBJECT_LOCK (vscale); + vscale->sharpness = g_value_get_double (value); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_SHARPEN: + GST_OBJECT_LOCK (vscale); + vscale->sharpen = g_value_get_double (value); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_DITHER: + GST_OBJECT_LOCK (vscale); + vscale->dither = g_value_get_boolean (value); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_SUBMETHOD: + GST_OBJECT_LOCK (vscale); + vscale->submethod = g_value_get_int (value); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_ENVELOPE: + GST_OBJECT_LOCK (vscale); + vscale->envelope = g_value_get_double (value); + GST_OBJECT_UNLOCK (vscale); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -319,6 +389,31 @@ gst_video_scale_get_property (GObject * object, guint prop_id, GValue * value, g_value_set_boolean (value, vscale->add_borders); GST_OBJECT_UNLOCK (vscale); break; + case PROP_SHARPNESS: + GST_OBJECT_LOCK (vscale); + g_value_set_double (value, vscale->sharpness); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_SHARPEN: + GST_OBJECT_LOCK (vscale); + g_value_set_double (value, vscale->sharpen); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_DITHER: + GST_OBJECT_LOCK (vscale); + g_value_set_boolean (value, vscale->dither); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_SUBMETHOD: + GST_OBJECT_LOCK (vscale); + g_value_set_int (value, vscale->submethod); + GST_OBJECT_UNLOCK (vscale); + break; + case PROP_ENVELOPE: + GST_OBJECT_LOCK (vscale); + g_value_set_double (value, vscale->envelope); + GST_OBJECT_UNLOCK (vscale); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -1078,6 +1173,11 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in, case GST_VIDEO_SCALE_4TAP: vs_image_scale_4tap_RGBA (&dest, &src, videoscale->tmp_buf); break; + case GST_VIDEO_SCALE_LANCZOS: + vs_image_scale_lanczos_AYUV (&dest, &src, videoscale->tmp_buf, + videoscale->sharpness, videoscale->dither, videoscale->submethod, + videoscale->envelope, videoscale->sharpen); + break; default: goto unknown_mode; } @@ -1217,6 +1317,17 @@ gst_video_scale_transform (GstBaseTransform * trans, GstBuffer * in, vs_image_scale_4tap_Y 
(&dest_u, &src_u, videoscale->tmp_buf); vs_image_scale_4tap_Y (&dest_v, &src_v, videoscale->tmp_buf); break; + case GST_VIDEO_SCALE_LANCZOS: + vs_image_scale_lanczos_Y (&dest, &src, videoscale->tmp_buf, + videoscale->sharpness, videoscale->dither, videoscale->submethod, + videoscale->envelope, videoscale->sharpen); + vs_image_scale_lanczos_Y (&dest_u, &src_u, videoscale->tmp_buf, + videoscale->sharpness, videoscale->dither, videoscale->submethod, + videoscale->envelope, videoscale->sharpen); + vs_image_scale_lanczos_Y (&dest_v, &src_v, videoscale->tmp_buf, + videoscale->sharpness, videoscale->dither, videoscale->submethod, + videoscale->envelope, videoscale->sharpen); + break; default: goto unknown_mode; } diff --git a/gst/videoscale/gstvideoscale.h b/gst/videoscale/gstvideoscale.h index a09d76942f..655268d2f0 100644 --- a/gst/videoscale/gstvideoscale.h +++ b/gst/videoscale/gstvideoscale.h @@ -47,13 +47,15 @@ GST_DEBUG_CATEGORY_EXTERN (video_scale_debug); * @GST_VIDEO_SCALE_NEAREST: use nearest neighbour scaling (fast and ugly) * @GST_VIDEO_SCALE_BILINEAR: use bilinear scaling (slower but prettier). * @GST_VIDEO_SCALE_4TAP: use a 4-tap filter for scaling (slow). + * @GST_VIDEO_SCALE_LANCZOS: use a multitap Lanczos filter for scaling (slow). * * The videoscale method to use. */ typedef enum { GST_VIDEO_SCALE_NEAREST, GST_VIDEO_SCALE_BILINEAR, - GST_VIDEO_SCALE_4TAP + GST_VIDEO_SCALE_4TAP, + GST_VIDEO_SCALE_LANCZOS } GstVideoScaleMethod; typedef struct _GstVideoScale GstVideoScale; @@ -67,8 +69,14 @@ typedef struct _GstVideoScaleClass GstVideoScaleClass; struct _GstVideoScale { GstVideoFilter element; + /* properties */ GstVideoScaleMethod method; gboolean add_borders; + double sharpness; + double sharpen; + gboolean dither; + int submethod; + double envelope; /* negotiated stuff */ GstVideoFormat format; diff --git a/gst/videoscale/vs_image.h b/gst/videoscale/vs_image.h index 3a23dd44f5..2312acc7d5 100644 --- a/gst/videoscale/vs_image.h +++ b/gst/videoscale/vs_image.h @@ -28,6 +28,7 @@ #ifndef __VS_IMAGE_H__ #define __VS_IMAGE_H__ +#include #include <_stdint.h> typedef struct _VSImage VSImage; @@ -48,6 +49,9 @@ void vs_image_scale_nearest_RGBA (const VSImage *dest, const VSImage *src, uint8_t *tmpbuf); void vs_image_scale_linear_RGBA (const VSImage *dest, const VSImage *src, uint8_t *tmpbuf); +void vs_image_scale_lanczos_AYUV (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, int submethod, + double a, double sharpen); void vs_image_scale_nearest_RGB (const VSImage *dest, const VSImage *src, uint8_t *tmpbuf); @@ -68,6 +72,9 @@ void vs_image_scale_nearest_Y (const VSImage *dest, const VSImage *src, uint8_t *tmpbuf); void vs_image_scale_linear_Y (const VSImage *dest, const VSImage *src, uint8_t *tmpbuf); +void vs_image_scale_lanczos_Y (const VSImage *dest, const VSImage *src, + uint8_t *tmpbuf, double sharpness, gboolean dither, int submethod, + double a, double sharpen); void vs_image_scale_nearest_RGB565 (const VSImage *dest, const VSImage *src, uint8_t *tmpbuf); diff --git a/gst/videoscale/vs_lanczos.c b/gst/videoscale/vs_lanczos.c new file mode 100644 index 0000000000..1c87ba3dd3 --- /dev/null +++ b/gst/videoscale/vs_lanczos.c @@ -0,0 +1,1558 @@ +/* + * Image Scaling Functions + * Copyright (c) 2011 David A. Schleef + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/* + * + * Modified Lanczos scaling algorithm + * ================================== + * + * This algorithm was developed by the author. The primary goals of + * the algorithm are high-quality video downscaling for medium scale + * factors (in the range of 1.3x to 5.0x) using methods that can be + * converted to SIMD code. Concerns with existing algorithms were + * mainly related to either over-soft filtering (Lanczos) or aliasing + * (bilinear or any other method with inadequate sampling). + * + * The problems with bilinear scaling are apparent when downscaling + * more than a factor of 2. For example, when downscaling by a factor + * of 3, only two-thirds of the input pixels contribute to the output + * pixels. This is only considering scaling in one direction; after + * scaling both vertically and horizontally in a 2-D image, fewer than + * half of the input pixels contribute to the output, so it should not + * be surprising that the output is suboptimal. + * + * The problems with Lanczos scaling are more subtle. From a theoretical + * perspective, Lanczos is an optimal algorithm for resampling equally- + * spaced values. This theoretical perspective is based on analysis + * done in frequency space, thus, Lanczos works very well for audio + * resampling, since the ear hears primarily in frequency space. The + * human visual system is sensitive primarily in the spatial domain, + * therefore any resampling algorithm should take this into account. + * This difference is immediately clear in the size of resampling + * window or envelope that is chosen for resampling: for audio, an + * envelope of a=64 is typical, in image scaling, the envelope is + * usually a=2 or a=3. + * + * One result of the HVS being sensitive in the spatial domain (and + * also probably due to oversampling capabilities of the retina and + * visual cortex) is that it is less sensitive to the exact magnitude + * of high-frequency visual signals than to the appropriate amount of + * energy in the nearby frequency band. A Lanczos kernel with a=2 + * or a=3 strongly decreases the amount of energy in the high frequency + * bands. The energy in this area can be increased by increasing a, + * which brings in energy from different areas of the image (bad for + * reasons mentioned above), or by oversampling the input data. We + * have chosen two methods for doing the latter. 
Firstly, there is + * a sharpness parameter, which increases the cutoff frequency of the + * filter, aliasing higher frequency noise into the passband. And + * secondly, there is the sharpen parameter, which increases the + * contribution of high-frequency (but in-band) components. + * + * An alternate explanation of the usefulness of a sharpening filter + * is that many natural images have a roughly 1/f spectrum. In order + * for a downsampled image to look more "natural" when high frequencies + * are removed, the frequencies in the pass band near the cutoff + * frequency are amplified, causing the spectrum to be more roughly + * 1/f. I said "roughly", not "literally". + * + * This alternate explanation is useful for understanding the author's + * secondary motivation for developing this algorithm, namely, as a + * method of video compression. Several recent techniques (such as + * HTTP Live Streaming and SVC) use image scaling as a method to get + * increased compression out of nominally non-scalable codecs such as + * H.264. For optimal quality, it is thusly important to consider + * the scaler and encoder as a combined unit. Tuning of the sharpness + * and sharpen parameters was performed using the Toro encoder tuner, + * where scaled and encoded video was compared to unscaled and encoded + * video. This tuning suggested values that were very close to the + * values chosen by manual inspection of scaled images and video. + * + * The optimal values of sharpen and sharpness were slightly different + * depending whether the comparison was still images or video. Video + * comparisons were more sensitive to aliasing, since the aliasing + * artifacts tended to move or "crawl" around the video. The default + * values are for video; image scaling may prefer higher values. + * + * A number of related techniques were rejected for various reasons. + * An early technique of selecting the sharpness factor locally based + * on edge detection (in order to use a higher sharpness values without + * the corresponding aliasing on edges) worked very well for still + * images, but caused too much "crawling" on textures in video. Also, + * this method is slow, as it does not parallelize well. + * + * Non-separable techniques were rejected because the fastest would + * have been at least 4x slower. + * + * It is infrequently appreciated that image scaling should ideally be + * done in linear light space. Converting to linear light space has + * a similar effect to a sharpening filter. This approach was not + * taken because the added benefit is minor compared to the additional + * computational cost. Morever, the benefit is decreased by increasing + * the strength of the sharpening filter. 
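Restating the tap-generation code further down in this file (scale1d_calculate_taps), each weight for a destination sample at source position x is

    taps[l] = sinc ((x - xl) * fx) * envelope ((x - xl) * ex)
              - sharpen * envelope ((x - xl) * ex);

with fx = sharpness / scale when downscaling (plain sharpness when upscaling), ex = fx / a, and the taps of each destination sample subsequently normalized so they sum to 1. Increasing sharpness raises the cutoff of the sinc; increasing sharpen subtracts a scaled copy of the envelope, which after normalization emphasizes the in-band high frequencies relative to the low ones.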
+ * + */ +#include + +#include "vs_scanline.h" +#include "vs_image.h" + +#include "gstvideoscaleorc.h" +#include +#include + +#define NEED_CLAMP(x,a,b) ((x) < (a) || (x) > (b)) + +#define ROUND_UP_2(x) (((x)+1)&~1) +#define ROUND_UP_4(x) (((x)+3)&~3) +#define ROUND_UP_8(x) (((x)+7)&~7) + +#define SRC_LINE(i) (scale->src->pixels + scale->src->stride * (i)) + +#define TMP_LINE_S16(i) ((gint16 *)scale->tmpdata + (i)*(scale->dest->width)) +#define TMP_LINE_S32(i) ((gint32 *)scale->tmpdata + (i)*(scale->dest->width)) +#define TMP_LINE_FLOAT(i) ((float *)scale->tmpdata + (i)*(scale->dest->width)) +#define TMP_LINE_DOUBLE(i) ((double *)scale->tmpdata + (i)*(scale->dest->width)) +#define TMP_LINE_S16_AYUV(i) ((gint16 *)scale->tmpdata + (i)*4*(scale->dest->width)) +#define TMP_LINE_S32_AYUV(i) ((gint32 *)scale->tmpdata + (i)*4*(scale->dest->width)) +#define TMP_LINE_FLOAT_AYUV(i) ((float *)scale->tmpdata + (i)*4*(scale->dest->width)) +#define TMP_LINE_DOUBLE_AYUV(i) ((double *)scale->tmpdata + (i)*4*(scale->dest->width)) + +#define PTR_OFFSET(a,b) ((void *)((char *)(a) + (b))) + +typedef void (*HorizResampleFunc) (void *dest, const gint32 * offsets, + const void *taps, const void *src, int n_taps, int shift, int n); + +typedef struct _Scale1D Scale1D; +struct _Scale1D +{ + int n; + double offset; + double scale; + + double fx; + double ex; + int dx; + + int n_taps; + gint32 *offsets; + void *taps; +}; + +typedef struct _Scale Scale; +struct _Scale +{ + const VSImage *dest; + const VSImage *src; + + double sharpness; + gboolean dither; + + void *tmpdata; + + HorizResampleFunc horiz_resample_func; + + Scale1D x_scale1d; + Scale1D y_scale1d; +}; + +static void +vs_image_scale_lanczos_Y_int16 (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen); +static void vs_image_scale_lanczos_Y_int32 (const VSImage * dest, + const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither, + double a, double sharpen); +static void vs_image_scale_lanczos_Y_float (const VSImage * dest, + const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither, + double a, double sharpen); +static void vs_image_scale_lanczos_Y_double (const VSImage * dest, + const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither, + double a, double sharpen); +static void +vs_image_scale_lanczos_AYUV_int16 (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen); +static void vs_image_scale_lanczos_AYUV_int32 (const VSImage * dest, + const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither, + double a, double sharpen); +static void vs_image_scale_lanczos_AYUV_float (const VSImage * dest, + const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither, + double a, double sharpen); +static void vs_image_scale_lanczos_AYUV_double (const VSImage * dest, + const VSImage * src, uint8_t * tmpbuf, double sharpness, gboolean dither, + double a, double sharpen); + +static double +sinc (double x) +{ + if (x == 0) + return 1; + return sin (G_PI * x) / (G_PI * x); +} + +static double +envelope (double x) +{ + if (x <= -1 || x >= 1) + return 0; + return sinc (x); +} + +static int +scale1d_get_n_taps (int src_size, int dest_size, double a, double sharpness) +{ + double scale; + double fx; + int dx; + + scale = src_size / (double) dest_size; + if (scale > 1.0) { + fx = (1.0 / scale) * sharpness; + } else { + fx = (1.0) * sharpness; + } + dx = ceil (a / fx); 
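/* Worked example (illustrative values, not part of the patch): downscaling
 * 1920 -> 640 gives scale = 3.0; with sharpness = 1.0 the cutoff is
 * fx = 1/3, so for a = 2.0 this yields dx = ceil (2.0 * 3.0) = 6 and
 * therefore 12 taps per destination sample, before the integer code
 * paths round the horizontal tap count up to a multiple of 4. */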
+ + return 2 * dx; +} + +static void +scale1d_cleanup (Scale1D * scale) +{ + g_free (scale->taps); + g_free (scale->offsets); +} + +/* + * Calculates a set of taps for each destination element in double + * format. Each set of taps sums to 1.0. + * + */ +static void +scale1d_calculate_taps (Scale1D * scale, int src_size, int dest_size, + int n_taps, double a, double sharpness, double sharpen) +{ + int j; + double *tap_array; + gint32 *offsets; + double scale_offset; + double scale_increment; + int dx; + double fx; + double ex; + + scale->scale = src_size / (double) dest_size; + scale->offset = scale->scale / 2 - 0.5; + + if (scale->scale > 1.0) { + scale->fx = (1.0 / scale->scale) * sharpness; + } else { + scale->fx = (1.0) * sharpness; + } + scale->ex = scale->fx / a; + scale->dx = ceil (a / scale->fx); + + g_assert (n_taps >= 2 * scale->dx); + scale->n_taps = n_taps; + + scale->taps = g_malloc (sizeof (double) * scale->n_taps * dest_size); + scale->offsets = g_malloc (sizeof (gint32) * dest_size); + tap_array = scale->taps; + offsets = scale->offsets; + + scale_offset = scale->offset; + scale_increment = scale->scale; + dx = scale->dx; + fx = scale->fx; + ex = scale->ex; + + for (j = 0; j < dest_size; j++) { + double x; + int xi; + int l; + double weight; + double *taps; + + x = scale_offset + scale_increment * j; + x = CLAMP (x, 0, src_size); + xi = ceil (x) - dx; + + offsets[j] = xi; + weight = 0; + taps = tap_array + j * n_taps; + + for (l = 0; l < n_taps; l++) { + int xl = xi + l; + taps[l] = sinc ((x - xl) * fx) * envelope ((x - xl) * ex); + taps[l] -= sharpen * envelope ((x - xl) * ex); + weight += taps[l]; + } + g_assert (envelope ((x - (xi - 1)) * ex) == 0); + g_assert (envelope ((x - (xi + n_taps)) * ex) == 0); + for (l = 0; l < n_taps; l++) { + taps[l] /= weight; + } + + if (xi < 0) { + int shift = -xi; + + for (l = 0; l < shift; l++) { + taps[shift] += taps[l]; + } + for (l = 0; l < n_taps - shift; l++) { + taps[l] = taps[shift + l]; + } + for (; l < n_taps; l++) { + taps[l] = 0; + } + offsets[j] += shift; + } + + if (xi > src_size - n_taps) { + int shift = xi - (src_size - n_taps); + + for (l = 0; l < shift; l++) { + taps[n_taps - shift - 1] += taps[n_taps - shift + l]; + } + for (l = 0; l < n_taps - shift; l++) { + taps[n_taps - 1 - l] = taps[n_taps - 1 - shift - l]; + } + for (l = 0; l < shift; l++) { + taps[l] = 0; + } + offsets[j] -= shift; + } + } +} + +/* + * Calculates a set of taps for each destination element in float + * format. Each set of taps sums to 1.0. + */ +static void +scale1d_calculate_taps_float (Scale1D * scale, int src_size, int dest_size, + int n_taps, double a, double sharpness, double sharpen) +{ + double *taps_d; + float *taps_f; + int j; + + scale1d_calculate_taps (scale, src_size, dest_size, n_taps, a, sharpness, + sharpen); + + taps_d = scale->taps; + taps_f = g_malloc (sizeof (float) * scale->n_taps * dest_size); + + for (j = 0; j < dest_size * n_taps; j++) { + taps_f[j] = taps_d[j]; + } + + g_free (taps_d); + scale->taps = taps_f; +} + +/* + * Calculates a set of taps for each destination element in gint32 + * format. 
Each set of taps sums to (very nearly) (1<taps; + taps_i = g_malloc (sizeof (gint32) * scale->n_taps * dest_size); + + multiplier = (1 << shift); + + for (j = 0; j < dest_size; j++) { + for (i = 0; i < n_taps; i++) { + taps_i[j * n_taps + i] = + floor (0.5 + taps_d[j * n_taps + i] * multiplier); + } + } + + g_free (taps_d); + scale->taps = taps_i; +} + +/* + * Calculates a set of taps for each destination element in gint16 + * format. Each set of taps sums to (1<taps; + taps_i = g_malloc (sizeof (gint16) * scale->n_taps * dest_size); + + multiplier = (1 << shift); + + /* Various methods for converting floating point taps to integer. + * The dB values are the SSIM value between scaling an image via + * the floating point pathway vs. the integer pathway using the + * given code to generate the taps. Only one image was tested, + * scaling from 1920x1080 to 640x360. Several variations of the + * methods were also tested, with nothing appearing useful. */ +#if 0 + /* Standard round to integer. This causes bad DC errors. */ + /* 44.588 dB */ + for (j = 0; j < dest_size; j++) { + for (i = 0; i < n_taps; i++) { + taps_i[j * n_taps + i] = + floor (0.5 + taps_d[j * n_taps + i] * multiplier); + } + } +#endif +#if 0 + /* Dithering via error propogation. Works pretty well, but + * really we want to propogate errors across rows, which would + * mean having several sets of tap arrays. Possible, but more work, + * and it may not even be better. */ + /* 57.0961 dB */ + { + double err = 0; + for (j = 0; j < dest_size; j++) { + for (i = 0; i < n_taps; i++) { + err += taps_d[j * n_taps + i] * multiplier; + taps_i[j * n_taps + i] = floor (err); + err -= floor (err); + } + } + } +#endif +#if 1 + /* Round to integer, but with an adjustable bias that we use to + * eliminate the DC error. This search method is a bit crude, and + * could perhaps be improved somewhat. */ + /* 60.4851 dB */ + for (j = 0; j < dest_size; j++) { + int k; + for (k = 0; k < 100; k++) { + int sum = 0; + double offset; + + offset = k * 0.01; + for (i = 0; i < n_taps; i++) { + taps_i[j * n_taps + i] = + floor (offset + taps_d[j * n_taps + i] * multiplier); + sum += taps_i[j * n_taps + i]; + } + + if (sum >= (1 << shift)) + break; + } + } +#endif +#if 0 + /* Round to integer, but adjust the multiplier. The search method is + * wrong a lot, but was sufficient enough to calculate dB error. 
*/ + /* 58.6517 dB */ + for (j = 0; j < dest_size; j++) { + int k; + int sum = 0; + for (k = 0; k < 200; k++) { + sum = 0; + + multiplier = (1 << shift) - 1.0 + k * 0.01; + for (i = 0; i < n_taps; i++) { + taps_i[j * n_taps + i] = + floor (0.5 + taps_d[j * n_taps + i] * multiplier); + sum += taps_i[j * n_taps + i]; + } + + if (sum >= (1 << shift)) + break; + } + if (sum != (1 << shift)) { + GST_ERROR ("%g %d", multiplier, sum); + } + } +#endif +#if 0 + /* Round to integer, but subtract the error from the largest tap */ + /* 58.3677 dB */ + for (j = 0; j < dest_size; j++) { + int err = -multiplier; + for (i = 0; i < n_taps; i++) { + taps_i[j * n_taps + i] = + floor (0.5 + taps_d[j * n_taps + i] * multiplier); + err += taps_i[j * n_taps + i]; + } + if (taps_i[j * n_taps + (n_taps / 2 - 1)] > + taps_i[j * n_taps + (n_taps / 2)]) { + taps_i[j * n_taps + (n_taps / 2 - 1)] -= err; + } else { + taps_i[j * n_taps + (n_taps / 2)] -= err; + } + } +#endif + + g_free (taps_d); + scale->taps = taps_i; +} + + +void +vs_image_scale_lanczos_Y (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, int submethod, + double a, double sharpen) +{ + switch (submethod) { + case 0: + default: + vs_image_scale_lanczos_Y_int16 (dest, src, tmpbuf, sharpness, dither, a, + sharpen); + break; + case 1: + vs_image_scale_lanczos_Y_int32 (dest, src, tmpbuf, sharpness, dither, a, + sharpen); + break; + case 2: + vs_image_scale_lanczos_Y_float (dest, src, tmpbuf, sharpness, dither, a, + sharpen); + break; + case 3: + vs_image_scale_lanczos_Y_double (dest, src, tmpbuf, sharpness, dither, a, + sharpen); + break; + } +} + +void +vs_image_scale_lanczos_AYUV (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, int submethod, + double a, double sharpen) +{ + switch (submethod) { + case 0: + default: + vs_image_scale_lanczos_AYUV_int16 (dest, src, tmpbuf, sharpness, dither, + a, sharpen); + break; + case 1: + vs_image_scale_lanczos_AYUV_int32 (dest, src, tmpbuf, sharpness, dither, + a, sharpen); + break; + case 2: + vs_image_scale_lanczos_AYUV_float (dest, src, tmpbuf, sharpness, dither, + a, sharpen); + break; + case 3: + vs_image_scale_lanczos_AYUV_double (dest, src, tmpbuf, sharpness, dither, + a, sharpen); + break; + } +} + + + +#define RESAMPLE_HORIZ_FLOAT(function, dest_type, tap_type, src_type, _n_taps) \ +static void \ +function (dest_type *dest, const gint32 *offsets, \ + const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \ +{ \ + int i; \ + int k; \ + dest_type sum; \ + const src_type *srcline; \ + const tap_type *tapsline; \ + for (i = 0; i < n; i++) { \ + srcline = src + offsets[i]; \ + tapsline = taps + i * _n_taps; \ + sum = 0; \ + for (k = 0; k < _n_taps; k++) { \ + sum += srcline[k] * tapsline[k]; \ + } \ + dest[i] = sum; \ + } \ +} + +#define RESAMPLE_HORIZ(function, dest_type, tap_type, src_type, _n_taps, _shift) \ +static void \ +function (dest_type *dest, const gint32 *offsets, \ + const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \ +{ \ + int i; \ + int k; \ + dest_type sum; \ + const src_type *srcline; \ + const tap_type *tapsline; \ + int offset; \ + if (_shift > 0) offset = (1<<_shift)>>1; \ + else offset = 0; \ + for (i = 0; i < n; i++) { \ + srcline = src + offsets[i]; \ + tapsline = taps + i * _n_taps; \ + sum = 0; \ + for (k = 0; k < _n_taps; k++) { \ + sum += srcline[k] * tapsline[k]; \ + } \ + dest[i] = (sum + offset) >> _shift; \ + } \ +} + +#define 
RESAMPLE_HORIZ_AYUV_FLOAT(function, dest_type, tap_type, src_type, _n_taps) \ +static void \ +function (dest_type *dest, const gint32 *offsets, \ + const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \ +{ \ + int i; \ + int k; \ + dest_type sum1; \ + dest_type sum2; \ + dest_type sum3; \ + dest_type sum4; \ + const src_type *srcline; \ + const tap_type *tapsline; \ + for (i = 0; i < n; i++) { \ + srcline = src + 4*offsets[i]; \ + tapsline = taps + i * _n_taps; \ + sum1 = 0; \ + sum2 = 0; \ + sum3 = 0; \ + sum4 = 0; \ + for (k = 0; k < _n_taps; k++) { \ + sum1 += srcline[k*4+0] * tapsline[k]; \ + sum2 += srcline[k*4+1] * tapsline[k]; \ + sum3 += srcline[k*4+2] * tapsline[k]; \ + sum4 += srcline[k*4+3] * tapsline[k]; \ + } \ + dest[i*4+0] = sum1; \ + dest[i*4+1] = sum2; \ + dest[i*4+2] = sum3; \ + dest[i*4+3] = sum4; \ + } \ +} + +#define RESAMPLE_HORIZ_AYUV(function, dest_type, tap_type, src_type, _n_taps, _shift) \ +static void \ +function (dest_type *dest, const gint32 *offsets, \ + const tap_type *taps, const src_type *src, int n_taps, int shift, int n) \ +{ \ + int i; \ + int k; \ + dest_type sum1; \ + dest_type sum2; \ + dest_type sum3; \ + dest_type sum4; \ + const src_type *srcline; \ + const tap_type *tapsline; \ + int offset; \ + if (_shift > 0) offset = (1<<_shift)>>1; \ + else offset = 0; \ + for (i = 0; i < n; i++) { \ + srcline = src + 4*offsets[i]; \ + tapsline = taps + i * _n_taps; \ + sum1 = 0; \ + sum2 = 0; \ + sum3 = 0; \ + sum4 = 0; \ + for (k = 0; k < _n_taps; k++) { \ + sum1 += srcline[k*4+0] * tapsline[k]; \ + sum2 += srcline[k*4+1] * tapsline[k]; \ + sum3 += srcline[k*4+2] * tapsline[k]; \ + sum4 += srcline[k*4+3] * tapsline[k]; \ + } \ + dest[i*4+0] = (sum1 + offset) >> _shift; \ + dest[i*4+1] = (sum2 + offset) >> _shift; \ + dest[i*4+2] = (sum3 + offset) >> _shift; \ + dest[i*4+3] = (sum4 + offset) >> _shift; \ + } \ +} + +/* *INDENT-OFF* */ +RESAMPLE_HORIZ_FLOAT (resample_horiz_double_u8_generic, double, double, + guint8, n_taps) +RESAMPLE_HORIZ_FLOAT (resample_horiz_float_u8_generic, float, float, + guint8, n_taps) +RESAMPLE_HORIZ_AYUV_FLOAT (resample_horiz_double_ayuv_generic, double, double, + guint8, n_taps) +RESAMPLE_HORIZ_AYUV_FLOAT (resample_horiz_float_ayuv_generic, float, float, + guint8, n_taps) + +RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_generic, gint32, gint32, + guint8, n_taps, shift) +RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_generic, gint16, gint16, + guint8, n_taps, shift) +RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_generic, gint32, gint32, + guint8, n_taps, shift) +RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_generic, gint16, gint16, + guint8, n_taps, shift) + +/* Candidates for orcification */ +RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps16_shift0, gint32, gint32, + guint8, 16, 0) +RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps12_shift0, gint32, gint32, + guint8, 12, 0) +RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps8_shift0, gint32, gint32, + guint8, 8, 0) +RESAMPLE_HORIZ (resample_horiz_int32_int32_u8_taps4_shift0, gint32, gint32, + guint8, 4, 0) +RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps16_shift0, gint16, gint16, + guint8, 16, 0) +RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps12_shift0, gint16, gint16, + guint8, 12, 0) +RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps8_shift0, gint16, gint16, + guint8, 8, 0) +RESAMPLE_HORIZ (resample_horiz_int16_int16_u8_taps4_shift0, gint16, gint16, + guint8, 4, 0) + +RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps16_shift0, 
gint32, gint32, + guint8, 16, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps12_shift0, gint32, gint32, + guint8, 12, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps8_shift0, gint32, gint32, + guint8, 8, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int32_int32_ayuv_taps4_shift0, gint32, gint32, + guint8, 4, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps16_shift0, gint16, gint16, + guint8, 16, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps12_shift0, gint16, gint16, + guint8, 12, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps8_shift0, gint16, gint16, + guint8, 8, 0) +RESAMPLE_HORIZ_AYUV (resample_horiz_int16_int16_ayuv_taps4_shift0, gint16, gint16, + guint8, 4, 0) +/* *INDENT-ON* */ + +#define RESAMPLE_VERT(function, tap_type, src_type, _n_taps, _shift) \ +static void \ +function (guint8 *dest, \ + const tap_type *taps, const src_type *src, int stride, int n_taps, \ + int shift, int n) \ +{ \ + int i; \ + int l; \ + gint32 sum_y; \ + gint32 offset = (1<<_shift) >> 1; \ + for (i = 0; i < n; i++) { \ + sum_y = 0; \ + for (l = 0; l < n_taps; l++) { \ + const src_type *line = PTR_OFFSET(src, stride * l); \ + sum_y += line[i] * taps[l]; \ + } \ + dest[i] = CLAMP ((sum_y + offset) >> _shift, 0, 255); \ + } \ +} + +#define RESAMPLE_VERT_DITHER(function, tap_type, src_type, _n_taps, _shift) \ +static void \ +function (guint8 *dest, \ + const tap_type *taps, const src_type *src, int stride, int n_taps, \ + int shift, int n) \ +{ \ + int i; \ + int l; \ + gint32 sum_y; \ + gint32 err_y = 0; \ + gint32 mask = (1<<_shift) - 1; \ + for (i = 0; i < n; i++) { \ + sum_y = 0; \ + for (l = 0; l < n_taps; l++) { \ + const src_type *line = PTR_OFFSET(src, stride * l); \ + sum_y += line[i] * taps[l]; \ + } \ + err_y += sum_y; \ + dest[i] = CLAMP (err_y >> _shift, 0, 255); \ + err_y &= mask; \ + } \ +} + +/* *INDENT-OFF* */ +RESAMPLE_VERT (resample_vert_int32_generic, gint32, gint32, n_taps, shift) +RESAMPLE_VERT_DITHER (resample_vert_dither_int32_generic, gint32, gint32, + n_taps, shift) +RESAMPLE_VERT (resample_vert_int16_generic, gint16, gint16, n_taps, shift); +RESAMPLE_VERT_DITHER (resample_vert_dither_int16_generic, gint16, gint16, + n_taps, shift) +/* *INDENT-ON* */ + +#define RESAMPLE_VERT_FLOAT(function, tap_type, src_type, _n_taps, _shift) \ +static void \ +function (guint8 *dest, \ + const tap_type *taps, const src_type *src, int stride, int n_taps, \ + int shift, int n) \ +{ \ + int i; \ + int l; \ + src_type sum_y; \ + for (i = 0; i < n; i++) { \ + sum_y = 0; \ + for (l = 0; l < n_taps; l++) { \ + const src_type *line = PTR_OFFSET(src, stride * l); \ + sum_y += line[i] * taps[l]; \ + } \ + dest[i] = CLAMP (floor(0.5 + sum_y), 0, 255); \ + } \ +} + +#define RESAMPLE_VERT_FLOAT_DITHER(function, tap_type, src_type, _n_taps, _shift) \ +static void \ +function (guint8 *dest, \ + const tap_type *taps, const src_type *src, int stride, int n_taps, \ + int shift, int n) \ +{ \ + int i; \ + int l; \ + src_type sum_y; \ + src_type err_y = 0; \ + for (i = 0; i < n; i++) { \ + sum_y = 0; \ + for (l = 0; l < n_taps; l++) { \ + const src_type *line = PTR_OFFSET(src, stride * l); \ + sum_y += line[i] * taps[l]; \ + } \ + err_y += sum_y; \ + dest[i] = CLAMP (floor (err_y), 0, 255); \ + err_y -= floor (err_y); \ + } \ +} + +/* *INDENT-OFF* */ +RESAMPLE_VERT_FLOAT (resample_vert_double_generic, double, double, n_taps, + shift) +RESAMPLE_VERT_FLOAT_DITHER (resample_vert_dither_double_generic, double, double, + n_taps, shift) + +RESAMPLE_VERT_FLOAT 
(resample_vert_float_generic, float, float, n_taps, shift) +RESAMPLE_VERT_FLOAT_DITHER (resample_vert_dither_float_generic, float, float, + n_taps, shift) +/* *INDENT-ON* */ + +#define S16_SHIFT1 7 +#define S16_SHIFT2 7 +#define S16_MIDSHIFT 0 +#define S16_POSTSHIFT (S16_SHIFT1+S16_SHIFT2-S16_MIDSHIFT) + +static void +vs_scale_lanczos_Y_int16 (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + gint16 *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_S16 (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, S16_MIDSHIFT, scale->dest->width); + tmp_yi++; + } + + taps = (gint16 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_int16_generic (destline, + taps, TMP_LINE_S16 (scale->y_scale1d.offsets[j]), + sizeof (gint16) * scale->dest->width, scale->y_scale1d.n_taps, + S16_POSTSHIFT, scale->dest->width); + } else { + resample_vert_int16_generic (destline, + taps, TMP_LINE_S16 (scale->y_scale1d.offsets[j]), + sizeof (gint16) * scale->dest->width, scale->y_scale1d.n_taps, + S16_POSTSHIFT, scale->dest->width); + } + } +} + +void +vs_image_scale_lanczos_Y_int16 (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + n_taps = ROUND_UP_4 (n_taps); + scale1d_calculate_taps_int16 (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen, S16_SHIFT1); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps_int16 (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen, S16_SHIFT2); + + scale->dither = dither; + + switch (scale->x_scale1d.n_taps) { + case 4: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_u8_taps4_shift0; + break; + case 8: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_u8_taps8_shift0; + break; + case 12: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_u8_taps12_shift0; + break; + case 16: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_u8_taps16_shift0; + break; + default: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_u8_generic; + break; + } + + scale->tmpdata = + g_malloc (sizeof (gint16) * scale->dest->width * scale->src->height); + + vs_scale_lanczos_Y_int16 (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + + +#define S32_SHIFT1 11 +#define S32_SHIFT2 11 +#define S32_MIDSHIFT 0 +#define S32_POSTSHIFT (S32_SHIFT1+S32_SHIFT2-S32_MIDSHIFT) + +static void +vs_scale_lanczos_Y_int32 (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + gint32 *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_S32 (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE 
(tmp_yi), + scale->x_scale1d.n_taps, S32_MIDSHIFT, scale->dest->width); + tmp_yi++; + } + + taps = (gint32 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_int32_generic (destline, + taps, TMP_LINE_S32 (scale->y_scale1d.offsets[j]), + sizeof (gint32) * scale->dest->width, + scale->y_scale1d.n_taps, S32_POSTSHIFT, scale->dest->width); + } else { + resample_vert_int32_generic (destline, + taps, TMP_LINE_S32 (scale->y_scale1d.offsets[j]), + sizeof (gint32) * scale->dest->width, + scale->y_scale1d.n_taps, S32_POSTSHIFT, scale->dest->width); + } + } +} + +void +vs_image_scale_lanczos_Y_int32 (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + n_taps = ROUND_UP_4 (n_taps); + scale1d_calculate_taps_int32 (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen, S32_SHIFT1); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps_int32 (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen, S32_SHIFT2); + + scale->dither = dither; + + switch (scale->x_scale1d.n_taps) { + case 4: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_u8_taps4_shift0; + break; + case 8: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_u8_taps8_shift0; + break; + case 12: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_u8_taps12_shift0; + break; + case 16: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_u8_taps16_shift0; + break; + default: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_u8_generic; + break; + } + + scale->tmpdata = + g_malloc (sizeof (int32_t) * scale->dest->width * scale->src->height); + + vs_scale_lanczos_Y_int32 (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + +static void +vs_scale_lanczos_Y_double (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + double *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_DOUBLE (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, 0, scale->dest->width); + tmp_yi++; + } + + taps = (double *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_double_generic (destline, + taps, TMP_LINE_DOUBLE (scale->y_scale1d.offsets[j]), + sizeof (double) * scale->dest->width, + scale->y_scale1d.n_taps, 0, scale->dest->width); + } else { + resample_vert_double_generic (destline, + taps, TMP_LINE_DOUBLE (scale->y_scale1d.offsets[j]), + sizeof (double) * scale->dest->width, + scale->y_scale1d.n_taps, 0, scale->dest->width); + } + } +} + +void +vs_image_scale_lanczos_Y_double (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, 
dest->width, a, sharpness); + scale1d_calculate_taps (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen); + + scale->dither = dither; + + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_double_u8_generic; + + scale->tmpdata = + g_malloc (sizeof (double) * scale->dest->width * scale->src->height); + + vs_scale_lanczos_Y_double (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + +static void +vs_scale_lanczos_Y_float (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + float *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_FLOAT (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, 0, scale->dest->width); + tmp_yi++; + } + + taps = (float *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_float_generic (destline, + taps, TMP_LINE_FLOAT (scale->y_scale1d.offsets[j]), + sizeof (float) * scale->dest->width, + scale->y_scale1d.n_taps, 0, scale->dest->width); + } else { + resample_vert_float_generic (destline, + taps, TMP_LINE_FLOAT (scale->y_scale1d.offsets[j]), + sizeof (float) * scale->dest->width, + scale->y_scale1d.n_taps, 0, scale->dest->width); + } + } +} + +void +vs_image_scale_lanczos_Y_float (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + scale1d_calculate_taps_float (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps_float (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen); + + scale->dither = dither; + + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_float_u8_generic; + + scale->tmpdata = + g_malloc (sizeof (float) * scale->dest->width * scale->src->height); + + vs_scale_lanczos_Y_float (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + + + + + +static void +vs_scale_lanczos_AYUV_int16 (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + gint16 *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_S16_AYUV (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, S16_MIDSHIFT, scale->dest->width); + tmp_yi++; + } + + taps = (gint16 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_int16_generic (destline, + taps, TMP_LINE_S16_AYUV (scale->y_scale1d.offsets[j]), + sizeof (gint16) * 4 * scale->dest->width, + scale->y_scale1d.n_taps, S16_POSTSHIFT, 
scale->dest->width * 4); + } else { + resample_vert_int16_generic (destline, + taps, TMP_LINE_S16_AYUV (scale->y_scale1d.offsets[j]), + sizeof (gint16) * 4 * scale->dest->width, + scale->y_scale1d.n_taps, S16_POSTSHIFT, scale->dest->width * 4); + } + } +} + +void +vs_image_scale_lanczos_AYUV_int16 (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + n_taps = ROUND_UP_4 (n_taps); + scale1d_calculate_taps_int16 (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen, S16_SHIFT1); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps_int16 (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen, S16_SHIFT2); + + scale->dither = dither; + + switch (scale->x_scale1d.n_taps) { + case 4: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps4_shift0; + break; + case 8: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps8_shift0; + break; + case 12: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps12_shift0; + break; + case 16: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_ayuv_taps16_shift0; + break; + default: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int16_int16_ayuv_generic; + break; + } + + scale->tmpdata = + g_malloc (sizeof (gint16) * scale->dest->width * scale->src->height * 4); + + vs_scale_lanczos_AYUV_int16 (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + + +static void +vs_scale_lanczos_AYUV_int32 (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + gint32 *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_S32_AYUV (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, S32_MIDSHIFT, scale->dest->width); + tmp_yi++; + } + + taps = (gint32 *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_int32_generic (destline, + taps, TMP_LINE_S32_AYUV (scale->y_scale1d.offsets[j]), + sizeof (gint32) * 4 * scale->dest->width, scale->y_scale1d.n_taps, + S32_POSTSHIFT, scale->dest->width * 4); + } else { + resample_vert_int32_generic (destline, + taps, TMP_LINE_S32_AYUV (scale->y_scale1d.offsets[j]), + sizeof (gint32) * 4 * scale->dest->width, scale->y_scale1d.n_taps, + S32_POSTSHIFT, scale->dest->width * 4); + } + } +} + +void +vs_image_scale_lanczos_AYUV_int32 (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + n_taps = ROUND_UP_4 (n_taps); + scale1d_calculate_taps_int32 (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen, S32_SHIFT1); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + 
scale1d_calculate_taps_int32 (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen, S32_SHIFT2); + + scale->dither = dither; + + switch (scale->x_scale1d.n_taps) { + case 4: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps4_shift0; + break; + case 8: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps8_shift0; + break; + case 12: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps12_shift0; + break; + case 16: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_ayuv_taps16_shift0; + break; + default: + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_int32_int32_ayuv_generic; + break; + } + + scale->tmpdata = + g_malloc (sizeof (int32_t) * scale->dest->width * scale->src->height * 4); + + vs_scale_lanczos_AYUV_int32 (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + +static void +vs_scale_lanczos_AYUV_double (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + double *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_DOUBLE_AYUV (tmp_yi), + scale->x_scale1d.offsets, scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, 0, scale->dest->width); + tmp_yi++; + } + + taps = (double *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_double_generic (destline, + taps, TMP_LINE_DOUBLE_AYUV (scale->y_scale1d.offsets[j]), + sizeof (double) * 4 * scale->dest->width, + scale->y_scale1d.n_taps, 0, scale->dest->width * 4); + } else { + resample_vert_double_generic (destline, + taps, TMP_LINE_DOUBLE_AYUV (scale->y_scale1d.offsets[j]), + sizeof (double) * 4 * scale->dest->width, + scale->y_scale1d.n_taps, 0, scale->dest->width * 4); + } + } +} + +void +vs_image_scale_lanczos_AYUV_double (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + scale1d_calculate_taps (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen); + + scale->dither = dither; + + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_double_ayuv_generic; + + scale->tmpdata = + g_malloc (sizeof (double) * scale->dest->width * scale->src->height * 4); + + vs_scale_lanczos_AYUV_double (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} + +static void +vs_scale_lanczos_AYUV_float (Scale * scale) +{ + int j; + int yi; + int tmp_yi; + + tmp_yi = 0; + + for (j = 0; j < scale->dest->height; j++) { + guint8 *destline; + float *taps; + + destline = scale->dest->pixels + scale->dest->stride * j; + + yi = scale->y_scale1d.offsets[j]; + + while (tmp_yi < yi + scale->y_scale1d.n_taps) { + scale->horiz_resample_func (TMP_LINE_FLOAT_AYUV (tmp_yi), + scale->x_scale1d.offsets, 
scale->x_scale1d.taps, SRC_LINE (tmp_yi), + scale->x_scale1d.n_taps, 0, scale->dest->width); + tmp_yi++; + } + + taps = (float *) scale->y_scale1d.taps + j * scale->y_scale1d.n_taps; + if (scale->dither) { + resample_vert_dither_float_generic (destline, + taps, TMP_LINE_FLOAT_AYUV (scale->y_scale1d.offsets[j]), + sizeof (float) * 4 * scale->dest->width, scale->y_scale1d.n_taps, 0, + scale->dest->width * 4); + } else { + resample_vert_float_generic (destline, + taps, TMP_LINE_FLOAT_AYUV (scale->y_scale1d.offsets[j]), + sizeof (float) * 4 * scale->dest->width, scale->y_scale1d.n_taps, 0, + scale->dest->width * 4); + } + } +} + +void +vs_image_scale_lanczos_AYUV_float (const VSImage * dest, const VSImage * src, + uint8_t * tmpbuf, double sharpness, gboolean dither, double a, + double sharpen) +{ + Scale s = { 0 }; + Scale *scale = &s; + int n_taps; + + scale->dest = dest; + scale->src = src; + + n_taps = scale1d_get_n_taps (src->width, dest->width, a, sharpness); + scale1d_calculate_taps_float (&scale->x_scale1d, + src->width, dest->width, n_taps, a, sharpness, sharpen); + + n_taps = scale1d_get_n_taps (src->height, dest->height, a, sharpness); + scale1d_calculate_taps_float (&scale->y_scale1d, + src->height, dest->height, n_taps, a, sharpness, sharpen); + + scale->dither = dither; + + scale->horiz_resample_func = + (HorizResampleFunc) resample_horiz_float_ayuv_generic; + + scale->tmpdata = + g_malloc (sizeof (float) * scale->dest->width * scale->src->height * 4); + + vs_scale_lanczos_AYUV_float (scale); + + scale1d_cleanup (&scale->x_scale1d); + scale1d_cleanup (&scale->y_scale1d); + g_free (scale->tmpdata); +} From 89a899fd9d19982967467a851a68fba691233469 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= Date: Mon, 5 Sep 2011 11:55:59 +0200 Subject: [PATCH 10/21] playsink: Use gst_object_unref() instead of g_object_unref() for better debugging --- gst/playback/gstplaysink.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gst/playback/gstplaysink.c b/gst/playback/gstplaysink.c index 0e70ce75a9..4a6f038bc0 100644 --- a/gst/playback/gstplaysink.c +++ b/gst/playback/gstplaysink.c @@ -2312,7 +2312,7 @@ gst_play_sink_reconfigure (GstPlaySink * playsink) add_chain (GST_PLAY_CHAIN (playsink->videochain), FALSE); activate_chain (GST_PLAY_CHAIN (playsink->videochain), FALSE); if (playsink->videochain->ts_offset) - g_object_unref (playsink->videochain->ts_offset); + gst_object_unref (playsink->videochain->ts_offset); playsink->videochain->ts_offset = NULL; } @@ -2367,7 +2367,7 @@ gst_play_sink_reconfigure (GstPlaySink * playsink) playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; if (playsink->audiochain->ts_offset) - g_object_unref (playsink->audiochain->ts_offset); + gst_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; free_chain ((GstPlayChain *) playsink->audiochain); playsink->audiochain = NULL; @@ -2437,7 +2437,7 @@ gst_play_sink_reconfigure (GstPlaySink * playsink) playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; if (playsink->audiochain->ts_offset) - g_object_unref (playsink->audiochain->ts_offset); + gst_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; } add_chain (GST_PLAY_CHAIN (playsink->audiochain), FALSE); @@ -3445,7 +3445,7 @@ gst_play_sink_change_state (GstElement * element, GstStateChange transition) playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; if (playsink->audiochain->ts_offset) - g_object_unref 
(playsink->audiochain->ts_offset); + gst_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; } ret = GST_STATE_CHANGE_SUCCESS; From 705ca1d55a62037eb7fdba88cd806fa20803ebce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= Date: Mon, 5 Sep 2011 12:02:23 +0200 Subject: [PATCH 11/21] playsink: Don't leak the videochain ts-offset element Also don't leak the audiochain ts-offset element if one is found but the sink doesn't support volume settings. --- gst/playback/gstplaysink.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/gst/playback/gstplaysink.c b/gst/playback/gstplaysink.c index 4a6f038bc0..90c71ebc8a 100644 --- a/gst/playback/gstplaysink.c +++ b/gst/playback/gstplaysink.c @@ -3444,10 +3444,17 @@ gst_play_sink_change_state (GstElement * element, GstStateChange transition) disconnect_chain (playsink->audiochain, playsink); playsink->audiochain->volume = NULL; playsink->audiochain->mute = NULL; - if (playsink->audiochain->ts_offset) - gst_object_unref (playsink->audiochain->ts_offset); + } + + if (playsink->audiochain && playsink->audiochain->ts_offset) { + gst_object_unref (playsink->audiochain->ts_offset); playsink->audiochain->ts_offset = NULL; } + + if (playsink->videochain && playsink->videochain->ts_offset) { + gst_object_unref (playsink->videochain->ts_offset); + playsink->videochain->ts_offset = NULL; + } ret = GST_STATE_CHANGE_SUCCESS; break; default: From b1c00adf313748e8c4532512364740f2d10bd88e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= Date: Mon, 5 Sep 2011 14:40:24 +0100 Subject: [PATCH 12/21] Revert "playsink: Try include 'pitch', if no other sink is provided" This reverts commit 105814e2c78f9867c61531b9e8166e4ae994296f. The general consensus seems to be that we should revert this for now. If such behaviour is desired, we should probably enable it via a flag. And maybe use the scaletempo plugin instead. --- gst/playback/gstplaysink.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/gst/playback/gstplaysink.c b/gst/playback/gstplaysink.c index 90c71ebc8a..32d0ae79c1 100644 --- a/gst/playback/gstplaysink.c +++ b/gst/playback/gstplaysink.c @@ -1664,14 +1664,6 @@ gen_audio_chain (GstPlaySink * playsink, gboolean raw) chain->sink = try_element (playsink, playsink->audio_sink, FALSE); } else { /* only try fallback if no specific sink was chosen */ - if (chain->sink == NULL) { - GST_DEBUG_OBJECT (playsink, - "trying pitch ! audioconvert ! autoaudiosink"); - elem = - gst_parse_bin_from_description - ("pitch ! audioconvert ! autoaudiosink", TRUE, NULL); - chain->sink = try_element (playsink, elem, TRUE); - } if (chain->sink == NULL) { GST_DEBUG_OBJECT (playsink, "trying autoaudiosink"); elem = gst_element_factory_make ("autoaudiosink", "audiosink"); From 81c94597715f53b63cfb99f12b8ac50f98c8498f Mon Sep 17 00:00:00 2001 From: Stefan Sauer Date: Mon, 5 Sep 2011 15:51:25 +0200 Subject: [PATCH 13/21] tests: supress ERROR log output for some tests Be nice when we tests for correct error handling and don't spam stdout. 
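Concretely, the change amounts to one call at the top of each test that intentionally provokes errors; an abridged sketch of the profile.c case (same call and test name as in the hunk below, remaining checks omitted):

    GST_START_TEST (test_target_naming)
    {
      GstEncodingTarget *target;

      /* the checks below are supposed to fail; keep the ERROR output quiet */
      gst_debug_set_threshold_for_name ("default", GST_LEVEL_NONE);

      /* NULL values */
      ASSERT_CRITICAL (target = gst_encoding_target_new (NULL, NULL, NULL, NULL));
      fail_if (target != NULL);
    }
    GST_END_TEST;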
--- tests/check/libs/profile.c | 4 ++++ tests/check/libs/tag.c | 2 ++ tests/check/libs/video.c | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/tests/check/libs/profile.c b/tests/check/libs/profile.c index 285a58ebd4..c0002082ee 100644 --- a/tests/check/libs/profile.c +++ b/tests/check/libs/profile.c @@ -137,6 +137,8 @@ GST_START_TEST (test_target_naming) { GstEncodingTarget *target; + gst_debug_set_threshold_for_name ("default", GST_LEVEL_NONE); + /* NULL values */ ASSERT_CRITICAL (target = gst_encoding_target_new (NULL, NULL, NULL, NULL)); fail_if (target != NULL); @@ -398,6 +400,8 @@ GST_START_TEST (test_loading_profile) GValue strvalue = { 0, }; GValue miniobjectvalue = { 0, }; + gst_debug_set_threshold_for_name ("default", GST_LEVEL_NONE); + /* Test loading using short method and all arguments */ target = gst_encoding_target_load ("myponytarget", "herding", NULL); fail_unless (target != NULL); diff --git a/tests/check/libs/tag.c b/tests/check/libs/tag.c index 5b53e8900d..17401c37ee 100644 --- a/tests/check/libs/tag.c +++ b/tests/check/libs/tag.c @@ -765,6 +765,8 @@ GST_START_TEST (test_license_utils) gchar *path, *data = NULL; gsize data_len; + gst_debug_set_threshold_for_name ("tag-licenses", GST_LEVEL_NONE); + /* test jurisdiction-specific license */ fail_unless_equals_int (gst_tag_get_license_flags (SPECIFIC_L), 0x01010703); fail_unless_equals_string (gst_tag_get_license_nick (SPECIFIC_L), diff --git a/tests/check/libs/video.c b/tests/check/libs/video.c index a10ac64cde..47327d4825 100644 --- a/tests/check/libs/video.c +++ b/tests/check/libs/video.c @@ -627,6 +627,8 @@ GST_START_TEST (test_convert_frame) gint i; guint8 *data; + gst_debug_set_threshold_for_name ("default", GST_LEVEL_NONE); + from_buffer = gst_buffer_new_and_alloc (640 * 480 * 4); data = GST_BUFFER_DATA (from_buffer); @@ -696,6 +698,8 @@ GST_START_TEST (test_convert_frame_async) GMainLoop *loop; ConvertFrameContext cf_data = { NULL, NULL, NULL }; + gst_debug_set_threshold_for_name ("default", GST_LEVEL_NONE); + from_buffer = gst_buffer_new_and_alloc (640 * 480 * 4); data = GST_BUFFER_DATA (from_buffer); From de4fc848faf59281869be5de5abe7fdd500512f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= Date: Mon, 5 Sep 2011 20:31:04 +0200 Subject: [PATCH 14/21] decodebin2: Actually iterate over the factories instead of only taking the first one --- gst/playback/gstdecodebin2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gst/playback/gstdecodebin2.c b/gst/playback/gstdecodebin2.c index c8f88c3cb3..e137b9345c 100644 --- a/gst/playback/gstdecodebin2.c +++ b/gst/playback/gstdecodebin2.c @@ -1477,7 +1477,7 @@ analyze_new_pad (GstDecodeBin * dbin, GstElement * src, GstPad * pad, for (i = 0; i < factories->n_values && !dontuse; i++) { GstElementFactory *factory = - g_value_get_object (g_value_array_get_nth (factories, 0)); + g_value_get_object (g_value_array_get_nth (factories, i)); GstCaps *tcaps; /* We are only interested in skipping decoders */ From 2768ed75e099dac907528e13f4f7ef5a035b651a Mon Sep 17 00:00:00 2001 From: Thiago Santos Date: Wed, 3 Aug 2011 13:31:59 -0300 Subject: [PATCH 15/21] encodebin: Select muxer further Sort muxers based on their caps and ranking before iterating to find one that fits the profile. Sorting is done by putting the elements that have a pad template that can produce the exact caps that is on the profile. 
For example: when asking for "video/quicktime, variant=iso", muxers that have this exact caps on their pad templates will be put first on the list than ones that have only "video/quicktime". https://bugzilla.gnome.org/show_bug.cgi?id=651496 --- gst/encoding/gstencodebin.c | 85 ++++++++++++++++++++++++++++++++++--- 1 file changed, 78 insertions(+), 7 deletions(-) diff --git a/gst/encoding/gstencodebin.c b/gst/encoding/gstencodebin.c index e4a5833a50..062bae745f 100644 --- a/gst/encoding/gstencodebin.c +++ b/gst/encoding/gstencodebin.c @@ -1479,17 +1479,59 @@ cleanup: } static gboolean -_factory_can_sink_caps (GstElementFactory * factory, const GstCaps * caps) +_gst_caps_match_foreach (GQuark field_id, const GValue * value, gpointer data) +{ + GstStructure *structure = data; + const GValue *other_value = gst_structure_id_get_value (structure, field_id); + + if (G_UNLIKELY (other_value == NULL)) + return FALSE; + if (gst_value_compare (value, other_value) == GST_VALUE_EQUAL) { + return TRUE; + } + + return FALSE; +} + +/* + * checks that there is at least one structure on caps_a that has + * all its fields exactly the same as one structure on caps_b + */ +static gboolean +_gst_caps_match (const GstCaps * caps_a, const GstCaps * caps_b) +{ + gint i, j; + gboolean res = FALSE; + + for (i = 0; i < gst_caps_get_size (caps_a); i++) { + GstStructure *structure_a = gst_caps_get_structure (caps_a, i); + for (j = 0; j < gst_caps_get_size (caps_b); j++) { + GstStructure *structure_b = gst_caps_get_structure (caps_b, j); + + res = gst_structure_foreach (structure_a, _gst_caps_match_foreach, + structure_b); + if (res) + goto end; + } + } +end: + return res; +} + +static gboolean +_factory_can_handle_caps (GstElementFactory * factory, const GstCaps * caps, + GstPadDirection dir, gboolean exact) { GList *templates = factory->staticpadtemplates; while (templates) { GstStaticPadTemplate *template = (GstStaticPadTemplate *) templates->data; - if (template->direction == GST_PAD_SINK) { + if (template->direction == dir) { GstCaps *tmp = gst_static_caps_get (&template->static_caps); - if (gst_caps_can_intersect (tmp, caps)) { + if ((exact && _gst_caps_match (caps, tmp)) || + (!exact && gst_caps_can_intersect (tmp, caps))) { gst_caps_unref (tmp); return TRUE; } @@ -1540,6 +1582,31 @@ beach: return formatter; } +static gint +compare_elements (gconstpointer a, gconstpointer b, gpointer udata) +{ + GstCaps *caps = udata; + GstElementFactory *fac_a = (GstElementFactory *) a; + GstElementFactory *fac_b = (GstElementFactory *) b; + + /* FIXME not quite sure this is the best algorithm to order the elements + * Some caps similarity comparison algorithm would fit better than going + * boolean (equals/not equals). 
+ */ + gboolean equals_a = _factory_can_handle_caps (fac_a, caps, GST_PAD_SRC, TRUE); + gboolean equals_b = _factory_can_handle_caps (fac_b, caps, GST_PAD_SRC, TRUE); + + if (equals_a == equals_b) { + return gst_plugin_feature_get_rank ((GstPluginFeature *) fac_b) - + gst_plugin_feature_get_rank ((GstPluginFeature *) fac_a); + } else if (equals_a) { + return -1; + } else if (equals_b) { + return 1; + } + return 0; +} + static inline GstElement * _get_muxer (GstEncodeBin * ebin) { @@ -1562,6 +1629,10 @@ _get_muxer (GstEncodeBin * ebin) gst_element_factory_list_filter (ebin->formatters, format, GST_PAD_SRC, TRUE); + muxers = g_list_sort_with_data (muxers, compare_elements, (gpointer) format); + formatters = + g_list_sort_with_data (formatters, compare_elements, (gpointer) format); + muxers = g_list_concat (muxers, formatters); if (muxers == NULL) @@ -1582,10 +1653,10 @@ _get_muxer (GstEncodeBin * ebin) for (tmp = profiles; tmp; tmp = tmp->next) { GstEncodingProfile *sprof = (GstEncodingProfile *) tmp->data; - if (!_factory_can_sink_caps (muxerfact, - gst_encoding_profile_get_format (sprof))) { - GST_DEBUG ("Skipping muxer because it can't sink caps %" GST_PTR_FORMAT, - gst_encoding_profile_get_format (sprof)); + if (!_factory_can_handle_caps (muxerfact, + gst_encoding_profile_get_format (sprof), GST_PAD_SINK, FALSE)) { + GST_DEBUG ("Skipping muxer because it can't sink caps %" + GST_PTR_FORMAT, gst_encoding_profile_get_format (sprof)); cansinkstreams = FALSE; break; } From 86e6343759e9d296a646c7d17cdc6228b2dd3eb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= Date: Mon, 5 Sep 2011 15:01:09 +0100 Subject: [PATCH 16/21] audio: rename GstBaseAudioDecoder/Encoder to GstAudioDecoder/Encoder API: gst_gst_audio_decoder_finish_frame() API: gst_gst_audio_decoder_get_audio_info() API: gst_gst_audio_decoder_get_byte_time() API: gst_gst_audio_decoder_get_delay() API: gst_gst_audio_decoder_get_latency() API: gst_gst_audio_decoder_get_max_errors() API: gst_gst_audio_decoder_get_min_latenc()y API: gst_gst_audio_decoder_get_parse_state() API: gst_gst_audio_decoder_get_plc() API: gst_gst_audio_decoder_get_plc_aware() API: gst_gst_audio_decoder_get_tolerance() API: gst_gst_audio_decoder_get_type() API: gst_gst_audio_decoder_set_byte_time() API: gst_gst_audio_decoder_set_latency() API: gst_gst_audio_decoder_set_max_errors() API: gst_gst_audio_decoder_set_min_latency() API: gst_gst_audio_decoder_set_plc() API: gst_gst_audio_decoder_set_plc_aware() API: gst_gst_audio_decoder_set_tolerance() API: gst_gst_audio_encoder_finish_frame() API: gst_gst_audio_encoder_get_audio_info() API: gst_gst_audio_encoder_get_frame_max() API: gst_gst_audio_encoder_get_frame_samples() API: gst_gst_audio_encoder_get_hard_resync() API: gst_gst_audio_encoder_get_latency() API: gst_gst_audio_encoder_get_lookahead() API: gst_gst_audio_encoder_get_mark_granule() API: gst_gst_audio_encoder_get_perfect_timestamp() API: gst_gst_audio_encoder_get_tolerance() API: gst_gst_audio_encoder_get_type() API: gst_gst_audio_encoder_proxy_getcaps() API: gst_gst_audio_encoder_set_frame_max() API: gst_gst_audio_encoder_set_frame_samples() API: gst_gst_audio_encoder_set_hard_resync() API: gst_gst_audio_encoder_set_latency() API: gst_gst_audio_encoder_set_lookahead() API: gst_gst_audio_encoder_set_mark_granule() API: gst_gst_audio_encoder_set_perfect_timestamp() API: gst_gst_audio_encoder_set_tolerance() https://bugzilla.gnome.org/show_bug.cgi?id=642690 --- gst-libs/gst/audio/Makefile.am | 9 +- ...stbaseaudiodecoder.c => 
gstaudiodecoder.c} | 512 +++++++++--------- ...stbaseaudiodecoder.h => gstaudiodecoder.h} | 166 +++--- ...stbaseaudioencoder.c => gstaudioencoder.c} | 455 ++++++++-------- gst-libs/gst/audio/gstaudioencoder.h | 244 +++++++++ gst-libs/gst/audio/gstbaseaudioencoder.h | 235 -------- win32/common/libgstaudio.def | 80 +-- 7 files changed, 845 insertions(+), 856 deletions(-) rename gst-libs/gst/audio/{gstbaseaudiodecoder.c => gstaudiodecoder.c} (79%) rename gst-libs/gst/audio/{gstbaseaudiodecoder.h => gstaudiodecoder.h} (55%) rename gst-libs/gst/audio/{gstbaseaudioencoder.c => gstaudioencoder.c} (79%) create mode 100644 gst-libs/gst/audio/gstaudioencoder.h delete mode 100644 gst-libs/gst/audio/gstbaseaudioencoder.h diff --git a/gst-libs/gst/audio/Makefile.am b/gst-libs/gst/audio/Makefile.am index b710c20810..e26ac5db10 100644 --- a/gst-libs/gst/audio/Makefile.am +++ b/gst-libs/gst/audio/Makefile.am @@ -16,14 +16,15 @@ lib_LTLIBRARIES = \ CLEANFILES = $(BUILT_SOURCES) +# FIXME 0.11: rename GstBaseAudioSink to GstAudioBaseSink or merge with GstAudioSink libgstaudio_@GST_MAJORMINOR@_la_SOURCES = \ audio.c \ gstringbuffer.c \ gstaudioclock.c \ mixerutils.c \ multichannel.c \ - gstbaseaudiodecoder.c \ - gstbaseaudioencoder.c \ + gstaudiodecoder.c \ + gstaudioencoder.c \ gstbaseaudiosink.c \ gstbaseaudiosrc.c \ gstaudiofilter.c \ @@ -38,8 +39,8 @@ libgstaudio_@GST_MAJORMINOR@include_HEADERS = \ gstringbuffer.h \ gstaudioclock.h \ gstaudiofilter.h \ - gstbaseaudiodecoder.h \ - gstbaseaudioencoder.h \ + gstaudiodecoder.h \ + gstaudioencoder.h \ gstbaseaudiosink.h \ gstbaseaudiosrc.h \ gstaudiosink.h \ diff --git a/gst-libs/gst/audio/gstbaseaudiodecoder.c b/gst-libs/gst/audio/gstaudiodecoder.c similarity index 79% rename from gst-libs/gst/audio/gstbaseaudiodecoder.c rename to gst-libs/gst/audio/gstaudiodecoder.c index 08441b90a8..087baac1f1 100644 --- a/gst-libs/gst/audio/gstbaseaudiodecoder.c +++ b/gst-libs/gst/audio/gstaudiodecoder.c @@ -22,7 +22,7 @@ */ /** - * SECTION:gstbaseaudiodecoder + * SECTION:gstaudiodecoder * @short_description: Base class for audio decoders * @see_also: #GstBaseTransform * @since: 0.10.36 @@ -30,29 +30,29 @@ * This base class is for audio decoders turning encoded data into * raw audio samples. * - * GstBaseAudioDecoder and subclass should cooperate as follows. + * GstAudioDecoder and subclass should cooperate as follows. * * * Configuration * - * Initially, GstBaseAudioDecoder calls @start when the decoder element + * Initially, GstAudioDecoder calls @start when the decoder element * is activated, which allows subclass to perform any global setup. * Base class (context) parameters can already be set according to subclass * capabilities (or possibly upon receive more information in subsequent * @set_format). * * - * GstBaseAudioDecoder calls @set_format to inform subclass of the format + * GstAudioDecoder calls @set_format to inform subclass of the format * of input audio data that it is about to receive. * While unlikely, it might be called more than once, if changing input * parameters require reconfiguration. * * - * GstBaseAudioDecoder calls @stop at end of all processing. + * GstAudioDecoder calls @stop at end of all processing. * * * - * As of configuration stage, and throughout processing, GstBaseAudioDecoder + * As of configuration stage, and throughout processing, GstAudioDecoder * provides various (context) parameters, e.g. describing the format of * output audio data (valid when output caps have been caps) or current parsing state. 
* Conversely, subclass can and should configure context to inform @@ -71,7 +71,7 @@ * * * If codec processing results in decoded data, subclass should call - * @gst_base_audio_decoder_finish_frame to have decoded data pushed + * @gst_audio_decoder_finish_frame to have decoded data pushed * downstream. * * @@ -82,7 +82,7 @@ * setting src pad caps. * * - * During the parsing process GstBaseAudioDecoderClass will handle both + * During the parsing process GstAudioDecoderClass will handle both * srcpad and sinkpad events. Sink events will be passed to subclass * if @event callback has been provided. * @@ -91,7 +91,7 @@ * * Shutdown phase * - * GstBaseAudioDecoder class calls @stop to inform the subclass that data + * GstAudioDecoder class calls @stop to inform the subclass that data * parsing will be stopped. * * @@ -102,7 +102,7 @@ * source and sink pads. The pads need to be named "sink" and "src". It also * needs to set the fixed caps on srcpad, when the format is ensured. This * is typically when base class calls subclass' @set_format function, though - * it might be delayed until calling @gst_base_audio_decoder_finish_frame. + * it might be delayed until calling @gst_audio_decoder_finish_frame. * * In summary, above process should have subclass concentrating on * codec data processing while leaving other matters to base class, @@ -112,7 +112,7 @@ * In particular, base class will try to arrange for perfect output timestamps * as much as possible while tracking upstream timestamps. * To this end, if deviation between the next ideal expected perfect timestamp - * and upstream exceeds #GstBaseAudioDecoder:tolerance, then resync to upstream + * and upstream exceeds #GstAudioDecoder:tolerance, then resync to upstream * occurs (which would happen always if the tolerance mechanism is disabled). * * In non-live pipelines, baseclass can also (configurably) arrange for @@ -138,7 +138,7 @@ * * * Accept data in @handle_frame and provide encoded results to - * @gst_base_audio_decoder_finish_frame. If it is prepared to perform + * @gst_audio_decoder_finish_frame. If it is prepared to perform * PLC, it should also accept NULL data in @handle_frame and provide for * data for indicated duration. 
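For illustration, a minimal @handle_frame of a GstAudioDecoder subclass could look roughly like the sketch below; the element name MyDec and the helpers my_dec_conceal()/my_dec_decode_packet() are hypothetical, only gst_audio_decoder_finish_frame() is the renamed base class API.

/* Hypothetical sketch of a subclass handle_frame using the renamed API;
 * helper functions are made-up placeholders. */
static GstFlowReturn
my_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
{
  GstBuffer *outbuf;

  if (buffer == NULL) {
    /* NULL input: base class requests packet loss concealment for a gap */
    outbuf = my_dec_conceal (dec);
  } else {
    outbuf = my_dec_decode_packet (dec, buffer);
  }

  if (outbuf == NULL)
    return GST_FLOW_OK;         /* no output yet, wait for more input */

  /* hand one decoded frame's worth of samples to the base class */
  return gst_audio_decoder_finish_frame (dec, outbuf, 1);
}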
* @@ -150,17 +150,17 @@ #endif #define GST_USE_UNSTABLE_API -#include "gstbaseaudiodecoder.h" +#include "gstaudiodecoder.h" #include #include -GST_DEBUG_CATEGORY (baseaudiodecoder_debug); -#define GST_CAT_DEFAULT baseaudiodecoder_debug +GST_DEBUG_CATEGORY (audiodecoder_debug); +#define GST_CAT_DEFAULT audiodecoder_debug -#define GST_BASE_AUDIO_DECODER_GET_PRIVATE(obj) \ - (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_BASE_AUDIO_DECODER, \ - GstBaseAudioDecoderPrivate)) +#define GST_AUDIO_DECODER_GET_PRIVATE(obj) \ + (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_AUDIO_DECODER, \ + GstAudioDecoderPrivate)) enum { @@ -179,7 +179,7 @@ enum #define DEFAULT_TOLERANCE 0 #define DEFAULT_PLC FALSE -typedef struct _GstBaseAudioDecoderContext +typedef struct _GstAudioDecoderContext { /* input */ /* (output) audio format */ @@ -199,9 +199,9 @@ typedef struct _GstBaseAudioDecoderContext /* MT-protected (with LOCK) */ GstClockTime min_latency; GstClockTime max_latency; -} GstBaseAudioDecoderContext; +} GstAudioDecoderContext; -struct _GstBaseAudioDecoderPrivate +struct _GstAudioDecoderPrivate { /* activation status */ gboolean active; @@ -253,7 +253,7 @@ struct _GstBaseAudioDecoderPrivate GList *queued; /* context storage */ - GstBaseAudioDecoderContext ctx; + GstAudioDecoderContext ctx; /* properties */ GstClockTime latency; @@ -263,48 +263,39 @@ struct _GstBaseAudioDecoderPrivate }; -static void gst_base_audio_decoder_finalize (GObject * object); -static void gst_base_audio_decoder_set_property (GObject * object, +static void gst_audio_decoder_finalize (GObject * object); +static void gst_audio_decoder_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); -static void gst_base_audio_decoder_get_property (GObject * object, +static void gst_audio_decoder_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); -static void gst_base_audio_decoder_clear_queues (GstBaseAudioDecoder * dec); -static GstFlowReturn gst_base_audio_decoder_chain_reverse (GstBaseAudioDecoder * +static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec); +static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf); -static GstStateChangeReturn gst_base_audio_decoder_change_state (GstElement * +static GstStateChangeReturn gst_audio_decoder_change_state (GstElement * element, GstStateChange transition); -static gboolean gst_base_audio_decoder_sink_event (GstPad * pad, - GstEvent * event); -static gboolean gst_base_audio_decoder_src_event (GstPad * pad, - GstEvent * event); -static gboolean gst_base_audio_decoder_sink_setcaps (GstPad * pad, - GstCaps * caps); -static gboolean gst_base_audio_decoder_src_setcaps (GstPad * pad, - GstCaps * caps); -static GstFlowReturn gst_base_audio_decoder_chain (GstPad * pad, - GstBuffer * buf); -static gboolean gst_base_audio_decoder_src_query (GstPad * pad, - GstQuery * query); -static gboolean gst_base_audio_decoder_sink_query (GstPad * pad, - GstQuery * query); -static const GstQueryType *gst_base_audio_decoder_get_query_types (GstPad * - pad); -static void gst_base_audio_decoder_reset (GstBaseAudioDecoder * dec, - gboolean full); +static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstEvent * event); +static gboolean gst_audio_decoder_src_event (GstPad * pad, GstEvent * event); +static gboolean gst_audio_decoder_sink_setcaps (GstPad * pad, GstCaps * caps); +static gboolean gst_audio_decoder_src_setcaps (GstPad * pad, GstCaps * caps); +static GstFlowReturn gst_audio_decoder_chain 
(GstPad * pad, GstBuffer * buf); +static gboolean gst_audio_decoder_src_query (GstPad * pad, GstQuery * query); +static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstQuery * query); +static const GstQueryType *gst_audio_decoder_get_query_types (GstPad * pad); +static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full); -GST_BOILERPLATE (GstBaseAudioDecoder, gst_base_audio_decoder, GstElement, +GST_BOILERPLATE (GstAudioDecoder, gst_audio_decoder, GstElement, GST_TYPE_ELEMENT); static void -gst_base_audio_decoder_base_init (gpointer g_class) +gst_audio_decoder_base_init (gpointer g_class) { } static void -gst_base_audio_decoder_class_init (GstBaseAudioDecoderClass * klass) +gst_audio_decoder_class_init (GstAudioDecoderClass * klass) { GObjectClass *gobject_class; GstElementClass *element_class; @@ -314,16 +305,16 @@ gst_base_audio_decoder_class_init (GstBaseAudioDecoderClass * klass) parent_class = g_type_class_peek_parent (klass); - g_type_class_add_private (klass, sizeof (GstBaseAudioDecoderPrivate)); + g_type_class_add_private (klass, sizeof (GstAudioDecoderPrivate)); - GST_DEBUG_CATEGORY_INIT (baseaudiodecoder_debug, "baseaudiodecoder", 0, - "baseaudiodecoder element"); + GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0, + "audio decoder base class"); - gobject_class->set_property = gst_base_audio_decoder_set_property; - gobject_class->get_property = gst_base_audio_decoder_get_property; - gobject_class->finalize = gst_base_audio_decoder_finalize; + gobject_class->set_property = gst_audio_decoder_set_property; + gobject_class->get_property = gst_audio_decoder_get_property; + gobject_class->finalize = gst_audio_decoder_finalize; - element_class->change_state = gst_base_audio_decoder_change_state; + element_class->change_state = gst_audio_decoder_change_state; /* Properties */ g_object_class_install_property (gobject_class, PROP_LATENCY, @@ -345,14 +336,13 @@ gst_base_audio_decoder_class_init (GstBaseAudioDecoderClass * klass) } static void -gst_base_audio_decoder_init (GstBaseAudioDecoder * dec, - GstBaseAudioDecoderClass * klass) +gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass) { GstPadTemplate *pad_template; - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_init"); + GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init"); - dec->priv = GST_BASE_AUDIO_DECODER_GET_PRIVATE (dec); + dec->priv = GST_AUDIO_DECODER_GET_PRIVATE (dec); /* Setup sink pad */ pad_template = @@ -361,13 +351,13 @@ gst_base_audio_decoder_init (GstBaseAudioDecoder * dec, dec->sinkpad = gst_pad_new_from_template (pad_template, "sink"); gst_pad_set_event_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_sink_event)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event)); gst_pad_set_setcaps_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_sink_setcaps)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_setcaps)); gst_pad_set_chain_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_chain)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_chain)); gst_pad_set_query_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_sink_query)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query)); gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad); GST_DEBUG_OBJECT (dec, "sinkpad created"); @@ -378,13 +368,13 @@ gst_base_audio_decoder_init (GstBaseAudioDecoder * dec, dec->srcpad = gst_pad_new_from_template (pad_template, "src"); gst_pad_set_setcaps_function (dec->srcpad, - GST_DEBUG_FUNCPTR 
(gst_base_audio_decoder_src_setcaps)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_src_setcaps)); gst_pad_set_event_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_src_event)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event)); gst_pad_set_query_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_src_query)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query)); gst_pad_set_query_type_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_get_query_types)); + GST_DEBUG_FUNCPTR (gst_audio_decoder_get_query_types)); gst_pad_use_fixed_caps (dec->srcpad); gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad); GST_DEBUG_OBJECT (dec, "srcpad created"); @@ -399,14 +389,14 @@ gst_base_audio_decoder_init (GstBaseAudioDecoder * dec, dec->priv->plc = DEFAULT_PLC; /* init state */ - gst_base_audio_decoder_reset (dec, TRUE); + gst_audio_decoder_reset (dec, TRUE); GST_DEBUG_OBJECT (dec, "init ok"); } static void -gst_base_audio_decoder_reset (GstBaseAudioDecoder * dec, gboolean full) +gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full) { - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_reset"); + GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset"); GST_OBJECT_LOCK (dec); @@ -416,7 +406,7 @@ gst_base_audio_decoder_reset (GstBaseAudioDecoder * dec, gboolean full) dec->priv->samples_out = 0; dec->priv->agg = -1; dec->priv->error_count = 0; - gst_base_audio_decoder_clear_queues (dec); + gst_audio_decoder_clear_queues (dec); gst_audio_info_clear (&dec->priv->ctx.info); memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx)); @@ -446,12 +436,12 @@ gst_base_audio_decoder_reset (GstBaseAudioDecoder * dec, gboolean full) } static void -gst_base_audio_decoder_finalize (GObject * object) +gst_audio_decoder_finalize (GObject * object) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (object)); - dec = GST_BASE_AUDIO_DECODER (object); + g_return_if_fail (GST_IS_AUDIO_DECODER (object)); + dec = GST_AUDIO_DECODER (object); if (dec->priv->adapter) { g_object_unref (dec->priv->adapter); @@ -466,13 +456,13 @@ gst_base_audio_decoder_finalize (GObject * object) /* automagically perform sanity checking of src caps; * also extracts output data format */ static gboolean -gst_base_audio_decoder_src_setcaps (GstPad * pad, GstCaps * caps) +gst_audio_decoder_src_setcaps (GstPad * pad, GstCaps * caps) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; gboolean res = TRUE; guint old_rate; - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); + dec = GST_AUDIO_DECODER (gst_pad_get_parent (pad)); GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps); @@ -505,14 +495,14 @@ refuse_caps: } static gboolean -gst_base_audio_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) +gst_audio_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) { - GstBaseAudioDecoder *dec; - GstBaseAudioDecoderClass *klass; + GstAudioDecoder *dec; + GstAudioDecoderClass *klass; gboolean res = TRUE; - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + dec = GST_AUDIO_DECODER (gst_pad_get_parent (pad)); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps); @@ -532,7 +522,7 @@ gst_base_audio_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) } static void -gst_base_audio_decoder_setup (GstBaseAudioDecoder * dec) +gst_audio_decoder_setup (GstAudioDecoder * dec) { GstQuery *query; gboolean res; @@ -553,20 +543,20 @@ gst_base_audio_decoder_setup 
(GstBaseAudioDecoder * dec) /* mini aggregator combining output buffers into fewer larger ones, * if so allowed/configured */ static GstFlowReturn -gst_base_audio_decoder_output (GstBaseAudioDecoder * dec, GstBuffer * buf) +gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf) { - GstBaseAudioDecoderClass *klass; - GstBaseAudioDecoderPrivate *priv; - GstBaseAudioDecoderContext *ctx; + GstAudioDecoderClass *klass; + GstAudioDecoderPrivate *priv; + GstAudioDecoderContext *ctx; GstFlowReturn ret = GST_FLOW_OK; GstBuffer *inbuf = NULL; - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); priv = dec->priv; ctx = &dec->priv->ctx; if (G_UNLIKELY (priv->agg < 0)) - gst_base_audio_decoder_setup (dec); + gst_audio_decoder_setup (dec); if (G_LIKELY (buf)) { g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR); @@ -693,11 +683,11 @@ again: } GstFlowReturn -gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * dec, GstBuffer * buf, +gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf, gint frames) { - GstBaseAudioDecoderPrivate *priv; - GstBaseAudioDecoderContext *ctx; + GstAudioDecoderPrivate *priv; + GstAudioDecoderContext *ctx; gint samples = 0; GstClockTime ts, next_ts; @@ -823,7 +813,7 @@ gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * dec, GstBuffer * buf, dec->priv->error_count--; exit: - return gst_base_audio_decoder_output (dec, buf); + return gst_audio_decoder_output (dec, buf); /* ERRORS */ wrong_buffer: @@ -846,8 +836,8 @@ overflow: } static GstFlowReturn -gst_base_audio_decoder_handle_frame (GstBaseAudioDecoder * dec, - GstBaseAudioDecoderClass * klass, GstBuffer * buffer) +gst_audio_decoder_handle_frame (GstAudioDecoder * dec, + GstAudioDecoderClass * klass, GstBuffer * buffer) { if (G_LIKELY (buffer)) { /* keep around for admin */ @@ -866,19 +856,19 @@ gst_base_audio_decoder_handle_frame (GstBaseAudioDecoder * dec, /* maybe subclass configurable instead, but this allows for a whole lot of * raw samples, so at least quite some encoded ... */ -#define GST_BASE_AUDIO_DECODER_MAX_SYNC 10 * 8 * 2 * 1024 +#define GST_AUDIO_DECODER_MAX_SYNC 10 * 8 * 2 * 1024 static GstFlowReturn -gst_base_audio_decoder_push_buffers (GstBaseAudioDecoder * dec, gboolean force) +gst_audio_decoder_push_buffers (GstAudioDecoder * dec, gboolean force) { - GstBaseAudioDecoderClass *klass; - GstBaseAudioDecoderPrivate *priv; - GstBaseAudioDecoderContext *ctx; + GstAudioDecoderClass *klass; + GstAudioDecoderPrivate *priv; + GstAudioDecoderContext *ctx; GstFlowReturn ret = GST_FLOW_OK; GstBuffer *buffer; gint av, flush; - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); priv = dec->priv; ctx = &dec->priv->ctx; @@ -914,7 +904,7 @@ gst_base_audio_decoder_push_buffers (GstBaseAudioDecoder * dec, gboolean force) flush = offset; /* avoid parsing indefinitely */ priv->sync_flush += offset; - if (priv->sync_flush > GST_BASE_AUDIO_DECODER_MAX_SYNC) + if (priv->sync_flush > GST_AUDIO_DECODER_MAX_SYNC) goto parse_failed; } @@ -950,7 +940,7 @@ gst_base_audio_decoder_push_buffers (GstBaseAudioDecoder * dec, gboolean force) buffer = NULL; } - ret = gst_base_audio_decoder_handle_frame (dec, klass, buffer); + ret = gst_audio_decoder_handle_frame (dec, klass, buffer); /* do not keep pushing it ... 
*/ if (G_UNLIKELY (!av)) { @@ -974,7 +964,7 @@ parse_failed: } static GstFlowReturn -gst_base_audio_decoder_drain (GstBaseAudioDecoder * dec) +gst_audio_decoder_drain (GstAudioDecoder * dec) { GstFlowReturn ret; @@ -985,11 +975,11 @@ gst_base_audio_decoder_drain (GstBaseAudioDecoder * dec) /* chain eventually calls upon drain as well, but by that time * gather list should be clear, so ok ... */ if (dec->segment.rate < 0.0 && dec->priv->gather) - gst_base_audio_decoder_chain_reverse (dec, NULL); + gst_audio_decoder_chain_reverse (dec, NULL); /* have subclass give all it can */ - ret = gst_base_audio_decoder_push_buffers (dec, TRUE); + ret = gst_audio_decoder_push_buffers (dec, TRUE); /* ensure all output sent */ - ret = gst_base_audio_decoder_output (dec, NULL); + ret = gst_audio_decoder_output (dec, NULL); /* everything should be away now */ if (dec->priv->frames.length) { /* not fatal/impossible though if subclass/codec eats stuff */ @@ -1007,19 +997,19 @@ gst_base_audio_decoder_drain (GstBaseAudioDecoder * dec) /* hard == FLUSH, otherwise discont */ static GstFlowReturn -gst_base_audio_decoder_flush (GstBaseAudioDecoder * dec, gboolean hard) +gst_audio_decoder_flush (GstAudioDecoder * dec, gboolean hard) { - GstBaseAudioDecoderClass *klass; + GstAudioDecoderClass *klass; GstFlowReturn ret = GST_FLOW_OK; - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); GST_LOG_OBJECT (dec, "flush hard %d", hard); if (!hard) { - ret = gst_base_audio_decoder_drain (dec); + ret = gst_audio_decoder_drain (dec); } else { - gst_base_audio_decoder_clear_queues (dec); + gst_audio_decoder_clear_queues (dec); gst_segment_init (&dec->segment, GST_FORMAT_TIME); dec->priv->error_count = 0; } @@ -1028,14 +1018,13 @@ gst_base_audio_decoder_flush (GstBaseAudioDecoder * dec, gboolean hard) if (klass->flush && dec->priv->samples_out > 0) klass->flush (dec, hard); /* and get (re)set for the sequel */ - gst_base_audio_decoder_reset (dec, FALSE); + gst_audio_decoder_reset (dec, FALSE); return ret; } static GstFlowReturn -gst_base_audio_decoder_chain_forward (GstBaseAudioDecoder * dec, - GstBuffer * buffer) +gst_audio_decoder_chain_forward (GstAudioDecoder * dec, GstBuffer * buffer) { GstFlowReturn ret; @@ -1046,16 +1035,16 @@ gst_base_audio_decoder_chain_forward (GstBaseAudioDecoder * dec, dec->priv->drained = FALSE; /* hand to subclass */ - ret = gst_base_audio_decoder_push_buffers (dec, FALSE); + ret = gst_audio_decoder_push_buffers (dec, FALSE); GST_LOG_OBJECT (dec, "chain-done"); return ret; } static void -gst_base_audio_decoder_clear_queues (GstBaseAudioDecoder * dec) +gst_audio_decoder_clear_queues (GstAudioDecoder * dec) { - GstBaseAudioDecoderPrivate *priv = dec->priv; + GstAudioDecoderPrivate *priv = dec->priv; g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL); g_list_free (priv->queued); @@ -1132,9 +1121,9 @@ gst_base_audio_decoder_clear_queues (GstBaseAudioDecoder * dec) * Etc.. 
*/ static GstFlowReturn -gst_base_audio_decoder_flush_decode (GstBaseAudioDecoder * dec) +gst_audio_decoder_flush_decode (GstAudioDecoder * dec) { - GstBaseAudioDecoderPrivate *priv = dec->priv; + GstAudioDecoderPrivate *priv = dec->priv; GstFlowReturn res = GST_FLOW_OK; GList *walk; @@ -1143,7 +1132,7 @@ gst_base_audio_decoder_flush_decode (GstBaseAudioDecoder * dec) GST_DEBUG_OBJECT (dec, "flushing buffers to decoder"); /* clear buffer and decoder state */ - gst_base_audio_decoder_flush (dec, FALSE); + gst_audio_decoder_flush (dec, FALSE); while (walk) { GList *next; @@ -1155,7 +1144,7 @@ gst_base_audio_decoder_flush_decode (GstBaseAudioDecoder * dec) next = g_list_next (walk); /* decode buffer, resulting data prepended to output queue */ gst_buffer_ref (buf); - res = gst_base_audio_decoder_chain_forward (dec, buf); + res = gst_audio_decoder_chain_forward (dec, buf); /* if we generated output, we can discard the buffer, else we * keep it in the queue */ @@ -1170,7 +1159,7 @@ gst_base_audio_decoder_flush_decode (GstBaseAudioDecoder * dec) } /* drain any aggregation (or otherwise) leftover */ - gst_base_audio_decoder_drain (dec); + gst_audio_decoder_drain (dec); /* now send queued data downstream */ while (priv->queued) { @@ -1198,10 +1187,9 @@ gst_base_audio_decoder_flush_decode (GstBaseAudioDecoder * dec) } static GstFlowReturn -gst_base_audio_decoder_chain_reverse (GstBaseAudioDecoder * dec, - GstBuffer * buf) +gst_audio_decoder_chain_reverse (GstAudioDecoder * dec, GstBuffer * buf) { - GstBaseAudioDecoderPrivate *priv = dec->priv; + GstAudioDecoderPrivate *priv = dec->priv; GstFlowReturn result = GST_FLOW_OK; /* if we have a discont, move buffers to the decode list */ @@ -1217,7 +1205,7 @@ gst_base_audio_decoder_chain_reverse (GstBaseAudioDecoder * dec, priv->decode = g_list_prepend (priv->decode, gbuf); } /* decode stuff in the decode queue */ - gst_base_audio_decoder_flush_decode (dec); + gst_audio_decoder_flush_decode (dec); } if (G_LIKELY (buf)) { @@ -1234,12 +1222,12 @@ gst_base_audio_decoder_chain_reverse (GstBaseAudioDecoder * dec, } static GstFlowReturn -gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buffer) +gst_audio_decoder_chain (GstPad * pad, GstBuffer * buffer) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; GstFlowReturn ret; - dec = GST_BASE_AUDIO_DECODER (GST_PAD_PARENT (pad)); + dec = GST_AUDIO_DECODER (GST_PAD_PARENT (pad)); GST_LOG_OBJECT (dec, "received buffer of size %d with ts %" GST_TIME_FORMAT @@ -1255,7 +1243,7 @@ gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buffer) samples = dec->priv->samples; GST_DEBUG_OBJECT (dec, "handling discont"); - gst_base_audio_decoder_flush (dec, FALSE); + gst_audio_decoder_flush (dec, FALSE); dec->priv->discont = TRUE; /* buffer may claim DISCONT loudly, if it can't tell us where we are now, @@ -1269,9 +1257,9 @@ gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buffer) } if (dec->segment.rate > 0.0) - ret = gst_base_audio_decoder_chain_forward (dec, buffer); + ret = gst_audio_decoder_chain_forward (dec, buffer); else - ret = gst_base_audio_decoder_chain_reverse (dec, buffer); + ret = gst_audio_decoder_chain_reverse (dec, buffer); return ret; } @@ -1279,15 +1267,14 @@ gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buffer) /* perform upstream byte <-> time conversion (duration, seeking) * if subclass allows and if enough data for moderately decent conversion */ static inline gboolean -gst_base_audio_decoder_do_byte (GstBaseAudioDecoder * dec) +gst_audio_decoder_do_byte (GstAudioDecoder * 
dec) { return dec->priv->ctx.do_byte_time && dec->priv->ctx.info.bpf && dec->priv->ctx.info.rate <= dec->priv->samples_out; } static gboolean -gst_base_audio_decoder_sink_eventfunc (GstBaseAudioDecoder * dec, - GstEvent * event) +gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event) { gboolean handled = FALSE; @@ -1339,7 +1326,7 @@ gst_base_audio_decoder_sink_eventfunc (GstBaseAudioDecoder * dec, } /* finish current segment */ - gst_base_audio_decoder_drain (dec); + gst_audio_decoder_drain (dec); if (update) { /* time progressed without data, see if we can fill the gap with @@ -1350,19 +1337,19 @@ gst_base_audio_decoder_sink_eventfunc (GstBaseAudioDecoder * dec, GST_TIME_ARGS (dec->segment.last_stop)); if (dec->priv->plc && dec->priv->ctx.do_plc && dec->segment.rate > 0.0 && dec->segment.last_stop < start) { - GstBaseAudioDecoderClass *klass; + GstAudioDecoderClass *klass; GstBuffer *buf; - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); /* hand subclass empty frame with duration that needs covering */ buf = gst_buffer_new (); GST_BUFFER_DURATION (buf) = start - dec->segment.last_stop; /* best effort, not much error handling */ - gst_base_audio_decoder_handle_frame (dec, klass, buf); + gst_audio_decoder_handle_frame (dec, klass, buf); } } else { /* prepare for next one */ - gst_base_audio_decoder_flush (dec, FALSE); + gst_audio_decoder_flush (dec, FALSE); /* and that's where we time from, * in case upstream does not come up with anything better * (e.g. upstream BYTE) */ @@ -1386,11 +1373,11 @@ gst_base_audio_decoder_sink_eventfunc (GstBaseAudioDecoder * dec, case GST_EVENT_FLUSH_STOP: /* prepare for fresh start */ - gst_base_audio_decoder_flush (dec, TRUE); + gst_audio_decoder_flush (dec, TRUE); break; case GST_EVENT_EOS: - gst_base_audio_decoder_drain (dec); + gst_audio_decoder_drain (dec); break; default: @@ -1401,15 +1388,15 @@ gst_base_audio_decoder_sink_eventfunc (GstBaseAudioDecoder * dec, } static gboolean -gst_base_audio_decoder_sink_event (GstPad * pad, GstEvent * event) +gst_audio_decoder_sink_event (GstPad * pad, GstEvent * event) { - GstBaseAudioDecoder *dec; - GstBaseAudioDecoderClass *klass; + GstAudioDecoder *dec; + GstAudioDecoderClass *klass; gboolean handled = FALSE; gboolean ret = TRUE; - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + dec = GST_AUDIO_DECODER (gst_pad_get_parent (pad)); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event), GST_EVENT_TYPE_NAME (event)); @@ -1418,7 +1405,7 @@ gst_base_audio_decoder_sink_event (GstPad * pad, GstEvent * event) handled = klass->event (dec, event); if (!handled) - handled = gst_base_audio_decoder_sink_eventfunc (dec, event); + handled = gst_audio_decoder_sink_eventfunc (dec, event); if (!handled) ret = gst_pad_event_default (pad, event); @@ -1430,7 +1417,7 @@ gst_base_audio_decoder_sink_event (GstPad * pad, GstEvent * event) } static gboolean -gst_base_audio_decoder_do_seek (GstBaseAudioDecoder * dec, GstEvent * event) +gst_audio_decoder_do_seek (GstAudioDecoder * dec, GstEvent * event) { GstSeekFlags flags; GstSeekType start_type, end_type; @@ -1489,12 +1476,12 @@ gst_base_audio_decoder_do_seek (GstBaseAudioDecoder * dec, GstEvent * event) } static gboolean -gst_base_audio_decoder_src_event (GstPad * pad, GstEvent * event) +gst_audio_decoder_src_event (GstPad * pad, GstEvent * event) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; 
gboolean res = FALSE; - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); + dec = GST_AUDIO_DECODER (gst_pad_get_parent (pad)); GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event), GST_EVENT_TYPE_NAME (event)); @@ -1520,8 +1507,8 @@ gst_base_audio_decoder_src_event (GstPad * pad, GstEvent * event) /* if upstream fails for a time seek, maybe we can help if allowed */ if (format == GST_FORMAT_TIME) { - if (gst_base_audio_decoder_do_byte (dec)) - res = gst_base_audio_decoder_do_seek (dec, event); + if (gst_audio_decoder_do_byte (dec)) + res = gst_audio_decoder_do_seek (dec, event); break; } @@ -1559,7 +1546,7 @@ convert_error: } /* - * gst_base_audio_encoded_audio_convert: + * gst_audio_encoded_audio_convert: * @fmt: audio format of the encoded audio * @bytes: number of encoded bytes * @samples: number of encoded samples @@ -1573,9 +1560,9 @@ convert_error: * BYTE and TIME format by using estimated bitrate based on * @samples and @bytes (and @fmt). */ -/* FIXME: make gst_base_audio_encoded_audio_convert() public? */ +/* FIXME: make gst_audio_encoded_audio_convert() public? */ static gboolean -gst_base_audio_encoded_audio_convert (GstAudioInfo * fmt, +gst_audio_encoded_audio_convert (GstAudioInfo * fmt, gint64 bytes, gint64 samples, GstFormat src_format, gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { @@ -1630,12 +1617,12 @@ exit: } static gboolean -gst_base_audio_decoder_sink_query (GstPad * pad, GstQuery * query) +gst_audio_decoder_sink_query (GstPad * pad, GstQuery * query) { gboolean res = TRUE; - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); + dec = GST_AUDIO_DECODER (gst_pad_get_parent (pad)); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_FORMATS: @@ -1650,7 +1637,7 @@ gst_base_audio_decoder_sink_query (GstPad * pad, GstQuery * query) gint64 src_val, dest_val; gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - if (!(res = gst_base_audio_encoded_audio_convert (&dec->priv->ctx.info, + if (!(res = gst_audio_encoded_audio_convert (&dec->priv->ctx.info, dec->priv->bytes_in, dec->priv->samples_out, src_fmt, src_val, &dest_fmt, &dest_val))) goto error; @@ -1668,9 +1655,9 @@ error: } static const GstQueryType * -gst_base_audio_decoder_get_query_types (GstPad * pad) +gst_audio_decoder_get_query_types (GstPad * pad) { - static const GstQueryType gst_base_audio_decoder_src_query_types[] = { + static const GstQueryType gst_audio_decoder_src_query_types[] = { GST_QUERY_POSITION, GST_QUERY_DURATION, GST_QUERY_CONVERT, @@ -1678,7 +1665,7 @@ gst_base_audio_decoder_get_query_types (GstPad * pad) 0 }; - return gst_base_audio_decoder_src_query_types; + return gst_audio_decoder_src_query_types; } /* FIXME ? are any of these queries (other than latency) a decoder's business ?? @@ -1686,13 +1673,13 @@ gst_base_audio_decoder_get_query_types (GstPad * pad) * segment stuff etc at all * Supposedly that's backward compatibility ... 
*/ static gboolean -gst_base_audio_decoder_src_query (GstPad * pad, GstQuery * query) +gst_audio_decoder_src_query (GstPad * pad, GstQuery * query) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; GstPad *peerpad; gboolean res = FALSE; - dec = GST_BASE_AUDIO_DECODER (GST_PAD_PARENT (pad)); + dec = GST_AUDIO_DECODER (GST_PAD_PARENT (pad)); peerpad = gst_pad_get_peer (GST_PAD (dec->sinkpad)); GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query); @@ -1708,7 +1695,7 @@ gst_base_audio_decoder_src_query (GstPad * pad, GstQuery * query) gst_query_parse_duration (query, &format, NULL); /* try answering TIME by converting from BYTE if subclass allows */ - if (format == GST_FORMAT_TIME && gst_base_audio_decoder_do_byte (dec)) { + if (format == GST_FORMAT_TIME && gst_audio_decoder_do_byte (dec)) { gint64 value; format = GST_FORMAT_BYTES; @@ -1807,21 +1794,21 @@ gst_base_audio_decoder_src_query (GstPad * pad, GstQuery * query) } static gboolean -gst_base_audio_decoder_stop (GstBaseAudioDecoder * dec) +gst_audio_decoder_stop (GstAudioDecoder * dec) { - GstBaseAudioDecoderClass *klass; + GstAudioDecoderClass *klass; gboolean ret = TRUE; - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_stop"); + GST_DEBUG_OBJECT (dec, "gst_audio_decoder_stop"); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); if (klass->stop) { ret = klass->stop (dec); } /* clean up */ - gst_base_audio_decoder_reset (dec, TRUE); + gst_audio_decoder_reset (dec, TRUE); if (ret) dec->priv->active = FALSE; @@ -1830,17 +1817,17 @@ gst_base_audio_decoder_stop (GstBaseAudioDecoder * dec) } static gboolean -gst_base_audio_decoder_start (GstBaseAudioDecoder * dec) +gst_audio_decoder_start (GstAudioDecoder * dec) { - GstBaseAudioDecoderClass *klass; + GstAudioDecoderClass *klass; gboolean ret = TRUE; - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_start"); + GST_DEBUG_OBJECT (dec, "gst_audio_decoder_start"); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); + klass = GST_AUDIO_DECODER_GET_CLASS (dec); /* arrange clean state */ - gst_base_audio_decoder_reset (dec, TRUE); + gst_audio_decoder_reset (dec, TRUE); if (klass->start) { ret = klass->start (dec); @@ -1853,12 +1840,12 @@ gst_base_audio_decoder_start (GstBaseAudioDecoder * dec) } static void -gst_base_audio_decoder_get_property (GObject * object, guint prop_id, +gst_audio_decoder_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; - dec = GST_BASE_AUDIO_DECODER (object); + dec = GST_AUDIO_DECODER (object); switch (prop_id) { case PROP_LATENCY: @@ -1877,12 +1864,12 @@ gst_base_audio_decoder_get_property (GObject * object, guint prop_id, } static void -gst_base_audio_decoder_set_property (GObject * object, guint prop_id, +gst_audio_decoder_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { - GstBaseAudioDecoder *dec; + GstAudioDecoder *dec; - dec = GST_BASE_AUDIO_DECODER (object); + dec = GST_AUDIO_DECODER (object); switch (prop_id) { case PROP_LATENCY: @@ -1901,19 +1888,18 @@ gst_base_audio_decoder_set_property (GObject * object, guint prop_id, } static GstStateChangeReturn -gst_base_audio_decoder_change_state (GstElement * element, - GstStateChange transition) +gst_audio_decoder_change_state (GstElement * element, GstStateChange transition) { - GstBaseAudioDecoder *codec; + GstAudioDecoder *codec; GstStateChangeReturn ret; - codec = GST_BASE_AUDIO_DECODER (element); + codec = GST_AUDIO_DECODER (element); 
switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY: break; case GST_STATE_CHANGE_READY_TO_PAUSED: - if (!gst_base_audio_decoder_start (codec)) { + if (!gst_audio_decoder_start (codec)) { goto start_failed; } break; @@ -1929,7 +1915,7 @@ gst_base_audio_decoder_change_state (GstElement * element, case GST_STATE_CHANGE_PLAYING_TO_PAUSED: break; case GST_STATE_CHANGE_PAUSED_TO_READY: - if (!gst_base_audio_decoder_stop (codec)) { + if (!gst_audio_decoder_stop (codec)) { goto stop_failed; } break; @@ -1954,7 +1940,7 @@ stop_failed: } GstFlowReturn -_gst_base_audio_decoder_error (GstBaseAudioDecoder * dec, gint weight, +_gst_audio_decoder_error (GstAudioDecoder * dec, gint weight, GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file, const gchar * function, gint line) { @@ -1974,24 +1960,24 @@ _gst_base_audio_decoder_error (GstBaseAudioDecoder * dec, gint weight, } /** - * gst_base_audio_decoder_get_audio_info: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_audio_info: + * @dec: a #GstAudioDecoder * * Returns: a #GstAudioInfo describing the input audio format * * Since: 0.10.36 */ GstAudioInfo * -gst_base_audio_decoder_get_audio_info (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_audio_info (GstAudioDecoder * dec) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), NULL); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), NULL); return &dec->priv->ctx.info; } /** - * gst_base_audio_decoder_set_plc_aware: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_plc_aware: + * @dec: a #GstAudioDecoder * @plc: new plc state * * Indicates whether or not subclass handles packet loss concealment (plc). @@ -1999,32 +1985,32 @@ gst_base_audio_decoder_get_audio_info (GstBaseAudioDecoder * dec) * Since: 0.10.36 */ void -gst_base_audio_decoder_set_plc_aware (GstBaseAudioDecoder * dec, gboolean plc) +gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, gboolean plc) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); dec->priv->ctx.do_plc = plc; } /** - * gst_base_audio_decoder_get_plc_aware: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_plc_aware: + * @dec: a #GstAudioDecoder * * Returns: currently configured plc handling * * Since: 0.10.36 */ gint -gst_base_audio_decoder_get_plc_aware (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), 0); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); return dec->priv->ctx.do_plc; } /** - * gst_base_audio_decoder_set_byte_time: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_byte_time: + * @dec: a #GstAudioDecoder * @enabled: whether to enable byte to time conversion * * Allows baseclass to perform byte to time estimated conversion. 
@@ -2032,49 +2018,48 @@ gst_base_audio_decoder_get_plc_aware (GstBaseAudioDecoder * dec) * Since: 0.10.36 */ void -gst_base_audio_decoder_set_byte_time (GstBaseAudioDecoder * dec, - gboolean enabled) +gst_audio_decoder_set_byte_time (GstAudioDecoder * dec, gboolean enabled) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); dec->priv->ctx.do_byte_time = enabled; } /** - * gst_base_audio_decoder_get_byte_time: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_byte_time: + * @dec: a #GstAudioDecoder * * Returns: currently configured byte to time conversion setting * * Since: 0.10.36 */ gint -gst_base_audio_decoder_get_byte_time (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_byte_time (GstAudioDecoder * dec) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), 0); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); return dec->priv->ctx.do_byte_time; } /** - * gst_base_audio_decoder_get_delay: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_delay: + * @dec: a #GstAudioDecoder * * Returns: currently configured decoder delay * * Since: 0.10.36 */ gint -gst_base_audio_decoder_get_delay (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_delay (GstAudioDecoder * dec) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), 0); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); return dec->priv->ctx.delay; } /** - * gst_base_audio_decoder_set_max_errors: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_max_errors: + * @dec: a #GstAudioDecoder * @num: max tolerated errors * * Sets numbers of tolerated decoder errors, where a tolerated one is then only @@ -2083,32 +2068,32 @@ gst_base_audio_decoder_get_delay (GstBaseAudioDecoder * dec) * Since: 0.10.36 */ void -gst_base_audio_decoder_set_max_errors (GstBaseAudioDecoder * enc, gint num) +gst_audio_decoder_set_max_errors (GstAudioDecoder * enc, gint num) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (enc)); + g_return_if_fail (GST_IS_AUDIO_DECODER (enc)); enc->priv->ctx.max_errors = num; } /** - * gst_base_audio_decoder_get_max_errors: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_max_errors: + * @dec: a #GstAudioDecoder * * Returns: currently configured decoder tolerated error count. 
* * Since: 0.10.36 */ gint -gst_base_audio_decoder_get_max_errors (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_max_errors (GstAudioDecoder * dec) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), 0); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); return dec->priv->ctx.max_errors; } /** - * gst_base_audio_decoder_set_latency: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_latency: + * @dec: a #GstAudioDecoder * @min: minimum latency * @max: maximum latency * @@ -2117,10 +2102,10 @@ gst_base_audio_decoder_get_max_errors (GstBaseAudioDecoder * dec) * Since: 0.10.36 */ void -gst_base_audio_decoder_set_latency (GstBaseAudioDecoder * dec, +gst_audio_decoder_set_latency (GstAudioDecoder * dec, GstClockTime min, GstClockTime max) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); GST_OBJECT_LOCK (dec); dec->priv->ctx.min_latency = min; @@ -2129,8 +2114,8 @@ gst_base_audio_decoder_set_latency (GstBaseAudioDecoder * dec, } /** - * gst_base_audio_decoder_get_latency: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_latency: + * @dec: a #GstAudioDecoder * @min: a pointer to storage to hold minimum latency * @max: a pointer to storage to hold maximum latency * @@ -2139,10 +2124,10 @@ gst_base_audio_decoder_set_latency (GstBaseAudioDecoder * dec, * Since: 0.10.36 */ void -gst_base_audio_decoder_get_latency (GstBaseAudioDecoder * dec, +gst_audio_decoder_get_latency (GstAudioDecoder * dec, GstClockTime * min, GstClockTime * max) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); GST_OBJECT_LOCK (dec); if (min) @@ -2153,8 +2138,8 @@ gst_base_audio_decoder_get_latency (GstBaseAudioDecoder * dec, } /** - * gst_base_audio_decoder_get_parse_state: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_parse_state: + * @dec: a #GstAudioDecoder * @min: a pointer to storage to hold current sync state * @max: a pointer to storage to hold current eos state * @@ -2163,10 +2148,10 @@ gst_base_audio_decoder_get_latency (GstBaseAudioDecoder * dec, * Since: 0.10.36 */ void -gst_base_audio_decoder_get_parse_state (GstBaseAudioDecoder * dec, +gst_audio_decoder_get_parse_state (GstAudioDecoder * dec, gboolean * sync, gboolean * eos) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); if (sync) *sync = dec->priv->ctx.sync; @@ -2175,8 +2160,8 @@ gst_base_audio_decoder_get_parse_state (GstBaseAudioDecoder * dec, } /** - * gst_base_audio_decoder_set_plc: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_plc: + * @dec: a #GstAudioDecoder * @enabled: new state * * Enable or disable decoder packet loss concealment, provided subclass @@ -2187,9 +2172,9 @@ gst_base_audio_decoder_get_parse_state (GstBaseAudioDecoder * dec, * Since: 0.10.36 */ void -gst_base_audio_decoder_set_plc (GstBaseAudioDecoder * dec, gboolean enabled) +gst_audio_decoder_set_plc (GstAudioDecoder * dec, gboolean enabled) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); GST_LOG_OBJECT (dec, "enabled: %d", enabled); @@ -2199,8 +2184,8 @@ gst_base_audio_decoder_set_plc (GstBaseAudioDecoder * dec, gboolean enabled) } /** - * gst_base_audio_decoder_get_plc: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_plc: + * @dec: a #GstAudioDecoder * * Queries decoder packet loss concealment handling. 
* @@ -2211,11 +2196,11 @@ gst_base_audio_decoder_set_plc (GstBaseAudioDecoder * dec, gboolean enabled) * Since: 0.10.36 */ gboolean -gst_base_audio_decoder_get_plc (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_plc (GstAudioDecoder * dec) { gboolean result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), FALSE); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); GST_OBJECT_LOCK (dec); result = dec->priv->plc; @@ -2225,8 +2210,8 @@ gst_base_audio_decoder_get_plc (GstBaseAudioDecoder * dec) } /** - * gst_base_audio_decoder_set_min_latency: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_min_latency: + * @dec: a #GstAudioDecoder * @num: new minimum latency * * Sets decoder minimum aggregation latency. @@ -2236,9 +2221,9 @@ gst_base_audio_decoder_get_plc (GstBaseAudioDecoder * dec) * Since: 0.10.36 */ void -gst_base_audio_decoder_set_min_latency (GstBaseAudioDecoder * dec, gint64 num) +gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, gint64 num) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); GST_OBJECT_LOCK (dec); dec->priv->latency = num; @@ -2246,8 +2231,8 @@ gst_base_audio_decoder_set_min_latency (GstBaseAudioDecoder * dec, gint64 num) } /** - * gst_base_audio_decoder_get_min_latency: - * @enc: a #GstBaseAudioDecoder + * gst_audio_decoder_get_min_latency: + * @enc: a #GstAudioDecoder * * Queries decoder's latency aggregation. * @@ -2258,11 +2243,11 @@ gst_base_audio_decoder_set_min_latency (GstBaseAudioDecoder * dec, gint64 num) * Since: 0.10.36 */ gint64 -gst_base_audio_decoder_get_min_latency (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_min_latency (GstAudioDecoder * dec) { gint64 result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), FALSE); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE); GST_OBJECT_LOCK (dec); result = dec->priv->latency; @@ -2272,8 +2257,8 @@ gst_base_audio_decoder_get_min_latency (GstBaseAudioDecoder * dec) } /** - * gst_base_audio_decoder_set_tolerance: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_set_tolerance: + * @dec: a #GstAudioDecoder * @tolerance: new tolerance * * Configures decoder audio jitter tolerance threshold. @@ -2283,10 +2268,9 @@ gst_base_audio_decoder_get_min_latency (GstBaseAudioDecoder * dec) * Since: 0.10.36 */ void -gst_base_audio_decoder_set_tolerance (GstBaseAudioDecoder * dec, - gint64 tolerance) +gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, gint64 tolerance) { - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (dec)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); GST_OBJECT_LOCK (dec); dec->priv->tolerance = tolerance; @@ -2294,8 +2278,8 @@ gst_base_audio_decoder_set_tolerance (GstBaseAudioDecoder * dec, } /** - * gst_base_audio_decoder_get_tolerance: - * @dec: a #GstBaseAudioDecoder + * gst_audio_decoder_get_tolerance: + * @dec: a #GstAudioDecoder * * Queries current audio jitter tolerance threshold. 
* @@ -2306,11 +2290,11 @@ gst_base_audio_decoder_set_tolerance (GstBaseAudioDecoder * dec, * Since: 0.10.36 */ gint64 -gst_base_audio_decoder_get_tolerance (GstBaseAudioDecoder * dec) +gst_audio_decoder_get_tolerance (GstAudioDecoder * dec) { gint64 result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_DECODER (dec), 0); + g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), 0); GST_OBJECT_LOCK (dec); result = dec->priv->tolerance; diff --git a/gst-libs/gst/audio/gstbaseaudiodecoder.h b/gst-libs/gst/audio/gstaudiodecoder.h similarity index 55% rename from gst-libs/gst/audio/gstbaseaudiodecoder.h rename to gst-libs/gst/audio/gstaudiodecoder.h index 2f78779ff8..0fbb20a80c 100644 --- a/gst-libs/gst/audio/gstbaseaudiodecoder.h +++ b/gst-libs/gst/audio/gstaudiodecoder.h @@ -21,11 +21,11 @@ * Boston, MA 02111-1307, USA. */ -#ifndef _GST_BASE_AUDIO_DECODER_H_ -#define _GST_BASE_AUDIO_DECODER_H_ +#ifndef _GST_AUDIO_DECODER_H_ +#define _GST_AUDIO_DECODER_H_ #ifndef GST_USE_UNSTABLE_API -#warning "GstBaseAudioDecoder is unstable API and may change in future." +#warning "GstAudioDecoder is unstable API and may change in future." #warning "You can define GST_USE_UNSTABLE_API to avoid this warning." #endif @@ -35,70 +35,70 @@ G_BEGIN_DECLS -#define GST_TYPE_BASE_AUDIO_DECODER \ - (gst_base_audio_decoder_get_type()) -#define GST_BASE_AUDIO_DECODER(obj) \ - (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoder)) -#define GST_BASE_AUDIO_DECODER_CLASS(klass) \ - (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoderClass)) -#define GST_BASE_AUDIO_DECODER_GET_CLASS(obj) \ - (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoderClass)) -#define GST_IS_BASE_AUDIO_DECODER(obj) \ - (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_DECODER)) -#define GST_IS_BASE_AUDIO_DECODER_CLASS(obj) \ - (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_DECODER)) +#define GST_TYPE_AUDIO_DECODER \ + (gst_audio_decoder_get_type()) +#define GST_AUDIO_DECODER(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_DECODER,GstAudioDecoder)) +#define GST_AUDIO_DECODER_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AUDIO_DECODER,GstAudioDecoderClass)) +#define GST_AUDIO_DECODER_GET_CLASS(obj) \ + (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_AUDIO_DECODER,GstAudioDecoderClass)) +#define GST_IS_AUDIO_DECODER(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_DECODER)) +#define GST_IS_AUDIO_DECODER_CLASS(obj) \ + (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AUDIO_DECODER)) /** - * GST_BASE_AUDIO_DECODER_SINK_NAME: + * GST_AUDIO_DECODER_SINK_NAME: * * The name of the templates for the sink pad. * * Since: 0.10.36 */ -#define GST_BASE_AUDIO_DECODER_SINK_NAME "sink" +#define GST_AUDIO_DECODER_SINK_NAME "sink" /** - * GST_BASE_AUDIO_DECODER_SRC_NAME: + * GST_AUDIO_DECODER_SRC_NAME: * * The name of the templates for the source pad. * * Since: 0.10.36 */ -#define GST_BASE_AUDIO_DECODER_SRC_NAME "src" +#define GST_AUDIO_DECODER_SRC_NAME "src" /** - * GST_BASE_AUDIO_DECODER_SRC_PAD: + * GST_AUDIO_DECODER_SRC_PAD: * @obj: base audio codec instance * * Gives the pointer to the source #GstPad object of the element. 
* * Since: 0.10.36 */ -#define GST_BASE_AUDIO_DECODER_SRC_PAD(obj) (((GstBaseAudioDecoder *) (obj))->srcpad) +#define GST_AUDIO_DECODER_SRC_PAD(obj) (((GstAudioDecoder *) (obj))->srcpad) /** - * GST_BASE_AUDIO_DECODER_SINK_PAD: + * GST_AUDIO_DECODER_SINK_PAD: * @obj: base audio codec instance * * Gives the pointer to the sink #GstPad object of the element. * * Since: 0.10.36 */ -#define GST_BASE_AUDIO_DECODER_SINK_PAD(obj) (((GstBaseAudioDecoder *) (obj))->sinkpad) +#define GST_AUDIO_DECODER_SINK_PAD(obj) (((GstAudioDecoder *) (obj))->sinkpad) -typedef struct _GstBaseAudioDecoder GstBaseAudioDecoder; -typedef struct _GstBaseAudioDecoderClass GstBaseAudioDecoderClass; +typedef struct _GstAudioDecoder GstAudioDecoder; +typedef struct _GstAudioDecoderClass GstAudioDecoderClass; -typedef struct _GstBaseAudioDecoderPrivate GstBaseAudioDecoderPrivate; +typedef struct _GstAudioDecoderPrivate GstAudioDecoderPrivate; /* do not use this one, use macro below */ -GstFlowReturn _gst_base_audio_decoder_error (GstBaseAudioDecoder *dec, gint weight, - GQuark domain, gint code, - gchar *txt, gchar *debug, - const gchar *file, const gchar *function, - gint line); +GstFlowReturn _gst_audio_decoder_error (GstAudioDecoder *dec, gint weight, + GQuark domain, gint code, + gchar *txt, gchar *debug, + const gchar *file, const gchar *function, + gint line); /** - * GST_BASE_AUDIO_DECODER_ERROR: + * GST_AUDIO_DECODER_ERROR: * @el: the base audio decoder element that generates the error * @weight: element defined weight of the error, added to error count * @domain: like CORE, LIBRARY, RESOURCE or STREAM (see #gstreamer-GstGError) @@ -120,24 +120,24 @@ GstFlowReturn _gst_base_audio_decoder_error (GstBaseAudioDecoder *dec, gint weig * * Since: 0.10.36 */ -#define GST_BASE_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \ +#define GST_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \ G_STMT_START { \ gchar *__txt = _gst_element_error_printf text; \ gchar *__dbg = _gst_element_error_printf debug; \ - GstBaseAudioDecoder *dec = GST_BASE_AUDIO_DECODER (el); \ - ret = _gst_base_audio_decoder_error (dec, w, GST_ ## domain ## _ERROR, \ + GstAudioDecoder *dec = GST_AUDIO_DECODER (el); \ + ret = _gst_audio_decoder_error (dec, w, GST_ ## domain ## _ERROR, \ GST_ ## domain ## _ERROR_ ## code, __txt, __dbg, __FILE__, \ GST_FUNCTION, __LINE__); \ } G_STMT_END /** - * GstBaseAudioDecoder: + * GstAudioDecoder: * - * The opaque #GstBaseAudioDecoder data structure. + * The opaque #GstAudioDecoder data structure. * * Since: 0.10.36 */ -struct _GstBaseAudioDecoder +struct _GstAudioDecoder { GstElement element; @@ -150,12 +150,12 @@ struct _GstBaseAudioDecoder GstSegment segment; /*< private >*/ - GstBaseAudioDecoderPrivate *priv; + GstAudioDecoderPrivate *priv; gpointer _gst_reserved[GST_PADDING_LARGE]; }; /** - * GstBaseAudioDecoderClass: + * GstAudioDecoderClass: * @start: Optional. * Called when the element starts processing. * Allows opening external resources. 
@@ -191,85 +191,93 @@ struct _GstBaseAudioDecoder * * Since: 0.10.36 */ -struct _GstBaseAudioDecoderClass +struct _GstAudioDecoderClass { GstElementClass parent_class; /*< public >*/ /* virtual methods for subclasses */ - gboolean (*start) (GstBaseAudioDecoder *dec); + gboolean (*start) (GstAudioDecoder *dec); - gboolean (*stop) (GstBaseAudioDecoder *dec); + gboolean (*stop) (GstAudioDecoder *dec); - gboolean (*set_format) (GstBaseAudioDecoder *dec, + gboolean (*set_format) (GstAudioDecoder *dec, GstCaps *caps); - GstFlowReturn (*parse) (GstBaseAudioDecoder *dec, + GstFlowReturn (*parse) (GstAudioDecoder *dec, GstAdapter *adapter, gint *offset, gint *length); - GstFlowReturn (*handle_frame) (GstBaseAudioDecoder *dec, + GstFlowReturn (*handle_frame) (GstAudioDecoder *dec, GstBuffer *buffer); - void (*flush) (GstBaseAudioDecoder *dec, gboolean hard); + void (*flush) (GstAudioDecoder *dec, gboolean hard); - GstFlowReturn (*pre_push) (GstBaseAudioDecoder *dec, + GstFlowReturn (*pre_push) (GstAudioDecoder *dec, GstBuffer **buffer); - gboolean (*event) (GstBaseAudioDecoder *dec, + gboolean (*event) (GstAudioDecoder *dec, GstEvent *event); /*< private >*/ gpointer _gst_reserved[GST_PADDING_LARGE]; }; -GstFlowReturn gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * dec, +GType gst_audio_decoder_get_type (void); + +GstFlowReturn gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf, gint frames); /* context parameters */ -GstAudioInfo * gst_base_audio_decoder_get_audio_info (GstBaseAudioDecoder * dec); +GstAudioInfo * gst_audio_decoder_get_audio_info (GstAudioDecoder * dec); -void gst_base_audio_decoder_set_plc_aware (GstBaseAudioDecoder * dec, - gboolean plc); -gint gst_base_audio_decoder_get_plc_aware (GstBaseAudioDecoder * dec); +void gst_audio_decoder_set_plc_aware (GstAudioDecoder * dec, + gboolean plc); -void gst_base_audio_decoder_set_byte_time (GstBaseAudioDecoder * dec, - gboolean enabled); -gint gst_base_audio_decoder_get_byte_time (GstBaseAudioDecoder * dec); +gint gst_audio_decoder_get_plc_aware (GstAudioDecoder * dec); -gint gst_base_audio_decoder_get_delay (GstBaseAudioDecoder * dec); +void gst_audio_decoder_set_byte_time (GstAudioDecoder * dec, + gboolean enabled); -void gst_base_audio_decoder_set_max_errors (GstBaseAudioDecoder * enc, - gint num); -gint gst_base_audio_decoder_get_max_errors (GstBaseAudioDecoder * dec); +gint gst_audio_decoder_get_byte_time (GstAudioDecoder * dec); -void gst_base_audio_decoder_set_latency (GstBaseAudioDecoder * dec, - GstClockTime min, GstClockTime max); -void gst_base_audio_decoder_get_latency (GstBaseAudioDecoder * dec, - GstClockTime * min, GstClockTime * max); +gint gst_audio_decoder_get_delay (GstAudioDecoder * dec); -void gst_base_audio_decoder_get_parse_state (GstBaseAudioDecoder * dec, - gboolean * sync, gboolean * eos); +void gst_audio_decoder_set_max_errors (GstAudioDecoder * enc, + gint num); + +gint gst_audio_decoder_get_max_errors (GstAudioDecoder * dec); + +void gst_audio_decoder_set_latency (GstAudioDecoder * dec, + GstClockTime min, + GstClockTime max); + +void gst_audio_decoder_get_latency (GstAudioDecoder * dec, + GstClockTime * min, + GstClockTime * max); + +void gst_audio_decoder_get_parse_state (GstAudioDecoder * dec, + gboolean * sync, + gboolean * eos); /* object properties */ -void gst_base_audio_decoder_set_plc (GstBaseAudioDecoder * dec, - gboolean enabled); -gboolean gst_base_audio_decoder_get_plc (GstBaseAudioDecoder * dec); +void gst_audio_decoder_set_plc (GstAudioDecoder * dec, + gboolean 
enabled); -void gst_base_audio_decoder_set_min_latency (GstBaseAudioDecoder * dec, - gint64 num); -gint64 gst_base_audio_decoder_get_min_latency (GstBaseAudioDecoder * dec); +gboolean gst_audio_decoder_get_plc (GstAudioDecoder * dec); -void gst_base_audio_decoder_set_tolerance (GstBaseAudioDecoder * dec, - gint64 tolerance); +void gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, + gint64 num); -gint64 gst_base_audio_decoder_get_tolerance (GstBaseAudioDecoder * dec); +gint64 gst_audio_decoder_get_min_latency (GstAudioDecoder * dec); -GType gst_base_audio_decoder_get_type (void); +void gst_audio_decoder_set_tolerance (GstAudioDecoder * dec, + gint64 tolerance); + +gint64 gst_audio_decoder_get_tolerance (GstAudioDecoder * dec); G_END_DECLS -#endif - +#endif /* _GST_AUDIO_DECODER_H_ */ diff --git a/gst-libs/gst/audio/gstbaseaudioencoder.c b/gst-libs/gst/audio/gstaudioencoder.c similarity index 79% rename from gst-libs/gst/audio/gstbaseaudioencoder.c rename to gst-libs/gst/audio/gstaudioencoder.c index 63131fd101..6f47f6b732 100644 --- a/gst-libs/gst/audio/gstbaseaudioencoder.c +++ b/gst-libs/gst/audio/gstaudioencoder.c @@ -20,7 +20,7 @@ */ /** - * SECTION:gstbaseaudioencoder + * SECTION:gstaudioencoder * @short_description: Base class for audio encoders * @see_also: #GstBaseTransform * @since: 0.10.36 @@ -28,16 +28,16 @@ * This base class is for audio encoders turning raw audio samples into * encoded audio data. * - * GstBaseAudioEncoder and subclass should cooperate as follows. + * GstAudioEncoder and subclass should cooperate as follows. * * * Configuration * - * Initially, GstBaseAudioEncoder calls @start when the encoder element + * Initially, GstAudioEncoder calls @start when the encoder element * is activated, which allows subclass to perform any global setup. * * - * GstBaseAudioEncoder calls @set_format to inform subclass of the format + * GstAudioEncoder calls @set_format to inform subclass of the format * of input audio data that it is about to receive. Subclass should * setup for encoding and configure various base class parameters * appropriately, notably those directing desired input data handling. @@ -45,11 +45,11 @@ * parameters require reconfiguration. * * - * GstBaseAudioEncoder calls @stop at end of all processing. + * GstAudioEncoder calls @stop at end of all processing. * * * - * As of configuration stage, and throughout processing, GstBaseAudioEncoder + * As of configuration stage, and throughout processing, GstAudioEncoder * maintains various parameters that provide required context, * e.g. describing the format of input audio data. * Conversely, subclass can and should configure these context parameters @@ -63,7 +63,7 @@ * * * If codec processing results in encoded data, subclass should call - * @gst_base_audio_encoder_finish_frame to have encoded data pushed + * @gst_audio_encoder_finish_frame to have encoded data pushed * downstream. Alternatively, it might also call to indicate dropped * (non-encoded) samples. * @@ -72,7 +72,7 @@ * it is passed to @pre_push. * * - * During the parsing process GstBaseAudioEncoderClass will handle both + * During the parsing process GstAudioEncoderClass will handle both * srcpad and sinkpad events. Sink events will be passed to subclass * if @event callback has been provided. * @@ -81,7 +81,7 @@ * * Shutdown phase * - * GstBaseAudioEncoder class calls @stop to inform the subclass that data + * GstAudioEncoder class calls @stop to inform the subclass that data * parsing will be stopped. 
* * @@ -92,7 +92,7 @@ * source and sink pads. The pads need to be named "sink" and "src". It also * needs to set the fixed caps on srcpad, when the format is ensured. This * is typically when base class calls subclass' @set_format function, though - * it might be delayed until calling @gst_base_audio_encoder_finish_frame. + * it might be delayed until calling @gst_audio_encoder_finish_frame. * * In summary, above process should have subclass concentrating on * codec data processing while leaving other matters to base class, @@ -101,18 +101,18 @@ * * In particular, base class will either favor tracking upstream timestamps * (at the possible expense of jitter) or aim to arrange for a perfect stream of - * output timestamps, depending on #GstBaseAudioEncoder:perfect-ts. + * output timestamps, depending on #GstAudioEncoder:perfect-ts. * However, in the latter case, the input may not be so perfect or ideal, which * is handled as follows. An input timestamp is compared with the expected * timestamp as dictated by input sample stream and if the deviation is less - * than #GstBaseAudioEncoder:tolerance, the deviation is discarded. + * than #GstAudioEncoder:tolerance, the deviation is discarded. * Otherwise, it is considered a discontuinity and subsequent output timestamp * is resynced to the new position after performing configured discontinuity * processing. In the non-perfect-ts case, an upstream variation exceeding * tolerance only leads to marking DISCONT on subsequent outgoing * (while timestamps are adjusted to upstream regardless of variation). * While DISCONT is also marked in the perfect-ts case, this one optionally - * (see #GstBaseAudioEncoder:hard-resync) + * (see #GstAudioEncoder:hard-resync) * performs some additional steps, such as clipping of (early) input samples * or draining all currently remaining input data, depending on the direction * of the discontuinity. @@ -141,7 +141,7 @@ * * * Accept data in @handle_frame and provide encoded results to - * @gst_base_audio_encoder_finish_frame. + * @gst_audio_encoder_finish_frame. 
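To make the subclass contract sketched above concrete, a minimal encoder built on the renamed base class could look roughly like the following. This is an illustrative sketch only: the GstMyEnc names, the 1024-sample frame size and the pass-through "codec" are invented, and GType boilerplate, caps negotiation and error handling are omitted.

/* Hypothetical GstAudioEncoder subclass fragment (illustrative names/values). */
typedef struct _GstMyEnc { GstAudioEncoder parent; } GstMyEnc;
typedef struct _GstMyEncClass { GstAudioEncoderClass parent_class; } GstMyEncClass;

static gboolean
gst_my_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
{
  /* set up the codec for the negotiated input format, fix the source caps,
   * and tell the base class how many samples each handle_frame() should get */
  gst_audio_encoder_set_frame_samples (enc, 1024);
  return TRUE;
}

static GstFlowReturn
gst_my_enc_handle_frame (GstAudioEncoder * enc, GstBuffer * buffer)
{
  GstBuffer *out;

  if (buffer == NULL)           /* NULL asks the subclass to drain cached data */
    return GST_FLOW_OK;

  /* run the real codec here; this sketch merely copies the input */
  out = gst_buffer_copy (buffer);

  /* hand the encoded data to the base class, which handles timestamps,
   * DISCONT marking and pushing downstream */
  return gst_audio_encoder_finish_frame (enc, out, 1024);
}

static void
gst_my_enc_class_init (GstMyEncClass * klass)
{
  GstAudioEncoderClass *base_class = (GstAudioEncoderClass *) klass;

  base_class->set_format = gst_my_enc_set_format;
  base_class->handle_frame = gst_my_enc_handle_frame;
}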
* * * @@ -152,7 +152,7 @@ #endif #define GST_USE_UNSTABLE_API -#include "gstbaseaudioencoder.h" +#include "gstaudioencoder.h" #include #include @@ -160,12 +160,12 @@ #include -GST_DEBUG_CATEGORY_STATIC (gst_base_audio_encoder_debug); -#define GST_CAT_DEFAULT gst_base_audio_encoder_debug +GST_DEBUG_CATEGORY_STATIC (gst_audio_encoder_debug); +#define GST_CAT_DEFAULT gst_audio_encoder_debug -#define GST_BASE_AUDIO_ENCODER_GET_PRIVATE(obj) \ - (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_BASE_AUDIO_ENCODER, \ - GstBaseAudioEncoderPrivate)) +#define GST_AUDIO_ENCODER_GET_PRIVATE(obj) \ + (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_AUDIO_ENCODER, \ + GstAudioEncoderPrivate)) enum { @@ -181,7 +181,7 @@ enum #define DEFAULT_HARD_RESYNC FALSE #define DEFAULT_TOLERANCE 40000000 -typedef struct _GstBaseAudioEncoderContext +typedef struct _GstAudioEncoderContext { /* input */ GstAudioInfo info; @@ -193,9 +193,9 @@ typedef struct _GstBaseAudioEncoderContext /* MT-protected (with LOCK) */ GstClockTime min_latency; GstClockTime max_latency; -} GstBaseAudioEncoderContext; +} GstAudioEncoderContext; -struct _GstBaseAudioEncoderPrivate +struct _GstAudioEncoderPrivate { /* activation status */ gboolean active; @@ -232,7 +232,7 @@ struct _GstBaseAudioEncoderPrivate guint64 bytes_out; /* context storage */ - GstBaseAudioEncoderContext ctx; + GstAudioEncoderContext ctx; /* properties */ gint64 tolerance; @@ -244,27 +244,26 @@ struct _GstBaseAudioEncoderPrivate static GstElementClass *parent_class = NULL; -static void gst_base_audio_encoder_class_init (GstBaseAudioEncoderClass * - klass); -static void gst_base_audio_encoder_init (GstBaseAudioEncoder * parse, - GstBaseAudioEncoderClass * klass); +static void gst_audio_encoder_class_init (GstAudioEncoderClass * klass); +static void gst_audio_encoder_init (GstAudioEncoder * parse, + GstAudioEncoderClass * klass); GType -gst_base_audio_encoder_get_type (void) +gst_audio_encoder_get_type (void) { - static GType base_audio_encoder_type = 0; + static GType audio_encoder_type = 0; - if (!base_audio_encoder_type) { - static const GTypeInfo base_audio_encoder_info = { - sizeof (GstBaseAudioEncoderClass), + if (!audio_encoder_type) { + static const GTypeInfo audio_encoder_info = { + sizeof (GstAudioEncoderClass), (GBaseInitFunc) NULL, (GBaseFinalizeFunc) NULL, - (GClassInitFunc) gst_base_audio_encoder_class_init, + (GClassInitFunc) gst_audio_encoder_class_init, NULL, NULL, - sizeof (GstBaseAudioEncoder), + sizeof (GstAudioEncoder), 0, - (GInstanceInitFunc) gst_base_audio_encoder_init, + (GInstanceInitFunc) gst_audio_encoder_init, }; const GInterfaceInfo preset_interface_info = { NULL, /* interface_init */ @@ -272,59 +271,52 @@ gst_base_audio_encoder_get_type (void) NULL /* interface_data */ }; - base_audio_encoder_type = g_type_register_static (GST_TYPE_ELEMENT, - "GstBaseAudioEncoder", &base_audio_encoder_info, G_TYPE_FLAG_ABSTRACT); + audio_encoder_type = g_type_register_static (GST_TYPE_ELEMENT, + "GstAudioEncoder", &audio_encoder_info, G_TYPE_FLAG_ABSTRACT); - g_type_add_interface_static (base_audio_encoder_type, GST_TYPE_PRESET, + g_type_add_interface_static (audio_encoder_type, GST_TYPE_PRESET, &preset_interface_info); } - return base_audio_encoder_type; + return audio_encoder_type; } -static void gst_base_audio_encoder_finalize (GObject * object); -static void gst_base_audio_encoder_reset (GstBaseAudioEncoder * enc, - gboolean full); +static void gst_audio_encoder_finalize (GObject * object); +static void gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean 
full); -static void gst_base_audio_encoder_set_property (GObject * object, +static void gst_audio_encoder_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); -static void gst_base_audio_encoder_get_property (GObject * object, +static void gst_audio_encoder_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); -static gboolean gst_base_audio_encoder_sink_activate_push (GstPad * pad, +static gboolean gst_audio_encoder_sink_activate_push (GstPad * pad, gboolean active); -static gboolean gst_base_audio_encoder_sink_event (GstPad * pad, - GstEvent * event); -static gboolean gst_base_audio_encoder_sink_setcaps (GstPad * pad, - GstCaps * caps); -static GstFlowReturn gst_base_audio_encoder_chain (GstPad * pad, - GstBuffer * buffer); -static gboolean gst_base_audio_encoder_src_query (GstPad * pad, - GstQuery * query); -static gboolean gst_base_audio_encoder_sink_query (GstPad * pad, - GstQuery * query); -static const GstQueryType *gst_base_audio_encoder_get_query_types (GstPad * - pad); -static GstCaps *gst_base_audio_encoder_sink_getcaps (GstPad * pad); +static gboolean gst_audio_encoder_sink_event (GstPad * pad, GstEvent * event); +static gboolean gst_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps); +static GstFlowReturn gst_audio_encoder_chain (GstPad * pad, GstBuffer * buffer); +static gboolean gst_audio_encoder_src_query (GstPad * pad, GstQuery * query); +static gboolean gst_audio_encoder_sink_query (GstPad * pad, GstQuery * query); +static const GstQueryType *gst_audio_encoder_get_query_types (GstPad * pad); +static GstCaps *gst_audio_encoder_sink_getcaps (GstPad * pad); static void -gst_base_audio_encoder_class_init (GstBaseAudioEncoderClass * klass) +gst_audio_encoder_class_init (GstAudioEncoderClass * klass) { GObjectClass *gobject_class; gobject_class = G_OBJECT_CLASS (klass); parent_class = g_type_class_peek_parent (klass); - GST_DEBUG_CATEGORY_INIT (gst_base_audio_encoder_debug, "baseaudioencoder", 0, - "baseaudioencoder element"); + GST_DEBUG_CATEGORY_INIT (gst_audio_encoder_debug, "audioencoder", 0, + "audio encoder base class"); - g_type_class_add_private (klass, sizeof (GstBaseAudioEncoderPrivate)); + g_type_class_add_private (klass, sizeof (GstAudioEncoderPrivate)); - gobject_class->set_property = gst_base_audio_encoder_set_property; - gobject_class->get_property = gst_base_audio_encoder_get_property; + gobject_class->set_property = gst_audio_encoder_set_property; + gobject_class->get_property = gst_audio_encoder_get_property; - gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_base_audio_encoder_finalize); + gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_audio_encoder_finalize); /* properties */ g_object_class_install_property (gobject_class, PROP_PERFECT_TS, @@ -347,14 +339,13 @@ gst_base_audio_encoder_class_init (GstBaseAudioEncoderClass * klass) } static void -gst_base_audio_encoder_init (GstBaseAudioEncoder * enc, - GstBaseAudioEncoderClass * bclass) +gst_audio_encoder_init (GstAudioEncoder * enc, GstAudioEncoderClass * bclass) { GstPadTemplate *pad_template; - GST_DEBUG_OBJECT (enc, "gst_base_audio_encoder_init"); + GST_DEBUG_OBJECT (enc, "gst_audio_encoder_init"); - enc->priv = GST_BASE_AUDIO_ENCODER_GET_PRIVATE (enc); + enc->priv = GST_AUDIO_ENCODER_GET_PRIVATE (enc); /* only push mode supported */ pad_template = @@ -362,17 +353,17 @@ gst_base_audio_encoder_init (GstBaseAudioEncoder * enc, g_return_if_fail (pad_template != NULL); enc->sinkpad = gst_pad_new_from_template (pad_template, "sink"); 
gst_pad_set_event_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_event)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_event)); gst_pad_set_setcaps_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_setcaps)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_setcaps)); gst_pad_set_getcaps_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_getcaps)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_getcaps)); gst_pad_set_query_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_query)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_query)); gst_pad_set_chain_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_chain)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_chain)); gst_pad_set_activatepush_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_activate_push)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_sink_activate_push)); gst_element_add_pad (GST_ELEMENT (enc), enc->sinkpad); GST_DEBUG_OBJECT (enc, "sinkpad created"); @@ -383,9 +374,9 @@ gst_base_audio_encoder_init (GstBaseAudioEncoder * enc, g_return_if_fail (pad_template != NULL); enc->srcpad = gst_pad_new_from_template (pad_template, "src"); gst_pad_set_query_function (enc->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_src_query)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_src_query)); gst_pad_set_query_type_function (enc->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_get_query_types)); + GST_DEBUG_FUNCPTR (gst_audio_encoder_get_query_types)); gst_pad_use_fixed_caps (enc->srcpad); gst_element_add_pad (GST_ELEMENT (enc), enc->srcpad); GST_DEBUG_OBJECT (enc, "src created"); @@ -399,12 +390,12 @@ gst_base_audio_encoder_init (GstBaseAudioEncoder * enc, enc->priv->tolerance = DEFAULT_TOLERANCE; /* init state */ - gst_base_audio_encoder_reset (enc, TRUE); + gst_audio_encoder_reset (enc, TRUE); GST_DEBUG_OBJECT (enc, "init ok"); } static void -gst_base_audio_encoder_reset (GstBaseAudioEncoder * enc, gboolean full) +gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full) { GST_OBJECT_LOCK (enc); @@ -431,9 +422,9 @@ gst_base_audio_encoder_reset (GstBaseAudioEncoder * enc, gboolean full) } static void -gst_base_audio_encoder_finalize (GObject * object) +gst_audio_encoder_finalize (GObject * object) { - GstBaseAudioEncoder *enc = GST_BASE_AUDIO_ENCODER (object); + GstAudioEncoder *enc = GST_AUDIO_ENCODER (object); g_object_unref (enc->priv->adapter); @@ -441,8 +432,8 @@ gst_base_audio_encoder_finalize (GObject * object) } /** - * gst_base_audio_encoder_finish_frame: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_finish_frame: + * @enc: a #GstAudioEncoder * @buffer: encoded data * @samples: number of samples (per channel) represented by encoded data * @@ -466,15 +457,15 @@ gst_base_audio_encoder_finalize (GObject * object) * Since: 0.10.36 */ GstFlowReturn -gst_base_audio_encoder_finish_frame (GstBaseAudioEncoder * enc, GstBuffer * buf, +gst_audio_encoder_finish_frame (GstAudioEncoder * enc, GstBuffer * buf, gint samples) { - GstBaseAudioEncoderClass *klass; - GstBaseAudioEncoderPrivate *priv; - GstBaseAudioEncoderContext *ctx; + GstAudioEncoderClass *klass; + GstAudioEncoderPrivate *priv; + GstAudioEncoderContext *ctx; GstFlowReturn ret = GST_FLOW_OK; - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); priv = enc->priv; ctx = &enc->priv->ctx; @@ -655,16 +646,16 @@ overflow: * (i.e. 
really returned by encoder subclass) * - start + offset is what needs to be fed to subclass next */ static GstFlowReturn -gst_base_audio_encoder_push_buffers (GstBaseAudioEncoder * enc, gboolean force) +gst_audio_encoder_push_buffers (GstAudioEncoder * enc, gboolean force) { - GstBaseAudioEncoderClass *klass; - GstBaseAudioEncoderPrivate *priv; - GstBaseAudioEncoderContext *ctx; + GstAudioEncoderClass *klass; + GstAudioEncoderPrivate *priv; + GstAudioEncoderContext *ctx; gint av, need; GstBuffer *buf; GstFlowReturn ret = GST_FLOW_OK; - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR); @@ -737,16 +728,16 @@ gst_base_audio_encoder_push_buffers (GstBaseAudioEncoder * enc, gboolean force) } static GstFlowReturn -gst_base_audio_encoder_drain (GstBaseAudioEncoder * enc) +gst_audio_encoder_drain (GstAudioEncoder * enc) { if (enc->priv->drained) return GST_FLOW_OK; else - return gst_base_audio_encoder_push_buffers (enc, TRUE); + return gst_audio_encoder_push_buffers (enc, TRUE); } static void -gst_base_audio_encoder_set_base_gp (GstBaseAudioEncoder * enc) +gst_audio_encoder_set_base_gp (GstAudioEncoder * enc) { GstClockTime ts; @@ -773,15 +764,15 @@ gst_base_audio_encoder_set_base_gp (GstBaseAudioEncoder * enc) } static GstFlowReturn -gst_base_audio_encoder_chain (GstPad * pad, GstBuffer * buffer) +gst_audio_encoder_chain (GstPad * pad, GstBuffer * buffer) { - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderPrivate *priv; - GstBaseAudioEncoderContext *ctx; + GstAudioEncoder *enc; + GstAudioEncoderPrivate *priv; + GstAudioEncoderContext *ctx; GstFlowReturn ret = GST_FLOW_OK; gboolean discont; - enc = GST_BASE_AUDIO_ENCODER (GST_OBJECT_PARENT (pad)); + enc = GST_AUDIO_ENCODER (GST_OBJECT_PARENT (pad)); priv = enc->priv; ctx = &enc->priv->ctx; @@ -845,7 +836,7 @@ gst_base_audio_encoder_chain (GstPad * pad, GstBuffer * buffer) priv->base_ts = GST_BUFFER_TIMESTAMP (buffer); GST_DEBUG_OBJECT (enc, "new base ts %" GST_TIME_FORMAT, GST_TIME_ARGS (priv->base_ts)); - gst_base_audio_encoder_set_base_gp (enc); + gst_audio_encoder_set_base_gp (enc); } /* check for continuity; @@ -900,12 +891,12 @@ gst_base_audio_encoder_chain (GstPad * pad, GstBuffer * buffer) /* care even less about duration after this */ } else { /* drain stuff prior to resync */ - gst_base_audio_encoder_drain (enc); + gst_audio_encoder_drain (enc); } } /* now re-sync ts */ priv->base_ts += diff; - gst_base_audio_encoder_set_base_gp (enc); + gst_audio_encoder_set_base_gp (enc); priv->discont |= discont; } @@ -913,7 +904,7 @@ gst_base_audio_encoder_chain (GstPad * pad, GstBuffer * buffer) /* new stuff, so we can push subclass again */ enc->priv->drained = FALSE; - ret = gst_base_audio_encoder_push_buffers (enc, FALSE); + ret = gst_audio_encoder_push_buffers (enc, FALSE); done: GST_LOG_OBJECT (enc, "chain leaving"); @@ -955,17 +946,17 @@ audio_info_is_equal (GstAudioInfo * from, GstAudioInfo * to) } static gboolean -gst_base_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps) +gst_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps) { - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderClass *klass; - GstBaseAudioEncoderContext *ctx; + GstAudioEncoder *enc; + GstAudioEncoderClass *klass; + GstAudioEncoderContext *ctx; GstAudioInfo *state, *old_state; gboolean res = TRUE, changed = FALSE; guint old_rate; - enc = GST_BASE_AUDIO_ENCODER (GST_PAD_PARENT (pad)); - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + enc = 
GST_AUDIO_ENCODER (GST_PAD_PARENT (pad)); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); /* subclass must do something here ... */ g_return_val_if_fail (klass->set_format != NULL, FALSE); @@ -998,7 +989,7 @@ gst_base_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps) GstClockTime old_max_latency; /* drain any pending old data stuff */ - gst_base_audio_encoder_drain (enc); + gst_audio_encoder_drain (enc); /* context defaults */ enc->priv->ctx.frame_samples = 0; @@ -1041,8 +1032,8 @@ refuse_caps: /** - * gst_base_audio_encoder_proxy_getcaps: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_proxy_getcaps: + * @enc: a #GstAudioEncoder * @caps: initial caps * * Returns caps that express @caps (or sink template caps if @caps == NULL) @@ -1054,7 +1045,7 @@ refuse_caps: * Since: 0.10.36 */ GstCaps * -gst_base_audio_encoder_proxy_getcaps (GstBaseAudioEncoder * enc, GstCaps * caps) +gst_audio_encoder_proxy_getcaps (GstAudioEncoder * enc, GstCaps * caps) { const GstCaps *templ_caps; GstCaps *allowed = NULL; @@ -1109,20 +1100,20 @@ done: } static GstCaps * -gst_base_audio_encoder_sink_getcaps (GstPad * pad) +gst_audio_encoder_sink_getcaps (GstPad * pad) { - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderClass *klass; + GstAudioEncoder *enc; + GstAudioEncoderClass *klass; GstCaps *caps; - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + enc = GST_AUDIO_ENCODER (gst_pad_get_parent (pad)); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); g_assert (pad == enc->sinkpad); if (klass->getcaps) caps = klass->getcaps (enc); else - caps = gst_base_audio_encoder_proxy_getcaps (enc, NULL); + caps = gst_audio_encoder_proxy_getcaps (enc, NULL); gst_object_unref (enc); GST_LOG_OBJECT (enc, "returning caps %" GST_PTR_FORMAT, caps); @@ -1131,13 +1122,12 @@ gst_base_audio_encoder_sink_getcaps (GstPad * pad) } static gboolean -gst_base_audio_encoder_sink_eventfunc (GstBaseAudioEncoder * enc, - GstEvent * event) +gst_audio_encoder_sink_eventfunc (GstAudioEncoder * enc, GstEvent * event) { - GstBaseAudioEncoderClass *klass; + GstAudioEncoderClass *klass; gboolean handled = FALSE; - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_NEWSEGMENT: @@ -1165,9 +1155,9 @@ gst_base_audio_encoder_sink_eventfunc (GstBaseAudioEncoder * enc, } /* finish current segment */ - gst_base_audio_encoder_drain (enc); + gst_audio_encoder_drain (enc); /* reset partially for new segment */ - gst_base_audio_encoder_reset (enc, FALSE); + gst_audio_encoder_reset (enc, FALSE); /* and follow along with segment */ gst_segment_set_newsegment_full (&enc->segment, update, rate, arate, format, start, stop, time); @@ -1183,11 +1173,11 @@ gst_base_audio_encoder_sink_eventfunc (GstBaseAudioEncoder * enc, if (!enc->priv->drained && klass->flush) klass->flush (enc); /* and get (re)set for the sequel */ - gst_base_audio_encoder_reset (enc, FALSE); + gst_audio_encoder_reset (enc, FALSE); break; case GST_EVENT_EOS: - gst_base_audio_encoder_drain (enc); + gst_audio_encoder_drain (enc); break; default: @@ -1198,15 +1188,15 @@ gst_base_audio_encoder_sink_eventfunc (GstBaseAudioEncoder * enc, } static gboolean -gst_base_audio_encoder_sink_event (GstPad * pad, GstEvent * event) +gst_audio_encoder_sink_event (GstPad * pad, GstEvent * event) { - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderClass *klass; + GstAudioEncoder *enc; + GstAudioEncoderClass *klass; gboolean handled = FALSE; gboolean ret = TRUE; - enc = 
GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + enc = GST_AUDIO_ENCODER (gst_pad_get_parent (pad)); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event), GST_EVENT_TYPE_NAME (event)); @@ -1215,7 +1205,7 @@ gst_base_audio_encoder_sink_event (GstPad * pad, GstEvent * event) handled = klass->event (enc, event); if (!handled) - handled = gst_base_audio_encoder_sink_eventfunc (enc, event); + handled = gst_audio_encoder_sink_eventfunc (enc, event); if (!handled) ret = gst_pad_event_default (pad, event); @@ -1227,12 +1217,12 @@ gst_base_audio_encoder_sink_event (GstPad * pad, GstEvent * event) } static gboolean -gst_base_audio_encoder_sink_query (GstPad * pad, GstQuery * query) +gst_audio_encoder_sink_query (GstPad * pad, GstQuery * query) { gboolean res = TRUE; - GstBaseAudioEncoder *enc; + GstAudioEncoder *enc; - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); + enc = GST_AUDIO_ENCODER (gst_pad_get_parent (pad)); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_FORMATS: @@ -1265,9 +1255,9 @@ error: } static const GstQueryType * -gst_base_audio_encoder_get_query_types (GstPad * pad) +gst_audio_encoder_get_query_types (GstPad * pad) { - static const GstQueryType gst_base_audio_encoder_src_query_types[] = { + static const GstQueryType gst_audio_encoder_src_query_types[] = { GST_QUERY_POSITION, GST_QUERY_DURATION, GST_QUERY_CONVERT, @@ -1275,11 +1265,11 @@ gst_base_audio_encoder_get_query_types (GstPad * pad) 0 }; - return gst_base_audio_encoder_src_query_types; + return gst_audio_encoder_src_query_types; } /* - * gst_base_audio_encoded_audio_convert: + * gst_audio_encoded_audio_convert: * @fmt: audio format of the encoded audio * @bytes: number of encoded bytes * @samples: number of encoded samples @@ -1295,9 +1285,9 @@ gst_base_audio_encoder_get_query_types (GstPad * pad) * * Since: 0.10.36 */ -/* FIXME: make gst_base_audio_encoded_audio_convert() public? */ +/* FIXME: make gst_audio_encoded_audio_convert() public? */ static gboolean -gst_base_audio_encoded_audio_convert (GstAudioInfo * fmt, +gst_audio_encoded_audio_convert (GstAudioInfo * fmt, gint64 bytes, gint64 samples, GstFormat src_format, gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { @@ -1356,13 +1346,13 @@ exit: * segment stuff etc at all * Supposedly that's backward compatibility ... 
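The gst_audio_encoded_audio_convert() helper above answers CONVERT queries from the running totals of encoded bytes and consumed samples. A simplified sketch of the idea (average-bitrate scaling; not the literal implementation, which also handles the DEFAULT format and further error cases):

/* Sketch: bytes <-> time conversion from running totals and the sample rate. */
static gboolean
convert_bytes_time (gint rate, gint64 bytes_out, gint64 samples_in,
    GstFormat src_fmt, gint64 src_val, GstFormat dest_fmt, gint64 * dest_val)
{
  GstClockTime duration;

  if (rate == 0 || bytes_out == 0 || samples_in == 0)
    return FALSE;

  /* total duration represented by the samples consumed so far */
  duration = gst_util_uint64_scale (samples_in, GST_SECOND, rate);

  if (src_fmt == GST_FORMAT_BYTES && dest_fmt == GST_FORMAT_TIME) {
    /* time = bytes * (total time / total bytes) */
    *dest_val = gst_util_uint64_scale (src_val, duration, bytes_out);
    return TRUE;
  } else if (src_fmt == GST_FORMAT_TIME && dest_fmt == GST_FORMAT_BYTES) {
    *dest_val = gst_util_uint64_scale (src_val, bytes_out, duration);
    return TRUE;
  }
  return FALSE;
}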
*/ static gboolean -gst_base_audio_encoder_src_query (GstPad * pad, GstQuery * query) +gst_audio_encoder_src_query (GstPad * pad, GstQuery * query) { - GstBaseAudioEncoder *enc; + GstAudioEncoder *enc; GstPad *peerpad; gboolean res = FALSE; - enc = GST_BASE_AUDIO_ENCODER (GST_PAD_PARENT (pad)); + enc = GST_AUDIO_ENCODER (GST_PAD_PARENT (pad)); peerpad = gst_pad_get_peer (GST_PAD (enc->sinkpad)); GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query); @@ -1430,7 +1420,7 @@ gst_base_audio_encoder_src_query (GstPad * pad, GstQuery * query) gint64 src_val, dest_val; gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - if (!(res = gst_base_audio_encoded_audio_convert (&enc->priv->ctx.info, + if (!(res = gst_audio_encoded_audio_convert (&enc->priv->ctx.info, enc->priv->bytes_out, enc->priv->samples_in, src_fmt, src_val, &dest_fmt, &dest_val))) break; @@ -1470,12 +1460,12 @@ gst_base_audio_encoder_src_query (GstPad * pad, GstQuery * query) } static void -gst_base_audio_encoder_set_property (GObject * object, guint prop_id, +gst_audio_encoder_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { - GstBaseAudioEncoder *enc; + GstAudioEncoder *enc; - enc = GST_BASE_AUDIO_ENCODER (object); + enc = GST_AUDIO_ENCODER (object); switch (prop_id) { case PROP_PERFECT_TS: @@ -1497,12 +1487,12 @@ gst_base_audio_encoder_set_property (GObject * object, guint prop_id, } static void -gst_base_audio_encoder_get_property (GObject * object, guint prop_id, +gst_audio_encoder_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { - GstBaseAudioEncoder *enc; + GstAudioEncoder *enc; - enc = GST_BASE_AUDIO_ENCODER (object); + enc = GST_AUDIO_ENCODER (object); switch (prop_id) { case PROP_PERFECT_TS: @@ -1524,12 +1514,12 @@ gst_base_audio_encoder_get_property (GObject * object, guint prop_id, } static gboolean -gst_base_audio_encoder_activate (GstBaseAudioEncoder * enc, gboolean active) +gst_audio_encoder_activate (GstAudioEncoder * enc, gboolean active) { - GstBaseAudioEncoderClass *klass; + GstAudioEncoderClass *klass; gboolean result = FALSE; - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); + klass = GST_AUDIO_ENCODER_GET_CLASS (enc); g_return_val_if_fail (!enc->priv->granule || enc->priv->perfect_ts, FALSE); @@ -1548,7 +1538,7 @@ gst_base_audio_encoder_activate (GstBaseAudioEncoder * enc, gboolean active) result = klass->stop (enc); /* clean up */ - gst_base_audio_encoder_reset (enc, TRUE); + gst_audio_encoder_reset (enc, TRUE); } GST_DEBUG_OBJECT (enc, "activate return: %d", result); return result; @@ -1556,16 +1546,16 @@ gst_base_audio_encoder_activate (GstBaseAudioEncoder * enc, gboolean active) static gboolean -gst_base_audio_encoder_sink_activate_push (GstPad * pad, gboolean active) +gst_audio_encoder_sink_activate_push (GstPad * pad, gboolean active) { gboolean result = TRUE; - GstBaseAudioEncoder *enc; + GstAudioEncoder *enc; - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); + enc = GST_AUDIO_ENCODER (gst_pad_get_parent (pad)); GST_DEBUG_OBJECT (enc, "sink activate push %d", active); - result = gst_base_audio_encoder_activate (enc, active); + result = gst_audio_encoder_activate (enc, active); if (result) enc->priv->active = active; @@ -1577,24 +1567,24 @@ gst_base_audio_encoder_sink_activate_push (GstPad * pad, gboolean active) } /** - * gst_base_audio_encoder_get_audio_info: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_audio_info: + * @enc: a #GstAudioEncoder * * Returns: a #GstAudioInfo 
describing the input audio format * * Since: 0.10.36 */ GstAudioInfo * -gst_base_audio_encoder_get_audio_info (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_audio_info (GstAudioEncoder * enc) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), NULL); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), NULL); return &enc->priv->ctx.info; } /** - * gst_base_audio_encoder_set_frame_samples: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_frame_samples: + * @enc: a #GstAudioEncoder * @num: number of samples per frame * * Sets number of samples (per channel) subclass needs to be handed, @@ -1603,32 +1593,32 @@ gst_base_audio_encoder_get_audio_info (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_frame_samples (GstBaseAudioEncoder * enc, gint num) +gst_audio_encoder_set_frame_samples (GstAudioEncoder * enc, gint num) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); enc->priv->ctx.frame_samples = num; } /** - * gst_base_audio_encoder_get_frame_samples: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_frame_samples: + * @enc: a #GstAudioEncoder * * Returns: currently requested samples per frame * * Since: 0.10.36 */ gint -gst_base_audio_encoder_get_frame_samples (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_frame_samples (GstAudioEncoder * enc) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), 0); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0); return enc->priv->ctx.frame_samples; } /** - * gst_base_audio_encoder_set_frame_max: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_frame_max: + * @enc: a #GstAudioEncoder * @num: number of frames * * Sets max number of frames accepted at once (assumed minimally 1) @@ -1636,32 +1626,32 @@ gst_base_audio_encoder_get_frame_samples (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_frame_max (GstBaseAudioEncoder * enc, gint num) +gst_audio_encoder_set_frame_max (GstAudioEncoder * enc, gint num) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); enc->priv->ctx.frame_max = num; } /** - * gst_base_audio_encoder_get_frame_max: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_frame_max: + * @enc: a #GstAudioEncoder * * Returns: currently configured maximum handled frames * * Since: 0.10.36 */ gint -gst_base_audio_encoder_get_frame_max (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_frame_max (GstAudioEncoder * enc) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), 0); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0); return enc->priv->ctx.frame_max; } /** - * gst_base_audio_encoder_set_lookahead: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_lookahead: + * @enc: a #GstAudioEncoder * @num: lookahead * * Sets encoder lookahead (in units of input rate samples) @@ -1669,30 +1659,30 @@ gst_base_audio_encoder_get_frame_max (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_lookahead (GstBaseAudioEncoder * enc, gint num) +gst_audio_encoder_set_lookahead (GstAudioEncoder * enc, gint num) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); enc->priv->ctx.lookahead = num; } /** - * gst_base_audio_encoder_get_lookahead: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_lookahead: + * @enc: a #GstAudioEncoder * * Returns: currently configured encoder lookahead */ gint -gst_base_audio_encoder_get_lookahead 
(GstBaseAudioEncoder * enc) +gst_audio_encoder_get_lookahead (GstAudioEncoder * enc) { - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), 0); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0); return enc->priv->ctx.lookahead; } /** - * gst_base_audio_encoder_set_latency: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_latency: + * @enc: a #GstAudioEncoder * @min: minimum latency * @max: maximum latency * @@ -1701,10 +1691,10 @@ gst_base_audio_encoder_get_lookahead (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_latency (GstBaseAudioEncoder * enc, +gst_audio_encoder_set_latency (GstAudioEncoder * enc, GstClockTime min, GstClockTime max) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); GST_OBJECT_LOCK (enc); enc->priv->ctx.min_latency = min; @@ -1713,8 +1703,8 @@ gst_base_audio_encoder_set_latency (GstBaseAudioEncoder * enc, } /** - * gst_base_audio_encoder_get_latency: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_latency: + * @enc: a #GstAudioEncoder * @min: a pointer to storage to hold minimum latency * @max: a pointer to storage to hold maximum latency * @@ -1723,10 +1713,10 @@ gst_base_audio_encoder_set_latency (GstBaseAudioEncoder * enc, * Since: 0.10.36 */ void -gst_base_audio_encoder_get_latency (GstBaseAudioEncoder * enc, +gst_audio_encoder_get_latency (GstAudioEncoder * enc, GstClockTime * min, GstClockTime * max) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); GST_OBJECT_LOCK (enc); if (min) @@ -1737,8 +1727,8 @@ gst_base_audio_encoder_get_latency (GstBaseAudioEncoder * enc, } /** - * gst_base_audio_encoder_set_mark_granule: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_mark_granule: + * @enc: a #GstAudioEncoder * @enabled: new state * * Enable or disable encoder granule handling. @@ -1748,10 +1738,9 @@ gst_base_audio_encoder_get_latency (GstBaseAudioEncoder * enc, * Since: 0.10.36 */ void -gst_base_audio_encoder_set_mark_granule (GstBaseAudioEncoder * enc, - gboolean enabled) +gst_audio_encoder_set_mark_granule (GstAudioEncoder * enc, gboolean enabled) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); GST_LOG_OBJECT (enc, "enabled: %d", enabled); @@ -1761,8 +1750,8 @@ gst_base_audio_encoder_set_mark_granule (GstBaseAudioEncoder * enc, } /** - * gst_base_audio_encoder_get_mark_granule: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_mark_granule: + * @enc: a #GstAudioEncoder * * Queries if the encoder will handle granule marking. * @@ -1773,11 +1762,11 @@ gst_base_audio_encoder_set_mark_granule (GstBaseAudioEncoder * enc, * Since: 0.10.36 */ gboolean -gst_base_audio_encoder_get_mark_granule (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_mark_granule (GstAudioEncoder * enc) { gboolean result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), FALSE); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE); GST_OBJECT_LOCK (enc); result = enc->priv->granule; @@ -1787,8 +1776,8 @@ gst_base_audio_encoder_get_mark_granule (GstBaseAudioEncoder * enc) } /** - * gst_base_audio_encoder_set_perfect_timestamp: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_perfect_timestamp: + * @enc: a #GstAudioEncoder * @enabled: new state * * Enable or disable encoder perfect output timestamp preference. 
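The frame-samples, lookahead and latency setters above are typically used together from a subclass' set_format implementation, once the sample rate is known. A hypothetical helper (the frame size and lookahead values are made up):

/* Illustrative helper a subclass might call from set_format(). */
static void
my_enc_configure_latency (GstAudioEncoder * enc, gint rate)
{
  const gint frame_samples = 1024;
  const gint lookahead = 160;
  GstClockTime latency;

  gst_audio_encoder_set_frame_samples (enc, frame_samples);
  gst_audio_encoder_set_lookahead (enc, lookahead);

  /* report the algorithmic delay, in time, as both minimum and maximum */
  latency = gst_util_uint64_scale (frame_samples + lookahead, GST_SECOND, rate);
  gst_audio_encoder_set_latency (enc, latency, latency);
}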
@@ -1798,10 +1787,10 @@ gst_base_audio_encoder_get_mark_granule (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_perfect_timestamp (GstBaseAudioEncoder * enc, +gst_audio_encoder_set_perfect_timestamp (GstAudioEncoder * enc, gboolean enabled) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); GST_LOG_OBJECT (enc, "enabled: %d", enabled); @@ -1811,8 +1800,8 @@ gst_base_audio_encoder_set_perfect_timestamp (GstBaseAudioEncoder * enc, } /** - * gst_base_audio_encoder_get_perfect_timestamp: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_perfect_timestamp: + * @enc: a #GstAudioEncoder * * Queries encoder perfect timestamp behaviour. * @@ -1823,11 +1812,11 @@ gst_base_audio_encoder_set_perfect_timestamp (GstBaseAudioEncoder * enc, * Since: 0.10.36 */ gboolean -gst_base_audio_encoder_get_perfect_timestamp (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_perfect_timestamp (GstAudioEncoder * enc) { gboolean result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), FALSE); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE); GST_OBJECT_LOCK (enc); result = enc->priv->perfect_ts; @@ -1837,8 +1826,8 @@ gst_base_audio_encoder_get_perfect_timestamp (GstBaseAudioEncoder * enc) } /** - * gst_base_audio_encoder_set_hard_sync: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_hard_sync: + * @enc: a #GstAudioEncoder * @enabled: new state * * Sets encoder hard resync handling. @@ -1848,10 +1837,9 @@ gst_base_audio_encoder_get_perfect_timestamp (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_hard_resync (GstBaseAudioEncoder * enc, - gboolean enabled) +gst_audio_encoder_set_hard_resync (GstAudioEncoder * enc, gboolean enabled) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); GST_LOG_OBJECT (enc, "enabled: %d", enabled); @@ -1861,8 +1849,8 @@ gst_base_audio_encoder_set_hard_resync (GstBaseAudioEncoder * enc, } /** - * gst_base_audio_encoder_get_hard_sync: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_hard_sync: + * @enc: a #GstAudioEncoder * * Queries encoder's hard resync setting. * @@ -1873,11 +1861,11 @@ gst_base_audio_encoder_set_hard_resync (GstBaseAudioEncoder * enc, * Since: 0.10.36 */ gboolean -gst_base_audio_encoder_get_hard_resync (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_hard_resync (GstAudioEncoder * enc) { gboolean result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), FALSE); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), FALSE); GST_OBJECT_LOCK (enc); result = enc->priv->hard_resync; @@ -1887,8 +1875,8 @@ gst_base_audio_encoder_get_hard_resync (GstBaseAudioEncoder * enc) } /** - * gst_base_audio_encoder_set_tolerance: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_set_tolerance: + * @enc: a #GstAudioEncoder * @tolerance: new tolerance * * Configures encoder audio jitter tolerance threshold. 
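A granule-based (Ogg-style) codec would typically combine these setters in its start() vfunc; note that the base class only allows granule marking together with perfect timestamps. An illustrative setup, with made-up policy choices:

/* Hypothetical start() for a granule-marking codec. */
static gboolean
my_enc_start (GstAudioEncoder * enc)
{
  /* prefer a perfect output timestamp stream and mark granules;
   * granule marking requires perfect timestamps */
  gst_audio_encoder_set_perfect_timestamp (enc, TRUE);
  gst_audio_encoder_set_mark_granule (enc, TRUE);

  /* resync hard on discontinuities beyond 40 ms (the built-in default) */
  gst_audio_encoder_set_hard_resync (enc, TRUE);
  gst_audio_encoder_set_tolerance (enc, 40 * GST_MSECOND);

  return TRUE;
}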
@@ -1898,10 +1886,9 @@ gst_base_audio_encoder_get_hard_resync (GstBaseAudioEncoder * enc) * Since: 0.10.36 */ void -gst_base_audio_encoder_set_tolerance (GstBaseAudioEncoder * enc, - gint64 tolerance) +gst_audio_encoder_set_tolerance (GstAudioEncoder * enc, gint64 tolerance) { - g_return_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc)); + g_return_if_fail (GST_IS_AUDIO_ENCODER (enc)); GST_OBJECT_LOCK (enc); enc->priv->tolerance = tolerance; @@ -1909,8 +1896,8 @@ gst_base_audio_encoder_set_tolerance (GstBaseAudioEncoder * enc, } /** - * gst_base_audio_encoder_get_tolerance: - * @enc: a #GstBaseAudioEncoder + * gst_audio_encoder_get_tolerance: + * @enc: a #GstAudioEncoder * * Queries current audio jitter tolerance threshold. * @@ -1921,11 +1908,11 @@ gst_base_audio_encoder_set_tolerance (GstBaseAudioEncoder * enc, * Since: 0.10.36 */ gint64 -gst_base_audio_encoder_get_tolerance (GstBaseAudioEncoder * enc) +gst_audio_encoder_get_tolerance (GstAudioEncoder * enc) { gint64 result; - g_return_val_if_fail (GST_IS_BASE_AUDIO_ENCODER (enc), 0); + g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0); GST_OBJECT_LOCK (enc); result = enc->priv->tolerance; diff --git a/gst-libs/gst/audio/gstaudioencoder.h b/gst-libs/gst/audio/gstaudioencoder.h new file mode 100644 index 0000000000..ada952fa7a --- /dev/null +++ b/gst-libs/gst/audio/gstaudioencoder.h @@ -0,0 +1,244 @@ +/* GStreamer + * Copyright (C) 2011 Mark Nauwelaerts . + * Copyright (C) 2011 Nokia Corporation. All rights reserved. + * Contact: Stefan Kost + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + */ + +#ifndef __GST_AUDIO_ENCODER_H__ +#define __GST_AUDIO_ENCODER_H__ + +#ifndef GST_USE_UNSTABLE_API +#warning "GstAudioEncoder is unstable API and may change in future." +#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." 
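Like the header it replaces, gstaudioencoder.h is guarded as unstable API, so users must opt in explicitly, either in code or by passing -DGST_USE_UNSTABLE_API in CFLAGS:

/* opt in to the (still unstable) base class API before including the header */
#define GST_USE_UNSTABLE_API
#include <gst/audio/gstaudioencoder.h>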
+#endif + +#include +#include + +G_BEGIN_DECLS + +#define GST_TYPE_AUDIO_ENCODER (gst_audio_encoder_get_type()) +#define GST_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_ENCODER,GstAudioEncoder)) +#define GST_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AUDIO_ENCODER,GstAudioEncoderClass)) +#define GST_AUDIO_ENCODER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_AUDIO_ENCODER,GstAudioEncoderClass)) +#define GST_IS_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_ENCODER)) +#define GST_IS_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AUDIO_ENCODER)) +#define GST_AUDIO_ENCODER_CAST(obj) ((GstAudioEncoder *)(obj)) + +/** + * GST_AUDIO_ENCODER_SINK_NAME: + * + * the name of the templates for the sink pad + * + * Since: 0.10.36 + */ +#define GST_AUDIO_ENCODER_SINK_NAME "sink" +/** + * GST_AUDIO_ENCODER_SRC_NAME: + * + * the name of the templates for the source pad + * + * Since: 0.10.36 + */ +#define GST_AUDIO_ENCODER_SRC_NAME "src" + +/** + * GST_AUDIO_ENCODER_SRC_PAD: + * @obj: base parse instance + * + * Gives the pointer to the source #GstPad object of the element. + * + * Since: 0.10.36 + */ +#define GST_AUDIO_ENCODER_SRC_PAD(obj) (GST_AUDIO_ENCODER_CAST (obj)->srcpad) + +/** + * GST_AUDIO_ENCODER_SINK_PAD: + * @obj: base parse instance + * + * Gives the pointer to the sink #GstPad object of the element. + * + * Since: 0.10.36 + */ +#define GST_AUDIO_ENCODER_SINK_PAD(obj) (GST_AUDIO_ENCODER_CAST (obj)->sinkpad) + +/** + * GST_AUDIO_ENCODER_SEGMENT: + * @obj: base parse instance + * + * Gives the segment of the element. + * + * Since: 0.10.36 + */ +#define GST_AUDIO_ENCODER_SEGMENT(obj) (GST_AUDIO_ENCODER_CAST (obj)->segment) + + +typedef struct _GstAudioEncoder GstAudioEncoder; +typedef struct _GstAudioEncoderClass GstAudioEncoderClass; + +typedef struct _GstAudioEncoderPrivate GstAudioEncoderPrivate; + +/** + * GstAudioEncoder: + * @element: the parent element. + * + * The opaque #GstAudioEncoder data structure. + * + * Since: 0.10.36 + */ +struct _GstAudioEncoder { + GstElement element; + + /*< protected >*/ + /* source and sink pads */ + GstPad *sinkpad; + GstPad *srcpad; + + /* MT-protected (with STREAM_LOCK) */ + GstSegment segment; + + /*< private >*/ + GstAudioEncoderPrivate *priv; + gpointer _gst_reserved[GST_PADDING_LARGE]; +}; + +/** + * GstAudioEncoderClass: + * @start: Optional. + * Called when the element starts processing. + * Allows opening external resources. + * @stop: Optional. + * Called when the element stops processing. + * Allows closing external resources. + * @set_format: Notifies subclass of incoming data format. + * GstAudioInfo contains the format according to provided caps. + * @handle_frame: Provides input samples (or NULL to clear any remaining data) + * according to directions as provided by subclass in the + * #GstAudioEncoderContext. Input data ref management + * is performed by base class, subclass should not care or + * intervene. + * @flush: Optional. + * Instructs subclass to clear any codec caches and discard + * any pending samples and not yet returned encoded data. + * @event: Optional. + * Event handler on the sink pad. This function should return + * TRUE if the event was handled and should be discarded + * (i.e. not unref'ed). + * @pre_push: Optional. + * Called just prior to pushing (encoded data) buffer downstream. + * Subclass has full discretionary access to buffer, + * and a not OK flow return will abort downstream pushing. 
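The pad accessor macros above let vfunc implementations reach the element's pads directly; for example, a subclass can fix its source caps from set_format, as the base class expects, along these lines (the caps string is invented for the example):

/* Illustrative fragment: fix output caps on the source pad. */
static gboolean
my_enc_negotiate_src (GstAudioEncoder * enc, gint rate, gint channels)
{
  GstCaps *caps;
  gboolean res;

  caps = gst_caps_new_simple ("audio/x-my-codec",
      "rate", G_TYPE_INT, rate, "channels", G_TYPE_INT, channels, NULL);
  res = gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (enc), caps);
  gst_caps_unref (caps);

  return res;
}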
+ * @getcaps: Optional. + * Allows for a custom sink getcaps implementation (e.g. + * for multichannel input specification). If not implemented, + * default returns gst_audio_encoder_proxy_getcaps + * applied to sink template caps. + * + * Subclasses can override any of the available virtual methods or not, as + * needed. At minimum @set_format and @handle_frame needs to be overridden. + * + * Since: 0.10.36 + */ +struct _GstAudioEncoderClass { + GstElementClass parent_class; + + /*< public >*/ + /* virtual methods for subclasses */ + + gboolean (*start) (GstAudioEncoder *enc); + + gboolean (*stop) (GstAudioEncoder *enc); + + gboolean (*set_format) (GstAudioEncoder *enc, + GstAudioInfo *info); + + GstFlowReturn (*handle_frame) (GstAudioEncoder *enc, + GstBuffer *buffer); + + void (*flush) (GstAudioEncoder *enc); + + GstFlowReturn (*pre_push) (GstAudioEncoder *enc, + GstBuffer **buffer); + + gboolean (*event) (GstAudioEncoder *enc, + GstEvent *event); + + GstCaps * (*getcaps) (GstAudioEncoder *enc); + + /*< private >*/ + gpointer _gst_reserved[GST_PADDING_LARGE]; +}; + +GType gst_audio_encoder_get_type (void); + +GstFlowReturn gst_audio_encoder_finish_frame (GstAudioEncoder * enc, + GstBuffer * buffer, + gint samples); + +GstCaps * gst_audio_encoder_proxy_getcaps (GstAudioEncoder * enc, + GstCaps * caps); + + +/* context parameters */ +GstAudioInfo * gst_audio_encoder_get_audio_info (GstAudioEncoder * enc); + +gint gst_audio_encoder_get_frame_samples (GstAudioEncoder * enc); + +void gst_audio_encoder_set_frame_samples (GstAudioEncoder * enc, gint num); + +gint gst_audio_encoder_get_frame_max (GstAudioEncoder * enc); + +void gst_audio_encoder_set_frame_max (GstAudioEncoder * enc, gint num); + +gint gst_audio_encoder_get_lookahead (GstAudioEncoder * enc); + +void gst_audio_encoder_set_lookahead (GstAudioEncoder * enc, gint num); + +void gst_audio_encoder_get_latency (GstAudioEncoder * enc, + GstClockTime * min, + GstClockTime * max); + +void gst_audio_encoder_set_latency (GstAudioEncoder * enc, + GstClockTime min, + GstClockTime max); + +/* object properties */ + +void gst_audio_encoder_set_mark_granule (GstAudioEncoder * enc, + gboolean enabled); + +gboolean gst_audio_encoder_get_mark_granule (GstAudioEncoder * enc); + +void gst_audio_encoder_set_perfect_timestamp (GstAudioEncoder * enc, + gboolean enabled); + +gboolean gst_audio_encoder_get_perfect_timestamp (GstAudioEncoder * enc); + +void gst_audio_encoder_set_hard_resync (GstAudioEncoder * enc, + gboolean enabled); + +gboolean gst_audio_encoder_get_hard_resync (GstAudioEncoder * enc); + +void gst_audio_encoder_set_tolerance (GstAudioEncoder * enc, + gint64 tolerance); + +gint64 gst_audio_encoder_get_tolerance (GstAudioEncoder * enc); + +G_END_DECLS + +#endif /* __GST_AUDIO_ENCODER_H__ */ diff --git a/gst-libs/gst/audio/gstbaseaudioencoder.h b/gst-libs/gst/audio/gstbaseaudioencoder.h deleted file mode 100644 index fa948daff2..0000000000 --- a/gst-libs/gst/audio/gstbaseaudioencoder.h +++ /dev/null @@ -1,235 +0,0 @@ -/* GStreamer - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. - * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -#ifndef __GST_BASE_AUDIO_ENCODER_H__ -#define __GST_BASE_AUDIO_ENCODER_H__ - -#ifndef GST_USE_UNSTABLE_API -#warning "GstBaseAudioEncoder is unstable API and may change in future." -#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." -#endif - -#include -#include - -G_BEGIN_DECLS - -#define GST_TYPE_BASE_AUDIO_ENCODER (gst_base_audio_encoder_get_type()) -#define GST_BASE_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoder)) -#define GST_BASE_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoderClass)) -#define GST_BASE_AUDIO_ENCODER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoderClass)) -#define GST_IS_BASE_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_ENCODER)) -#define GST_IS_BASE_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_ENCODER)) -#define GST_BASE_AUDIO_ENCODER_CAST(obj) ((GstBaseAudioEncoder *)(obj)) - -/** - * GST_BASE_AUDIO_ENCODER_SINK_NAME: - * - * the name of the templates for the sink pad - * - * Since: 0.10.36 - */ -#define GST_BASE_AUDIO_ENCODER_SINK_NAME "sink" -/** - * GST_BASE_AUDIO_ENCODER_SRC_NAME: - * - * the name of the templates for the source pad - * - * Since: 0.10.36 - */ -#define GST_BASE_AUDIO_ENCODER_SRC_NAME "src" - -/** - * GST_BASE_AUDIO_ENCODER_SRC_PAD: - * @obj: base parse instance - * - * Gives the pointer to the source #GstPad object of the element. - * - * Since: 0.10.36 - */ -#define GST_BASE_AUDIO_ENCODER_SRC_PAD(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->srcpad) - -/** - * GST_BASE_AUDIO_ENCODER_SINK_PAD: - * @obj: base parse instance - * - * Gives the pointer to the sink #GstPad object of the element. - * - * Since: 0.10.36 - */ -#define GST_BASE_AUDIO_ENCODER_SINK_PAD(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->sinkpad) - -/** - * GST_BASE_AUDIO_ENCODER_SEGMENT: - * @obj: base parse instance - * - * Gives the segment of the element. - * - * Since: 0.10.36 - */ -#define GST_BASE_AUDIO_ENCODER_SEGMENT(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->segment) - - -typedef struct _GstBaseAudioEncoder GstBaseAudioEncoder; -typedef struct _GstBaseAudioEncoderClass GstBaseAudioEncoderClass; - -typedef struct _GstBaseAudioEncoderPrivate GstBaseAudioEncoderPrivate; - -/** - * GstBaseAudioEncoder: - * @element: the parent element. - * - * The opaque #GstBaseAudioEncoder data structure. - * - * Since: 0.10.36 - */ -struct _GstBaseAudioEncoder { - GstElement element; - - /*< protected >*/ - /* source and sink pads */ - GstPad *sinkpad; - GstPad *srcpad; - - /* MT-protected (with STREAM_LOCK) */ - GstSegment segment; - - /*< private >*/ - GstBaseAudioEncoderPrivate *priv; - gpointer _gst_reserved[GST_PADDING_LARGE]; -}; - -/** - * GstBaseAudioEncoderClass: - * @start: Optional. - * Called when the element starts processing. - * Allows opening external resources. - * @stop: Optional. - * Called when the element stops processing. 
- * Allows closing external resources. - * @set_format: Notifies subclass of incoming data format. - * GstAudioInfo contains the format according to provided caps. - * @handle_frame: Provides input samples (or NULL to clear any remaining data) - * according to directions as provided by subclass in the - * #GstBaseAudioEncoderContext. Input data ref management - * is performed by base class, subclass should not care or - * intervene. - * @flush: Optional. - * Instructs subclass to clear any codec caches and discard - * any pending samples and not yet returned encoded data. - * @event: Optional. - * Event handler on the sink pad. This function should return - * TRUE if the event was handled and should be discarded - * (i.e. not unref'ed). - * @pre_push: Optional. - * Called just prior to pushing (encoded data) buffer downstream. - * Subclass has full discretionary access to buffer, - * and a not OK flow return will abort downstream pushing. - * @getcaps: Optional. - * Allows for a custom sink getcaps implementation (e.g. - * for multichannel input specification). If not implemented, - * default returns gst_base_audio_encoder_proxy_getcaps - * applied to sink template caps. - * - * Subclasses can override any of the available virtual methods or not, as - * needed. At minimum @set_format and @handle_frame needs to be overridden. - * - * Since: 0.10.36 - */ -struct _GstBaseAudioEncoderClass { - GstElementClass parent_class; - - /*< public >*/ - /* virtual methods for subclasses */ - - gboolean (*start) (GstBaseAudioEncoder *enc); - - gboolean (*stop) (GstBaseAudioEncoder *enc); - - gboolean (*set_format) (GstBaseAudioEncoder *enc, - GstAudioInfo *info); - - GstFlowReturn (*handle_frame) (GstBaseAudioEncoder *enc, - GstBuffer *buffer); - - void (*flush) (GstBaseAudioEncoder *enc); - - GstFlowReturn (*pre_push) (GstBaseAudioEncoder *enc, - GstBuffer **buffer); - - gboolean (*event) (GstBaseAudioEncoder *enc, - GstEvent *event); - - GstCaps * (*getcaps) (GstBaseAudioEncoder *enc); - - /*< private >*/ - gpointer _gst_reserved[GST_PADDING_LARGE]; -}; - -GType gst_base_audio_encoder_get_type (void); - -GstFlowReturn gst_base_audio_encoder_finish_frame (GstBaseAudioEncoder * enc, - GstBuffer *buffer, gint samples); - -GstCaps * gst_base_audio_encoder_proxy_getcaps (GstBaseAudioEncoder * enc, - GstCaps * caps); - - -/* context parameters */ -GstAudioInfo * gst_base_audio_encoder_get_audio_info (GstBaseAudioEncoder * enc); - -gint gst_base_audio_encoder_get_frame_samples (GstBaseAudioEncoder * enc); -void gst_base_audio_encoder_set_frame_samples (GstBaseAudioEncoder * enc, - gint num); - -gint gst_base_audio_encoder_get_frame_max (GstBaseAudioEncoder * enc); -void gst_base_audio_encoder_set_frame_max (GstBaseAudioEncoder * enc, - gint num); - -gint gst_base_audio_encoder_get_lookahead (GstBaseAudioEncoder * enc); -void gst_base_audio_encoder_set_lookahead (GstBaseAudioEncoder * enc, - gint num); - -void gst_base_audio_encoder_get_latency (GstBaseAudioEncoder * enc, - GstClockTime * min, GstClockTime * max); -void gst_base_audio_encoder_set_latency (GstBaseAudioEncoder * enc, - GstClockTime min, GstClockTime max); - -/* object properties */ -void gst_base_audio_encoder_set_mark_granule (GstBaseAudioEncoder * enc, - gboolean enabled); -gboolean gst_base_audio_encoder_get_mark_granule (GstBaseAudioEncoder * enc); - -void gst_base_audio_encoder_set_perfect_timestamp (GstBaseAudioEncoder * enc, - gboolean enabled); -gboolean gst_base_audio_encoder_get_perfect_timestamp (GstBaseAudioEncoder * enc); - -void 
gst_base_audio_encoder_set_hard_resync (GstBaseAudioEncoder * enc, - gboolean enabled); -gboolean gst_base_audio_encoder_get_hard_resync (GstBaseAudioEncoder * enc); - -void gst_base_audio_encoder_set_tolerance (GstBaseAudioEncoder * enc, - gint64 tolerance); -gint64 gst_base_audio_encoder_get_tolerance (GstBaseAudioEncoder * enc); - -G_END_DECLS - -#endif /* __GST_BASE_AUDIO_ENCODER_H__ */ diff --git a/win32/common/libgstaudio.def b/win32/common/libgstaudio.def index 414a9679cc..0500ea6860 100644 --- a/win32/common/libgstaudio.def +++ b/win32/common/libgstaudio.def @@ -1,5 +1,5 @@ EXPORTS - _gst_base_audio_decoder_error + _gst_audio_decoder_error gst_audio_buffer_clip gst_audio_channel_position_get_type gst_audio_check_channel_positions @@ -10,8 +10,47 @@ EXPORTS gst_audio_clock_new gst_audio_clock_new_full gst_audio_clock_reset + gst_audio_decoder_finish_frame + gst_audio_decoder_get_audio_info + gst_audio_decoder_get_byte_time + gst_audio_decoder_get_delay + gst_audio_decoder_get_latency + gst_audio_decoder_get_max_errors + gst_audio_decoder_get_min_latency + gst_audio_decoder_get_parse_state + gst_audio_decoder_get_plc + gst_audio_decoder_get_plc_aware + gst_audio_decoder_get_tolerance + gst_audio_decoder_get_type + gst_audio_decoder_set_byte_time + gst_audio_decoder_set_latency + gst_audio_decoder_set_max_errors + gst_audio_decoder_set_min_latency + gst_audio_decoder_set_plc + gst_audio_decoder_set_plc_aware + gst_audio_decoder_set_tolerance gst_audio_default_registry_mixer_filter gst_audio_duration_from_pad_buffer + gst_audio_encoder_finish_frame + gst_audio_encoder_get_audio_info + gst_audio_encoder_get_frame_max + gst_audio_encoder_get_frame_samples + gst_audio_encoder_get_hard_resync + gst_audio_encoder_get_latency + gst_audio_encoder_get_lookahead + gst_audio_encoder_get_mark_granule + gst_audio_encoder_get_perfect_timestamp + gst_audio_encoder_get_tolerance + gst_audio_encoder_get_type + gst_audio_encoder_proxy_getcaps + gst_audio_encoder_set_frame_max + gst_audio_encoder_set_frame_samples + gst_audio_encoder_set_hard_resync + gst_audio_encoder_set_latency + gst_audio_encoder_set_lookahead + gst_audio_encoder_set_mark_granule + gst_audio_encoder_set_perfect_timestamp + gst_audio_encoder_set_tolerance gst_audio_filter_class_add_pad_templates gst_audio_filter_get_type gst_audio_fixate_channel_positions @@ -34,45 +73,6 @@ EXPORTS gst_audio_sink_get_type gst_audio_src_get_type gst_audio_structure_set_int - gst_base_audio_decoder_finish_frame - gst_base_audio_decoder_get_audio_info - gst_base_audio_decoder_get_byte_time - gst_base_audio_decoder_get_delay - gst_base_audio_decoder_get_latency - gst_base_audio_decoder_get_max_errors - gst_base_audio_decoder_get_min_latency - gst_base_audio_decoder_get_parse_state - gst_base_audio_decoder_get_plc - gst_base_audio_decoder_get_plc_aware - gst_base_audio_decoder_get_tolerance - gst_base_audio_decoder_get_type - gst_base_audio_decoder_set_byte_time - gst_base_audio_decoder_set_latency - gst_base_audio_decoder_set_max_errors - gst_base_audio_decoder_set_min_latency - gst_base_audio_decoder_set_plc - gst_base_audio_decoder_set_plc_aware - gst_base_audio_decoder_set_tolerance - gst_base_audio_encoder_finish_frame - gst_base_audio_encoder_get_audio_info - gst_base_audio_encoder_get_frame_max - gst_base_audio_encoder_get_frame_samples - gst_base_audio_encoder_get_hard_resync - gst_base_audio_encoder_get_latency - gst_base_audio_encoder_get_lookahead - gst_base_audio_encoder_get_mark_granule - gst_base_audio_encoder_get_perfect_timestamp - 
gst_base_audio_encoder_get_tolerance - gst_base_audio_encoder_get_type - gst_base_audio_encoder_proxy_getcaps - gst_base_audio_encoder_set_frame_max - gst_base_audio_encoder_set_frame_samples - gst_base_audio_encoder_set_hard_resync - gst_base_audio_encoder_set_latency - gst_base_audio_encoder_set_lookahead - gst_base_audio_encoder_set_mark_granule - gst_base_audio_encoder_set_perfect_timestamp - gst_base_audio_encoder_set_tolerance gst_base_audio_sink_create_ringbuffer gst_base_audio_sink_get_drift_tolerance gst_base_audio_sink_get_provide_clock From 7563e0c9cf2c99ae48e5d7ebd11d2d3dff34b6c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= Date: Mon, 5 Sep 2011 20:45:22 +0100 Subject: [PATCH 17/21] docs: add GstAudioDecoder and GstAudioEncoder to documentation --- docs/libs/gst-plugins-base-libs-docs.sgml | 2 + docs/libs/gst-plugins-base-libs-sections.txt | 80 ++++++++++++++++++++ docs/libs/gst-plugins-base-libs.types | 4 + gst-libs/gst/audio/gstaudiodecoder.c | 7 +- gst-libs/gst/audio/gstaudioencoder.c | 22 +++--- gst-libs/gst/audio/gstaudioencoder.h | 7 +- 6 files changed, 105 insertions(+), 17 deletions(-) diff --git a/docs/libs/gst-plugins-base-libs-docs.sgml b/docs/libs/gst-plugins-base-libs-docs.sgml index 65677efb94..9902258ba9 100644 --- a/docs/libs/gst-plugins-base-libs-docs.sgml +++ b/docs/libs/gst-plugins-base-libs-docs.sgml @@ -44,6 +44,8 @@ + + diff --git a/docs/libs/gst-plugins-base-libs-sections.txt b/docs/libs/gst-plugins-base-libs-sections.txt index f19607a869..8c0a8b3f9d 100644 --- a/docs/libs/gst-plugins-base-libs-sections.txt +++ b/docs/libs/gst-plugins-base-libs-sections.txt @@ -124,6 +124,86 @@ GST_IS_AUDIO_CLOCK_CLASS GST_AUDIO_CLOCK_CAST +
+gstaudiodecoder +gst/audio/gstaudiodecoder.h +GstAudioDecoder +GstAudioDecoderClass +GST_AUDIO_DECODER_ERROR +GST_AUDIO_DECODER_SINK_NAME +GST_AUDIO_DECODER_SINK_PAD +GST_AUDIO_DECODER_SRC_NAME +GST_AUDIO_DECODER_SRC_PAD +gst_audio_decoder_finish_frame +gst_audio_decoder_get_audio_info +gst_audio_decoder_get_byte_time +gst_audio_decoder_get_delay +gst_audio_decoder_get_latency +gst_audio_decoder_get_max_errors +gst_audio_decoder_get_min_latency +gst_audio_decoder_get_parse_state +gst_audio_decoder_get_plc +gst_audio_decoder_get_plc_aware +gst_audio_decoder_get_tolerance +gst_audio_decoder_set_byte_time +gst_audio_decoder_set_latency +gst_audio_decoder_set_max_errors +gst_audio_decoder_set_min_latency +gst_audio_decoder_set_plc +gst_audio_decoder_set_plc_aware +gst_audio_decoder_set_tolerance + +GST_AUDIO_DECODER +GST_IS_AUDI_DECODER +GST_TYPE_AUDIO_DECODER +gst_audio_decoder_get_type +GST_AUDIO_DECODER_CLASS +GST_IS_AUDIO_DECODER_CLASS +GST_AUDIO_DECODER_GET_CLASS +GstAudioDecoderPrivate +
+ +
+gstaudioencoder +gst/audio/gstaudioencoder.h +GstAudioEncoder +GstAudioEncoderClass +GST_AUDIO_ENCODER_SEGMENT +GST_AUDIO_ENCODER_SINK_NAME +GST_AUDIO_ENCODER_SINK_PAD +GST_AUDIO_ENCODER_SRC_NAME +GST_AUDIO_ENCODER_SRC_PAD +gst_audio_encoder_finish_frame +gst_audio_encoder_get_audio_info +gst_audio_encoder_get_frame_max +gst_audio_encoder_get_frame_samples +gst_audio_encoder_get_hard_resync +gst_audio_encoder_get_latency +gst_audio_encoder_get_lookahead +gst_audio_encoder_get_mark_granule +gst_audio_encoder_get_perfect_timestamp +gst_audio_encoder_get_tolerance +gst_audio_encoder_proxy_getcaps +gst_audio_encoder_set_frame_max +gst_audio_encoder_set_frame_samples +gst_audio_encoder_set_hard_resync +gst_audio_encoder_set_latency +gst_audio_encoder_set_lookahead +gst_audio_encoder_set_mark_granule +gst_audio_encoder_set_perfect_timestamp +gst_audio_encoder_set_tolerance + +GST_AUDIO_ENCODER +GST_AUDIO_ENCODER_CAST +GST_IS_AUDIO_ENCODER +GST_TYPE_AUDIO_ENCODER +gst_audio_encoder_get_type +GST_AUDIO_ENCODER_CLASS +GST_IS_AUDIO_ENCODER_CLASS +GST_AUDIO_ENCODER_GET_CLASS +GstAudioEncoderPrivate +
+
gstaudiofilter gst/audio/gstaudiofilter.h diff --git a/docs/libs/gst-plugins-base-libs.types b/docs/libs/gst-plugins-base-libs.types index f6c06af2cd..991a97579e 100644 --- a/docs/libs/gst-plugins-base-libs.types +++ b/docs/libs/gst-plugins-base-libs.types @@ -3,6 +3,10 @@ #include gst_audio_clock_get_type +#include +gst_audio_decoder_get_type +#include +gst_audio_encoder_get_type #include gst_audio_filter_get_type #include diff --git a/gst-libs/gst/audio/gstaudiodecoder.c b/gst-libs/gst/audio/gstaudiodecoder.c index 087baac1f1..24298ec000 100644 --- a/gst-libs/gst/audio/gstaudiodecoder.c +++ b/gst-libs/gst/audio/gstaudiodecoder.c @@ -2116,10 +2116,11 @@ gst_audio_decoder_set_latency (GstAudioDecoder * dec, /** * gst_audio_decoder_get_latency: * @dec: a #GstAudioDecoder - * @min: a pointer to storage to hold minimum latency - * @max: a pointer to storage to hold maximum latency + * @min: (out) (allow-none): a pointer to storage to hold minimum latency + * @max: (out) (allow-none): a pointer to storage to hold maximum latency * - * Returns currently configured latency. + * Sets the variables pointed to by @min and @max to the currently configured + * latency. * * Since: 0.10.36 */ diff --git a/gst-libs/gst/audio/gstaudioencoder.c b/gst-libs/gst/audio/gstaudioencoder.c index 6f47f6b732..619ec5a7ef 100644 --- a/gst-libs/gst/audio/gstaudioencoder.c +++ b/gst-libs/gst/audio/gstaudioencoder.c @@ -101,18 +101,18 @@ * * In particular, base class will either favor tracking upstream timestamps * (at the possible expense of jitter) or aim to arrange for a perfect stream of - * output timestamps, depending on #GstAudioEncoder:perfect-ts. + * output timestamps, depending on #GstAudioEncoder:perfect-timestamp. * However, in the latter case, the input may not be so perfect or ideal, which * is handled as follows. An input timestamp is compared with the expected * timestamp as dictated by input sample stream and if the deviation is less * than #GstAudioEncoder:tolerance, the deviation is discarded. * Otherwise, it is considered a discontuinity and subsequent output timestamp * is resynced to the new position after performing configured discontinuity - * processing. In the non-perfect-ts case, an upstream variation exceeding - * tolerance only leads to marking DISCONT on subsequent outgoing + * processing. In the non-perfect-timestamp case, an upstream variation + * exceeding tolerance only leads to marking DISCONT on subsequent outgoing * (while timestamps are adjusted to upstream regardless of variation). - * While DISCONT is also marked in the perfect-ts case, this one optionally - * (see #GstAudioEncoder:hard-resync) + * While DISCONT is also marked in the perfect-timestamp case, this one + * optionally (see #GstAudioEncoder:hard-resync) * performs some additional steps, such as clipping of (early) input samples * or draining all currently remaining input data, depending on the direction * of the discontuinity. 
@@ -325,7 +325,7 @@ gst_audio_encoder_class_init (GstAudioEncoderClass * klass) DEFAULT_PERFECT_TS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_GRANULE, g_param_spec_boolean ("mark-granule", "Granule Marking", - "Apply granule semantics to buffer metadata (implies perfect-ts)", + "Apply granule semantics to buffer metadata (implies perfect-timestamp)", DEFAULT_GRANULE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, PROP_HARD_RESYNC, g_param_spec_boolean ("hard-resync", "Hard Resync", @@ -1470,7 +1470,8 @@ gst_audio_encoder_set_property (GObject * object, guint prop_id, switch (prop_id) { case PROP_PERFECT_TS: if (enc->priv->granule && !g_value_get_boolean (value)) - GST_WARNING_OBJECT (enc, "perfect-ts can not be set FALSE"); + GST_WARNING_OBJECT (enc, "perfect-timestamp can not be set FALSE " + "while granule handling is enabled"); else enc->priv->perfect_ts = g_value_get_boolean (value); break; @@ -1705,10 +1706,11 @@ gst_audio_encoder_set_latency (GstAudioEncoder * enc, /** * gst_audio_encoder_get_latency: * @enc: a #GstAudioEncoder - * @min: a pointer to storage to hold minimum latency - * @max: a pointer to storage to hold maximum latency + * @min: (out) (allow-none): a pointer to storage to hold minimum latency + * @max: (out) (allow-none): a pointer to storage to hold maximum latency * - * Returns currently configured latency. + * Sets the variables pointed to by @min and @max to the currently configured + * latency. * * Since: 0.10.36 */ diff --git a/gst-libs/gst/audio/gstaudioencoder.h b/gst-libs/gst/audio/gstaudioencoder.h index ada952fa7a..8eb9f876b4 100644 --- a/gst-libs/gst/audio/gstaudioencoder.h +++ b/gst-libs/gst/audio/gstaudioencoder.h @@ -128,10 +128,9 @@ struct _GstAudioEncoder { * @set_format: Notifies subclass of incoming data format. * GstAudioInfo contains the format according to provided caps. * @handle_frame: Provides input samples (or NULL to clear any remaining data) - * according to directions as provided by subclass in the - * #GstAudioEncoderContext. Input data ref management - * is performed by base class, subclass should not care or - * intervene. + * according to directions as configured by the subclass + * using the API. Input data ref management is performed + * by base class, subclass should not care or intervene. * @flush: Optional. * Instructs subclass to clear any codec caches and discard * any pending samples and not yet returned encoded data. From ba05716485feed3279790237e9d4c36a2df0cddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= Date: Mon, 5 Sep 2011 21:40:05 +0100 Subject: [PATCH 18/21] docs: some docs love --- docs/libs/Makefile.am | 8 +++++++- docs/libs/gst-plugins-base-libs-sections.txt | 16 +++++++++++++--- gst-libs/gst/audio/gstaudiodecoder.c | 12 ++++++------ gst-libs/gst/audio/gstaudiodecoder.h | 6 +++--- gst-libs/gst/audio/gstaudioencoder.h | 1 - 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/docs/libs/Makefile.am b/docs/libs/Makefile.am index af2632cf45..ba92fc5212 100644 --- a/docs/libs/Makefile.am +++ b/docs/libs/Makefile.am @@ -79,7 +79,13 @@ SCANOBJ_DEPS = \ $(top_builddir)/gst-libs/gst/pbutils/libgstpbutils-@GST_MAJORMINOR@.la # Header files to ignore when scanning. 
-IGNORE_HFILES = pbutils-private.h +IGNORE_HFILES = pbutils-private.h gsttageditingprivate.h id3v2.h \ + kiss_fft_f32.h kiss_fft_f64.h kiss_fftr_f32.h kiss_fftr_f64.h \ + kiss_fftr_s16.h kiss_fftr_s32.h kiss_fft_s16.h kiss_fft_s32.h \ + _kiss_fft_guts_f32.h _kiss_fft_guts_f64.h _kiss_fft_guts_s16.h \ + _kiss_fft_guts_s16.h _kiss_fft_guts_s32.h _kiss_fft_guts_s32.h \ + interfaces-marshal.h pbutils-marshal.h + # Images to copy into HTML directory. HTML_IMAGES = diff --git a/docs/libs/gst-plugins-base-libs-sections.txt b/docs/libs/gst-plugins-base-libs-sections.txt index 8c0a8b3f9d..5fcf91f30c 100644 --- a/docs/libs/gst-plugins-base-libs-sections.txt +++ b/docs/libs/gst-plugins-base-libs-sections.txt @@ -35,6 +35,7 @@ GST_IS_APP_BUFFER GST_IS_APP_BUFFER_CLASS GST_TYPE_APP_BUFFER GST_TYPE_APP_STREAM_TYPE +gst_app_stream_type_get_type GstAppSrc GstAppSrcPrivate @@ -154,7 +155,7 @@ gst_audio_decoder_set_plc_aware gst_audio_decoder_set_tolerance GST_AUDIO_DECODER -GST_IS_AUDI_DECODER +GST_IS_AUDIO_DECODER GST_TYPE_AUDIO_DECODER gst_audio_decoder_get_type GST_AUDIO_DECODER_CLASS @@ -545,6 +546,8 @@ KISS_FFT_F64_SIN gst/floatcast/floatcast.h gst_cast_double gst_cast_float + +inline
@@ -1916,7 +1919,7 @@ GST_IS_TAG_MUX_CLASS GST_TAG_MUX GST_TAG_MUX_CLASS GST_TYPE_TAG_MUX -gst_tag_demux_get_type +gst_tag_mux_get_type
@@ -1933,6 +1936,7 @@ gst_tag_get_language_code_iso_639_2T
gsttaglicenses gst/tag/tag.h +GstTagLicenseFlags gst_tag_get_license_flags gst_tag_get_license_nick gst_tag_get_license_title @@ -2156,7 +2160,7 @@ GstEncodingTargetClass
gstvideo -gst/video/video.h +gst/video.h GST_VIDEO_BLUE_MASK_15 GST_VIDEO_BLUE_MASK_15_INT GST_VIDEO_BLUE_MASK_16 @@ -2222,6 +2226,7 @@ GST_VIDEO_SIZE_RANGE GST_VIDEO_BUFFER_TFF GST_VIDEO_BUFFER_RFF GST_VIDEO_BUFFER_ONEFIELD +GST_VIDEO_BUFFER_PROGRESSIVE GstVideoFormat gst_video_calculate_display_ratio gst_video_frame_rate @@ -2326,6 +2331,7 @@ GstDiscovererStreamInfo GstDiscovererContainerInfo GstDiscovererAudioInfo GstDiscovererVideoInfo +GstDiscovererSubtitleInfo gst_discoverer_stream_info_get_caps gst_discoverer_stream_info_get_misc gst_discoverer_stream_info_get_next @@ -2367,6 +2373,7 @@ GST_DISCOVERER_CONTAINER_INFO GST_DISCOVERER_INFO GST_DISCOVERER_STREAM_INFO GST_DISCOVERER_VIDEO_INFO +GST_DISCOVERER_SUBTITLE_INFO GST_IS_DISCOVERER GST_IS_DISCOVERER_INFO GST_IS_DISCOVERER_AUDIO_INFO @@ -2374,6 +2381,7 @@ GST_IS_DISCOVERER_CLASS GST_IS_DISCOVERER_CONTAINER_INFO GST_IS_DISCOVERER_STREAM_INFO GST_IS_DISCOVERER_VIDEO_INFO +GST_IS_DISCOVERER_SUBTITLE_INFO GST_TYPE_DISCOVERER GST_TYPE_DISCOVERER_AUDIO_INFO GST_TYPE_DISCOVERER_CONTAINER_INFO @@ -2381,12 +2389,14 @@ GST_TYPE_DISCOVERER_INFO GST_TYPE_DISCOVERER_RESULT GST_TYPE_DISCOVERER_STREAM_INFO GST_TYPE_DISCOVERER_VIDEO_INFO +GST_TYPE_DISCOVERER_SUBTITLE_INFO GstDiscovererAudioInfoClass GstDiscovererClass GstDiscovererContainerInfoClass GstDiscovererPrivate GstDiscovererStreamInfoClass GstDiscovererVideoInfoClass +GstDiscovererSubtitleInfoClass GstDiscovererInfoClass gst_discoverer_audio_info_get_type gst_discoverer_container_info_get_type diff --git a/gst-libs/gst/audio/gstaudiodecoder.c b/gst-libs/gst/audio/gstaudiodecoder.c index 24298ec000..9bc20655d0 100644 --- a/gst-libs/gst/audio/gstaudiodecoder.c +++ b/gst-libs/gst/audio/gstaudiodecoder.c @@ -2068,11 +2068,11 @@ gst_audio_decoder_get_delay (GstAudioDecoder * dec) * Since: 0.10.36 */ void -gst_audio_decoder_set_max_errors (GstAudioDecoder * enc, gint num) +gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num) { - g_return_if_fail (GST_IS_AUDIO_DECODER (enc)); + g_return_if_fail (GST_IS_AUDIO_DECODER (dec)); - enc->priv->ctx.max_errors = num; + dec->priv->ctx.max_errors = num; } /** @@ -2141,8 +2141,8 @@ gst_audio_decoder_get_latency (GstAudioDecoder * dec, /** * gst_audio_decoder_get_parse_state: * @dec: a #GstAudioDecoder - * @min: a pointer to storage to hold current sync state - * @max: a pointer to storage to hold current eos state + * @sync: a pointer to a variable to hold the current sync state + * @eos: a pointer to a variable to hold the current eos state * * Return current parsing (sync and eos) state. * @@ -2233,7 +2233,7 @@ gst_audio_decoder_set_min_latency (GstAudioDecoder * dec, gint64 num) /** * gst_audio_decoder_get_min_latency: - * @enc: a #GstAudioDecoder + * @dec: a #GstAudioDecoder * * Queries decoder's latency aggregation. 
* diff --git a/gst-libs/gst/audio/gstaudiodecoder.h b/gst-libs/gst/audio/gstaudiodecoder.h index 0fbb20a80c..894e9cc6e8 100644 --- a/gst-libs/gst/audio/gstaudiodecoder.h +++ b/gst-libs/gst/audio/gstaudiodecoder.h @@ -120,12 +120,12 @@ GstFlowReturn _gst_audio_decoder_error (GstAudioDecoder *dec, gint weight, * * Since: 0.10.36 */ -#define GST_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \ +#define GST_AUDIO_DECODER_ERROR(el, weight, domain, code, text, debug, ret) \ G_STMT_START { \ gchar *__txt = _gst_element_error_printf text; \ gchar *__dbg = _gst_element_error_printf debug; \ GstAudioDecoder *dec = GST_AUDIO_DECODER (el); \ - ret = _gst_audio_decoder_error (dec, w, GST_ ## domain ## _ERROR, \ + ret = _gst_audio_decoder_error (dec, weight, GST_ ## domain ## _ERROR, \ GST_ ## domain ## _ERROR_ ## code, __txt, __dbg, __FILE__, \ GST_FUNCTION, __LINE__); \ } G_STMT_END @@ -244,7 +244,7 @@ gint gst_audio_decoder_get_byte_time (GstAudioDecoder * dec); gint gst_audio_decoder_get_delay (GstAudioDecoder * dec); -void gst_audio_decoder_set_max_errors (GstAudioDecoder * enc, +void gst_audio_decoder_set_max_errors (GstAudioDecoder * dec, gint num); gint gst_audio_decoder_get_max_errors (GstAudioDecoder * dec); diff --git a/gst-libs/gst/audio/gstaudioencoder.h b/gst-libs/gst/audio/gstaudioencoder.h index 8eb9f876b4..4995fa9285 100644 --- a/gst-libs/gst/audio/gstaudioencoder.h +++ b/gst-libs/gst/audio/gstaudioencoder.h @@ -95,7 +95,6 @@ typedef struct _GstAudioEncoderPrivate GstAudioEncoderPrivate; /** * GstAudioEncoder: - * @element: the parent element. * * The opaque #GstAudioEncoder data structure. * From 5e61db25b505f757983ae29ebe718634619bf529 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= Date: Mon, 5 Sep 2011 22:51:38 +0100 Subject: [PATCH 19/21] audio: fix GST_AUDIO_FORMAT_INFO_IS_*() macros to return a boolean --- gst-libs/gst/audio/audio.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gst-libs/gst/audio/audio.h b/gst-libs/gst/audio/audio.h index e6d6363fb2..e0bf676326 100644 --- a/gst-libs/gst/audio/audio.h +++ b/gst-libs/gst/audio/audio.h @@ -192,10 +192,9 @@ struct _GstAudioFormatInfo { #define GST_AUDIO_FORMAT_INFO_NAME(info) ((info)->name) #define GST_AUDIO_FORMAT_INFO_FLAGS(info) ((info)->flags) -// FIXME: fix IS macros (make boolean) -#define GST_AUDIO_FORMAT_INFO_IS_INTEGER(info) ((info)->flags & GST_AUDIO_FORMAT_FLAG_INTEGER) -#define GST_AUDIO_FORMAT_INFO_IS_FLOAT(info) ((info)->flags & GST_AUDIO_FORMAT_FLAG_FLOAT) -#define GST_AUDIO_FORMAT_INFO_IS_SIGNED(info) ((info)->flags & GST_AUDIO_FORMAT_FLAG_SIGNED) +#define GST_AUDIO_FORMAT_INFO_IS_INTEGER(info) !!((info)->flags & GST_AUDIO_FORMAT_FLAG_INTEGER) +#define GST_AUDIO_FORMAT_INFO_IS_FLOAT(info) !!((info)->flags & GST_AUDIO_FORMAT_FLAG_FLOAT) +#define GST_AUDIO_FORMAT_INFO_IS_SIGNED(info) !!((info)->flags & GST_AUDIO_FORMAT_FLAG_SIGNED) #define GST_AUDIO_FORMAT_INFO_ENDIANNESS(info) ((info)->endianness) #define GST_AUDIO_FORMAT_INFO_IS_LE(info) ((info)->endianness == G_LITTLE_ENDIAN) From 78f50f2d2522cea0e3db7dcd866911b13f4ac152 Mon Sep 17 00:00:00 2001 From: Vincent Penquerc'h Date: Mon, 5 Sep 2011 23:00:30 +0100 Subject: [PATCH 20/21] videorate: don't take the object lock twice in {set,get}_property https://bugzilla.gnome.org/show_bug.cgi?id=658294 --- gst/videorate/gstvideorate.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/gst/videorate/gstvideorate.c b/gst/videorate/gstvideorate.c index 32f15fe6c7..307ebc2e62 100644 --- a/gst/videorate/gstvideorate.c 
+++ b/gst/videorate/gstvideorate.c @@ -994,9 +994,7 @@ gst_video_rate_set_property (GObject * object, videorate->drop_only = g_value_get_boolean (value); break; case ARG_AVERAGE_PERIOD: - GST_OBJECT_LOCK (videorate); videorate->average_period_set = g_value_get_uint64 (value); - GST_OBJECT_UNLOCK (videorate); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); @@ -1038,9 +1036,7 @@ gst_video_rate_get_property (GObject * object, g_value_set_boolean (value, videorate->drop_only); break; case ARG_AVERAGE_PERIOD: - GST_OBJECT_LOCK (videorate); g_value_set_uint64 (value, videorate->average_period_set); - GST_OBJECT_UNLOCK (videorate); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); From 9a8a989a2270d8b7bd964cf0c2ee668fa6dc2fbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim-Philipp=20M=C3=BCller?= Date: Tue, 6 Sep 2011 10:07:33 +0100 Subject: [PATCH 21/21] docs: more docs clean-ups --- docs/libs/gst-plugins-base-libs-sections.txt | 2 +- gst-libs/gst/audio/gstaudiodecoder.h | 3 ++- gst-libs/gst/audio/gstaudioencoder.h | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/libs/gst-plugins-base-libs-sections.txt b/docs/libs/gst-plugins-base-libs-sections.txt index 5fcf91f30c..39d274e01b 100644 --- a/docs/libs/gst-plugins-base-libs-sections.txt +++ b/docs/libs/gst-plugins-base-libs-sections.txt @@ -2160,7 +2160,7 @@ GstEncodingTargetClass
gstvideo -gst/video.h +gst/video/video.h GST_VIDEO_BLUE_MASK_15 GST_VIDEO_BLUE_MASK_15_INT GST_VIDEO_BLUE_MASK_16 diff --git a/gst-libs/gst/audio/gstaudiodecoder.h b/gst-libs/gst/audio/gstaudiodecoder.h index 894e9cc6e8..1c47e1a7d5 100644 --- a/gst-libs/gst/audio/gstaudiodecoder.h +++ b/gst-libs/gst/audio/gstaudiodecoder.h @@ -156,6 +156,7 @@ struct _GstAudioDecoder /** * GstAudioDecoderClass: + * @element_class: The parent class structure * @start: Optional. * Called when the element starts processing. * Allows opening external resources. @@ -193,7 +194,7 @@ struct _GstAudioDecoder */ struct _GstAudioDecoderClass { - GstElementClass parent_class; + GstElementClass element_class; /*< public >*/ /* virtual methods for subclasses */ diff --git a/gst-libs/gst/audio/gstaudioencoder.h b/gst-libs/gst/audio/gstaudioencoder.h index 4995fa9285..a8ff018874 100644 --- a/gst-libs/gst/audio/gstaudioencoder.h +++ b/gst-libs/gst/audio/gstaudioencoder.h @@ -118,6 +118,7 @@ struct _GstAudioEncoder { /** * GstAudioEncoderClass: + * @element_class: The parent class structure * @start: Optional. * Called when the element starts processing. * Allows opening external resources. @@ -153,7 +154,7 @@ struct _GstAudioEncoder { * Since: 0.10.36 */ struct _GstAudioEncoderClass { - GstElementClass parent_class; + GstElementClass element_class; /*< public >*/ /* virtual methods for subclasses */
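
Patch 19 above rewrites the GST_AUDIO_FORMAT_INFO_IS_*() macros from "(info)->flags & GST_AUDIO_FORMAT_FLAG_*" to "!!((info)->flags & GST_AUDIO_FORMAT_FLAG_*)". A minimal standalone sketch of that idiom follows (plain C with made-up ExampleFormatFlags names, not GStreamer code): a bare bitwise AND evaluates to the flag's numeric value rather than to 0/1, so a comparison such as "== TRUE" can fail even when the flag is set, which is exactly what the "!!" prefix guards against.

#include <stdio.h>

typedef enum {
  EXAMPLE_FLAG_INTEGER = (1 << 0),
  EXAMPLE_FLAG_FLOAT   = (1 << 1),
  EXAMPLE_FLAG_SIGNED  = (1 << 2)
} ExampleFormatFlags;

/* before: expands to the raw masked value (here 2 when FLOAT is set) */
#define EXAMPLE_IS_FLOAT_RAW(flags)  ((flags) & EXAMPLE_FLAG_FLOAT)
/* after: "!!" collapses any non-zero value to 1, giving a real boolean */
#define EXAMPLE_IS_FLOAT_BOOL(flags) (!!((flags) & EXAMPLE_FLAG_FLOAT))

int
main (void)
{
  ExampleFormatFlags flags = EXAMPLE_FLAG_FLOAT | EXAMPLE_FLAG_SIGNED;

  printf ("raw:  %d\n", EXAMPLE_IS_FLOAT_RAW (flags));   /* prints 2 */
  printf ("bool: %d\n", EXAMPLE_IS_FLOAT_BOOL (flags));  /* prints 1 */
  /* (EXAMPLE_IS_FLOAT_RAW (flags) == 1) is false although the flag is
   * set; the "!!" form avoids that trap. */
  return 0;
}

The same normalization could be spelled "((flags) & FLAG) != 0"; "!!" is simply the conventional shorthand in C macro code and keeps the macro usable in both boolean and integer contexts.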