From 217ac7b3be16baa90746cbaf9794f3fad34290eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebastian=20Dr=C3=B6ge?= Date: Fri, 25 Nov 2011 11:31:58 +0100 Subject: [PATCH] omxaudioenc: Use audio base classes from gst-plugins-base instead of having our own copies --- configure.ac | 2 +- omx/Makefile.am | 16 +- omx/gstbaseaudiodecoder.c | 1947 ------------------------------------- omx/gstbaseaudiodecoder.h | 270 ----- omx/gstbaseaudioencoder.c | 1507 ---------------------------- omx/gstbaseaudioencoder.h | 234 ----- omx/gstbaseaudioutils.c | 315 ------ omx/gstbaseaudioutils.h | 74 -- omx/gstomxaacenc.c | 16 +- omx/gstomxaudioenc.c | 159 ++- omx/gstomxaudioenc.h | 12 +- 11 files changed, 96 insertions(+), 4456 deletions(-) delete mode 100644 omx/gstbaseaudiodecoder.c delete mode 100644 omx/gstbaseaudiodecoder.h delete mode 100644 omx/gstbaseaudioencoder.c delete mode 100644 omx/gstbaseaudioencoder.h delete mode 100644 omx/gstbaseaudioutils.c delete mode 100644 omx/gstbaseaudioutils.h diff --git a/configure.ac b/configure.ac index 7faff8225f..d4f73730c2 100644 --- a/configure.ac +++ b/configure.ac @@ -54,7 +54,7 @@ AC_LIBTOOL_WIN32_DLL AM_PROG_LIBTOOL dnl *** required versions of GStreamer stuff *** -GST_REQ=0.10.29 +GST_REQ=0.10.35.1 dnl *** autotools stuff **** diff --git a/omx/Makefile.am b/omx/Makefile.am index a67157ddd4..a24c39d454 100644 --- a/omx/Makefile.am +++ b/omx/Makefile.am @@ -16,10 +16,7 @@ libgstopenmax_la_SOURCES = \ gstbasevideocodec.c \ gstbasevideodecoder.c \ gstbasevideoencoder.c \ - gstbasevideoutils.c \ - gstbaseaudiodecoder.c \ - gstbaseaudioencoder.c \ - gstbaseaudioutils.c + gstbasevideoutils.c noinst_HEADERS = \ gstomx.h \ @@ -37,10 +34,7 @@ noinst_HEADERS = \ gstbasevideocodec.h \ gstbasevideodecoder.h \ gstbasevideoencoder.h \ - gstbasevideoutils.h \ - gstbaseaudiodecoder.h \ - gstbaseaudioencoder.h \ - gstbaseaudioutils.h + gstbasevideoutils.h fixbaseclasses = \ -DGstBaseVideoCodec=OMXBaseVideoCodec \ @@ -48,11 +42,7 @@ fixbaseclasses = \ -DGstBaseVideoEncoder=OMXBaseVideoEncoder \ -DGstBaseVideoEncoderClass=OMXBaseVideoEncoderClass \ -DGstBaseVideoDecoder=OMXBaseVideoDecoder \ - -DGstBaseVideoDecoderClass=OMXBaseVideoDecoderClass \ - -DGstBaseAudioDecoder=OMXBaseAudioDecoder \ - -DGstBaseAudioDecoderClass=OMXBaseAudioDecoderClass \ - -DGstBaseAudioEncoder=OMXBaseAudioEncoder \ - -DGstBaseAudioEncoderClass=OMXBaseAudioEncoderClass + -DGstBaseVideoDecoderClass=OMXBaseVideoDecoderClass libgstopenmax_la_CFLAGS = \ -DGST_USE_UNSTABLE_API=1 \ diff --git a/omx/gstbaseaudiodecoder.c b/omx/gstbaseaudiodecoder.c deleted file mode 100644 index f268cbea11..0000000000 --- a/omx/gstbaseaudiodecoder.c +++ /dev/null @@ -1,1947 +0,0 @@ -/* GStreamer - * Copyright (C) 2009 Igalia S.L. - * Author: Iago Toral Quiroga - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. - * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. 
- * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -/** - * SECTION:gstbaseaudiodecoder - * @short_description: Base class for audio decoders - * @see_also: #GstBaseTransform - * - * This base class is for audio decoders turning encoded data into - * raw audio samples. - * - * GstBaseAudioDecoder and subclass should cooperate as follows. - * - * - * Configuration - * - * Initially, GstBaseAudioDecoder calls @start when the decoder element - * is activated, which allows subclass to perform any global setup. - * Base class context parameters can already be set according to subclass - * capabilities (or possibly upon receive more information in subsequent - * @set_format). - * - * - * GstBaseAudioDecoder calls @set_format to inform subclass of the format - * of input audio data that it is about to receive. - * While unlikely, it might be called more than once, if changing input - * parameters require reconfiguration. - * - * - * GstBaseAudioDecoder calls @stop at end of all processing. - * - * - * - * As of configuration stage, and throughout processing, GstBaseAudioDecoder - * provides a GstBaseAudioDecoderContext that provides required context, - * e.g. describing the format of output audio data - * (valid when output caps have been caps) or current parsing state. - * Conversely, subclass can and should configure context to inform - * base class of its expectation w.r.t. buffer handling. - * - * - * Data processing - * - * Base class gathers input data, and optionally allows subclass - * to parse this into subsequently manageable (as defined by subclass) - * chunks. Such chunks are subsequently referred to as 'frames', - * though they may or may not correspond to 1 (or more) audio format frame. - * - * - * Input frame is provided to subclass' @handle_frame. - * - * - * If codec processing results in decoded data, subclass should call - * @gst_base_audio_decoder_finish_frame to have decoded data pushed - * downstream. - * - * - * Just prior to actually pushing a buffer downstream, - * it is passed to @pre_push. Subclass should either use this callback - * to arrange for additional downstream pushing or otherwise ensure such - * custom pushing occurs after at least a method call has finished since - * setting src pad caps. - * - * - * During the parsing process GstBaseAudioDecoderClass will handle both - * srcpad and sinkpad events. Sink events will be passed to subclass - * if @event callback has been provided. - * - * - * - * - * Shutdown phase - * - * GstBaseAudioDecoder class calls @stop to inform the subclass that data - * parsing will be stopped. - * - * - * - * - * - * Subclass is responsible for providing pad template caps for - * source and sink pads. The pads need to be named "sink" and "src". It also - * needs to set the fixed caps on srcpad, when the format is ensured. This - * is typically when base class calls subclass' @set_format function, though - * it might be delayed until calling @gst_base_audio_decoder_finish_frame. - * - * In summary, above process should have subclass concentrating on - * codec data processing while leaving other matters to base class, - * such as most notably timestamp handling. While it may exert more control - * in this area (see e.g. @pre_push), it is very much not recommended. 
- * - * In particular, base class will try to arrange for perfect output timestamps - * as much as possible while tracking upstream timestamps. - * To this end, if deviation between the next ideal expected perfect timestamp - * and upstream exceeds #GstBaseAudioDecoder:tolerance, then resync to upstream - * occurs (which would happen always if the tolerance mechanism is disabled). - * - * In non-live pipelines, baseclass can also (configurably) arrange for - * output buffer aggregation which may help to redue large(r) numbers of - * small(er) buffers being pushed and processed downstream. - * - * On the other hand, it should be noted that baseclass only provides limited - * seeking support (upon explicit subclass request), as full-fledged support - * should rather be left to upstream demuxer, parser or alike. This simple - * approach caters for seeking and duration reporting using estimated input - * bitrates. - * - * Things that subclass need to take care of: - * - * Provide pad templates - * - * Set source pad caps when appropriate - * - * - * Set user-configurable properties to sane defaults for format and - * implementing codec at hand, and convey some subclass capabilities and - * expectations in context. - * - * - * Accept data in @handle_frame and provide encoded results to - * @gst_base_audio_decoder_finish_frame. If it is prepared to perform - * PLC, it should also accept NULL data in @handle_frame and provide for - * data for indicated duration. - * - * - */ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include "gstbaseaudiodecoder.h" -#include -#include -#include - -#include - -GST_DEBUG_CATEGORY (baseaudiodecoder_debug); -#define GST_CAT_DEFAULT baseaudiodecoder_debug - -#define GST_BASE_AUDIO_DECODER_GET_PRIVATE(obj) \ - (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_BASE_AUDIO_DECODER, \ - GstBaseAudioDecoderPrivate)) - -enum -{ - LAST_SIGNAL -}; - -enum -{ - PROP_0, - PROP_LATENCY, - PROP_TOLERANCE, - PROP_PLC -}; - -#define DEFAULT_LATENCY 0 -#define DEFAULT_TOLERANCE 0 -#define DEFAULT_PLC FALSE - -struct _GstBaseAudioDecoderPrivate -{ - /* activation status */ - gboolean active; - - /* input base/first ts as basis for output ts */ - GstClockTime base_ts; - /* input samples processed and sent downstream so far (w.r.t. 
base_ts) */ - guint64 samples; - - /* collected input data */ - GstAdapter *adapter; - /* tracking input ts for changes */ - GstClockTime prev_ts; - /* frames obtained from input */ - GQueue frames; - /* collected output data */ - GstAdapter *adapter_out; - /* ts and duration for output data collected above */ - GstClockTime out_ts, out_dur; - /* mark outgoing discont */ - gboolean discont; - - /* subclass gave all it could already */ - gboolean drained; - /* subclass currently being forcibly drained */ - gboolean force; - - /* input bps estimatation */ - /* global in bytes seen */ - guint64 bytes_in; - /* global samples sent out */ - guint64 samples_out; - /* bytes flushed during parsing */ - guint sync_flush; - /* error count */ - gint error_count; - /* codec id tag */ - GstTagList *taglist; - - /* whether circumstances allow output aggregation */ - gint agg; - - /* reverse playback queues */ - /* collect input */ - GList *gather; - /* to-be-decoded */ - GList *decode; - /* reversed output */ - GList *queued; - - /* context storage */ - GstBaseAudioDecoderContext ctx; - - /* pending serialized sink events, will be sent from finish_frame() */ - GList *pending_events; -}; - - -static void gst_base_audio_decoder_finalize (GObject * object); -static void gst_base_audio_decoder_set_property (GObject * object, - guint prop_id, const GValue * value, GParamSpec * pspec); -static void gst_base_audio_decoder_get_property (GObject * object, - guint prop_id, GValue * value, GParamSpec * pspec); - -static void gst_base_audio_decoder_clear_queues (GstBaseAudioDecoder * dec); -static GstFlowReturn gst_base_audio_decoder_chain_reverse (GstBaseAudioDecoder * - dec, GstBuffer * buf); - -static GstStateChangeReturn gst_base_audio_decoder_change_state (GstElement * - element, GstStateChange transition); -static gboolean gst_base_audio_decoder_sink_event (GstPad * pad, - GstEvent * event); -static gboolean gst_base_audio_decoder_src_event (GstPad * pad, - GstEvent * event); -static gboolean gst_base_audio_decoder_sink_setcaps (GstPad * pad, - GstCaps * caps); -static gboolean gst_base_audio_decoder_src_setcaps (GstPad * pad, - GstCaps * caps); -static GstFlowReturn gst_base_audio_decoder_chain (GstPad * pad, - GstBuffer * buf); -static gboolean gst_base_audio_decoder_src_query (GstPad * pad, - GstQuery * query); -static gboolean gst_base_audio_decoder_sink_query (GstPad * pad, - GstQuery * query); -static const GstQueryType *gst_base_audio_decoder_get_query_types (GstPad * - pad); -static void gst_base_audio_decoder_reset (GstBaseAudioDecoder * dec, - gboolean full); - - -GST_BOILERPLATE (GstBaseAudioDecoder, gst_base_audio_decoder, GstElement, - GST_TYPE_ELEMENT); - -static void -gst_base_audio_decoder_base_init (gpointer g_class) -{ -} - -static void -gst_base_audio_decoder_class_init (GstBaseAudioDecoderClass * klass) -{ - GObjectClass *gobject_class; - GstElementClass *element_class; - - gobject_class = G_OBJECT_CLASS (klass); - element_class = GST_ELEMENT_CLASS (klass); - - g_type_class_add_private (klass, sizeof (GstBaseAudioDecoderPrivate)); - - GST_DEBUG_CATEGORY_INIT (baseaudiodecoder_debug, "baseaudiodecoder", 0, - "baseaudiodecoder element"); - - gobject_class->set_property = gst_base_audio_decoder_set_property; - gobject_class->get_property = gst_base_audio_decoder_get_property; - gobject_class->finalize = gst_base_audio_decoder_finalize; - - element_class->change_state = gst_base_audio_decoder_change_state; - - /* Properties */ - g_object_class_install_property (gobject_class, PROP_LATENCY, - 
g_param_spec_int64 ("latency", "Latency", - "Aggregate output data to a minimum of latency time (ns)", - 0, G_MAXINT64, DEFAULT_LATENCY, - G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - - g_object_class_install_property (gobject_class, PROP_TOLERANCE, - g_param_spec_int64 ("tolerance", "Tolerance", - "Perfect ts while timestamp jitter/imperfection within tolerance (ns)", - 0, G_MAXINT64, DEFAULT_TOLERANCE, - G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - - g_object_class_install_property (gobject_class, PROP_PLC, - g_param_spec_boolean ("plc", "Packet Loss Concealment", - "Perform packet loss concealment (if supported)", - DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); -} - -static void -gst_base_audio_decoder_init (GstBaseAudioDecoder * dec, - GstBaseAudioDecoderClass * klass) -{ - GstPadTemplate *pad_template; - - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_init"); - - dec->priv = GST_BASE_AUDIO_DECODER_GET_PRIVATE (dec); - - /* Setup sink pad */ - pad_template = - gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink"); - g_return_if_fail (pad_template != NULL); - - dec->sinkpad = gst_pad_new_from_template (pad_template, "sink"); - gst_pad_set_event_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_sink_event)); - gst_pad_set_setcaps_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_sink_setcaps)); - gst_pad_set_chain_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_chain)); - gst_pad_set_query_function (dec->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_sink_query)); - gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad); - GST_DEBUG_OBJECT (dec, "sinkpad created"); - - /* Setup source pad */ - pad_template = - gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src"); - g_return_if_fail (pad_template != NULL); - - dec->srcpad = gst_pad_new_from_template (pad_template, "src"); - gst_pad_set_setcaps_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_src_setcaps)); - gst_pad_set_event_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_src_event)); - gst_pad_set_query_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_src_query)); - gst_pad_set_query_type_function (dec->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_decoder_get_query_types)); - gst_pad_use_fixed_caps (dec->srcpad); - gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad); - GST_DEBUG_OBJECT (dec, "srcpad created"); - - dec->priv->adapter = gst_adapter_new (); - dec->priv->adapter_out = gst_adapter_new (); - g_queue_init (&dec->priv->frames); - dec->ctx = &dec->priv->ctx; - - g_static_rec_mutex_init (&dec->stream_lock); - - /* property default */ - dec->latency = DEFAULT_LATENCY; - dec->tolerance = DEFAULT_TOLERANCE; - - /* init state */ - gst_base_audio_decoder_reset (dec, TRUE); - GST_DEBUG_OBJECT (dec, "init ok"); -} - -static void -gst_base_audio_decoder_reset (GstBaseAudioDecoder * dec, gboolean full) -{ - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_reset"); - - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - - if (full) { - dec->priv->active = FALSE; - dec->priv->bytes_in = 0; - dec->priv->samples_out = 0; - dec->priv->agg = -1; - dec->priv->error_count = 0; - gst_base_audio_decoder_clear_queues (dec); - - g_free (dec->ctx->state.channel_pos); - memset (dec->ctx, 0, sizeof (dec->ctx)); - - if (dec->priv->taglist) { - gst_tag_list_free (dec->priv->taglist); - dec->priv->taglist = NULL; - } - - gst_segment_init (&dec->segment, GST_FORMAT_TIME); - - g_list_foreach 
(dec->priv->pending_events, (GFunc) gst_event_unref, NULL); - g_list_free (dec->priv->pending_events); - dec->priv->pending_events = NULL; - } - - g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL); - g_queue_clear (&dec->priv->frames); - gst_adapter_clear (dec->priv->adapter); - gst_adapter_clear (dec->priv->adapter_out); - dec->priv->out_ts = GST_CLOCK_TIME_NONE; - dec->priv->out_dur = 0; - dec->priv->prev_ts = GST_CLOCK_TIME_NONE; - dec->priv->drained = TRUE; - dec->priv->base_ts = GST_CLOCK_TIME_NONE; - dec->priv->samples = 0; - dec->priv->discont = TRUE; - dec->priv->sync_flush = FALSE; - - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); -} - -static void -gst_base_audio_decoder_finalize (GObject * object) -{ - GstBaseAudioDecoder *dec; - - g_return_if_fail (GST_IS_BASE_AUDIO_DECODER (object)); - dec = GST_BASE_AUDIO_DECODER (object); - - if (dec->priv->adapter) { - g_object_unref (dec->priv->adapter); - } - if (dec->priv->adapter_out) { - g_object_unref (dec->priv->adapter_out); - } - - g_static_rec_mutex_free (&dec->stream_lock); - - G_OBJECT_CLASS (parent_class)->finalize (object); -} - -/* automagically perform sanity checking of src caps; - * also extracts output data format */ -static gboolean -gst_base_audio_decoder_src_setcaps (GstPad * pad, GstCaps * caps) -{ - GstBaseAudioDecoder *dec; - GstAudioState *state; - gboolean res = TRUE, changed; - - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - state = &dec->ctx->state; - - GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps); - - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - - /* parse caps here to check subclass; - * also makes us aware of output format */ - if (!gst_caps_is_fixed (caps)) - goto refuse_caps; - - /* adjust ts tracking to new sample rate */ - if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && state->rate) { - dec->priv->base_ts += - GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, state->rate); - dec->priv->samples = 0; - } - - if (!gst_base_audio_parse_caps (caps, state, &changed)) - goto refuse_caps; - -done: - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - - gst_object_unref (dec); - return res; - - /* ERRORS */ -refuse_caps: - { - GST_WARNING_OBJECT (dec, "rejected caps %" GST_PTR_FORMAT, caps); - res = FALSE; - goto done; - } -} - -static gboolean -gst_base_audio_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) -{ - GstBaseAudioDecoder *dec; - GstBaseAudioDecoderClass *klass; - gboolean res = TRUE; - - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - - GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps); - - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - /* NOTE pbutils only needed here */ - /* TODO maybe (only) upstream demuxer/parser etc should handle this ? 
*/ - if (dec->priv->taglist) - gst_tag_list_free (dec->priv->taglist); - dec->priv->taglist = gst_tag_list_new (); - gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist, - GST_TAG_AUDIO_CODEC, caps); - - if (klass->set_format) - res = klass->set_format (dec, caps); - - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - - g_object_unref (dec); - return res; -} - -static void -gst_base_audio_decoder_setup (GstBaseAudioDecoder * dec) -{ - GstQuery *query; - gboolean res; - - /* check if in live pipeline, then latency messing is no-no */ - query = gst_query_new_latency (); - res = gst_pad_peer_query (dec->sinkpad, query); - if (res) { - gst_query_parse_latency (query, &res, NULL, NULL); - res = !res; - } - gst_query_unref (query); - - /* normalize to bool */ - dec->priv->agg = ! !res; -} - -/* mini aggregator combining output buffers into fewer larger ones, - * if so allowed/configured */ -static GstFlowReturn -gst_base_audio_decoder_output (GstBaseAudioDecoder * dec, GstBuffer * buf) -{ - GstBaseAudioDecoderClass *klass; - GstBaseAudioDecoderPrivate *priv; - GstBaseAudioDecoderContext *ctx; - GstFlowReturn ret = GST_FLOW_OK; - GstBuffer *inbuf = NULL; - - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - priv = dec->priv; - ctx = dec->ctx; - - if (G_UNLIKELY (priv->agg < 0)) - gst_base_audio_decoder_setup (dec); - - if (G_LIKELY (buf)) { - g_return_val_if_fail (ctx->state.bpf != 0, GST_FLOW_ERROR); - - GST_LOG_OBJECT (dec, "output buffer of size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); - - /* clip buffer */ - buf = gst_audio_buffer_clip (buf, &dec->segment, ctx->state.rate, - ctx->state.bpf); - if (G_UNLIKELY (!buf)) { - GST_DEBUG_OBJECT (dec, "no data after clipping to segment"); - } else { - GST_LOG_OBJECT (dec, - "buffer after segment clipping has size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); - } - } else { - GST_DEBUG_OBJECT (dec, "no output buffer"); - } - -again: - inbuf = NULL; - if (priv->agg && dec->latency > 0) { - gint av; - gboolean assemble = FALSE; - const GstClockTimeDiff tol = 10 * GST_MSECOND; - GstClockTimeDiff diff = -100 * GST_MSECOND; - - av = gst_adapter_available (priv->adapter_out); - if (G_UNLIKELY (!buf)) { - /* forcibly send current */ - assemble = TRUE; - GST_LOG_OBJECT (dec, "forcing fragment flush"); - } else if (av && (!GST_BUFFER_TIMESTAMP_IS_VALID (buf) || - !GST_CLOCK_TIME_IS_VALID (priv->out_ts) || - ((diff = GST_CLOCK_DIFF (GST_BUFFER_TIMESTAMP (buf), - priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) { - assemble = TRUE; - GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment", - (gint) (diff / GST_MSECOND)); - } else { - /* add or start collecting */ - if (!av) { - GST_LOG_OBJECT (dec, "starting new fragment"); - priv->out_ts = GST_BUFFER_TIMESTAMP (buf); - } else { - GST_LOG_OBJECT (dec, "adding to fragment"); - } - gst_adapter_push (priv->adapter_out, buf); - priv->out_dur += GST_BUFFER_DURATION (buf); - av += GST_BUFFER_SIZE (buf); - buf = NULL; - } - if (priv->out_dur > dec->latency) - assemble = TRUE; - if (av && assemble) { - GST_LOG_OBJECT (dec, "assembling fragment"); - inbuf = buf; - buf = gst_adapter_take_buffer (priv->adapter_out, av); - GST_BUFFER_TIMESTAMP (buf) = priv->out_ts; - GST_BUFFER_DURATION (buf) = priv->out_dur; - priv->out_ts = 
GST_CLOCK_TIME_NONE; - priv->out_dur = 0; - } - } - - if (G_LIKELY (buf)) { - - /* decorate */ - gst_buffer_set_caps (buf, GST_PAD_CAPS (dec->srcpad)); - - if (G_UNLIKELY (priv->discont)) { - GST_LOG_OBJECT (dec, "marking discont"); - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT); - priv->discont = FALSE; - } - - if (G_LIKELY (GST_BUFFER_TIMESTAMP_IS_VALID (buf))) { - /* duration should always be valid for raw audio */ - g_assert (GST_BUFFER_DURATION_IS_VALID (buf)); - dec->segment.last_stop = - GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf); - } - - if (klass->pre_push) { - /* last chance for subclass to do some dirty stuff */ - ret = klass->pre_push (dec, &buf); - if (ret != GST_FLOW_OK || !buf) { - GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p", - gst_flow_get_name (ret), buf); - if (buf) - gst_buffer_unref (buf); - goto exit; - } - } - - GST_LOG_OBJECT (dec, "pushing buffer of size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); - - if (dec->segment.rate > 0.0) { - ret = gst_pad_push (dec->srcpad, buf); - GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret)); - } else { - ret = GST_FLOW_OK; - priv->queued = g_list_prepend (priv->queued, buf); - GST_LOG_OBJECT (dec, "buffer queued"); - } - - exit: - if (inbuf) { - buf = inbuf; - goto again; - } - } - - return ret; -} - -GstFlowReturn -gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * dec, GstBuffer * buf, - gint frames) -{ - GstBaseAudioDecoderPrivate *priv; - GstBaseAudioDecoderContext *ctx; - gint samples = 0; - GstClockTime ts, next_ts; - GstFlowReturn ret = GST_FLOW_OK; - - /* subclass should know what it is producing by now */ - g_return_val_if_fail (buf == NULL || GST_PAD_CAPS (dec->srcpad) != NULL, - GST_FLOW_ERROR); - /* subclass should not hand us no data */ - g_return_val_if_fail (buf == NULL || GST_BUFFER_SIZE (buf) > 0, - GST_FLOW_ERROR); - /* no dummy calls please */ - g_return_val_if_fail (frames != 0, GST_FLOW_ERROR); - - priv = dec->priv; - ctx = dec->ctx; - - GST_LOG_OBJECT (dec, "accepting %d bytes == %d samples for %d frames", - buf ? GST_BUFFER_SIZE (buf) : -1, - buf ? 
GST_BUFFER_SIZE (buf) / ctx->state.bpf : -1, frames); - - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - - if (priv->pending_events) { - GList *pending_events, *l; - - pending_events = priv->pending_events; - priv->pending_events = NULL; - - GST_DEBUG_OBJECT (dec, "Pushing pending events"); - for (l = priv->pending_events; l; l = l->next) - gst_pad_push_event (dec->srcpad, l->data); - g_list_free (pending_events); - } - - /* output shoud be whole number of sample frames */ - if (G_LIKELY (buf && ctx->state.bpf)) { - if (GST_BUFFER_SIZE (buf) % ctx->state.bpf) - goto wrong_buffer; - /* per channel least */ - samples = GST_BUFFER_SIZE (buf) / ctx->state.bpf; - } - - /* frame and ts book-keeping */ - if (G_UNLIKELY (frames < 0)) { - if (G_UNLIKELY (-frames - 1 > priv->frames.length)) - goto overflow; - frames = priv->frames.length + frames + 1; - } else if (G_UNLIKELY (frames > priv->frames.length)) { - if (G_LIKELY (!priv->force)) { - /* no way we can let this pass */ - g_assert_not_reached (); - /* really no way */ - goto overflow; - } - } - - if (G_LIKELY (priv->frames.length)) - ts = GST_BUFFER_TIMESTAMP (priv->frames.head->data); - else - ts = GST_CLOCK_TIME_NONE; - - GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT, - GST_TIME_ARGS (ts)); - - while (priv->frames.length && frames) { - gst_buffer_unref (g_queue_pop_head (&priv->frames)); - dec->ctx->delay = dec->priv->frames.length; - frames--; - } - - /* lock on */ - if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) { - priv->base_ts = ts; - GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts)); - } - - if (G_UNLIKELY (!buf)) - goto exit; - - /* slightly convoluted approach caters for perfect ts if subclass desires */ - if (GST_CLOCK_TIME_IS_VALID (ts)) { - if (dec->tolerance > 0) { - GstClockTimeDiff diff; - - g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts)); - next_ts = priv->base_ts + - gst_util_uint64_scale (samples, GST_SECOND, ctx->state.rate); - GST_LOG_OBJECT (dec, "buffer is %d samples past base_ts %" GST_TIME_FORMAT - ", expected ts %" GST_TIME_FORMAT, samples, - GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts)); - diff = GST_CLOCK_DIFF (next_ts, ts); - GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND)); - /* if within tolerance, - * discard buffer ts and carry on producing perfect stream, - * otherwise resync to ts */ - if (G_UNLIKELY (diff < -dec->tolerance || diff > dec->tolerance)) { - GST_DEBUG_OBJECT (dec, "base_ts resync"); - priv->base_ts = ts; - priv->samples = 0; - } - } else { - GST_DEBUG_OBJECT (dec, "base_ts resync"); - priv->base_ts = ts; - priv->samples = 0; - } - } - - /* delayed one-shot stuff until confirmed data */ - if (priv->taglist) { - GST_DEBUG_OBJECT (dec, "codec tag %" GST_PTR_FORMAT, priv->taglist); - if (gst_tag_list_is_empty (priv->taglist)) { - gst_tag_list_free (priv->taglist); - } else { - gst_element_found_tags (GST_ELEMENT (dec), priv->taglist); - } - priv->taglist = NULL; - } - - buf = gst_buffer_make_metadata_writable (buf); - if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) { - GST_BUFFER_TIMESTAMP (buf) = - priv->base_ts + - GST_FRAMES_TO_CLOCK_TIME (priv->samples, ctx->state.rate); - GST_BUFFER_DURATION (buf) = priv->base_ts + - GST_FRAMES_TO_CLOCK_TIME (priv->samples + samples, ctx->state.rate) - - GST_BUFFER_TIMESTAMP (buf); - } else { - GST_BUFFER_TIMESTAMP (buf) = GST_CLOCK_TIME_NONE; - GST_BUFFER_DURATION (buf) = - GST_FRAMES_TO_CLOCK_TIME (samples, ctx->state.rate); - } - priv->samples += samples; - 
priv->samples_out += samples; - - /* we got data, so note things are looking up */ - if (G_UNLIKELY (dec->priv->error_count)) - dec->priv->error_count--; - -exit: - ret = gst_base_audio_decoder_output (dec, buf); - - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - - return ret; - - /* ERRORS */ -wrong_buffer: - { - GST_ELEMENT_ERROR (dec, STREAM, ENCODE, (NULL), - ("buffer size %d not a multiple of %d", GST_BUFFER_SIZE (buf), - ctx->state.bpf)); - gst_buffer_unref (buf); - ret = GST_FLOW_ERROR; - goto exit; - } -overflow: - { - GST_ELEMENT_ERROR (dec, STREAM, ENCODE, - ("received more decoded frames %d than provided %d", frames, - priv->frames.length), (NULL)); - if (buf) - gst_buffer_unref (buf); - ret = GST_FLOW_ERROR; - goto exit; - } -} - -static GstFlowReturn -gst_base_audio_decoder_handle_frame (GstBaseAudioDecoder * dec, - GstBaseAudioDecoderClass * klass, GstBuffer * buffer) -{ - if (G_LIKELY (buffer)) { - /* keep around for admin */ - GST_LOG_OBJECT (dec, "tracking frame size %d, ts %" GST_TIME_FORMAT, - GST_BUFFER_SIZE (buffer), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer))); - g_queue_push_tail (&dec->priv->frames, buffer); - dec->ctx->delay = dec->priv->frames.length; - dec->priv->bytes_in += GST_BUFFER_SIZE (buffer); - } else { - GST_LOG_OBJECT (dec, "providing subclass with NULL frame"); - } - - return klass->handle_frame (dec, buffer); -} - -/* maybe subclass configurable instead, but this allows for a whole lot of - * raw samples, so at least quite some encoded ... */ -#define GST_BASE_AUDIO_DECODER_MAX_SYNC 10 * 8 * 2 * 1024 - -static GstFlowReturn -gst_base_audio_decoder_push_buffers (GstBaseAudioDecoder * dec, gboolean force) -{ - GstBaseAudioDecoderClass *klass; - GstBaseAudioDecoderPrivate *priv; - GstBaseAudioDecoderContext *ctx; - GstFlowReturn ret = GST_FLOW_OK; - GstBuffer *buffer; - gint av, flush; - - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - priv = dec->priv; - ctx = dec->ctx; - - g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR); - - av = gst_adapter_available (priv->adapter); - GST_DEBUG_OBJECT (dec, "available: %d", av); - - while (ret == GST_FLOW_OK) { - - flush = 0; - ctx->eos = force; - - if (G_LIKELY (av)) { - gint len; - GstClockTime ts; - - /* parse if needed */ - if (klass->parse) { - gint offset = 0; - - /* limited (legacy) parsing; avoid whole of baseparse */ - GST_DEBUG_OBJECT (dec, "parsing available: %d", av); - /* piggyback sync state on discont */ - ctx->sync = !priv->discont; - ret = klass->parse (dec, priv->adapter, &offset, &len); - - g_assert (offset <= av); - if (offset) { - /* jumped a bit */ - GST_DEBUG_OBJECT (dec, "setting DISCONT"); - gst_adapter_flush (priv->adapter, offset); - flush = offset; - /* avoid parsing indefinitely */ - priv->sync_flush += offset; - if (priv->sync_flush > GST_BASE_AUDIO_DECODER_MAX_SYNC) - goto parse_failed; - } - - if (ret == GST_FLOW_UNEXPECTED) { - GST_LOG_OBJECT (dec, "no frame yet"); - ret = GST_FLOW_OK; - break; - } else if (ret == GST_FLOW_OK) { - GST_LOG_OBJECT (dec, "frame at offset %d of length %d", offset, len); - g_assert (offset + len <= av); - priv->sync_flush = 0; - } else { - break; - } - } else { - len = av; - } - /* track upstream ts, but do not get stuck if nothing new upstream */ - ts = gst_adapter_prev_timestamp (priv->adapter, NULL); - if (ts == priv->prev_ts) { - GST_LOG_OBJECT (dec, "ts == prev_ts; discarding"); - ts = GST_CLOCK_TIME_NONE; - } else { - priv->prev_ts = ts; - } - buffer = gst_adapter_take_buffer (priv->adapter, len); - buffer = 
gst_buffer_make_metadata_writable (buffer); - GST_BUFFER_TIMESTAMP (buffer) = ts; - flush += len; - } else { - if (!force) - break; - buffer = NULL; - } - - ret = gst_base_audio_decoder_handle_frame (dec, klass, buffer); - - /* do not keep pushing it ... */ - if (G_UNLIKELY (!av)) { - priv->drained = TRUE; - break; - } - - av -= flush; - g_assert (av >= 0); - } - - GST_LOG_OBJECT (dec, "done pushing to subclass"); - return ret; - - /* ERRORS */ -parse_failed: - { - GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("failed to parse stream")); - return GST_FLOW_ERROR; - } -} - -static GstFlowReturn -gst_base_audio_decoder_drain (GstBaseAudioDecoder * dec) -{ - GstFlowReturn ret; - - if (dec->priv->drained) - return GST_FLOW_OK; - else { - /* dispatch reverse pending buffers */ - /* chain eventually calls upon drain as well, but by that time - * gather list should be clear, so ok ... */ - if (dec->segment.rate < 0.0 && dec->priv->gather) - gst_base_audio_decoder_chain_reverse (dec, NULL); - /* have subclass give all it can */ - ret = gst_base_audio_decoder_push_buffers (dec, TRUE); - /* ensure all output sent */ - ret = gst_base_audio_decoder_output (dec, NULL); - /* everything should be away now */ - if (dec->priv->frames.length) { - /* not fatal/impossible though if subclass/codec eats stuff */ - GST_WARNING_OBJECT (dec, "still %d frames left after draining", - dec->priv->frames.length); - g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL); - g_queue_clear (&dec->priv->frames); - } - /* discard (unparsed) leftover */ - gst_adapter_clear (dec->priv->adapter); - - return ret; - } -} - -/* hard == FLUSH, otherwise discont */ -static GstFlowReturn -gst_base_audio_decoder_flush (GstBaseAudioDecoder * dec, gboolean hard) -{ - GstBaseAudioDecoderClass *klass; - GstFlowReturn ret = GST_FLOW_OK; - - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - - GST_LOG_OBJECT (dec, "flush hard %d", hard); - - if (!hard) { - ret = gst_base_audio_decoder_drain (dec); - } else { - gst_base_audio_decoder_clear_queues (dec); - gst_segment_init (&dec->segment, GST_FORMAT_TIME); - dec->priv->error_count = 0; - } - /* only bother subclass with flushing if known it is already alive - * and kicking out stuff */ - if (klass->flush && dec->priv->samples_out > 0) - klass->flush (dec, hard); - /* and get (re)set for the sequel */ - gst_base_audio_decoder_reset (dec, FALSE); - - return ret; -} - -static GstFlowReturn -gst_base_audio_decoder_chain_forward (GstBaseAudioDecoder * dec, - GstBuffer * buffer) -{ - GstFlowReturn ret; - - /* grab buffer */ - gst_adapter_push (dec->priv->adapter, buffer); - buffer = NULL; - /* new stuff, so we can push subclass again */ - dec->priv->drained = FALSE; - - /* hand to subclass */ - ret = gst_base_audio_decoder_push_buffers (dec, FALSE); - - GST_LOG_OBJECT (dec, "chain-done"); - return ret; -} - -static void -gst_base_audio_decoder_clear_queues (GstBaseAudioDecoder * dec) -{ - GstBaseAudioDecoderPrivate *priv = dec->priv; - - g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL); - g_list_free (priv->queued); - priv->queued = NULL; - g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL); - g_list_free (priv->gather); - priv->gather = NULL; - g_list_foreach (priv->decode, (GFunc) gst_mini_object_unref, NULL); - g_list_free (priv->decode); - priv->decode = NULL; -} - -/* - * Input: - * Buffer decoding order: 7 8 9 4 5 6 3 1 2 EOS - * Discont flag: D D D D - * - * - Each Discont marks a discont in the decoding order. 
- * - * for vorbis, each buffer is a keyframe when we have the previous - * buffer. This means that to decode buffer 7, we need buffer 6, which - * arrives out of order. - * - * we first gather buffers in the gather queue until we get a DISCONT. We - * prepend each incomming buffer so that they are in reversed order. - * - * gather queue: 9 8 7 - * decode queue: - * output queue: - * - * When a DISCONT is received (buffer 4), we move the gather queue to the - * decode queue. This is simply done be taking the head of the gather queue - * and prepending it to the decode queue. This yields: - * - * gather queue: - * decode queue: 7 8 9 - * output queue: - * - * Then we decode each buffer in the decode queue in order and put the output - * buffer in the output queue. The first buffer (7) will not produce any output - * because it needs the previous buffer (6) which did not arrive yet. This - * yields: - * - * gather queue: - * decode queue: 7 8 9 - * output queue: 9 8 - * - * Then we remove the consumed buffers from the decode queue. Buffer 7 is not - * completely consumed, we need to keep it around for when we receive buffer - * 6. This yields: - * - * gather queue: - * decode queue: 7 - * output queue: 9 8 - * - * Then we accumulate more buffers: - * - * gather queue: 6 5 4 - * decode queue: 7 - * output queue: - * - * prepending to the decode queue on DISCONT yields: - * - * gather queue: - * decode queue: 4 5 6 7 - * output queue: - * - * after decoding and keeping buffer 4: - * - * gather queue: - * decode queue: 4 - * output queue: 7 6 5 - * - * Etc.. - */ -static GstFlowReturn -gst_base_audio_decoder_flush_decode (GstBaseAudioDecoder * dec) -{ - GstBaseAudioDecoderPrivate *priv = dec->priv; - GstFlowReturn res = GST_FLOW_OK; - GList *walk; - - walk = priv->decode; - - GST_DEBUG_OBJECT (dec, "flushing buffers to decoder"); - - /* clear buffer and decoder state */ - gst_base_audio_decoder_flush (dec, FALSE); - - while (walk) { - GList *next; - GstBuffer *buf = GST_BUFFER_CAST (walk->data); - - GST_DEBUG_OBJECT (dec, "decoding buffer %p, ts %" GST_TIME_FORMAT, - buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); - - next = g_list_next (walk); - /* decode buffer, resulting data prepended to output queue */ - gst_buffer_ref (buf); - res = gst_base_audio_decoder_chain_forward (dec, buf); - - /* if we generated output, we can discard the buffer, else we - * keep it in the queue */ - if (priv->queued) { - GST_DEBUG_OBJECT (dec, "decoded buffer to %p", priv->queued->data); - priv->decode = g_list_delete_link (priv->decode, walk); - gst_buffer_unref (buf); - } else { - GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping"); - } - walk = next; - } - - /* drain any aggregation (or otherwise) leftover */ - gst_base_audio_decoder_drain (dec); - - /* now send queued data downstream */ - while (priv->queued) { - GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data); - - if (G_LIKELY (res == GST_FLOW_OK)) { - GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %u, " - "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf, - GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); - /* should be already, but let's be sure */ - buf = gst_buffer_make_metadata_writable (buf); - /* avoid stray DISCONT from forward processing, - * which have no meaning in reverse pushing */ - GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT); - res = gst_pad_push (dec->srcpad, buf); - } else { - gst_buffer_unref (buf); - } - - priv->queued = g_list_delete_link 
(priv->queued, priv->queued); - } - - return res; -} - -static GstFlowReturn -gst_base_audio_decoder_chain_reverse (GstBaseAudioDecoder * dec, - GstBuffer * buf) -{ - GstBaseAudioDecoderPrivate *priv = dec->priv; - GstFlowReturn result = GST_FLOW_OK; - - /* if we have a discont, move buffers to the decode list */ - if (!buf || GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) { - GST_DEBUG_OBJECT (dec, "received discont"); - while (priv->gather) { - GstBuffer *gbuf; - - gbuf = GST_BUFFER_CAST (priv->gather->data); - /* remove from the gather list */ - priv->gather = g_list_delete_link (priv->gather, priv->gather); - /* copy to decode queue */ - priv->decode = g_list_prepend (priv->decode, gbuf); - } - /* decode stuff in the decode queue */ - gst_base_audio_decoder_flush_decode (dec); - } - - if (G_LIKELY (buf)) { - GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %u, " - "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf, - GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); - - /* add buffer to gather queue */ - priv->gather = g_list_prepend (priv->gather, buf); - } - - return result; -} - -static GstFlowReturn -gst_base_audio_decoder_chain (GstPad * pad, GstBuffer * buffer) -{ - GstBaseAudioDecoder *dec; - GstFlowReturn ret; - - dec = GST_BASE_AUDIO_DECODER (GST_PAD_PARENT (pad)); - - GST_LOG_OBJECT (dec, - "received buffer of size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buffer), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buffer))); - - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - - if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) { - gint64 samples, ts; - - /* track present position */ - ts = dec->priv->base_ts; - samples = dec->priv->samples; - - GST_DEBUG_OBJECT (dec, "handling discont"); - gst_base_audio_decoder_flush (dec, FALSE); - dec->priv->discont = TRUE; - - /* buffer may claim DISCONT loudly, if it can't tell us where we are now, - * we'll stick to where we were ... - * Particularly useful/needed for upstream BYTE based */ - if (dec->segment.rate > 0.0 && !GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) { - GST_DEBUG_OBJECT (dec, "... 
but restoring previous ts tracking"); - dec->priv->base_ts = ts; - dec->priv->samples = samples; - } - } - - if (dec->segment.rate > 0.0) - ret = gst_base_audio_decoder_chain_forward (dec, buffer); - else - ret = gst_base_audio_decoder_chain_reverse (dec, buffer); - - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - - return ret; -} - -/* perform upstream byte <-> time conversion (duration, seeking) - * if subclass allows and if enough data for moderately decent conversion */ -static inline gboolean -gst_base_audio_decoder_do_byte (GstBaseAudioDecoder * dec) -{ - return dec->ctx->do_byte_time && dec->ctx->state.bpf && - dec->ctx->state.rate <= dec->priv->samples_out; -} - -static gboolean -gst_base_audio_decoder_sink_eventfunc (GstBaseAudioDecoder * dec, - GstEvent * event) -{ - gboolean handled = FALSE; - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_NEWSEGMENT: - { - GstFormat format; - gdouble rate, arate; - gint64 start, stop, time; - gboolean update; - - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format, - &start, &stop, &time); - - if (format == GST_FORMAT_TIME) { - GST_DEBUG_OBJECT (dec, "received TIME NEW_SEGMENT %" GST_TIME_FORMAT - " -- %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT - ", rate %g, applied_rate %g", - GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (time), - rate, arate); - } else { - GstFormat dformat = GST_FORMAT_TIME; - - GST_DEBUG_OBJECT (dec, "received NEW_SEGMENT %" G_GINT64_FORMAT - " -- %" G_GINT64_FORMAT ", time %" G_GINT64_FORMAT - ", rate %g, applied_rate %g", start, stop, time, rate, arate); - /* handle newsegment resulting from legacy simple seeking */ - /* note that we need to convert this whether or not enough data - * to handle initial newsegment */ - if (dec->ctx->do_byte_time && - gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, start, - &dformat, &start)) { - /* best attempt convert */ - /* as these are only estimates, stop is kept open-ended to avoid - * premature cutting */ - GST_DEBUG_OBJECT (dec, "converted to TIME start %" GST_TIME_FORMAT, - GST_TIME_ARGS (start)); - format = GST_FORMAT_TIME; - time = start; - stop = GST_CLOCK_TIME_NONE; - /* replace event */ - gst_event_unref (event); - event = gst_event_new_new_segment_full (update, rate, arate, - GST_FORMAT_TIME, start, stop, time); - } else { - GST_DEBUG_OBJECT (dec, "unsupported format; ignoring"); - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - break; - } - } - - /* finish current segment */ - gst_base_audio_decoder_drain (dec); - - if (update) { - /* time progressed without data, see if we can fill the gap with - * some concealment data */ - GST_DEBUG_OBJECT (dec, - "segment update: plc %d, do_plc %d, last_stop %" GST_TIME_FORMAT, - dec->plc, dec->ctx->do_plc, GST_TIME_ARGS (dec->segment.last_stop)); - if (dec->plc && dec->ctx->do_plc && dec->segment.rate > 0.0 && - dec->segment.last_stop < start) { - GstBaseAudioDecoderClass *klass; - GstBuffer *buf; - - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - /* hand subclass empty frame with duration that needs covering */ - buf = gst_buffer_new (); - GST_BUFFER_DURATION (buf) = start - dec->segment.last_stop; - /* best effort, not much error handling */ - gst_base_audio_decoder_handle_frame (dec, klass, buf); - } - } else { - /* prepare for next one */ - gst_base_audio_decoder_flush (dec, FALSE); - /* and that's where we time from, - * in case upstream does not come up with anything better - * (e.g. 
upstream BYTE) */ - if (format != GST_FORMAT_TIME) { - dec->priv->base_ts = start; - dec->priv->samples = 0; - } - } - - /* and follow along with segment */ - gst_segment_set_newsegment_full (&dec->segment, update, rate, arate, - format, start, stop, time); - - dec->priv->pending_events = - g_list_append (dec->priv->pending_events, event); - handled = TRUE; - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - break; - } - - case GST_EVENT_FLUSH_START: - break; - - case GST_EVENT_FLUSH_STOP: - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - /* prepare for fresh start */ - gst_base_audio_decoder_flush (dec, TRUE); - - g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL); - g_list_free (dec->priv->pending_events); - dec->priv->pending_events = NULL; - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - break; - - case GST_EVENT_EOS: - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - gst_base_audio_decoder_drain (dec); - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - break; - - default: - break; - } - - return handled; -} - -static gboolean -gst_base_audio_decoder_sink_event (GstPad * pad, GstEvent * event) -{ - GstBaseAudioDecoder *dec; - GstBaseAudioDecoderClass *klass; - gboolean handled = FALSE; - gboolean ret = TRUE; - - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - - GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event), - GST_EVENT_TYPE_NAME (event)); - - if (klass->event) - handled = klass->event (dec, event); - - if (!handled) - handled = gst_base_audio_decoder_sink_eventfunc (dec, event); - - if (!handled) { - /* Forward non-serialized events and EOS/FLUSH_STOP immediately. - * For EOS this is required because no buffer or serialized event - * will come after EOS and nothing could trigger another - * _finish_frame() call. - * - * For FLUSH_STOP this is required because it is expected - * to be forwarded immediately and no buffers are queued anyway. 
- */ - if (!GST_EVENT_IS_SERIALIZED (event) - || GST_EVENT_TYPE (event) == GST_EVENT_EOS - || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) { - ret = gst_pad_event_default (pad, event); - } else { - GST_BASE_AUDIO_DECODER_STREAM_LOCK (dec); - dec->priv->pending_events = - g_list_append (dec->priv->pending_events, event); - GST_BASE_AUDIO_DECODER_STREAM_UNLOCK (dec); - ret = TRUE; - } - } - - GST_DEBUG_OBJECT (dec, "event handled"); - - gst_object_unref (dec); - return ret; -} - -static gboolean -gst_base_audio_decoder_do_seek (GstBaseAudioDecoder * dec, GstEvent * event) -{ - GstSeekFlags flags; - GstSeekType start_type, end_type; - GstFormat format; - gdouble rate; - gint64 start, start_time, end_time; - GstSegment seek_segment; - guint32 seqnum; - - gst_event_parse_seek (event, &rate, &format, &flags, &start_type, - &start_time, &end_type, &end_time); - - /* we'll handle plain open-ended flushing seeks with the simple approach */ - if (rate != 1.0) { - GST_DEBUG_OBJECT (dec, "unsupported seek: rate"); - return FALSE; - } - - if (start_type != GST_SEEK_TYPE_SET) { - GST_DEBUG_OBJECT (dec, "unsupported seek: start time"); - return FALSE; - } - - if (end_type != GST_SEEK_TYPE_NONE || - (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) { - GST_DEBUG_OBJECT (dec, "unsupported seek: end time"); - return FALSE; - } - - if (!(flags & GST_SEEK_FLAG_FLUSH)) { - GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing"); - return FALSE; - } - - memcpy (&seek_segment, &dec->segment, sizeof (seek_segment)); - gst_segment_set_seek (&seek_segment, rate, format, flags, start_type, - start_time, end_type, end_time, NULL); - start_time = seek_segment.last_stop; - - format = GST_FORMAT_BYTES; - if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time, - &format, &start)) { - GST_DEBUG_OBJECT (dec, "conversion failed"); - return FALSE; - } - - seqnum = gst_event_get_seqnum (event); - event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags, - GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1); - gst_event_set_seqnum (event, seqnum); - - GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %" - G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start); - - return gst_pad_push_event (dec->sinkpad, event); -} - -static gboolean -gst_base_audio_decoder_src_event (GstPad * pad, GstEvent * event) -{ - GstBaseAudioDecoder *dec; - gboolean res = FALSE; - - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - - GST_DEBUG_OBJECT (dec, "received event %d, %s", GST_EVENT_TYPE (event), - GST_EVENT_TYPE_NAME (event)); - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_SEEK: - { - GstFormat format, tformat; - gdouble rate; - GstSeekFlags flags; - GstSeekType cur_type, stop_type; - gint64 cur, stop; - gint64 tcur, tstop; - guint32 seqnum; - - gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, - &stop_type, &stop); - seqnum = gst_event_get_seqnum (event); - - /* upstream gets a chance first */ - if ((res = gst_pad_push_event (dec->sinkpad, event))) - break; - - /* if upstream fails for a time seek, maybe we can help if allowed */ - if (format == GST_FORMAT_TIME) { - if (gst_base_audio_decoder_do_byte (dec)) - res = gst_base_audio_decoder_do_seek (dec, event); - break; - } - - /* ... 
though a non-time seek can be aided as well */ - /* First bring the requested format to time */ - tformat = GST_FORMAT_TIME; - if (!(res = gst_pad_query_convert (pad, format, cur, &tformat, &tcur))) - goto convert_error; - if (!(res = gst_pad_query_convert (pad, format, stop, &tformat, &tstop))) - goto convert_error; - - /* then seek with time on the peer */ - event = gst_event_new_seek (rate, GST_FORMAT_TIME, - flags, cur_type, tcur, stop_type, tstop); - gst_event_set_seqnum (event, seqnum); - - res = gst_pad_push_event (dec->sinkpad, event); - break; - } - default: - res = gst_pad_push_event (dec->sinkpad, event); - break; - } -done: - gst_object_unref (dec); - - return res; - - /* ERRORS */ -convert_error: - { - GST_DEBUG_OBJECT (dec, "cannot convert start/stop for seek"); - goto done; - } -} - -static gboolean -gst_base_audio_decoder_sink_query (GstPad * pad, GstQuery * query) -{ - gboolean res = TRUE; - GstBaseAudioDecoder *dec; - - dec = GST_BASE_AUDIO_DECODER (gst_pad_get_parent (pad)); - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_FORMATS: - { - gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES); - res = TRUE; - break; - } - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - if (!(res = gst_base_audio_encoded_audio_convert (&dec->ctx->state, - dec->priv->bytes_in, dec->priv->samples_out, - src_fmt, src_val, &dest_fmt, &dest_val))) - goto error; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } - -error: - gst_object_unref (dec); - return res; -} - -static const GstQueryType * -gst_base_audio_decoder_get_query_types (GstPad * pad) -{ - static const GstQueryType gst_base_audio_decoder_src_query_types[] = { - GST_QUERY_POSITION, - GST_QUERY_DURATION, - GST_QUERY_CONVERT, - GST_QUERY_LATENCY, - 0 - }; - - return gst_base_audio_decoder_src_query_types; -} - -/* FIXME ? are any of these queries (other than latency) a decoder's business ?? - * also, the conversion stuff might seem to make sense, but seems to not mind - * segment stuff etc at all - * Supposedly that's backward compatibility ... 
*/ -static gboolean -gst_base_audio_decoder_src_query (GstPad * pad, GstQuery * query) -{ - GstBaseAudioDecoder *dec; - GstPad *peerpad; - gboolean res = FALSE; - - dec = GST_BASE_AUDIO_DECODER (GST_PAD_PARENT (pad)); - peerpad = gst_pad_get_peer (GST_PAD (dec->sinkpad)); - - GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query); - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_DURATION: - { - GstFormat format; - - /* upstream in any case */ - if ((res = gst_pad_query_default (pad, query))) - break; - - gst_query_parse_duration (query, &format, NULL); - /* try answering TIME by converting from BYTE if subclass allows */ - if (format == GST_FORMAT_TIME && gst_base_audio_decoder_do_byte (dec)) { - gint64 value; - - format = GST_FORMAT_BYTES; - if (gst_pad_query_peer_duration (dec->sinkpad, &format, &value)) { - GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value); - format = GST_FORMAT_TIME; - if (gst_pad_query_convert (dec->sinkpad, GST_FORMAT_BYTES, value, - &format, &value)) { - gst_query_set_duration (query, GST_FORMAT_TIME, value); - res = TRUE; - } - } - } - break; - } - case GST_QUERY_POSITION: - { - GstFormat format; - gint64 time, value; - - if ((res = gst_pad_peer_query (dec->sinkpad, query))) { - GST_LOG_OBJECT (dec, "returning peer response"); - break; - } - - /* we start from the last seen time */ - time = dec->segment.last_stop; - /* correct for the segment values */ - time = gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, time); - - GST_LOG_OBJECT (dec, - "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time)); - - /* and convert to the final format */ - gst_query_parse_position (query, &format, NULL); - if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time, - &format, &value))) - break; - - gst_query_set_position (query, format, value); - - GST_LOG_OBJECT (dec, - "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value, - format); - break; - } - case GST_QUERY_FORMATS: - { - gst_query_set_formats (query, 3, - GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT); - res = TRUE; - break; - } - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - if (!(res = gst_base_audio_raw_audio_convert (&dec->ctx->state, - src_fmt, src_val, &dest_fmt, &dest_val))) - break; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - case GST_QUERY_LATENCY: - { - if ((res = gst_pad_peer_query (dec->sinkpad, query))) { - gboolean live; - GstClockTime min_latency, max_latency; - - gst_query_parse_latency (query, &live, &min_latency, &max_latency); - GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %" - GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live, - GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); - - GST_OBJECT_LOCK (dec); - /* add our latency */ - if (min_latency != -1) - min_latency += dec->ctx->min_latency; - if (max_latency != -1) - max_latency += dec->ctx->max_latency; - GST_OBJECT_UNLOCK (dec); - - gst_query_set_latency (query, live, min_latency, max_latency); - } - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } - - gst_object_unref (peerpad); - return res; -} - -static gboolean -gst_base_audio_decoder_stop (GstBaseAudioDecoder * dec) -{ - GstBaseAudioDecoderClass *klass; - gboolean ret = TRUE; - - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_stop"); - - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - - if (klass->stop) { - ret 
= klass->stop (dec); - } - - /* clean up */ - gst_base_audio_decoder_reset (dec, TRUE); - - if (ret) - dec->priv->active = FALSE; - - return TRUE; -} - -static gboolean -gst_base_audio_decoder_start (GstBaseAudioDecoder * dec) -{ - GstBaseAudioDecoderClass *klass; - gboolean ret = TRUE; - - GST_DEBUG_OBJECT (dec, "gst_base_audio_decoder_start"); - - klass = GST_BASE_AUDIO_DECODER_GET_CLASS (dec); - - /* arrange clean state */ - gst_base_audio_decoder_reset (dec, TRUE); - - if (klass->start) { - ret = klass->start (dec); - } - - if (ret) - dec->priv->active = TRUE; - - return TRUE; -} - -static void -gst_base_audio_decoder_get_property (GObject * object, guint prop_id, - GValue * value, GParamSpec * pspec) -{ - GstBaseAudioDecoder *dec; - - dec = GST_BASE_AUDIO_DECODER (object); - - switch (prop_id) { - case PROP_LATENCY: - g_value_set_int64 (value, dec->latency); - break; - case PROP_TOLERANCE: - g_value_set_int64 (value, dec->tolerance); - break; - case PROP_PLC: - g_value_set_boolean (value, dec->plc); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static void -gst_base_audio_decoder_set_property (GObject * object, guint prop_id, - const GValue * value, GParamSpec * pspec) -{ - GstBaseAudioDecoder *dec; - - dec = GST_BASE_AUDIO_DECODER (object); - - switch (prop_id) { - case PROP_LATENCY: - dec->latency = g_value_get_int64 (value); - break; - case PROP_TOLERANCE: - dec->tolerance = g_value_get_int64 (value); - break; - case PROP_PLC: - dec->plc = g_value_get_boolean (value); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static GstStateChangeReturn -gst_base_audio_decoder_change_state (GstElement * element, - GstStateChange transition) -{ - GstBaseAudioDecoder *codec; - GstStateChangeReturn ret; - - codec = GST_BASE_AUDIO_DECODER (element); - - switch (transition) { - case GST_STATE_CHANGE_NULL_TO_READY: - break; - case GST_STATE_CHANGE_READY_TO_PAUSED: - if (!gst_base_audio_decoder_start (codec)) { - goto start_failed; - } - break; - case GST_STATE_CHANGE_PAUSED_TO_PLAYING: - break; - default: - break; - } - - ret = parent_class->change_state (element, transition); - - switch (transition) { - case GST_STATE_CHANGE_PLAYING_TO_PAUSED: - break; - case GST_STATE_CHANGE_PAUSED_TO_READY: - if (!gst_base_audio_decoder_stop (codec)) { - goto stop_failed; - } - break; - case GST_STATE_CHANGE_READY_TO_NULL: - break; - default: - break; - } - - return ret; - -start_failed: - { - GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to start codec")); - return GST_STATE_CHANGE_FAILURE; - } -stop_failed: - { - GST_ELEMENT_ERROR (codec, LIBRARY, INIT, (NULL), ("Failed to stop codec")); - return GST_STATE_CHANGE_FAILURE; - } -} - -GstFlowReturn -_gst_base_audio_decoder_error (GstBaseAudioDecoder * dec, gint weight, - GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file, - const gchar * function, gint line) -{ - if (txt) - GST_WARNING_OBJECT (dec, "error: %s", txt); - if (dbg) - GST_WARNING_OBJECT (dec, "error: %s", dbg); - dec->priv->error_count += weight; - dec->priv->discont = TRUE; - if (dec->ctx->max_errors < dec->priv->error_count) { - gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR, - domain, code, txt, dbg, file, function, line); - return GST_FLOW_ERROR; - } else { - return GST_FLOW_OK; - } -} diff --git a/omx/gstbaseaudiodecoder.h b/omx/gstbaseaudiodecoder.h deleted file mode 100644 index b4ac5650c5..0000000000 --- a/omx/gstbaseaudiodecoder.h +++ 
/dev/null @@ -1,270 +0,0 @@ -/* GStreamer - * Copyright (C) 2009 Igalia S.L. - * Author: Iago Toral Quiroga - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. - * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -#ifndef _GST_BASE_AUDIO_DECODER_H_ -#define _GST_BASE_AUDIO_DECODER_H_ - -#ifndef GST_USE_UNSTABLE_API -#warning "GstBaseAudioDecoder is unstable API and may change in future." -#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." -#endif - -#include -#include -#include "gstbaseaudioutils.h" - -G_BEGIN_DECLS - -#define GST_TYPE_BASE_AUDIO_DECODER \ - (gst_base_audio_decoder_get_type()) -#define GST_BASE_AUDIO_DECODER(obj) \ - (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoder)) -#define GST_BASE_AUDIO_DECODER_CLASS(klass) \ - (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoderClass)) -#define GST_BASE_AUDIO_DECODER_GET_CLASS(obj) \ - (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_BASE_AUDIO_DECODER,GstBaseAudioDecoderClass)) -#define GST_IS_BASE_AUDIO_DECODER(obj) \ - (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_DECODER)) -#define GST_IS_BASE_AUDIO_DECODER_CLASS(obj) \ - (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_DECODER)) - -/** - * GST_BASE_AUDIO_DECODER_SINK_NAME: - * - * The name of the templates for the sink pad. - */ -#define GST_BASE_AUDIO_DECODER_SINK_NAME "sink" -/** - * GST_BASE_AUDIO_DECODER_SRC_NAME: - * - * The name of the templates for the source pad. - */ -#define GST_BASE_AUDIO_DECODER_SRC_NAME "src" - -/** - * GST_BASE_AUDIO_DECODER_SRC_PAD: - * @obj: base audio codec instance - * - * Gives the pointer to the source #GstPad object of the element. - */ -#define GST_BASE_AUDIO_DECODER_SRC_PAD(obj) (((GstBaseAudioDecoder *) (obj))->srcpad) - -/** - * GST_BASE_AUDIO_DECODER_SINK_PAD: - * @obj: base audio codec instance - * - * Gives the pointer to the sink #GstPad object of the element. 
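The "sink"/"src" template names defined above are fixed: a subclass has to register pad templates under exactly those names so the base class instance init can look them up. A minimal sketch, assuming a hypothetical GstMyDec subclass with placeholder caps strings (none of these names come from this file):

static GstStaticPadTemplate gst_my_dec_sink_template =
GST_STATIC_PAD_TEMPLATE (GST_BASE_AUDIO_DECODER_SINK_NAME,
    GST_PAD_SINK, GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("audio/x-my-codec"));

static GstStaticPadTemplate gst_my_dec_src_template =
GST_STATIC_PAD_TEMPLATE (GST_BASE_AUDIO_DECODER_SRC_NAME,
    GST_PAD_SRC, GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("audio/x-raw-int, "
        "rate = (int) [ 8000, 96000 ], channels = (int) [ 1, 2 ]"));

static void
gst_my_dec_base_init (gpointer g_class)
{
  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);

  /* both templates must be present; the base class creates its pads
   * from the templates registered under these names */
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&gst_my_dec_sink_template));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&gst_my_dec_src_template));
}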
- */ -#define GST_BASE_AUDIO_DECODER_SINK_PAD(obj) (((GstBaseAudioDecoder *) (obj))->sinkpad) - -#define GST_BASE_AUDIO_DECODER_STREAM_LOCK(dec) g_static_rec_mutex_lock (&GST_BASE_AUDIO_DECODER (dec)->stream_lock) -#define GST_BASE_AUDIO_DECODER_STREAM_UNLOCK(dec) g_static_rec_mutex_unlock (&GST_BASE_AUDIO_DECODER (dec)->stream_lock) - -typedef struct _GstBaseAudioDecoder GstBaseAudioDecoder; -typedef struct _GstBaseAudioDecoderClass GstBaseAudioDecoderClass; - -typedef struct _GstBaseAudioDecoderPrivate GstBaseAudioDecoderPrivate; -typedef struct _GstBaseAudioDecoderContext GstBaseAudioDecoderContext; - -/* do not use this one, use macro below */ -GstFlowReturn _gst_base_audio_decoder_error (GstBaseAudioDecoder *dec, gint weight, - GQuark domain, gint code, - gchar *txt, gchar *debug, - const gchar *file, const gchar *function, - gint line); - -/** - * GST_BASE_AUDIO_DECODER_ERROR: - * @el: the base audio decoder element that generates the error - * @weight: element defined weight of the error, added to error count - * @domain: like CORE, LIBRARY, RESOURCE or STREAM (see #gstreamer-GstGError) - * @code: error code defined for that domain (see #gstreamer-GstGError) - * @text: the message to display (format string and args enclosed in - * parentheses) - * @debug: debugging information for the message (format string and args - * enclosed in parentheses) - * @ret: variable to receive return value - * - * Utility function that audio decoder elements can use in case they encountered - * a data processing error that may be fatal for the current "data unit" but - * need not prevent subsequent decoding. Such errors are counted and if there - * are too many, as configured in the context's max_errors, the pipeline will - * post an error message and the application will be requested to stop further - * media processing. Otherwise, it is considered a "glitch" and only a warning - * is logged. In either case, @ret is set to the proper value to - * return to upstream/caller (indicating either GST_FLOW_ERROR or GST_FLOW_OK). - */ -#define GST_BASE_AUDIO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \ -G_STMT_START { \ - gchar *__txt = _gst_element_error_printf text; \ - gchar *__dbg = _gst_element_error_printf debug; \ - GstBaseAudioDecoder *dec = GST_BASE_AUDIO_DECODER (el); \ - ret = _gst_base_audio_decoder_error (dec, w, GST_ ## domain ## _ERROR, \ - GST_ ## domain ## _ERROR_ ## code, __txt, __dbg, __FILE__, \ - GST_FUNCTION, __LINE__); \ -} G_STMT_END - -/** - * GstBaseAudioDecoderContext: - * @state: a #GstAudioState describing input audio format - * @eos: no (immediate) subsequent data in stream - * @sync: stream parsing in sync - * @delay: number of frames pending decoding (typically at least 1 for current) - * @do_plc: whether subclass is prepared to handle (packet) loss concealment - * @min_latency: min latency of element - * @max_latency: max latency of element - * @lookahead: decoder lookahead (in units of input rate samples) - * - * Transparent #GstBaseAudioEncoderContext data structure. - */ -struct _GstBaseAudioDecoderContext { - /* input */ - /* (output) audio format */ - GstAudioState state; - - /* parsing state */ - gboolean eos; - gboolean sync; - - /* misc */ - gint delay; - - /* output */ - gboolean do_plc; - gboolean do_byte_time; - gint max_errors; - /* MT-protected (with LOCK) */ - GstClockTime min_latency; - GstClockTime max_latency; -}; - -/** - * GstBaseAudioDecoder: - * - * The opaque #GstBaseAudioDecoder data structure. 
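A sketch of how a subclass decode step might use this macro; gst_my_dec_handle_frame and my_codec_decode are assumed names, not part of this API:

static gboolean my_codec_decode (GstBuffer * buffer);   /* assumed codec wrapper */

static GstFlowReturn
gst_my_dec_handle_frame (GstBaseAudioDecoder * dec, GstBuffer * buffer)
{
  GstFlowReturn ret = GST_FLOW_OK;

  if (buffer != NULL && !my_codec_decode (buffer)) {
    /* adds weight 1 to the error count; ret becomes GST_FLOW_OK while
     * below ctx->max_errors (glitch), GST_FLOW_ERROR once exceeded */
    GST_BASE_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE,
        ("could not decode frame"),
        ("corrupt frame of %u bytes skipped", GST_BUFFER_SIZE (buffer)), ret);
  }

  return ret;
}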
- */ -struct _GstBaseAudioDecoder -{ - GstElement element; - - /*< protected >*/ - /* source and sink pads */ - GstPad *sinkpad; - GstPad *srcpad; - - /* protects all data processing, i.e. is locked - * in the chain function, finish_frame and when - * processing serialized events */ - GStaticRecMutex stream_lock; - - /* MT-protected (with STREAM_LOCK) */ - GstSegment segment; - GstBaseAudioDecoderContext *ctx; - - /* properties */ - GstClockTime latency; - GstClockTime tolerance; - gboolean plc; - - /*< private >*/ - GstBaseAudioDecoderPrivate *priv; - gpointer _gst_reserved[GST_PADDING_LARGE]; -}; - -/** - * GstBaseAudioDecoderClass: - * @start: Optional. - * Called when the element starts processing. - * Allows opening external resources. - * @stop: Optional. - * Called when the element stops processing. - * Allows closing external resources. - * @set_format: Notifies subclass of incoming data format (caps). - * @parse: Optional. - * Allows chopping incoming data into manageable units (frames) - * for subsequent decoding. This division is at subclass - * discretion and may or may not correspond to 1 (or more) - * frames as defined by audio format. - * @handle_frame: Provides input data (or NULL to clear any remaining data) - * to subclass. Input data ref management is performed by - * base class, subclass should not care or intervene. - * @flush: Optional. - * Instructs subclass to clear any codec caches and discard - * any pending samples and not yet returned encoded data. - * @hard indicates whether a FLUSH is being processed, - * or otherwise a DISCONT (or conceptually similar). - * @event: Optional. - * Event handler on the sink pad. This function should return - * TRUE if the event was handled and should be discarded - * (i.e. not unref'ed). - * @pre_push: Optional. - * Called just prior to pushing (encoded data) buffer downstream. - * Subclass has full discretionary access to buffer, - * and a not OK flow return will abort downstream pushing. - * - * Subclasses can override any of the available virtual methods or not, as - * needed. At minimum @handle_frame (and likely @set_format) needs to be - * overridden. - */ -struct _GstBaseAudioDecoderClass -{ - GstElementClass parent_class; - - /*< public >*/ - /* virtual methods for subclasses */ - - gboolean (*start) (GstBaseAudioDecoder *dec); - - gboolean (*stop) (GstBaseAudioDecoder *dec); - - gboolean (*set_format) (GstBaseAudioDecoder *dec, - GstCaps *caps); - - GstFlowReturn (*parse) (GstBaseAudioDecoder *dec, - GstAdapter *adapter, - gint *offset, gint *length); - - GstFlowReturn (*handle_frame) (GstBaseAudioDecoder *dec, - GstBuffer *buffer); - - void (*flush) (GstBaseAudioDecoder *dec, gboolean hard); - - GstFlowReturn (*pre_push) (GstBaseAudioDecoder *dec, - GstBuffer **buffer); - - gboolean (*event) (GstBaseAudioDecoder *dec, - GstEvent *event); - - /*< private >*/ - gpointer _gst_reserved[GST_PADDING_LARGE]; -}; - -GstFlowReturn gst_base_audio_decoder_finish_frame (GstBaseAudioDecoder * dec, - GstBuffer * buf, gint frames); - -GType gst_base_audio_decoder_get_type (void); - -G_END_DECLS - -#endif - diff --git a/omx/gstbaseaudioencoder.c b/omx/gstbaseaudioencoder.c deleted file mode 100644 index 5750da7b20..0000000000 --- a/omx/gstbaseaudioencoder.c +++ /dev/null @@ -1,1507 +0,0 @@ -/* GStreamer - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. 
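Per the class documentation above, @handle_frame and usually @set_format are the vfuncs a subclass overrides. A minimal sketch of the wiring, reusing the hypothetical gst_my_dec_handle_frame from the previous sketch; GstMyDecClass and the fixed output caps values are placeholders:

static gboolean      gst_my_dec_set_format   (GstBaseAudioDecoder * dec,
                                              GstCaps * caps);
static GstFlowReturn gst_my_dec_handle_frame (GstBaseAudioDecoder * dec,
                                              GstBuffer * buffer);

static void
gst_my_dec_class_init (GstMyDecClass * klass)
{
  GstBaseAudioDecoderClass *base_class = GST_BASE_AUDIO_DECODER_CLASS (klass);

  base_class->set_format = GST_DEBUG_FUNCPTR (gst_my_dec_set_format);
  base_class->handle_frame = GST_DEBUG_FUNCPTR (gst_my_dec_handle_frame);
}

static gboolean
gst_my_dec_set_format (GstBaseAudioDecoder * dec, GstCaps * caps)
{
  GstCaps *out;
  gboolean res;

  /* configure the codec from the input caps (omitted), then fix the
   * raw output caps on the source pad */
  out = gst_caps_new_simple ("audio/x-raw-int",
      "rate", G_TYPE_INT, 48000, "channels", G_TYPE_INT, 2,
      "width", G_TYPE_INT, 16, "depth", G_TYPE_INT, 16,
      "signed", G_TYPE_BOOLEAN, TRUE,
      "endianness", G_TYPE_INT, G_BYTE_ORDER, NULL);
  res = gst_pad_set_caps (GST_BASE_AUDIO_DECODER_SRC_PAD (dec), out);
  gst_caps_unref (out);

  return res;
}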
- * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -/** - * SECTION:gstbaseaudioencoder - * @short_description: Base class for audio encoders - * @see_also: #GstBaseTransform - * - * This base class is for audio encoders turning raw audio samples into - * encoded audio data. - * - * GstBaseAudioEncoder and subclass should cooperate as follows. - * - * - * Configuration - * - * Initially, GstBaseAudioEncoder calls @start when the encoder element - * is activated, which allows subclass to perform any global setup. - * - * - * GstBaseAudioEncoder calls @set_format to inform subclass of the format - * of input audio data that it is about to receive. Subclass should - * setup for encoding and configure various base class context parameters - * appropriately, notably those directing desired input data handling. - * While unlikely, it might be called more than once, if changing input - * parameters require reconfiguration. - * - * - * GstBaseAudioEncoder calls @stop at end of all processing. - * - * - * - * As of configuration stage, and throughout processing, GstBaseAudioEncoder - * provides a GstBaseAudioEncoderContext that provides required context, - * e.g. describing the format of input audio data. - * Conversely, subclass can and should configure context to inform - * base class of its expectation w.r.t. buffer handling. - * - * - * Data processing - * - * Base class gathers input sample data (as directed by the context's - * frame_samples and frame_max) and provides this to subclass' @handle_frame. - * - * - * If codec processing results in encoded data, subclass should call - * @gst_base_audio_encoder_finish_frame to have encoded data pushed - * downstream. Alternatively, it might also call to indicate dropped - * (non-encoded) samples. - * - * - * Just prior to actually pushing a buffer downstream, - * it is passed to @pre_push. - * - * - * During the parsing process GstBaseAudioEncoderClass will handle both - * srcpad and sinkpad events. Sink events will be passed to subclass - * if @event callback has been provided. - * - * - * - * - * Shutdown phase - * - * GstBaseAudioEncoder class calls @stop to inform the subclass that data - * parsing will be stopped. - * - * - * - * - * - * Subclass is responsible for providing pad template caps for - * source and sink pads. The pads need to be named "sink" and "src". It also - * needs to set the fixed caps on srcpad, when the format is ensured. This - * is typically when base class calls subclass' @set_format function, though - * it might be delayed until calling @gst_base_audio_encoder_finish_frame. - * - * In summary, above process should have subclass concentrating on - * codec data processing while leaving other matters to base class, - * such as most notably timestamp handling. 
While it may exert more control - * in this area (see e.g. @pre_push), it is very much not recommended. - * - * In particular, base class will either favor tracking upstream timestamps - * (at the possible expense of jitter) or aim to arrange for a perfect stream of - * output timestamps, depending on #GstBaseAudioEncoder:perfect-ts. - * However, in the latter case, the input may not be so perfect or ideal, which - * is handled as follows. An input timestamp is compared with the expected - * timestamp as dictated by input sample stream and if the deviation is less - * than #GstBaseAudioEncoder:tolerance, the deviation is discarded. - * Otherwise, it is considered a discontuinity and subsequent output timestamp - * is resynced to the new position after performing configured discontinuity - * processing. In the non-perfect-ts case, an upstream variation exceeding - * tolerance only leads to marking DISCONT on subsequent outgoing - * (while timestamps are adjusted to upstream regardless of variation). - * While DISCONT is also marked in the perfect-ts case, this one optionally - * (see #GstBaseAudioEncoder:hard-resync) - * performs some additional steps, such as clipping of (early) input samples - * or draining all currently remaining input data, depending on the direction - * of the discontuinity. - * - * If perfect timestamps are arranged, it is also possible to request baseclass - * (usually set by subclass) to provide additional buffer metadata (in OFFSET - * and OFFSET_END) fields according to granule defined semantics currently - * needed by oggmux. Specifically, OFFSET is set to granulepos (= sample count - * including buffer) and OFFSET_END to corresponding timestamp (as determined - * by same sample count and sample rate). - * - * Things that subclass need to take care of: - * - * Provide pad templates - * - * Set source pad caps when appropriate - * - * - * Inform base class of buffer processing needs using context's - * frame_samples and frame_bytes. - * - * - * Set user-configurable properties to sane defaults for format and - * implementing codec at hand, e.g. those controlling timestamp behaviour - * and discontinuity processing. - * - * - * Accept data in @handle_frame and provide encoded results to - * @gst_base_audio_encoder_finish_frame. - * - * - * - */ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include "gstbaseaudioencoder.h" -#include -#include - -#include -#include - - -GST_DEBUG_CATEGORY_STATIC (gst_base_audio_encoder_debug); -#define GST_CAT_DEFAULT gst_base_audio_encoder_debug - -#define GST_BASE_AUDIO_ENCODER_GET_PRIVATE(obj) \ - (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_BASE_AUDIO_ENCODER, \ - GstBaseAudioEncoderPrivate)) - -enum -{ - PROP_0, - PROP_PERFECT_TS, - PROP_GRANULE, - PROP_HARD_RESYNC, - PROP_TOLERANCE -}; - -#define DEFAULT_PERFECT_TS FALSE -#define DEFAULT_GRANULE FALSE -#define DEFAULT_HARD_RESYNC FALSE -#define DEFAULT_TOLERANCE 40000000 - -struct _GstBaseAudioEncoderPrivate -{ - /* activation status */ - gboolean active; - - /* input base/first ts as basis for output ts; - * kept nearly constant for perfect_ts, - * otherwise resyncs to upstream ts */ - GstClockTime base_ts; - /* corresponding base granulepos */ - gint64 base_gp; - /* input samples processed and sent downstream so far (w.r.t. 
base_ts) */ - guint64 samples; - - /* currently collected sample data */ - GstAdapter *adapter; - /* offset in adapter up to which already supplied to encoder */ - gint offset; - /* mark outgoing discont */ - gboolean discont; - /* to guess duration of drained data */ - GstClockTime last_duration; - - /* subclass provided data in processing round */ - gboolean got_data; - /* subclass gave all it could already */ - gboolean drained; - /* subclass currently being forcibly drained */ - gboolean force; - - /* output bps estimatation */ - /* global in samples seen */ - guint64 samples_in; - /* global bytes sent out */ - guint64 bytes_out; - - /* context storage */ - GstBaseAudioEncoderContext ctx; - - /* pending serialized sink events, will be sent from finish_frame() */ - GList *pending_events; -}; - - -static void -do_init (GType gtype) -{ - const GInterfaceInfo preset_interface_info = { - NULL, /* interface_init */ - NULL, /* interface_finalize */ - NULL /* interface_data */ - }; - - g_type_add_interface_static (gtype, GST_TYPE_PRESET, &preset_interface_info); -} - -GST_BOILERPLATE_FULL (GstBaseAudioEncoder, gst_base_audio_encoder, GstElement, - GST_TYPE_ELEMENT, do_init); - -static void gst_base_audio_encoder_finalize (GObject * object); -static void gst_base_audio_encoder_reset (GstBaseAudioEncoder * enc, - gboolean full); - -static void gst_base_audio_encoder_set_property (GObject * object, - guint prop_id, const GValue * value, GParamSpec * pspec); -static void gst_base_audio_encoder_get_property (GObject * object, - guint prop_id, GValue * value, GParamSpec * pspec); - -static gboolean gst_base_audio_encoder_sink_activate_push (GstPad * pad, - gboolean active); - -static gboolean gst_base_audio_encoder_sink_event (GstPad * pad, - GstEvent * event); -static gboolean gst_base_audio_encoder_sink_setcaps (GstPad * pad, - GstCaps * caps); -static GstFlowReturn gst_base_audio_encoder_chain (GstPad * pad, - GstBuffer * buffer); -static gboolean gst_base_audio_encoder_src_query (GstPad * pad, - GstQuery * query); -static gboolean gst_base_audio_encoder_sink_query (GstPad * pad, - GstQuery * query); -static const GstQueryType *gst_base_audio_encoder_get_query_types (GstPad * - pad); -static GstCaps *gst_base_audio_encoder_sink_getcaps (GstPad * pad); - - -static void -gst_base_audio_encoder_class_init (GstBaseAudioEncoderClass * klass) -{ - GObjectClass *gobject_class; - - gobject_class = G_OBJECT_CLASS (klass); - - GST_DEBUG_CATEGORY_INIT (gst_base_audio_encoder_debug, "baseaudioencoder", 0, - "baseaudioencoder element"); - - g_type_class_add_private (klass, sizeof (GstBaseAudioEncoderPrivate)); - - gobject_class->set_property = gst_base_audio_encoder_set_property; - gobject_class->get_property = gst_base_audio_encoder_get_property; - - gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_base_audio_encoder_finalize); - - /* properties */ - g_object_class_install_property (gobject_class, PROP_PERFECT_TS, - g_param_spec_boolean ("perfect-ts", "Perfect Timestamps", - "Favour perfect timestamps over tracking upstream timestamps", - DEFAULT_PERFECT_TS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - g_object_class_install_property (gobject_class, PROP_GRANULE, - g_param_spec_boolean ("granule", "Granule Marking", - "Apply granule semantics to buffer metadata (implies perfect-ts)", - DEFAULT_GRANULE, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS)); - g_object_class_install_property (gobject_class, PROP_HARD_RESYNC, - g_param_spec_boolean ("hard-resync", "Hard Resync", - "Perform clipping and sample flushing 
upon discontinuity", - DEFAULT_HARD_RESYNC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); - g_object_class_install_property (gobject_class, PROP_TOLERANCE, - g_param_spec_int64 ("tolerance", "Tolerance", - "Consider discontinuity if timestamp jitter/imperfection exceeds tolerance (ns)", - 0, G_MAXINT64, DEFAULT_TOLERANCE, - G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); -} - -static void -gst_base_audio_encoder_base_init (gpointer g_class) -{ -} - -static void -gst_base_audio_encoder_init (GstBaseAudioEncoder * enc, - GstBaseAudioEncoderClass * bclass) -{ - GstPadTemplate *pad_template; - - GST_DEBUG_OBJECT (enc, "gst_base_audio_encoder_init"); - - enc->priv = GST_BASE_AUDIO_ENCODER_GET_PRIVATE (enc); - - /* only push mode supported */ - pad_template = - gst_element_class_get_pad_template (GST_ELEMENT_CLASS (bclass), "sink"); - g_return_if_fail (pad_template != NULL); - enc->sinkpad = gst_pad_new_from_template (pad_template, "sink"); - gst_pad_set_event_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_event)); - gst_pad_set_setcaps_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_setcaps)); - gst_pad_set_getcaps_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_getcaps)); - gst_pad_set_query_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_query)); - gst_pad_set_chain_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_chain)); - gst_pad_set_activatepush_function (enc->sinkpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_sink_activate_push)); - gst_element_add_pad (GST_ELEMENT (enc), enc->sinkpad); - - GST_DEBUG_OBJECT (enc, "sinkpad created"); - - /* and we don't mind upstream traveling stuff that much ... */ - pad_template = - gst_element_class_get_pad_template (GST_ELEMENT_CLASS (bclass), "src"); - g_return_if_fail (pad_template != NULL); - enc->srcpad = gst_pad_new_from_template (pad_template, "src"); - gst_pad_set_query_function (enc->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_src_query)); - gst_pad_set_query_type_function (enc->srcpad, - GST_DEBUG_FUNCPTR (gst_base_audio_encoder_get_query_types)); - gst_pad_use_fixed_caps (enc->srcpad); - gst_element_add_pad (GST_ELEMENT (enc), enc->srcpad); - GST_DEBUG_OBJECT (enc, "src created"); - - enc->priv->adapter = gst_adapter_new (); - enc->ctx = &enc->priv->ctx; - - g_static_rec_mutex_init (&enc->stream_lock); - - /* property default */ - enc->perfect_ts = DEFAULT_PERFECT_TS; - enc->hard_resync = DEFAULT_HARD_RESYNC; - enc->tolerance = DEFAULT_TOLERANCE; - - /* init state */ - gst_base_audio_encoder_reset (enc, TRUE); - GST_DEBUG_OBJECT (enc, "init ok"); -} - -static void -gst_base_audio_encoder_reset (GstBaseAudioEncoder * enc, gboolean full) -{ - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - - if (full) { - enc->priv->active = FALSE; - enc->priv->samples_in = 0; - enc->priv->bytes_out = 0; - g_free (enc->ctx->state.channel_pos); - memset (enc->ctx, 0, sizeof (enc->ctx)); - - g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL); - g_list_free (enc->priv->pending_events); - enc->priv->pending_events = NULL; - } - - gst_segment_init (&enc->segment, GST_FORMAT_TIME); - - gst_adapter_clear (enc->priv->adapter); - enc->priv->got_data = FALSE; - enc->priv->drained = TRUE; - enc->priv->offset = 0; - enc->priv->base_ts = GST_CLOCK_TIME_NONE; - enc->priv->base_gp = -1; - enc->priv->samples = 0; - enc->priv->discont = FALSE; - - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); -} - -static void 
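For reference, the properties installed above are plain GObject properties, so an application can tune the timestamp behaviour of any element derived from this base class; "myaudioenc" is an assumed element name:

GstElement *enc;

enc = gst_element_factory_make ("myaudioenc", NULL);
if (enc != NULL) {
  g_object_set (enc,
      "perfect-ts", TRUE,              /* favour a perfect output ts stream */
      "hard-resync", FALSE,            /* only mark DISCONT on large jumps */
      "tolerance", 20 * GST_MSECOND,   /* allowed jitter before a resync */
      NULL);
}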
-gst_base_audio_encoder_finalize (GObject * object) -{ - GstBaseAudioEncoder *enc = GST_BASE_AUDIO_ENCODER (object); - - g_object_unref (enc->priv->adapter); - - g_static_rec_mutex_free (&enc->stream_lock); - - G_OBJECT_CLASS (parent_class)->finalize (object); -} - -/** - * gst_base_audio_encoder_finish_frame: - * @enc: a #GstBaseAudioEncoder - * @buffer: encoded data - * @samples: number of samples (per channel) represented by encoded data - * - * Collects encoded data and/or pushes encoded data downstream. - * Source pad caps must be set when this is called. Depending on the nature - * of the (framing of) the format, subclass can decide whether to push - * encoded data directly or to collect various "frames" in a single buffer. - * Note that the latter behaviour is recommended whenever the format is allowed, - * as it incurs no additional latency and avoids otherwise generating a - * a multitude of (small) output buffers. If not explicitly pushed, - * any available encoded data is pushed at the end of each processing cycle, - * i.e. which encodes as much data as available input data allows. - * - * If @samples < 0, then best estimate is all samples provided to encoder - * (subclass) so far. @buf may be NULL, in which case next number of @samples - * are considered discarded, e.g. as a result of discontinuous transmission, - * and a discontinuity is marked (note that @buf == NULL => push == TRUE). - * - * Returns: a #GstFlowReturn that should be escalated to caller (of caller) - */ -GstFlowReturn -gst_base_audio_encoder_finish_frame (GstBaseAudioEncoder * enc, GstBuffer * buf, - gint samples) -{ - GstBaseAudioEncoderClass *klass; - GstBaseAudioEncoderPrivate *priv; - GstBaseAudioEncoderContext *ctx; - GstFlowReturn ret = GST_FLOW_OK; - - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - priv = enc->priv; - ctx = enc->ctx; - - /* subclass should know what it is producing by now */ - g_return_val_if_fail (GST_PAD_CAPS (enc->srcpad) != NULL, GST_FLOW_ERROR); - /* subclass should not hand us no data */ - g_return_val_if_fail (buf == NULL || GST_BUFFER_SIZE (buf) > 0, - GST_FLOW_ERROR); - - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - - GST_LOG_OBJECT (enc, "accepting %d bytes encoded data as %d samples", - buf ? 
GST_BUFFER_SIZE (buf) : -1, samples); - - /* mark subclass still alive and providing */ - priv->got_data = TRUE; - - if (priv->pending_events) { - GList *pending_events, *l; - - pending_events = priv->pending_events; - priv->pending_events = NULL; - - GST_DEBUG_OBJECT (enc, "Pushing pending events"); - for (l = priv->pending_events; l; l = l->next) - gst_pad_push_event (enc->srcpad, l->data); - g_list_free (pending_events); - } - - /* remove corresponding samples from input */ - if (samples < 0) - samples = (enc->priv->offset / ctx->state.bpf); - - if (G_LIKELY (samples)) { - /* track upstream ts if so configured */ - if (!enc->perfect_ts) { - guint64 ts, distance; - - ts = gst_adapter_prev_timestamp (priv->adapter, &distance); - g_assert (distance % ctx->state.bpf == 0); - distance /= ctx->state.bpf; - GST_LOG_OBJECT (enc, "%" G_GUINT64_FORMAT " samples past prev_ts %" - GST_TIME_FORMAT, distance, GST_TIME_ARGS (ts)); - GST_LOG_OBJECT (enc, "%" G_GUINT64_FORMAT " samples past base_ts %" - GST_TIME_FORMAT, priv->samples, GST_TIME_ARGS (priv->base_ts)); - /* when draining adapter might be empty and no ts to offer */ - if (GST_CLOCK_TIME_IS_VALID (ts) && ts != priv->base_ts) { - GstClockTimeDiff diff; - GstClockTime old_ts, next_ts; - - /* passed into another buffer; - * mild check for discontinuity and only mark if so */ - next_ts = ts + - gst_util_uint64_scale (distance, GST_SECOND, ctx->state.rate); - old_ts = priv->base_ts + - gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->state.rate); - diff = GST_CLOCK_DIFF (next_ts, old_ts); - GST_LOG_OBJECT (enc, "ts diff %d ms", (gint) (diff / GST_MSECOND)); - /* only mark discontinuity if beyond tolerance */ - if (G_UNLIKELY (diff < -enc->tolerance || diff > enc->tolerance)) { - GST_DEBUG_OBJECT (enc, "marked discont"); - priv->discont = TRUE; - } - GST_LOG_OBJECT (enc, "new upstream ts %" GST_TIME_FORMAT - " at distance %" G_GUINT64_FORMAT, GST_TIME_ARGS (ts), distance); - /* re-sync to upstream ts */ - priv->base_ts = ts; - priv->samples = distance; - } - } - /* advance sample view */ - if (G_UNLIKELY (samples * ctx->state.bpf > priv->offset)) { - if (G_LIKELY (!priv->force)) { - /* no way we can let this pass */ - g_assert_not_reached (); - /* really no way */ - goto overflow; - } else { - priv->offset = 0; - if (samples * ctx->state.bpf >= gst_adapter_available (priv->adapter)) - gst_adapter_clear (priv->adapter); - else - gst_adapter_flush (priv->adapter, samples * ctx->state.bpf); - } - } else { - gst_adapter_flush (priv->adapter, samples * ctx->state.bpf); - priv->offset -= samples * ctx->state.bpf; - /* avoid subsequent stray prev_ts */ - if (G_UNLIKELY (gst_adapter_available (priv->adapter) == 0)) - gst_adapter_clear (priv->adapter); - } - /* sample count advanced below after buffer handling */ - } - - /* collect output */ - if (G_LIKELY (buf)) { - GST_LOG_OBJECT (enc, "taking %d bytes for output", GST_BUFFER_SIZE (buf)); - buf = gst_buffer_make_metadata_writable (buf); - - /* decorate */ - gst_buffer_set_caps (buf, GST_PAD_CAPS (enc->srcpad)); - if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) { - /* FIXME ? lookahead could lead to weird ts and duration ? 
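From the subclass side, the counterpart of this function is a @handle_frame implementation that hands back encoded buffers together with the number of input samples they account for. A sketch, where my_codec_encode and the fixed 1024-sample frame size are assumptions:

static GstBuffer *my_codec_encode (const guint8 * data, guint size);    /* assumed */

static GstFlowReturn
gst_my_enc_handle_frame (GstBaseAudioEncoder * enc, GstBuffer * buffer)
{
  GstBuffer *outbuf;

  if (buffer == NULL)           /* NULL input means: drain what is left */
    return GST_FLOW_OK;

  outbuf = my_codec_encode (GST_BUFFER_DATA (buffer), GST_BUFFER_SIZE (buffer));
  if (outbuf == NULL)           /* codec needs more input before output */
    return GST_FLOW_OK;

  /* this encoded buffer consumed 1024 samples (per channel) of input */
  return gst_base_audio_encoder_finish_frame (enc, outbuf, 1024);
}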
- * (particularly if not in perfect mode) */ - /* mind sample rounding and produce perfect output */ - GST_BUFFER_TIMESTAMP (buf) = priv->base_ts + - gst_util_uint64_scale (priv->samples - ctx->lookahead, GST_SECOND, - ctx->state.rate); - GST_DEBUG_OBJECT (enc, "out samples %d", samples); - if (G_LIKELY (samples > 0)) { - priv->samples += samples; - GST_BUFFER_DURATION (buf) = priv->base_ts + - gst_util_uint64_scale (priv->samples - ctx->lookahead, GST_SECOND, - ctx->state.rate) - GST_BUFFER_TIMESTAMP (buf); - priv->last_duration = GST_BUFFER_DURATION (buf); - } else { - /* duration forecast in case of handling remainder; - * the last one is probably like the previous one ... */ - GST_BUFFER_DURATION (buf) = priv->last_duration; - } - if (priv->base_gp >= 0) { - /* pamper oggmux */ - /* FIXME: in longer run, muxer should take care of this ... */ - /* offset_end = granulepos for ogg muxer */ - GST_BUFFER_OFFSET_END (buf) = priv->base_gp + priv->samples - - enc->ctx->lookahead; - /* offset = timestamp corresponding to granulepos for ogg muxer */ - GST_BUFFER_OFFSET (buf) = - GST_FRAMES_TO_CLOCK_TIME (GST_BUFFER_OFFSET_END (buf), - ctx->state.rate); - } else { - GST_BUFFER_OFFSET (buf) = priv->bytes_out; - GST_BUFFER_OFFSET_END (buf) = priv->bytes_out + GST_BUFFER_SIZE (buf); - } - } - - priv->bytes_out += GST_BUFFER_SIZE (buf); - - if (G_UNLIKELY (priv->discont)) { - GST_LOG_OBJECT (enc, "marking discont"); - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT); - priv->discont = FALSE; - } - - if (klass->pre_push) { - /* last chance for subclass to do some dirty stuff */ - ret = klass->pre_push (enc, &buf); - if (ret != GST_FLOW_OK || !buf) { - GST_DEBUG_OBJECT (enc, "subclass returned %s, buf %p", - gst_flow_get_name (ret), buf); - if (buf) - gst_buffer_unref (buf); - goto exit; - } - } - - GST_LOG_OBJECT (enc, "pushing buffer of size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); - - ret = gst_pad_push (enc->srcpad, buf); - GST_LOG_OBJECT (enc, "buffer pushed: %s", gst_flow_get_name (ret)); - } else { - /* merely advance samples, most work for that already done above */ - priv->samples += samples; - } - -exit: - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - - return ret; - - /* ERRORS */ -overflow: - { - GST_ELEMENT_ERROR (enc, STREAM, ENCODE, - ("received more encoded samples %d than provided %d", - samples, priv->offset / ctx->state.bpf), (NULL)); - if (buf) - gst_buffer_unref (buf); - ret = GST_FLOW_ERROR; - goto exit; - } -} - - /* adapter tracking idea: - * - start of adapter corresponds with what has already been encoded - * (i.e. really returned by encoder subclass) - * - start + offset is what needs to be fed to subclass next */ -static GstFlowReturn -gst_base_audio_encoder_push_buffers (GstBaseAudioEncoder * enc, gboolean force) -{ - GstBaseAudioEncoderClass *klass; - GstBaseAudioEncoderPrivate *priv; - GstBaseAudioEncoderContext *ctx; - gint av, need; - GstBuffer *buf; - GstFlowReturn ret = GST_FLOW_OK; - - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - - g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR); - - priv = enc->priv; - ctx = enc->ctx; - - while (ret == GST_FLOW_OK) { - - buf = NULL; - av = gst_adapter_available (priv->adapter); - - g_assert (priv->offset <= av); - av -= priv->offset; - - need = - ctx->frame_samples_min > - 0 ? 
ctx->frame_samples_min * ctx->state.bpf : av; - GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d", av, need, - force); - - if ((need > av) || !av) { - if (G_UNLIKELY (force)) { - priv->force = TRUE; - need = av; - } else { - break; - } - } else { - priv->force = FALSE; - } - - if (ctx->frame_samples_max > 0) - need = MIN (av, ctx->frame_samples_max * ctx->state.bpf); - - if (ctx->frame_samples_min == ctx->frame_samples_max) { - /* if we have some extra metadata, - * provide for integer multiple of frames to allow for better granularity - * of processing */ - if (ctx->frame_samples_min > 0 && need) { - if (ctx->frame_max > 1) - need = need * MIN ((av / need), ctx->frame_max); - else if (ctx->frame_max == 0) - need = need * (av / need); - } - } - - if (need) { - buf = gst_buffer_new (); - GST_BUFFER_DATA (buf) = (guint8 *) - gst_adapter_peek (priv->adapter, priv->offset + need) + priv->offset; - GST_BUFFER_SIZE (buf) = need; - } - - GST_LOG_OBJECT (enc, "providing subclass with %d bytes at offset %d", - need, priv->offset); - - /* mark this already as consumed, - * which it should be when subclass gives us data in exchange for samples */ - priv->offset += need; - priv->samples_in += need / ctx->state.bpf; - - priv->got_data = FALSE; - ret = klass->handle_frame (enc, buf); - - if (G_LIKELY (buf)) - gst_buffer_unref (buf); - - /* no data to feed, no leftover provided, then bail out */ - if (G_UNLIKELY (!buf && !priv->got_data)) { - priv->drained = TRUE; - GST_LOG_OBJECT (enc, "no more data drained from subclass"); - break; - } - } - - return ret; -} - -static GstFlowReturn -gst_base_audio_encoder_drain (GstBaseAudioEncoder * enc) -{ - if (enc->priv->drained) - return GST_FLOW_OK; - else - return gst_base_audio_encoder_push_buffers (enc, TRUE); -} - -static void -gst_base_audio_encoder_set_base_gp (GstBaseAudioEncoder * enc) -{ - GstClockTime ts; - - if (!enc->granule) - return; - - /* use running time for granule */ - /* incoming data is clipped, so a valid input should yield a valid output */ - ts = gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, - enc->priv->base_ts); - if (GST_CLOCK_TIME_IS_VALID (ts)) { - enc->priv->base_gp = - GST_CLOCK_TIME_TO_FRAMES (enc->priv->base_ts, enc->ctx->state.rate); - GST_DEBUG_OBJECT (enc, "new base gp %" G_GINT64_FORMAT, enc->priv->base_gp); - } else { - /* should reasonably have a valid base, - * otherwise start at 0 if we did not already start there earlier */ - if (enc->priv->base_gp < 0) { - enc->priv->base_gp = 0; - GST_DEBUG_OBJECT (enc, "new base gp %" G_GINT64_FORMAT, - enc->priv->base_gp); - } - } -} - -static GstFlowReturn -gst_base_audio_encoder_chain (GstPad * pad, GstBuffer * buffer) -{ - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderPrivate *priv; - GstBaseAudioEncoderContext *ctx; - GstFlowReturn ret = GST_FLOW_OK; - gboolean discont; - - enc = GST_BASE_AUDIO_ENCODER (GST_OBJECT_PARENT (pad)); - - priv = enc->priv; - ctx = enc->ctx; - - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - - /* should know what is coming by now */ - if (!ctx->state.bpf) - goto not_negotiated; - - GST_LOG_OBJECT (enc, - "received buffer of size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buffer), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buffer))); - - /* input shoud be whole number of sample frames */ - if (GST_BUFFER_SIZE (buffer) % ctx->state.bpf) - goto wrong_buffer; - -#ifndef GST_DISABLE_GST_DEBUG - { - GstClockTime duration; - GstClockTimeDiff diff; - - /* 
verify buffer duration */ - duration = gst_util_uint64_scale (GST_BUFFER_SIZE (buffer), GST_SECOND, - ctx->state.rate * ctx->state.bpf); - diff = GST_CLOCK_DIFF (duration, GST_BUFFER_DURATION (buffer)); - if (GST_BUFFER_DURATION (buffer) != GST_CLOCK_TIME_NONE && - (diff > GST_SECOND / ctx->state.rate / 2 || - diff < -GST_SECOND / ctx->state.rate / 2)) { - GST_DEBUG_OBJECT (enc, "incoming buffer had incorrect duration %" - GST_TIME_FORMAT ", expected duration %" GST_TIME_FORMAT, - GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)), - GST_TIME_ARGS (duration)); - } - } -#endif - - discont = GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT); - if (G_UNLIKELY (discont)) { - GST_LOG_OBJECT (buffer, "marked discont"); - enc->priv->discont = discont; - } - - /* clip to segment */ - /* NOTE: slightly painful linking -laudio only for this one ... */ - buffer = gst_audio_buffer_clip (buffer, &enc->segment, ctx->state.rate, - ctx->state.bpf); - if (G_UNLIKELY (!buffer)) { - GST_DEBUG_OBJECT (buffer, "no data after clipping to segment"); - goto done; - } - - GST_LOG_OBJECT (enc, - "buffer after segment clipping has size %d with ts %" GST_TIME_FORMAT - ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buffer), - GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)), - GST_TIME_ARGS (GST_BUFFER_DURATION (buffer))); - - if (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) { - priv->base_ts = GST_BUFFER_TIMESTAMP (buffer); - GST_DEBUG_OBJECT (enc, "new base ts %" GST_TIME_FORMAT, - GST_TIME_ARGS (priv->base_ts)); - gst_base_audio_encoder_set_base_gp (enc); - } - - /* check for continuity; - * checked elsewhere in non-perfect case */ - if (enc->perfect_ts) { - GstClockTimeDiff diff = 0; - GstClockTime next_ts = 0; - - if (GST_BUFFER_TIMESTAMP_IS_VALID (buffer) && - GST_CLOCK_TIME_IS_VALID (priv->base_ts)) { - guint64 samples; - - samples = priv->samples + - gst_adapter_available (priv->adapter) / ctx->state.bpf; - next_ts = priv->base_ts + - gst_util_uint64_scale (samples, GST_SECOND, ctx->state.rate); - GST_LOG_OBJECT (enc, "buffer is %" G_GUINT64_FORMAT - " samples past base_ts %" GST_TIME_FORMAT - ", expected ts %" GST_TIME_FORMAT, samples, - GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts)); - diff = GST_CLOCK_DIFF (next_ts, GST_BUFFER_TIMESTAMP (buffer)); - GST_LOG_OBJECT (enc, "ts diff %d ms", (gint) (diff / GST_MSECOND)); - /* if within tolerance, - * discard buffer ts and carry on producing perfect stream, - * otherwise clip or resync to ts */ - if (G_UNLIKELY (diff < -enc->tolerance || diff > enc->tolerance)) { - GST_DEBUG_OBJECT (enc, "marked discont"); - discont = TRUE; - } - } - - /* do some fancy tweaking in hard resync case */ - if (discont && enc->hard_resync) { - if (diff < 0) { - guint64 diff_bytes; - - GST_WARNING_OBJECT (enc, "Buffer is older than expected ts %" - GST_TIME_FORMAT ". 
Clipping buffer", GST_TIME_ARGS (next_ts)); - - diff_bytes = - GST_CLOCK_TIME_TO_FRAMES (-diff, ctx->state.rate) * ctx->state.bpf; - if (diff_bytes >= GST_BUFFER_SIZE (buffer)) { - gst_buffer_unref (buffer); - goto done; - } - buffer = gst_buffer_make_metadata_writable (buffer); - GST_BUFFER_DATA (buffer) += diff_bytes; - GST_BUFFER_SIZE (buffer) -= diff_bytes; - - GST_BUFFER_TIMESTAMP (buffer) += diff; - /* care even less about duration after this */ - } else { - /* drain stuff prior to resync */ - gst_base_audio_encoder_drain (enc); - } - } - /* now re-sync ts */ - priv->base_ts += diff; - gst_base_audio_encoder_set_base_gp (enc); - priv->discont |= discont; - } - - gst_adapter_push (enc->priv->adapter, buffer); - /* new stuff, so we can push subclass again */ - enc->priv->drained = FALSE; - - ret = gst_base_audio_encoder_push_buffers (enc, FALSE); - -done: - GST_LOG_OBJECT (enc, "chain leaving"); - - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - - return ret; - - /* ERRORS */ -not_negotiated: - { - GST_ELEMENT_ERROR (enc, CORE, NEGOTIATION, (NULL), - ("encoder not initialized")); - gst_buffer_unref (buffer); - ret = GST_FLOW_NOT_NEGOTIATED; - goto done; - } -wrong_buffer: - { - GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL), - ("buffer size %d not a multiple of %d", GST_BUFFER_SIZE (buffer), - ctx->state.bpf)); - gst_buffer_unref (buffer); - ret = GST_FLOW_ERROR; - goto done; - } -} - -static gboolean -gst_base_audio_encoder_sink_setcaps (GstPad * pad, GstCaps * caps) -{ - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderClass *klass; - GstBaseAudioEncoderContext *ctx; - GstAudioState *state; - gboolean res = TRUE, changed = FALSE; - - enc = GST_BASE_AUDIO_ENCODER (GST_PAD_PARENT (pad)); - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - - /* subclass must do something here ... 
*/ - g_return_val_if_fail (klass->set_format != NULL, FALSE); - - ctx = enc->ctx; - state = &ctx->state; - - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - - GST_DEBUG_OBJECT (enc, "caps: %" GST_PTR_FORMAT, caps); - - if (!gst_caps_is_fixed (caps)) - goto refuse_caps; - - /* adjust ts tracking to new sample rate */ - if (GST_CLOCK_TIME_IS_VALID (enc->priv->base_ts) && state->rate) { - enc->priv->base_ts += - GST_FRAMES_TO_CLOCK_TIME (enc->priv->samples, state->rate); - enc->priv->samples = 0; - } - - if (!gst_base_audio_parse_caps (caps, state, &changed)) - goto refuse_caps; - - if (changed) { - GstClockTime old_min_latency; - GstClockTime old_max_latency; - - /* drain any pending old data stuff */ - gst_base_audio_encoder_drain (enc); - - /* context defaults */ - enc->ctx->frame_samples_min = 0; - enc->ctx->frame_samples_max = 0; - enc->ctx->frame_max = 0; - enc->ctx->lookahead = 0; - - /* element might report latency */ - GST_OBJECT_LOCK (enc); - old_min_latency = ctx->min_latency; - old_max_latency = ctx->max_latency; - GST_OBJECT_UNLOCK (enc); - - if (klass->set_format) - res = klass->set_format (enc, state); - - /* notify if new latency */ - GST_OBJECT_LOCK (enc); - if ((ctx->min_latency > 0 && ctx->min_latency != old_min_latency) || - (ctx->max_latency > 0 && ctx->max_latency != old_max_latency)) { - GST_OBJECT_UNLOCK (enc); - /* post latency message on the bus */ - gst_element_post_message (GST_ELEMENT (enc), - gst_message_new_latency (GST_OBJECT (enc))); - GST_OBJECT_LOCK (enc); - } - GST_OBJECT_UNLOCK (enc); - } else { - GST_DEBUG_OBJECT (enc, "new audio format identical to configured format"); - } - -exit: - - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - - return res; - - /* ERRORS */ -refuse_caps: - { - GST_WARNING_OBJECT (enc, "rejected caps %" GST_PTR_FORMAT, caps); - goto exit; - } -} - - -/** - * gst_base_audio_encoder_proxy_getcaps: - * @enc: a #GstBaseAudioEncoder - * @caps: initial - * - * Returns caps that express @caps (or sink template caps if @caps == NULL) - * restricted to channel/rate combinations supported by downstream elements - * (e.g. muxers). - * - * Returns: a #GstCaps owned by caller - */ -GstCaps * -gst_base_audio_encoder_proxy_getcaps (GstBaseAudioEncoder * enc, GstCaps * caps) -{ - const GstCaps *templ_caps; - GstCaps *allowed = NULL; - GstCaps *fcaps, *filter_caps; - gint i, j; - - /* we want to be able to communicate to upstream elements like audioconvert - * and audioresample any rate/channel restrictions downstream (e.g. muxer - * only accepting certain sample rates) */ - templ_caps = caps ? 
caps : gst_pad_get_pad_template_caps (enc->sinkpad); - allowed = gst_pad_get_allowed_caps (enc->srcpad); - if (!allowed || gst_caps_is_empty (allowed) || gst_caps_is_any (allowed)) { - fcaps = gst_caps_copy (templ_caps); - goto done; - } - - GST_LOG_OBJECT (enc, "template caps %" GST_PTR_FORMAT, templ_caps); - GST_LOG_OBJECT (enc, "allowed caps %" GST_PTR_FORMAT, allowed); - - filter_caps = gst_caps_new_empty (); - - for (i = 0; i < gst_caps_get_size (templ_caps); i++) { - GQuark q_name; - - q_name = gst_structure_get_name_id (gst_caps_get_structure (templ_caps, i)); - - /* pick rate + channel fields from allowed caps */ - for (j = 0; j < gst_caps_get_size (allowed); j++) { - const GstStructure *allowed_s = gst_caps_get_structure (allowed, j); - const GValue *val; - GstStructure *s; - - s = gst_structure_id_empty_new (q_name); - if ((val = gst_structure_get_value (allowed_s, "rate"))) - gst_structure_set_value (s, "rate", val); - if ((val = gst_structure_get_value (allowed_s, "channels"))) - gst_structure_set_value (s, "channels", val); - - gst_caps_merge_structure (filter_caps, s); - } - } - - fcaps = gst_caps_intersect (filter_caps, templ_caps); - gst_caps_unref (filter_caps); - -done: - gst_caps_replace (&allowed, NULL); - - GST_LOG_OBJECT (enc, "proxy caps %" GST_PTR_FORMAT, fcaps); - - return fcaps; -} - -static GstCaps * -gst_base_audio_encoder_sink_getcaps (GstPad * pad) -{ - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderClass *klass; - GstCaps *caps; - - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - g_assert (pad == enc->sinkpad); - - if (klass->getcaps) - caps = klass->getcaps (enc); - else - caps = gst_base_audio_encoder_proxy_getcaps (enc, NULL); - gst_object_unref (enc); - - GST_LOG_OBJECT (enc, "returning caps %" GST_PTR_FORMAT, caps); - - return caps; -} - -static gboolean -gst_base_audio_encoder_sink_eventfunc (GstBaseAudioEncoder * enc, - GstEvent * event) -{ - GstBaseAudioEncoderClass *klass; - gboolean handled = FALSE; - - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - - switch (GST_EVENT_TYPE (event)) { - case GST_EVENT_NEWSEGMENT: - { - GstFormat format; - gdouble rate, arate; - gint64 start, stop, time; - gboolean update; - - gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format, - &start, &stop, &time); - - if (format == GST_FORMAT_TIME) { - GST_DEBUG_OBJECT (enc, "received TIME NEW_SEGMENT %" GST_TIME_FORMAT - " -- %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT - ", rate %g, applied_rate %g", - GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (time), - rate, arate); - } else { - GST_DEBUG_OBJECT (enc, "received NEW_SEGMENT %" G_GINT64_FORMAT - " -- %" G_GINT64_FORMAT ", time %" G_GINT64_FORMAT - ", rate %g, applied_rate %g", start, stop, time, rate, arate); - GST_DEBUG_OBJECT (enc, "unsupported format; ignoring"); - break; - } - - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - /* finish current segment */ - gst_base_audio_encoder_drain (enc); - /* reset partially for new segment */ - gst_base_audio_encoder_reset (enc, FALSE); - /* and follow along with segment */ - gst_segment_set_newsegment_full (&enc->segment, update, rate, arate, - format, start, stop, time); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - break; - } - - case GST_EVENT_FLUSH_START: - break; - - case GST_EVENT_FLUSH_STOP: - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - /* discard any pending stuff */ - /* TODO route through drain ?? 
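The fallback above proxies downstream restrictions; a subclass that additionally knows its own input limits could implement @getcaps along these lines (gst_my_enc_getcaps and the constraint string are assumptions):

static GstCaps *
gst_my_enc_getcaps (GstBaseAudioEncoder * enc)
{
  GstCaps *proxy, *constraint, *caps;

  /* start from what downstream elements can accept ... */
  proxy = gst_base_audio_encoder_proxy_getcaps (enc, NULL);
  /* ... and restrict it further to what this codec can take */
  constraint =
      gst_caps_from_string ("audio/x-raw-int, channels = (int) [ 1, 2 ]");
  caps = gst_caps_intersect (proxy, constraint);

  gst_caps_unref (proxy);
  gst_caps_unref (constraint);

  return caps;
}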
*/ - if (!enc->priv->drained && klass->flush) - klass->flush (enc); - /* and get (re)set for the sequel */ - gst_base_audio_encoder_reset (enc, FALSE); - - g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL); - g_list_free (enc->priv->pending_events); - enc->priv->pending_events = NULL; - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - - break; - - case GST_EVENT_EOS: - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - gst_base_audio_encoder_drain (enc); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - break; - - default: - break; - } - - return handled; -} - -static gboolean -gst_base_audio_encoder_sink_event (GstPad * pad, GstEvent * event) -{ - GstBaseAudioEncoder *enc; - GstBaseAudioEncoderClass *klass; - gboolean handled = FALSE; - gboolean ret = TRUE; - - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - - GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event), - GST_EVENT_TYPE_NAME (event)); - - if (klass->event) - handled = klass->event (enc, event); - - if (!handled) - handled = gst_base_audio_encoder_sink_eventfunc (enc, event); - - if (!handled) { - /* Forward non-serialized events and EOS/FLUSH_STOP immediately. - * For EOS this is required because no buffer or serialized event - * will come after EOS and nothing could trigger another - * _finish_frame() call. - * - * For FLUSH_STOP this is required because it is expected - * to be forwarded immediately and no buffers are queued anyway. - */ - if (!GST_EVENT_IS_SERIALIZED (event) - || GST_EVENT_TYPE (event) == GST_EVENT_EOS - || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) { - ret = gst_pad_event_default (pad, event); - } else { - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (enc); - enc->priv->pending_events = - g_list_append (enc->priv->pending_events, event); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (enc); - ret = TRUE; - } - } - - GST_DEBUG_OBJECT (enc, "event handled"); - - gst_object_unref (enc); - return ret; -} - -static gboolean -gst_base_audio_encoder_sink_query (GstPad * pad, GstQuery * query) -{ - gboolean res = TRUE; - GstBaseAudioEncoder *enc; - - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_FORMATS: - { - gst_query_set_formats (query, 3, - GST_FORMAT_TIME, GST_FORMAT_BYTES, GST_FORMAT_DEFAULT); - res = TRUE; - break; - } - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - if (!(res = gst_base_audio_raw_audio_convert (&enc->ctx->state, - src_fmt, src_val, &dest_fmt, &dest_val))) - goto error; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } - -error: - gst_object_unref (enc); - return res; -} - -static const GstQueryType * -gst_base_audio_encoder_get_query_types (GstPad * pad) -{ - static const GstQueryType gst_base_audio_encoder_src_query_types[] = { - GST_QUERY_POSITION, - GST_QUERY_DURATION, - GST_QUERY_CONVERT, - GST_QUERY_LATENCY, - 0 - }; - - return gst_base_audio_encoder_src_query_types; -} - -/* FIXME ? are any of these queries (other than latency) an encoder's business - * also, the conversion stuff might seem to make sense, but seems to not mind - * segment stuff etc at all - * Supposedly that's backward compatibility ... 
*/ -static gboolean -gst_base_audio_encoder_src_query (GstPad * pad, GstQuery * query) -{ - GstBaseAudioEncoder *enc; - GstPad *peerpad; - gboolean res = FALSE; - - enc = GST_BASE_AUDIO_ENCODER (GST_PAD_PARENT (pad)); - peerpad = gst_pad_get_peer (GST_PAD (enc->sinkpad)); - - GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query); - - switch (GST_QUERY_TYPE (query)) { - case GST_QUERY_POSITION: - { - GstFormat fmt, req_fmt; - gint64 pos, val; - - if ((res = gst_pad_peer_query (enc->sinkpad, query))) { - GST_LOG_OBJECT (enc, "returning peer response"); - break; - } - - if (!peerpad) { - GST_LOG_OBJECT (enc, "no peer"); - break; - } - - gst_query_parse_position (query, &req_fmt, NULL); - fmt = GST_FORMAT_TIME; - if (!(res = gst_pad_query_position (peerpad, &fmt, &pos))) - break; - - if ((res = gst_pad_query_convert (peerpad, fmt, pos, &req_fmt, &val))) { - gst_query_set_position (query, req_fmt, val); - } - break; - } - case GST_QUERY_DURATION: - { - GstFormat fmt, req_fmt; - gint64 dur, val; - - if ((res = gst_pad_peer_query (enc->sinkpad, query))) { - GST_LOG_OBJECT (enc, "returning peer response"); - break; - } - - if (!peerpad) { - GST_LOG_OBJECT (enc, "no peer"); - break; - } - - gst_query_parse_duration (query, &req_fmt, NULL); - fmt = GST_FORMAT_TIME; - if (!(res = gst_pad_query_duration (peerpad, &fmt, &dur))) - break; - - if ((res = gst_pad_query_convert (peerpad, fmt, dur, &req_fmt, &val))) { - gst_query_set_duration (query, req_fmt, val); - } - break; - } - case GST_QUERY_FORMATS: - { - gst_query_set_formats (query, 2, GST_FORMAT_TIME, GST_FORMAT_BYTES); - res = TRUE; - break; - } - case GST_QUERY_CONVERT: - { - GstFormat src_fmt, dest_fmt; - gint64 src_val, dest_val; - - gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); - if (!(res = gst_base_audio_encoded_audio_convert (&enc->ctx->state, - enc->priv->bytes_out, enc->priv->samples_in, src_fmt, src_val, - &dest_fmt, &dest_val))) - break; - gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); - break; - } - case GST_QUERY_LATENCY: - { - if ((res = gst_pad_peer_query (enc->sinkpad, query))) { - gboolean live; - GstClockTime min_latency, max_latency; - - gst_query_parse_latency (query, &live, &min_latency, &max_latency); - GST_DEBUG_OBJECT (enc, "Peer latency: live %d, min %" - GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live, - GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency)); - - GST_OBJECT_LOCK (enc); - /* add our latency */ - if (min_latency != -1) - min_latency += enc->ctx->min_latency; - if (max_latency != -1) - max_latency += enc->ctx->max_latency; - GST_OBJECT_UNLOCK (enc); - - gst_query_set_latency (query, live, min_latency, max_latency); - } - break; - } - default: - res = gst_pad_query_default (pad, query); - break; - } - - gst_object_unref (peerpad); - return res; -} - -static void -gst_base_audio_encoder_set_property (GObject * object, guint prop_id, - const GValue * value, GParamSpec * pspec) -{ - GstBaseAudioEncoder *enc; - - enc = GST_BASE_AUDIO_ENCODER (object); - - switch (prop_id) { - case PROP_PERFECT_TS: - if (enc->granule && !g_value_get_boolean (value)) - GST_WARNING_OBJECT (enc, "perfect-ts can not be set FALSE"); - else - enc->perfect_ts = g_value_get_boolean (value); - break; - case PROP_HARD_RESYNC: - enc->hard_resync = g_value_get_boolean (value); - break; - case PROP_TOLERANCE: - enc->tolerance = g_value_get_int64 (value); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static void 
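The CONVERT handling above is also reachable through the regular element API; a small illustrative helper (names assumed) that maps an encoded byte count back to a time estimate:

static void
print_encoded_duration (GstElement * encoder, gint64 bytes)
{
  GstFormat dest_fmt = GST_FORMAT_TIME;
  gint64 dest_val;

  if (gst_element_query_convert (encoder, GST_FORMAT_BYTES, bytes,
          &dest_fmt, &dest_val))
    g_print ("%" G_GINT64_FORMAT " bytes ~ %" GST_TIME_FORMAT "\n",
        bytes, GST_TIME_ARGS (dest_val));
}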
-gst_base_audio_encoder_get_property (GObject * object, guint prop_id, - GValue * value, GParamSpec * pspec) -{ - GstBaseAudioEncoder *enc; - - enc = GST_BASE_AUDIO_ENCODER (object); - - switch (prop_id) { - case PROP_PERFECT_TS: - g_value_set_boolean (value, enc->perfect_ts); - break; - case PROP_GRANULE: - g_value_set_boolean (value, enc->granule); - break; - case PROP_HARD_RESYNC: - g_value_set_boolean (value, enc->hard_resync); - break; - case PROP_TOLERANCE: - g_value_set_int64 (value, enc->tolerance); - break; - default: - G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); - break; - } -} - -static gboolean -gst_base_audio_encoder_activate (GstBaseAudioEncoder * enc, gboolean active) -{ - GstBaseAudioEncoderClass *klass; - gboolean result = FALSE; - - klass = GST_BASE_AUDIO_ENCODER_GET_CLASS (enc); - - g_return_val_if_fail (!enc->granule || enc->perfect_ts, FALSE); - - GST_DEBUG_OBJECT (enc, "activate %d", active); - - if (active) { - if (!enc->priv->active && klass->start) - result = klass->start (enc); - } else { - /* We must make sure streaming has finished before resetting things - * and calling the ::stop vfunc */ - GST_PAD_STREAM_LOCK (enc->sinkpad); - GST_PAD_STREAM_UNLOCK (enc->sinkpad); - - if (enc->priv->active && klass->stop) - result = klass->stop (enc); - - /* clean up */ - gst_base_audio_encoder_reset (enc, TRUE); - } - GST_DEBUG_OBJECT (enc, "activate return: %d", result); - return result; -} - - -static gboolean -gst_base_audio_encoder_sink_activate_push (GstPad * pad, gboolean active) -{ - gboolean result = TRUE; - GstBaseAudioEncoder *enc; - - enc = GST_BASE_AUDIO_ENCODER (gst_pad_get_parent (pad)); - - GST_DEBUG_OBJECT (enc, "sink activate push %d", active); - - result = gst_base_audio_encoder_activate (enc, active); - - if (result) - enc->priv->active = active; - - GST_DEBUG_OBJECT (enc, "sink activate push return: %d", result); - - gst_object_unref (enc); - return result; -} diff --git a/omx/gstbaseaudioencoder.h b/omx/gstbaseaudioencoder.h deleted file mode 100644 index 90e624f6e8..0000000000 --- a/omx/gstbaseaudioencoder.h +++ /dev/null @@ -1,234 +0,0 @@ -/* GStreamer - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. - * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -#ifndef __GST_BASE_AUDIO_ENCODER_H__ -#define __GST_BASE_AUDIO_ENCODER_H__ - -#ifndef GST_USE_UNSTABLE_API -#warning "GstBaseAudioEncoder is unstable API and may change in future." -#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." 
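As the guard above indicates, code using this header is expected to opt in to the unstable API before including it, for example:

#define GST_USE_UNSTABLE_API 1
#include "gstbaseaudioencoder.h"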
-#endif - -#include -#include "gstbaseaudioutils.h" - -G_BEGIN_DECLS - -#define GST_TYPE_BASE_AUDIO_ENCODER (gst_base_audio_encoder_get_type()) -#define GST_BASE_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoder)) -#define GST_BASE_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoderClass)) -#define GST_BASE_AUDIO_ENCODER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_BASE_AUDIO_ENCODER,GstBaseAudioEncoderClass)) -#define GST_IS_BASE_AUDIO_ENCODER(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_ENCODER)) -#define GST_IS_BASE_AUDIO_ENCODER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_ENCODER)) -#define GST_BASE_AUDIO_ENCODER_CAST(obj) ((GstBaseAudioEncoder *)(obj)) - -/** - * GST_BASE_AUDIO_ENCODER_SINK_NAME: - * - * the name of the templates for the sink pad - */ -#define GST_BASE_AUDIO_ENCODER_SINK_NAME "sink" -/** - * GST_BASE_AUDIO_ENCODER_SRC_NAME: - * - * the name of the templates for the source pad - */ -#define GST_BASE_AUDIO_ENCODER_SRC_NAME "src" - -/** - * GST_BASE_AUDIO_ENCODER_SRC_PAD: - * @obj: base parse instance - * - * Gives the pointer to the source #GstPad object of the element. - * - * Since: 0.10.x - */ -#define GST_BASE_AUDIO_ENCODER_SRC_PAD(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->srcpad) - -/** - * GST_BASE_AUDIO_ENCODER_SINK_PAD: - * @obj: base parse instance - * - * Gives the pointer to the sink #GstPad object of the element. - * - * Since: 0.10.x - */ -#define GST_BASE_AUDIO_ENCODER_SINK_PAD(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->sinkpad) - -/** - * GST_BASE_AUDIO_ENCODER_SEGMENT: - * @obj: base parse instance - * - * Gives the segment of the element. - * - * Since: 0.10.x - */ -#define GST_BASE_AUDIO_ENCODER_SEGMENT(obj) (GST_BASE_AUDIO_ENCODER_CAST (obj)->segment) - -#define GST_BASE_AUDIO_ENCODER_STREAM_LOCK(enc) g_static_rec_mutex_lock (&GST_BASE_AUDIO_ENCODER (enc)->stream_lock) -#define GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK(enc) g_static_rec_mutex_unlock (&GST_BASE_AUDIO_ENCODER (enc)->stream_lock) - -typedef struct _GstBaseAudioEncoder GstBaseAudioEncoder; -typedef struct _GstBaseAudioEncoderClass GstBaseAudioEncoderClass; - -typedef struct _GstBaseAudioEncoderPrivate GstBaseAudioEncoderPrivate; -typedef struct _GstBaseAudioEncoderContext GstBaseAudioEncoderContext; - -/** - * GstBaseAudioEncoderContext: - * @state: a #GstAudioState describing input audio format - * @frame_samples_min: number of samples (per channel) subclass needs to be handed - * at least, or will be handed all available if 0. - * @frame_samples_max: number of samples (per channel) subclass needs to be handed - * at most, or will be handed all available if 0. - * @frame_max: max number of frames of size @frame_samples accepted at once - * (assumed minimally 1). Requires @frame_samples_min and @frame_samples_max - * to be the equal. - * @min_latency: min latency of element - * @max_latency: max latency of element - * @lookahead: encoder lookahead (in units of input rate samples) - * - * Transparent #GstBaseAudioEncoderContext data structure. - */ -struct _GstBaseAudioEncoderContext { - /* input */ - GstAudioState state; - - /* output */ - gint frame_samples_min, frame_samples_max; - gint frame_max; - gint lookahead; - /* MT-protected (with LOCK) */ - GstClockTime min_latency; - GstClockTime max_latency; -}; - -/** - * GstBaseAudioEncoder: - * @element: the parent element. - * - * The opaque #GstBaseAudioEncoder data structure. 
- */ -struct _GstBaseAudioEncoder { - GstElement element; - - /*< protected >*/ - /* source and sink pads */ - GstPad *sinkpad; - GstPad *srcpad; - - /* protects all data processing, i.e. is locked - * in the chain function, finish_frame and when - * processing serialized events */ - GStaticRecMutex stream_lock; - - /* MT-protected (with STREAM_LOCK) */ - GstSegment segment; - GstBaseAudioEncoderContext *ctx; - - /* properties */ - gint64 tolerance; - gboolean perfect_ts; - gboolean hard_resync; - gboolean granule; - - /*< private >*/ - GstBaseAudioEncoderPrivate *priv; - gpointer _gst_reserved[GST_PADDING_LARGE]; -}; - -/** - * GstBaseAudioEncoderClass: - * @start: Optional. - * Called when the element starts processing. - * Allows opening external resources. - * @stop: Optional. - * Called when the element stops processing. - * Allows closing external resources. - * @set_format: Notifies subclass of incoming data format. - * GstBaseAudioEncoderContext fields have already been - * set according to provided caps. - * @handle_frame: Provides input samples (or NULL to clear any remaining data) - * according to directions as provided by subclass in the - * #GstBaseAudioEncoderContext. Input data ref management - * is performed by base class, subclass should not care or - * intervene. - * @flush: Optional. - * Instructs subclass to clear any codec caches and discard - * any pending samples and not yet returned encoded data. - * @event: Optional. - * Event handler on the sink pad. This function should return - * TRUE if the event was handled and should be discarded - * (i.e. not unref'ed). - * @pre_push: Optional. - * Called just prior to pushing (encoded data) buffer downstream. - * Subclass has full discretionary access to buffer, - * and a not OK flow return will abort downstream pushing. - * @getcaps: Optional. - * Allows for a custom sink getcaps implementation (e.g. - * for multichannel input specification). If not implemented, - * default returns gst_base_audio_encoder_proxy_getcaps - * applied to sink template caps. - * - * Subclasses can override any of the available virtual methods or not, as - * needed. At minimum @set_format and @handle_frame needs to be overridden. - */ -struct _GstBaseAudioEncoderClass { - GstElementClass parent_class; - - /*< public >*/ - /* virtual methods for subclasses */ - - gboolean (*start) (GstBaseAudioEncoder *enc); - - gboolean (*stop) (GstBaseAudioEncoder *enc); - - gboolean (*set_format) (GstBaseAudioEncoder *enc, - GstAudioState *state); - - GstFlowReturn (*handle_frame) (GstBaseAudioEncoder *enc, - GstBuffer *buffer); - - void (*flush) (GstBaseAudioEncoder *enc); - - GstFlowReturn (*pre_push) (GstBaseAudioEncoder *enc, - GstBuffer **buffer); - - gboolean (*event) (GstBaseAudioEncoder *enc, - GstEvent *event); - - GstCaps * (*getcaps) (GstBaseAudioEncoder *enc); - - /*< private >*/ - gpointer _gst_reserved[GST_PADDING_LARGE]; -}; - -GType gst_base_audio_encoder_get_type (void); - -GstFlowReturn gst_base_audio_encoder_finish_frame (GstBaseAudioEncoder * enc, - GstBuffer *buffer, gint samples); - -GstCaps * gst_base_audio_encoder_proxy_getcaps (GstBaseAudioEncoder * enc, - GstCaps * caps); - -G_END_DECLS - -#endif /* __GST_BASE_AUDIO_ENCODER_H__ */ diff --git a/omx/gstbaseaudioutils.c b/omx/gstbaseaudioutils.c deleted file mode 100644 index a2eb72525a..0000000000 --- a/omx/gstbaseaudioutils.c +++ /dev/null @@ -1,315 +0,0 @@ -/* GStreamer - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. 
- * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -#include "gstbaseaudioutils.h" - -#include -#include - - -#define CHECK_VALUE(var, val) \ -G_STMT_START { \ - if (!res) \ - goto fail; \ - if (var != val) \ - changed = TRUE; \ - var = val; \ -} G_STMT_END - -/** - * gst_base_audio_parse_caps: - * @caps: a #GstCaps - * @state: a #GstAudioState - * @changed: whether @caps introduced a change in current @state - * - * Parses audio format as represented by @caps into a more concise form - * as represented by @state, while checking if for changes to currently - * defined audio format. - * - * Returns: TRUE if parsing succeeded, otherwise FALSE - */ -gboolean -gst_base_audio_parse_caps (GstCaps * caps, GstAudioState * state, - gboolean * _changed) -{ - gboolean res = TRUE, changed = FALSE; - GstStructure *s; - gboolean vb; - gint vi; - - g_return_val_if_fail (caps != NULL, FALSE); - g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE); - - s = gst_caps_get_structure (caps, 0); - if (gst_structure_has_name (s, "audio/x-raw-int")) - state->is_int = TRUE; - else if (gst_structure_has_name (s, "audio/x-raw-float")) - state->is_int = FALSE; - else - goto fail; - - res = gst_structure_get_int (s, "rate", &vi); - CHECK_VALUE (state->rate, vi); - res &= gst_structure_get_int (s, "channels", &vi); - CHECK_VALUE (state->channels, vi); - res &= gst_structure_get_int (s, "width", &vi); - CHECK_VALUE (state->width, vi); - res &= (!state->is_int || gst_structure_get_int (s, "depth", &vi)); - CHECK_VALUE (state->depth, vi); - res &= gst_structure_get_int (s, "endianness", &vi); - CHECK_VALUE (state->endian, vi); - res &= (!state->is_int || gst_structure_get_boolean (s, "signed", &vb)); - CHECK_VALUE (state->sign, vb); - - state->bpf = (state->width / 8) * state->channels; - GST_LOG ("bpf: %d", state->bpf); - if (!state->bpf) - goto fail; - - g_free (state->channel_pos); - state->channel_pos = gst_audio_get_channel_positions (s); - - if (_changed) - *_changed = changed; - - return res; - - /* ERRORS */ -fail: - { - /* there should not be caps out there that fail parsing ... */ - GST_WARNING ("failed to parse caps %" GST_PTR_FORMAT, caps); - return res; - } -} - -/** - * gst_base_audio_add_streamheader: - * @caps: a #GstCaps - * @buf: header buffers - * - * Adds given buffers to an array of buffers set as streamheader field - * on the given @caps. List of buffer arguments must be NULL-terminated. - * - * Returns: input caps with a streamheader field added, or NULL if some error - */ -GstCaps * -gst_base_audio_add_streamheader (GstCaps * caps, GstBuffer * buf, ...) 
-{ - GstStructure *structure = NULL; - va_list va; - GValue array = { 0 }; - GValue value = { 0 }; - - g_return_val_if_fail (caps != NULL, NULL); - g_return_val_if_fail (gst_caps_is_fixed (caps), NULL); - - caps = gst_caps_make_writable (caps); - structure = gst_caps_get_structure (caps, 0); - - g_value_init (&array, GST_TYPE_ARRAY); - - va_start (va, buf); - /* put buffers in a fixed list */ - while (buf) { - g_assert (gst_buffer_is_metadata_writable (buf)); - - /* mark buffer */ - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_IN_CAPS); - - g_value_init (&value, GST_TYPE_BUFFER); - buf = gst_buffer_copy (buf); - GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_IN_CAPS); - gst_value_set_buffer (&value, buf); - gst_buffer_unref (buf); - gst_value_array_append_value (&array, &value); - g_value_unset (&value); - - buf = va_arg (va, GstBuffer *); - } - - gst_structure_set_value (structure, "streamheader", &array); - g_value_unset (&array); - - return caps; -} - -/** - * gst_base_audio_encoded_audio_convert: - * @fmt: audio format of the encoded audio - * @bytes: number of encoded bytes - * @samples: number of encoded samples - * @src_format: source format - * @src_value: source value - * @dest_format: destination format - * @dest_value: destination format - * - * Helper function to convert @src_value in @src_format to @dest_value in - * @dest_format for encoded audio data. Conversion is possible between - * BYTE and TIME format by using estimated bitrate based on - * @samples and @bytes (and @fmt). - */ -gboolean -gst_base_audio_encoded_audio_convert (GstAudioState * fmt, - gint64 bytes, gint64 samples, GstFormat src_format, - gint64 src_value, GstFormat * dest_format, gint64 * dest_value) -{ - gboolean res = FALSE; - - g_return_val_if_fail (dest_format != NULL, FALSE); - g_return_val_if_fail (dest_value != NULL, FALSE); - - if (G_UNLIKELY (src_format == *dest_format || src_value == 0 || - src_value == -1)) { - if (dest_value) - *dest_value = src_value; - return TRUE; - } - - if (samples == 0 || bytes == 0 || fmt->rate == 0) { - GST_DEBUG ("not enough metadata yet to convert"); - goto exit; - } - - bytes *= fmt->rate; - - switch (src_format) { - case GST_FORMAT_BYTES: - switch (*dest_format) { - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale (src_value, - GST_SECOND * samples, bytes); - res = TRUE; - break; - default: - res = FALSE; - } - break; - case GST_FORMAT_TIME: - switch (*dest_format) { - case GST_FORMAT_BYTES: - *dest_value = gst_util_uint64_scale (src_value, bytes, - samples * GST_SECOND); - res = TRUE; - break; - default: - res = FALSE; - } - break; - default: - res = FALSE; - } - -exit: - return res; -} - -/** - * gst_base_audio_raw_audio_convert: - * @fmt: audio format of the encoded audio - * @src_format: source format - * @src_value: source value - * @dest_format: destination format - * @dest_value: destination format - * - * Helper function to convert @src_value in @src_format to @dest_value in - * @dest_format for encoded audio data. Conversion is possible between - * BYTE, DEFAULT and TIME format based on audio characteristics provided - * by @fmt. 
- */ -gboolean -gst_base_audio_raw_audio_convert (GstAudioState * fmt, GstFormat src_format, - gint64 src_value, GstFormat * dest_format, gint64 * dest_value) -{ - gboolean res = FALSE; - guint scale = 1; - gint bytes_per_sample, rate, byterate; - - g_return_val_if_fail (dest_format != NULL, FALSE); - g_return_val_if_fail (dest_value != NULL, FALSE); - - if (G_UNLIKELY (src_format == *dest_format || src_value == 0 || - src_value == -1)) { - if (dest_value) - *dest_value = src_value; - return TRUE; - } - - bytes_per_sample = fmt->bpf; - rate = fmt->rate; - byterate = bytes_per_sample * rate; - - if (G_UNLIKELY (bytes_per_sample == 0 || rate == 0)) { - GST_DEBUG ("not enough metadata yet to convert"); - goto exit; - } - - switch (src_format) { - case GST_FORMAT_BYTES: - switch (*dest_format) { - case GST_FORMAT_DEFAULT: - *dest_value = src_value / bytes_per_sample; - res = TRUE; - break; - case GST_FORMAT_TIME: - *dest_value = - gst_util_uint64_scale_int (src_value, GST_SECOND, byterate); - res = TRUE; - break; - default: - res = FALSE; - } - break; - case GST_FORMAT_DEFAULT: - switch (*dest_format) { - case GST_FORMAT_BYTES: - *dest_value = src_value * bytes_per_sample; - res = TRUE; - break; - case GST_FORMAT_TIME: - *dest_value = gst_util_uint64_scale_int (src_value, GST_SECOND, rate); - res = TRUE; - break; - default: - res = FALSE; - } - break; - case GST_FORMAT_TIME: - switch (*dest_format) { - case GST_FORMAT_BYTES: - scale = bytes_per_sample; - /* fallthrough */ - case GST_FORMAT_DEFAULT: - *dest_value = gst_util_uint64_scale_int (src_value, - scale * rate, GST_SECOND); - res = TRUE; - break; - default: - res = FALSE; - } - break; - default: - res = FALSE; - } - -exit: - return res; -} diff --git a/omx/gstbaseaudioutils.h b/omx/gstbaseaudioutils.h deleted file mode 100644 index ceba86a4f9..0000000000 --- a/omx/gstbaseaudioutils.h +++ /dev/null @@ -1,74 +0,0 @@ -/* GStreamer - * Copyright (C) 2011 Mark Nauwelaerts . - * Copyright (C) 2011 Nokia Corporation. All rights reserved. - * Contact: Stefan Kost - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. - */ - -#ifndef _GST_BASE_AUDIO_UTILS_H_ -#define _GST_BASE_AUDIO_UTILS_H_ - -#ifndef GST_USE_UNSTABLE_API -#warning "Base audio utils provide unstable API and may change in future." -#warning "You can define GST_USE_UNSTABLE_API to avoid this warning." 
-#endif - -#include -#include - -G_BEGIN_DECLS - -/** - * GstAudioState: - * @is_int: whether sample data is int or float - * @rate: rate of sample data - * @channels: number of channels in sample data - * @width: width (in bits) of sample data - * @depth: used bits in sample data (if integer) - * @sign: sign of sample data (if integer) - * @endian: endianness of sample data - * @bpf: bytes per audio frame - */ -typedef struct _GstAudioState { - gboolean is_int; - gint rate; - gint channels; - gint width; - gint depth; - gboolean sign; - gint endian; - GstAudioChannelPosition *channel_pos; - - gint bpf; -} GstAudioState; - -gboolean gst_base_audio_parse_caps (GstCaps * caps, - GstAudioState * state, gboolean * changed); - -GstCaps *gst_base_audio_add_streamheader (GstCaps * caps, GstBuffer * buf, ...); - -gboolean gst_base_audio_encoded_audio_convert (GstAudioState * fmt, - gint64 bytes, gint64 samples, GstFormat src_format, - gint64 src_value, GstFormat * dest_format, gint64 * dest_value); - -gboolean gst_base_audio_raw_audio_convert (GstAudioState * fmt, GstFormat src_format, - gint64 src_value, GstFormat * dest_format, gint64 * dest_value); - -G_END_DECLS - -#endif - diff --git a/omx/gstomxaacenc.c b/omx/gstomxaacenc.c index 684d4b5509..d1aa797d7d 100644 --- a/omx/gstomxaacenc.c +++ b/omx/gstomxaacenc.c @@ -36,11 +36,11 @@ static void gst_omx_aac_enc_set_property (GObject * object, guint prop_id, static void gst_omx_aac_enc_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); static gboolean gst_omx_aac_enc_set_format (GstOMXAudioEnc * enc, - GstOMXPort * port, GstAudioState * state); + GstOMXPort * port, GstAudioInfo * info); static GstCaps *gst_omx_aac_enc_get_caps (GstOMXAudioEnc * enc, - GstOMXPort * port, GstAudioState * state); + GstOMXPort * port, GstAudioInfo * info); static guint gst_omx_aac_enc_get_num_samples (GstOMXAudioEnc * enc, - GstOMXPort * port, GstAudioState * state, GstOMXBuffer * buf); + GstOMXPort * port, GstAudioInfo * info, GstOMXBuffer * buf); enum { @@ -228,7 +228,7 @@ gst_omx_aac_enc_get_property (GObject * object, guint prop_id, GValue * value, static gboolean gst_omx_aac_enc_set_format (GstOMXAudioEnc * enc, GstOMXPort * port, - GstAudioState * state) + GstAudioInfo * info) { GstOMXAACEnc *self = GST_OMX_AAC_ENC (enc); OMX_AUDIO_PARAM_AACPROFILETYPE aac_profile; @@ -250,7 +250,7 @@ gst_omx_aac_enc_set_format (GstOMXAudioEnc * enc, GstOMXPort * port, return FALSE; } - peercaps = gst_pad_peer_get_caps (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + peercaps = gst_pad_peer_get_caps (GST_AUDIO_ENCODER_SRC_PAD (self)); if (peercaps) { GstCaps *intersection; GstStructure *s; @@ -259,7 +259,7 @@ gst_omx_aac_enc_set_format (GstOMXAudioEnc * enc, GstOMXPort * port, intersection = gst_caps_intersect (peercaps, - gst_pad_get_pad_template_caps (GST_BASE_AUDIO_ENCODER_SRC_PAD (self))); + gst_pad_get_pad_template_caps (GST_AUDIO_ENCODER_SRC_PAD (self))); gst_caps_unref (peercaps); if (gst_caps_is_empty (intersection)) { gst_caps_unref (intersection); @@ -340,7 +340,7 @@ gst_omx_aac_enc_set_format (GstOMXAudioEnc * enc, GstOMXPort * port, static GstCaps * gst_omx_aac_enc_get_caps (GstOMXAudioEnc * enc, GstOMXPort * port, - GstAudioState * state) + GstAudioInfo * info) { GstCaps *caps; OMX_ERRORTYPE err; @@ -437,7 +437,7 @@ gst_omx_aac_enc_get_caps (GstOMXAudioEnc * enc, GstOMXPort * port, static guint gst_omx_aac_enc_get_num_samples (GstOMXAudioEnc * enc, GstOMXPort * port, - GstAudioState * state, GstOMXBuffer * buf) + GstAudioInfo * info, 
GstOMXBuffer * buf) { /* FIXME: Depends on the profile at least */ return 1024; diff --git a/omx/gstomxaudioenc.c b/omx/gstomxaudioenc.c index beab2b2f27..a8207c9e6a 100644 --- a/omx/gstomxaudioenc.c +++ b/omx/gstomxaudioenc.c @@ -37,15 +37,15 @@ static GstStateChangeReturn gst_omx_audio_enc_change_state (GstElement * element, GstStateChange transition); -static gboolean gst_omx_audio_enc_start (GstBaseAudioEncoder * encoder); -static gboolean gst_omx_audio_enc_stop (GstBaseAudioEncoder * encoder); -static gboolean gst_omx_audio_enc_set_format (GstBaseAudioEncoder * encoder, - GstAudioState * state); -static gboolean gst_omx_audio_enc_event (GstBaseAudioEncoder * encoder, +static gboolean gst_omx_audio_enc_start (GstAudioEncoder * encoder); +static gboolean gst_omx_audio_enc_stop (GstAudioEncoder * encoder); +static gboolean gst_omx_audio_enc_set_format (GstAudioEncoder * encoder, + GstAudioInfo * info); +static gboolean gst_omx_audio_enc_event (GstAudioEncoder * encoder, GstEvent * event); -static GstFlowReturn gst_omx_audio_enc_handle_frame (GstBaseAudioEncoder * +static GstFlowReturn gst_omx_audio_enc_handle_frame (GstAudioEncoder * encoder, GstBuffer * buffer); -static void gst_omx_audio_enc_flush (GstBaseAudioEncoder * encoder); +static void gst_omx_audio_enc_flush (GstAudioEncoder * encoder); static GstFlowReturn gst_omx_audio_enc_drain (GstOMXAudioEnc * self); @@ -60,8 +60,8 @@ enum GST_DEBUG_CATEGORY_INIT (gst_omx_audio_enc_debug_category, "omxaudioenc", 0, \ "debug category for gst-omx audio encoder base class"); -GST_BOILERPLATE_FULL (GstOMXAudioEnc, gst_omx_audio_enc, GstBaseAudioEncoder, - GST_TYPE_BASE_AUDIO_ENCODER, DEBUG_INIT); +GST_BOILERPLATE_FULL (GstOMXAudioEnc, gst_omx_audio_enc, GstAudioEncoder, + GST_TYPE_AUDIO_ENCODER, DEBUG_INIT); static void gst_omx_audio_enc_base_init (gpointer g_class) @@ -203,22 +203,21 @@ gst_omx_audio_enc_class_init (GstOMXAudioEncClass * klass) { GObjectClass *gobject_class = G_OBJECT_CLASS (klass); GstElementClass *element_class = GST_ELEMENT_CLASS (klass); - GstBaseAudioEncoderClass *base_audio_encoder_class = - GST_BASE_AUDIO_ENCODER_CLASS (klass); + GstAudioEncoderClass *audio_encoder_class = GST_AUDIO_ENCODER_CLASS (klass); gobject_class->finalize = gst_omx_audio_enc_finalize; element_class->change_state = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_change_state); - base_audio_encoder_class->start = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_start); - base_audio_encoder_class->stop = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_stop); - base_audio_encoder_class->flush = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_flush); - base_audio_encoder_class->set_format = + audio_encoder_class->start = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_start); + audio_encoder_class->stop = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_stop); + audio_encoder_class->flush = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_flush); + audio_encoder_class->set_format = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_set_format); - base_audio_encoder_class->handle_frame = + audio_encoder_class->handle_frame = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_handle_frame); - base_audio_encoder_class->event = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_event); + audio_encoder_class->event = GST_DEBUG_FUNCPTR (gst_omx_audio_enc_event); klass->default_sink_template_caps = "audio/x-raw-int, " "rate = (int) [ 1, MAX ], " @@ -432,31 +431,32 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) return; } - if (!GST_PAD_CAPS (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)) + if (!GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (self)) || acq_return == 
GST_OMX_ACQUIRE_BUFFER_RECONFIGURED) { - GstAudioState *state = &GST_BASE_AUDIO_ENCODER (self)->ctx->state; + GstAudioInfo *info = + gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self)); GstCaps *caps; GST_DEBUG_OBJECT (self, "Port settings have changed, updating caps"); - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); - caps = klass->get_caps (self, self->out_port, state); + GST_AUDIO_ENCODER_STREAM_LOCK (self); + caps = klass->get_caps (self, self->out_port, info); if (!caps) { if (buf) gst_omx_port_release_buffer (self->out_port, buf); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); goto caps_failed; } - if (!gst_pad_set_caps (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), caps)) { + if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) { gst_caps_unref (caps); if (buf) gst_omx_port_release_buffer (self->out_port, buf); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); goto caps_failed; } gst_caps_unref (caps); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); /* Now get a buffer */ if (acq_return != GST_OMX_ACQUIRE_BUFFER_OK) @@ -468,7 +468,7 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) GST_DEBUG_OBJECT (self, "Handling buffer: 0x%08x %lu", buf->omx_buf->nFlags, buf->omx_buf->nTimeStamp); - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); + GST_AUDIO_ENCODER_STREAM_LOCK (self); is_eos = ! !(buf->omx_buf->nFlags & OMX_BUFFERFLAG_EOS); if ((buf->omx_buf->nFlags & OMX_BUFFERFLAG_CODECCONFIG) @@ -476,18 +476,18 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) GstCaps *caps; GstBuffer *codec_data; - caps = gst_caps_copy (GST_PAD_CAPS (GST_BASE_AUDIO_ENCODER_SRC_PAD (self))); + caps = gst_caps_copy (GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (self))); codec_data = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen); memcpy (GST_BUFFER_DATA (codec_data), buf->omx_buf->pBuffer + buf->omx_buf->nOffset, buf->omx_buf->nFilledLen); gst_caps_set_simple (caps, "codec_data", GST_TYPE_BUFFER, codec_data, NULL); - if (!gst_pad_set_caps (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), caps)) { + if (!gst_pad_set_caps (GST_AUDIO_ENCODER_SRC_PAD (self), caps)) { gst_caps_unref (caps); if (buf) gst_omx_port_release_buffer (self->out_port, buf); - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); goto caps_failed; } gst_caps_unref (caps); @@ -498,7 +498,7 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) n_samples = klass->get_num_samples (self, self->out_port, - &GST_BASE_AUDIO_ENCODER (self)->ctx->state, buf); + gst_audio_encoder_get_audio_info (GST_AUDIO_ENCODER (self)), buf); if (buf->omx_buf->nFilledLen > 0) { outbuf = gst_buffer_new_and_alloc (buf->omx_buf->nFilledLen); @@ -511,7 +511,7 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) } gst_buffer_set_caps (outbuf, - GST_PAD_CAPS (GST_BASE_AUDIO_ENCODER_SRC_PAD (self))); + GST_PAD_CAPS (GST_AUDIO_ENCODER_SRC_PAD (self))); GST_BUFFER_TIMESTAMP (outbuf) = gst_util_uint64_scale (buf->omx_buf->nTimeStamp, GST_SECOND, @@ -522,7 +522,7 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) OMX_TICKS_PER_SECOND); flow_ret = - gst_base_audio_encoder_finish_frame (GST_BASE_AUDIO_ENCODER (self), + gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (self), outbuf, n_samples); } @@ -548,7 +548,7 @@ gst_omx_audio_enc_loop (GstOMXAudioEnc * self) if (flow_ret != GST_FLOW_OK) goto flow_error; - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); return; @@ -558,9 +558,8 @@ component_error: 
("OpenMAX component in error state %s (0x%08x)", gst_omx_component_get_last_error_string (self->component), gst_omx_component_get_last_error (self->component))); - gst_pad_push_event (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), - gst_event_new_eos ()); - gst_pad_pause_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ()); + gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self)); self->downstream_flow_ret = GST_FLOW_ERROR; self->started = FALSE; return; @@ -568,7 +567,7 @@ component_error: flushing: { GST_DEBUG_OBJECT (self, "Flushing -- stopping task"); - gst_pad_pause_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self)); self->downstream_flow_ret = GST_FLOW_WRONG_STATE; self->started = FALSE; return; @@ -578,29 +577,28 @@ flow_error: if (flow_ret == GST_FLOW_UNEXPECTED) { GST_DEBUG_OBJECT (self, "EOS"); - gst_pad_push_event (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), + gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ()); - gst_pad_pause_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self)); } else if (flow_ret == GST_FLOW_NOT_LINKED || flow_ret < GST_FLOW_UNEXPECTED) { GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Internal data stream error."), ("stream stopped, reason %s", gst_flow_get_name (flow_ret))); - gst_pad_push_event (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), + gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ()); - gst_pad_pause_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self)); } self->started = FALSE; - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); return; } reconfigure_error: { GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL), ("Unable to reconfigure output port")); - gst_pad_push_event (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), - gst_event_new_eos ()); - gst_pad_pause_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ()); + gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self)); self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED; self->started = FALSE; return; @@ -608,9 +606,8 @@ reconfigure_error: caps_failed: { GST_ELEMENT_ERROR (self, LIBRARY, SETTINGS, (NULL), ("Failed to set caps")); - gst_pad_push_event (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), - gst_event_new_eos ()); - gst_pad_pause_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); + gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (self), gst_event_new_eos ()); + gst_pad_pause_task (GST_AUDIO_ENCODER_SRC_PAD (self)); self->downstream_flow_ret = GST_FLOW_NOT_NEGOTIATED; self->started = FALSE; return; @@ -618,7 +615,7 @@ caps_failed: } static gboolean -gst_omx_audio_enc_start (GstBaseAudioEncoder * encoder) +gst_omx_audio_enc_start (GstAudioEncoder * encoder) { GstOMXAudioEnc *self; gboolean ret; @@ -628,14 +625,14 @@ gst_omx_audio_enc_start (GstBaseAudioEncoder * encoder) self->eos = FALSE; self->downstream_flow_ret = GST_FLOW_OK; ret = - gst_pad_start_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), + gst_pad_start_task (GST_AUDIO_ENCODER_SRC_PAD (self), (GstTaskFunction) gst_omx_audio_enc_loop, self); return ret; } static gboolean -gst_omx_audio_enc_stop (GstBaseAudioEncoder * encoder) +gst_omx_audio_enc_stop (GstAudioEncoder * encoder) { GstOMXAudioEnc *self; @@ -646,7 +643,7 @@ gst_omx_audio_enc_stop (GstBaseAudioEncoder * encoder) gst_omx_port_set_flushing (self->in_port, 
TRUE); gst_omx_port_set_flushing (self->out_port, TRUE); - gst_pad_stop_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (encoder)); + gst_pad_stop_task (GST_AUDIO_ENCODER_SRC_PAD (encoder)); if (gst_omx_component_get_state (self->component, 0) > OMX_StateIdle) gst_omx_component_set_state (self->component, OMX_StateIdle); @@ -666,8 +663,7 @@ gst_omx_audio_enc_stop (GstBaseAudioEncoder * encoder) } static gboolean -gst_omx_audio_enc_set_format (GstBaseAudioEncoder * encoder, - GstAudioState * state) +gst_omx_audio_enc_set_format (GstAudioEncoder * encoder, GstAudioInfo * info) { GstOMXAudioEnc *self; GstOMXAudioEncClass *klass; @@ -683,10 +679,10 @@ gst_omx_audio_enc_set_format (GstBaseAudioEncoder * encoder, GST_DEBUG_OBJECT (self, "Setting new caps"); /* Set audio encoder base class properties */ - encoder->ctx->frame_samples_min = + gst_audio_encoder_set_frame_samples_min (encoder, gst_util_uint64_scale_ceil (OMX_MIN_PCMPAYLOAD_MSEC, - GST_MSECOND * state->rate, GST_SECOND); - encoder->ctx->frame_samples_max = 0; + GST_MSECOND * info->rate, GST_SECOND)); + gst_audio_encoder_set_frame_samples_max (encoder, 0); gst_omx_port_get_port_definition (self->in_port, &port_def); @@ -714,20 +710,22 @@ gst_omx_audio_enc_set_format (GstBaseAudioEncoder * encoder, GST_OMX_INIT_STRUCT (&pcm_param); pcm_param.nPortIndex = self->in_port->index; - pcm_param.nChannels = state->channels; + pcm_param.nChannels = info->channels; pcm_param.eNumData = - (state->sign ? OMX_NumericalDataSigned : OMX_NumericalDataUnsigned); + ((info->finfo->flags & GST_AUDIO_FORMAT_FLAG_SIGNED) ? + OMX_NumericalDataSigned : OMX_NumericalDataUnsigned); pcm_param.eEndian = - ((state->endian == G_LITTLE_ENDIAN) ? OMX_EndianLittle : OMX_EndianBig); + ((info->finfo->endianness == G_LITTLE_ENDIAN) ? + OMX_EndianLittle : OMX_EndianBig); pcm_param.bInterleaved = OMX_TRUE; - pcm_param.nBitPerSample = state->width; - pcm_param.nSamplingRate = state->rate; + pcm_param.nBitPerSample = info->finfo->width; + pcm_param.nSamplingRate = info->rate; pcm_param.ePCMMode = OMX_AUDIO_PCMModeLinear; for (i = 0; i < pcm_param.nChannels; i++) { OMX_AUDIO_CHANNELTYPE pos; - switch (state->channel_pos[i]) { + switch (info->position[i]) { case GST_AUDIO_CHANNEL_POSITION_FRONT_MONO: case GST_AUDIO_CHANNEL_POSITION_FRONT_CENTER: pos = OMX_AUDIO_ChannelCF; @@ -773,7 +771,7 @@ gst_omx_audio_enc_set_format (GstBaseAudioEncoder * encoder, } if (klass->set_format) { - if (!klass->set_format (self, self->in_port, state)) { + if (!klass->set_format (self, self->in_port, info)) { GST_ERROR_OBJECT (self, "Subclass failed to set the new format"); return FALSE; } @@ -821,14 +819,14 @@ gst_omx_audio_enc_set_format (GstBaseAudioEncoder * encoder, /* Start the srcpad loop again */ self->downstream_flow_ret = GST_FLOW_OK; - gst_pad_start_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), + gst_pad_start_task (GST_AUDIO_ENCODER_SRC_PAD (self), (GstTaskFunction) gst_omx_audio_enc_loop, encoder); return TRUE; } static void -gst_omx_audio_enc_flush (GstBaseAudioEncoder * encoder) +gst_omx_audio_enc_flush (GstAudioEncoder * encoder) { GstOMXAudioEnc *self; @@ -842,10 +840,10 @@ gst_omx_audio_enc_flush (GstBaseAudioEncoder * encoder) gst_omx_port_set_flushing (self->out_port, TRUE); /* Wait until the srcpad loop is finished */ - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); - GST_PAD_STREAM_LOCK (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); - GST_PAD_STREAM_UNLOCK (GST_BASE_AUDIO_ENCODER_SRC_PAD (self)); - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); + 
GST_PAD_STREAM_LOCK (GST_AUDIO_ENCODER_SRC_PAD (self)); + GST_PAD_STREAM_UNLOCK (GST_AUDIO_ENCODER_SRC_PAD (self)); + GST_AUDIO_ENCODER_STREAM_LOCK (self); gst_omx_port_set_flushing (self->in_port, FALSE); gst_omx_port_set_flushing (self->out_port, FALSE); @@ -853,13 +851,12 @@ gst_omx_audio_enc_flush (GstBaseAudioEncoder * encoder) /* Start the srcpad loop again */ self->downstream_flow_ret = GST_FLOW_OK; self->eos = FALSE; - gst_pad_start_task (GST_BASE_AUDIO_ENCODER_SRC_PAD (self), + gst_pad_start_task (GST_AUDIO_ENCODER_SRC_PAD (self), (GstTaskFunction) gst_omx_audio_enc_loop, encoder); } static GstFlowReturn -gst_omx_audio_enc_handle_frame (GstBaseAudioEncoder * encoder, - GstBuffer * inbuf) +gst_omx_audio_enc_handle_frame (GstAudioEncoder * encoder, GstBuffer * inbuf) { GstOMXAcquireBufferReturn acq_ret = GST_OMX_ACQUIRE_BUFFER_ERROR; GstOMXAudioEnc *self; @@ -893,9 +890,9 @@ gst_omx_audio_enc_handle_frame (GstBaseAudioEncoder * encoder, /* Make sure to release the base class stream lock, otherwise * _loop() can't call _finish_frame() and we might block forever * because no input buffers are released */ - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); acq_ret = gst_omx_port_acquire_buffer (self->in_port, &buf); - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); + GST_AUDIO_ENCODER_STREAM_LOCK (self); if (acq_ret == GST_OMX_ACQUIRE_BUFFER_ERROR) { goto component_error; @@ -990,7 +987,7 @@ reconfigure_error: } static gboolean -gst_omx_audio_enc_event (GstBaseAudioEncoder * encoder, GstEvent * event) +gst_omx_audio_enc_event (GstAudioEncoder * encoder, GstEvent * event) { GstOMXAudioEnc *self; @@ -1012,7 +1009,7 @@ gst_omx_audio_enc_event (GstBaseAudioEncoder * encoder, GstEvent * event) /* Make sure to release the base class stream lock, otherwise * _loop() can't call _finish_frame() and we might block forever * because no input buffers are released */ - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); /* Send an EOS buffer to the component and let the base * class drop the EOS event. We will send it later when @@ -1026,7 +1023,7 @@ gst_omx_audio_enc_event (GstBaseAudioEncoder * encoder, GstEvent * event) GST_ERROR_OBJECT (self, "Failed to acquire buffer for EOS: %d", acq_ret); } - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); + GST_AUDIO_ENCODER_STREAM_LOCK (self); return FALSE; } @@ -1057,14 +1054,14 @@ gst_omx_audio_enc_drain (GstOMXAudioEnc * self) /* Make sure to release the base class stream lock, otherwise * _loop() can't call _finish_frame() and we might block forever * because no input buffers are released */ - GST_BASE_AUDIO_ENCODER_STREAM_UNLOCK (self); + GST_AUDIO_ENCODER_STREAM_UNLOCK (self); /* Send an EOS buffer to the component and let the base * class drop the EOS event. We will send it later when * the EOS buffer arrives on the output port. 
*/ acq_ret = gst_omx_port_acquire_buffer (self->in_port, &buf); if (acq_ret != GST_OMX_ACQUIRE_BUFFER_OK) { - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); + GST_AUDIO_ENCODER_STREAM_LOCK (self); GST_ERROR_OBJECT (self, "Failed to acquire buffer for draining: %d", acq_ret); return GST_FLOW_ERROR; @@ -1078,7 +1075,7 @@ gst_omx_audio_enc_drain (GstOMXAudioEnc * self) g_cond_wait (self->drain_cond, self->drain_lock); GST_DEBUG_OBJECT (self, "Drained component"); g_mutex_unlock (self->drain_lock); - GST_BASE_AUDIO_ENCODER_STREAM_LOCK (self); + GST_AUDIO_ENCODER_STREAM_LOCK (self); self->started = FALSE; diff --git a/omx/gstomxaudioenc.h b/omx/gstomxaudioenc.h index 2e6852e082..c325faf833 100644 --- a/omx/gstomxaudioenc.h +++ b/omx/gstomxaudioenc.h @@ -22,7 +22,7 @@ #define __GST_OMX_AUDIO_ENC_H__ #include <gst/gst.h> -#include "gstbaseaudioencoder.h" +#include <gst/audio/gstaudioencoder.h> #include "gstomx.h" @@ -46,7 +46,7 @@ typedef struct _GstOMXAudioEncClass GstOMXAudioEncClass; struct _GstOMXAudioEnc { - GstBaseAudioEncoder parent; + GstAudioEncoder parent; /* < protected > */ GstOMXCore *core; @@ -72,7 +72,7 @@ struct _GstOMXAudioEnc struct _GstOMXAudioEncClass { - GstBaseAudioEncoderClass parent_class; + GstAudioEncoderClass parent_class; const gchar *core_name; const gchar *component_name; @@ -85,9 +85,9 @@ struct _GstOMXAudioEncClass guint64 hacks; - gboolean (*set_format) (GstOMXAudioEnc * self, GstOMXPort * port, GstAudioState * state); - GstCaps *(*get_caps) (GstOMXAudioEnc * self, GstOMXPort * port, GstAudioState * state); - guint (*get_num_samples) (GstOMXAudioEnc * self, GstOMXPort * port, GstAudioState * state, GstOMXBuffer * buffer); + gboolean (*set_format) (GstOMXAudioEnc * self, GstOMXPort * port, GstAudioInfo * info); + GstCaps *(*get_caps) (GstOMXAudioEnc * self, GstOMXPort * port, GstAudioInfo * info); + guint (*get_num_samples) (GstOMXAudioEnc * self, GstOMXPort * port, GstAudioInfo * info, GstOMXBuffer * buffer); }; GType gst_omx_audio_enc_get_type (void);
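
For reference, a minimal sketch of a GstAudioEncoder (0.10) subclass wired up the same way gstomxaudioenc.c is after this change. Everything below is hypothetical illustration, not code from this patch or from gst-plugins-base: the my_enc_* names, the fixed 1024-sample frame size, and the "passthrough" encoding are placeholders.

#include <string.h>
#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <gst/audio/gstaudioencoder.h>

typedef struct _MyEnc
{
  GstAudioEncoder parent;
  gint bpf;                     /* bytes per audio frame, taken from GstAudioInfo */
} MyEnc;

typedef struct _MyEncClass
{
  GstAudioEncoderClass parent_class;
} MyEncClass;

GST_BOILERPLATE (MyEnc, my_enc, GstAudioEncoder, GST_TYPE_AUDIO_ENCODER);

static gboolean
my_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
{
  MyEnc *self = (MyEnc *) enc;

  /* GstAudioInfo replaces the old GstAudioState; fields like rate,
   * channels and bpf are read from the info structure */
  self->bpf = info->bpf;

  /* base class bookkeeping that used to live in enc->ctx
   * (1024 is an arbitrary placeholder) */
  gst_audio_encoder_set_frame_samples_min (enc, 1024);
  gst_audio_encoder_set_frame_samples_max (enc, 1024);
  return TRUE;
}

static GstFlowReturn
my_enc_handle_frame (GstAudioEncoder * enc, GstBuffer * buf)
{
  MyEnc *self = (MyEnc *) enc;
  GstBuffer *outbuf;

  /* NULL means "drain any pending data"; nothing is buffered here */
  if (buf == NULL)
    return GST_FLOW_OK;

  /* pretend the raw input is already the encoded output (passthrough) */
  outbuf = gst_buffer_new_and_alloc (GST_BUFFER_SIZE (buf));
  memcpy (GST_BUFFER_DATA (outbuf), GST_BUFFER_DATA (buf),
      GST_BUFFER_SIZE (buf));

  /* tell the base class how many input samples this output accounts for */
  return gst_audio_encoder_finish_frame (enc, outbuf,
      GST_BUFFER_SIZE (buf) / self->bpf);
}

static void
my_enc_base_init (gpointer g_class)
{
  /* a real element would also add "sink"/"src" pad templates and
   * element details here */
}

static void
my_enc_class_init (MyEncClass * klass)
{
  GstAudioEncoderClass *enc_class = GST_AUDIO_ENCODER_CLASS (klass);

  enc_class->set_format = GST_DEBUG_FUNCPTR (my_enc_set_format);
  enc_class->handle_frame = GST_DEBUG_FUNCPTR (my_enc_handle_frame);
}

static void
my_enc_init (MyEnc * self, MyEncClass * klass)
{
}

Compared to the deleted GstBaseAudioEncoder copy, mostly the type and accessor names change; the set_format/handle_frame/finish_frame semantics carry over largely unchanged, which is why the port in this patch is close to a mechanical rename.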