a52dec: port to audiodecoder

Author: Mark Nauwelaerts
Date:   2011-12-14 17:33:52 +01:00
commit d55d4054bd, parent 260824b278

3 changed files with 246 additions and 426 deletions

Makefile.am

@@ -3,11 +3,14 @@ plugin_LTLIBRARIES = libgsta52dec.la
 libgsta52dec_la_SOURCES = gsta52dec.c
 libgsta52dec_la_CFLAGS = \
 	$(GST_PLUGINS_BASE_CFLAGS) \
+	$(GST_BASE_CFLAGS) \
 	$(GST_CFLAGS) \
 	$(ORC_CFLAGS) \
 	$(A52DEC_CFLAGS)
 libgsta52dec_la_LIBADD = \
 	$(GST_PLUGINS_BASE_LIBS) \
+	$(GST_BASE_LIBS) \
+	$(GST_LIBS) \
 	-lgstaudio-$(GST_MAJORMINOR) \
 	$(ORC_LIBS) \
 	$(A52DEC_LIBS)

gsta52dec.c

@@ -86,14 +86,20 @@ static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
     "rate = (int) [ 4000, 96000 ], " "channels = (int) [ 1, 6 ]")
     );
 
-GST_BOILERPLATE (GstA52Dec, gst_a52dec, GstElement, GST_TYPE_ELEMENT);
+GST_BOILERPLATE (GstA52Dec, gst_a52dec, GstAudioDecoder,
+    GST_TYPE_AUDIO_DECODER);
 
+static gboolean gst_a52dec_start (GstAudioDecoder * dec);
+static gboolean gst_a52dec_stop (GstAudioDecoder * dec);
+static gboolean gst_a52dec_set_format (GstAudioDecoder * bdec, GstCaps * caps);
+static GstFlowReturn gst_a52dec_parse (GstAudioDecoder * dec,
+    GstAdapter * adapter, gint * offset, gint * length);
+static GstFlowReturn gst_a52dec_handle_frame (GstAudioDecoder * dec,
+    GstBuffer * buffer);
+static GstFlowReturn gst_a52dec_pre_push (GstAudioDecoder * bdec,
+    GstBuffer ** buffer);
 static GstFlowReturn gst_a52dec_chain (GstPad * pad, GstBuffer * buffer);
-static GstFlowReturn gst_a52dec_chain_raw (GstPad * pad, GstBuffer * buf);
-static gboolean gst_a52dec_sink_setcaps (GstPad * pad, GstCaps * caps);
-static gboolean gst_a52dec_sink_event (GstPad * pad, GstEvent * event);
-static GstStateChangeReturn gst_a52dec_change_state (GstElement * element,
-    GstStateChange transition);
 static void gst_a52dec_set_property (GObject * object, guint prop_id,
     const GValue * value, GParamSpec * pspec);
@@ -143,16 +149,21 @@ static void
 gst_a52dec_class_init (GstA52DecClass * klass)
 {
   GObjectClass *gobject_class;
-  GstElementClass *gstelement_class;
+  GstAudioDecoderClass *gstbase_class;
   guint cpuflags;
 
   gobject_class = (GObjectClass *) klass;
-  gstelement_class = (GstElementClass *) klass;
+  gstbase_class = (GstAudioDecoderClass *) klass;
 
   gobject_class->set_property = gst_a52dec_set_property;
   gobject_class->get_property = gst_a52dec_get_property;
 
-  gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_a52dec_change_state);
+  gstbase_class->start = GST_DEBUG_FUNCPTR (gst_a52dec_start);
+  gstbase_class->stop = GST_DEBUG_FUNCPTR (gst_a52dec_stop);
+  gstbase_class->set_format = GST_DEBUG_FUNCPTR (gst_a52dec_set_format);
+  gstbase_class->parse = GST_DEBUG_FUNCPTR (gst_a52dec_parse);
+  gstbase_class->handle_frame = GST_DEBUG_FUNCPTR (gst_a52dec_handle_frame);
+  gstbase_class->pre_push = GST_DEBUG_FUNCPTR (gst_a52dec_pre_push);
 
   /**
    * GstA52Dec::drc
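
For context: the single change_state override disappears because the GstAudioDecoder base class now drives the element through the virtual methods wired up above. The block below is an illustrative sketch, not part of the patch (the typedef name is made up); it restates the subset of the 0.10 GstAudioDecoderClass vtable used here, together with the contract each hook is expected to honour. The real declarations live in gst/audio/gstaudiodecoder.h.

    #include <gst/gst.h>
    #include <gst/base/gstadapter.h>
    #include <gst/audio/gstaudiodecoder.h>

    /* Illustrative subset of the GstAudioDecoderClass vtable (0.10). */
    typedef struct {
      /* allocate / free the liba52 decoder state */
      gboolean      (*start)        (GstAudioDecoder * dec);
      gboolean      (*stop)         (GstAudioDecoder * dec);
      /* new sink caps, e.g. the DVD private-stream variant */
      gboolean      (*set_format)   (GstAudioDecoder * dec, GstCaps * caps);
      /* frame the byte stream: skip *offset bytes, next frame is *length bytes */
      GstFlowReturn (*parse)        (GstAudioDecoder * dec, GstAdapter * adapter,
                                     gint * offset, gint * length);
      /* decode exactly one parsed frame */
      GstFlowReturn (*handle_frame) (GstAudioDecoder * dec, GstBuffer * buffer);
      /* last touch on an outgoing buffer (here: emit pending tags) */
      GstFlowReturn (*pre_push)     (GstAudioDecoder * dec, GstBuffer ** buffer);
    } A52DecUsedVfuncs;
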
@@ -213,27 +224,106 @@ gst_a52dec_class_init (GstA52DecClass * klass)
 static void
 gst_a52dec_init (GstA52Dec * a52dec, GstA52DecClass * g_class)
 {
-  /* create the sink and src pads */
-  a52dec->sinkpad = gst_pad_new_from_static_template (&sink_factory, "sink");
-  gst_pad_set_setcaps_function (a52dec->sinkpad,
-      GST_DEBUG_FUNCPTR (gst_a52dec_sink_setcaps));
-  gst_pad_set_chain_function (a52dec->sinkpad,
-      GST_DEBUG_FUNCPTR (gst_a52dec_chain));
-  gst_pad_set_event_function (a52dec->sinkpad,
-      GST_DEBUG_FUNCPTR (gst_a52dec_sink_event));
-  gst_element_add_pad (GST_ELEMENT (a52dec), a52dec->sinkpad);
-
-  a52dec->srcpad = gst_pad_new_from_static_template (&src_factory, "src");
-  gst_pad_use_fixed_caps (a52dec->srcpad);
-  gst_element_add_pad (GST_ELEMENT (a52dec), a52dec->srcpad);
-
   a52dec->request_channels = A52_CHANNEL;
   a52dec->dynamic_range_compression = FALSE;
 
   a52dec->state = NULL;
   a52dec->samples = NULL;
-  gst_segment_init (&a52dec->segment, GST_FORMAT_UNDEFINED);
+
+  /* retrieve and intercept base class chain.
+   * Quite HACKish, but that's dvd specs/caps for you,
+   * since one buffer needs to be split into 2 frames */
+  a52dec->base_chain = GST_PAD_CHAINFUNC (GST_AUDIO_DECODER_SINK_PAD (a52dec));
+  gst_pad_set_chain_function (GST_AUDIO_DECODER_SINK_PAD (a52dec),
+      GST_DEBUG_FUNCPTR (gst_a52dec_chain));
+}
+
+static gboolean
+gst_a52dec_start (GstAudioDecoder * dec)
+{
+  GstA52Dec *a52dec = GST_A52DEC (dec);
+  GstA52DecClass *klass;
+
+  GST_DEBUG_OBJECT (dec, "start");
+
+  klass = GST_A52DEC_CLASS (G_OBJECT_GET_CLASS (a52dec));
+  a52dec->state = a52_init (klass->a52_cpuflags);
+  a52dec->samples = a52_samples (a52dec->state);
+  a52dec->bit_rate = -1;
+  a52dec->sample_rate = -1;
+  a52dec->stream_channels = A52_CHANNEL;
+  a52dec->using_channels = A52_CHANNEL;
+  a52dec->level = 1;
+  a52dec->bias = 0;
+  a52dec->flag_update = TRUE;
+
+  /* call upon legacy upstream byte support (e.g. seeking) */
+  gst_audio_decoder_set_byte_time (dec, TRUE);
+
+  return TRUE;
+}
+
+static gboolean
+gst_a52dec_stop (GstAudioDecoder * dec)
+{
+  GstA52Dec *a52dec = GST_A52DEC (dec);
+
+  GST_DEBUG_OBJECT (dec, "stop");
+
+  a52dec->samples = NULL;
+  if (a52dec->state) {
+    a52_free (a52dec->state);
+    a52dec->state = NULL;
+  }
+  if (a52dec->pending_tags) {
+    gst_tag_list_free (a52dec->pending_tags);
+    a52dec->pending_tags = NULL;
+  }
+
+  return TRUE;
+}
+
+static GstFlowReturn
+gst_a52dec_parse (GstAudioDecoder * bdec, GstAdapter * adapter,
+    gint * _offset, gint * len)
+{
+  GstA52Dec *a52dec;
+  guint8 *data;
+  gint av, size;
+  gint length = 0, flags, sample_rate, bit_rate;
+  GstFlowReturn result = GST_FLOW_UNEXPECTED;
+
+  a52dec = GST_A52DEC (bdec);
+
+  size = av = gst_adapter_available (adapter);
+  data = (guint8 *) gst_adapter_peek (adapter, av);
+
+  /* find and read header */
+  bit_rate = a52dec->bit_rate;
+  sample_rate = a52dec->sample_rate;
+  flags = 0;
+  while (size >= 7) {
+    length = a52_syncinfo (data, &flags, &sample_rate, &bit_rate);
+    if (length == 0) {
+      /* shift window to re-find sync */
+      data++;
+      size--;
+    } else if (length <= size) {
+      GST_LOG_OBJECT (a52dec, "Sync: frame size %d", length);
+      result = GST_FLOW_OK;
+      break;
+    } else {
+      GST_LOG_OBJECT (a52dec, "Not enough data available (needed %d had %d)",
+          length, size);
+      break;
+    }
+  }
+
+  *_offset = av - size;
+  *len = length;
+
+  return result;
 }
 
 static gint
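
The parse() hook above implements the base class framing contract: report in *_offset how many leading bytes of the adapter are junk to discard, and in *len how long the next complete A/52 frame is; the base class then accumulates that many bytes and calls handle_frame() with exactly one frame. A small sketch of the arithmetic involved, with made-up numbers (not patch content):

    #include <glib.h>

    static void
    parse_contract_sketch (void)
    {
      gint av = 2000;        /* bytes currently sitting in the GstAdapter      */
      gint size = av;        /* scan window, shrinks while hunting for sync    */
      gint skipped = 3;      /* hypothetical junk bytes before the sync word   */
      gint frame_len = 1536; /* hypothetical a52_syncinfo() frame length       */

      size -= skipped;                    /* the data++ / size-- loop above    */

      g_assert (av - size == skipped);    /* reported back as *_offset         */
      g_assert (frame_len <= size);       /* enough data: GST_FLOW_OK, *len set */
    }
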
@@ -324,106 +414,6 @@ gst_a52dec_channels (int flags, GstAudioChannelPosition ** _pos)
   return chans;
 }
 
-static void
-clear_queued (GstA52Dec * dec)
-{
-  g_list_foreach (dec->queued, (GFunc) gst_mini_object_unref, NULL);
-  g_list_free (dec->queued);
-  dec->queued = NULL;
-}
-
-static GstFlowReturn
-flush_queued (GstA52Dec * dec)
-{
-  GstFlowReturn ret = GST_FLOW_OK;
-
-  while (dec->queued) {
-    GstBuffer *buf = GST_BUFFER_CAST (dec->queued->data);
-
-    GST_LOG_OBJECT (dec, "pushing buffer %p, timestamp %"
-        GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, buf,
-        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
-        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
-
-    /* iterate output queue and push downstream */
-    ret = gst_pad_push (dec->srcpad, buf);
-
-    dec->queued = g_list_delete_link (dec->queued, dec->queued);
-  }
-  return ret;
-}
-
-static GstFlowReturn
-gst_a52dec_drain (GstA52Dec * dec)
-{
-  GstFlowReturn ret = GST_FLOW_OK;
-
-  if (dec->segment.rate < 0.0) {
-    /* if we have some queued frames for reverse playback, flush
-     * them now */
-    ret = flush_queued (dec);
-  }
-  return ret;
-}
-
-static GstFlowReturn
-gst_a52dec_push (GstA52Dec * a52dec,
-    GstPad * srcpad, int flags, sample_t * samples, GstClockTime timestamp)
-{
-  GstBuffer *buf;
-  int chans, n, c;
-  GstFlowReturn result;
-
-  flags &= (A52_CHANNEL_MASK | A52_LFE);
-  chans = gst_a52dec_channels (flags, NULL);
-  if (!chans) {
-    GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
-        ("invalid channel flags: %d", flags));
-    return GST_FLOW_ERROR;
-  }
-
-  result =
-      gst_pad_alloc_buffer_and_set_caps (srcpad, 0,
-      256 * chans * (SAMPLE_WIDTH / 8), GST_PAD_CAPS (srcpad), &buf);
-  if (result != GST_FLOW_OK)
-    return result;
-
-  for (n = 0; n < 256; n++) {
-    for (c = 0; c < chans; c++) {
-      ((sample_t *) GST_BUFFER_DATA (buf))[n * chans + c] =
-          samples[c * 256 + n];
-    }
-  }
-
-  GST_BUFFER_TIMESTAMP (buf) = timestamp;
-  GST_BUFFER_DURATION (buf) = 256 * GST_SECOND / a52dec->sample_rate;
-
-  result = GST_FLOW_OK;
-  if ((buf = gst_audio_buffer_clip (buf, &a52dec->segment,
-              a52dec->sample_rate, (SAMPLE_WIDTH / 8) * chans))) {
-    /* set discont when needed */
-    if (a52dec->discont) {
-      GST_LOG_OBJECT (a52dec, "marking DISCONT");
-      GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
-      a52dec->discont = FALSE;
-    }
-
-    if (a52dec->segment.rate > 0.0) {
-      GST_DEBUG_OBJECT (a52dec,
-          "Pushing buffer with ts %" GST_TIME_FORMAT " duration %"
-          GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
-          GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
-      result = gst_pad_push (srcpad, buf);
-    } else {
-      /* reverse playback, queue frame till later when we get a discont. */
-      GST_DEBUG_OBJECT (a52dec, "queued frame");
-      a52dec->queued = g_list_prepend (a52dec->queued, buf);
-    }
-  }
-  return result;
-}
-
 static gboolean
 gst_a52dec_reneg (GstA52Dec * a52dec, GstPad * pad)
 {
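
The removed clear_queued() / flush_queued() / gst_a52dec_drain() / gst_a52dec_push() helpers are not reimplemented elsewhere in the patch: their work (timestamping, segment clipping, reverse-playback queueing and the pad push itself) is what the base class performs when the subclass calls gst_audio_decoder_finish_frame(). A minimal sketch of the replacement output path, assuming an already filled, interleaved buffer (the function name is illustrative):

    #include <gst/audio/gstaudiodecoder.h>

    static GstFlowReturn
    push_decoded_frame_sketch (GstAudioDecoder * dec, GstBuffer * outbuf)
    {
      /* '1' means this output corresponds to one parsed input frame */
      return gst_audio_decoder_finish_frame (dec, outbuf, 1);
    }
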
@@ -457,102 +447,69 @@ done:
   return result;
 }
 
-static gboolean
-gst_a52dec_sink_event (GstPad * pad, GstEvent * event)
-{
-  GstA52Dec *a52dec = GST_A52DEC (gst_pad_get_parent (pad));
-  gboolean ret = FALSE;
-
-  GST_LOG ("Handling %s event", GST_EVENT_TYPE_NAME (event));
-
-  switch (GST_EVENT_TYPE (event)) {
-    case GST_EVENT_NEWSEGMENT:
-    {
-      GstFormat fmt;
-      gboolean update;
-      gint64 start, end, pos;
-      gdouble rate, arate;
-
-      gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
-          &start, &end, &pos);
-
-      /* drain queued buffers before activating the segment so that we can clip
-       * against the old segment first */
-      gst_a52dec_drain (a52dec);
-
-      if (fmt != GST_FORMAT_TIME || !GST_CLOCK_TIME_IS_VALID (start)) {
-        GST_WARNING ("No time in newsegment event %p (format is %s)",
-            event, gst_format_get_name (fmt));
-        gst_event_unref (event);
-        a52dec->sent_segment = FALSE;
-        /* set some dummy values, FIXME: do proper conversion */
-        a52dec->time = start = pos = 0;
-        fmt = GST_FORMAT_TIME;
-        end = -1;
-      } else {
-        a52dec->time = start;
-        a52dec->sent_segment = TRUE;
-        GST_DEBUG_OBJECT (a52dec,
-            "Pushing newseg rate %g, applied rate %g, format %d, start %"
-            G_GINT64_FORMAT ", stop %" G_GINT64_FORMAT ", pos %"
-            G_GINT64_FORMAT, rate, arate, fmt, start, end, pos);
-
-        ret = gst_pad_push_event (a52dec->srcpad, event);
-      }
-      gst_segment_set_newsegment (&a52dec->segment, update, rate, fmt, start,
-          end, pos);
-      break;
-    }
-    case GST_EVENT_TAG:
-      ret = gst_pad_push_event (a52dec->srcpad, event);
-      break;
-    case GST_EVENT_EOS:
-      gst_a52dec_drain (a52dec);
-      ret = gst_pad_push_event (a52dec->srcpad, event);
-      break;
-    case GST_EVENT_FLUSH_START:
-      ret = gst_pad_push_event (a52dec->srcpad, event);
-      break;
-    case GST_EVENT_FLUSH_STOP:
-      if (a52dec->cache) {
-        gst_buffer_unref (a52dec->cache);
-        a52dec->cache = NULL;
-      }
-      clear_queued (a52dec);
-      gst_segment_init (&a52dec->segment, GST_FORMAT_UNDEFINED);
-      ret = gst_pad_push_event (a52dec->srcpad, event);
-      break;
-    default:
-      ret = gst_pad_push_event (a52dec->srcpad, event);
-      break;
-  }
-
-  gst_object_unref (a52dec);
-
-  return ret;
-}
-
 static void
 gst_a52dec_update_streaminfo (GstA52Dec * a52dec)
 {
   GstTagList *taglist;
 
   taglist = gst_tag_list_new ();
 
-  gst_tag_list_add (taglist, GST_TAG_MERGE_APPEND,
-      GST_TAG_AUDIO_CODEC, "Dolby Digital (AC-3)",
-      GST_TAG_BITRATE, (guint) a52dec->bit_rate, NULL);
-
-  gst_element_found_tags_for_pad (GST_ELEMENT (a52dec),
-      GST_PAD (a52dec->srcpad), taglist);
+  gst_tag_list_add (taglist, GST_TAG_MERGE_APPEND, GST_TAG_BITRATE,
+      (guint) a52dec->bit_rate, NULL);
+
+  if (a52dec->pending_tags) {
+    gst_tag_list_free (a52dec->pending_tags);
+    a52dec->pending_tags = NULL;
+  }
+
+  a52dec->pending_tags = taglist;
 }
 
 static GstFlowReturn
-gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
-    guint length, gint flags, gint sample_rate, gint bit_rate)
+gst_a52dec_pre_push (GstAudioDecoder * bdec, GstBuffer ** buffer)
 {
+  GstA52Dec *a52dec = GST_A52DEC (bdec);
+
+  if (G_UNLIKELY (a52dec->pending_tags)) {
+    gst_element_found_tags_for_pad (GST_ELEMENT (a52dec),
+        GST_AUDIO_DECODER_SRC_PAD (a52dec), a52dec->pending_tags);
+    a52dec->pending_tags = NULL;
+  }
+
+  return GST_FLOW_OK;
+}
+
+static GstFlowReturn
+gst_a52dec_handle_frame (GstAudioDecoder * bdec, GstBuffer * buffer)
+{
+  GstA52Dec *a52dec;
   gint channels, i;
   gboolean need_reneg = FALSE;
+  gint size, chans;
+  gint length = 0, flags, sample_rate, bit_rate;
+  guint8 *data;
+  GstFlowReturn result = GST_FLOW_OK;
+  GstBuffer *outbuf;
+  const gint num_blocks = 6;
+
+  a52dec = GST_A52DEC (bdec);
+
+  /* no fancy draining */
+  if (G_UNLIKELY (!buffer))
+    return GST_FLOW_OK;
+
+  /* parsed stuff already, so this should work out fine */
+  data = GST_BUFFER_DATA (buffer);
+  size = GST_BUFFER_SIZE (buffer);
+  g_assert (size >= 7);
+
+  /* re-obtain some sync header info,
+   * should be same as during _parse and could also be cached there,
+   * but anyway ... */
+  bit_rate = a52dec->bit_rate;
+  sample_rate = a52dec->sample_rate;
+  flags = 0;
+  length = a52_syncinfo (data, &flags, &sample_rate, &bit_rate);
+  g_assert (length == size);
 
   /* update stream information, renegotiate or re-streaminfo if needed */
   need_reneg = FALSE;
@@ -582,7 +539,7 @@ gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
     a52dec->flag_update = FALSE;
 
-    caps = gst_pad_get_allowed_caps (a52dec->srcpad);
+    caps = gst_pad_get_allowed_caps (GST_AUDIO_DECODER_SRC_PAD (a52dec));
     if (caps && gst_caps_get_size (caps) > 0) {
       GstCaps *copy = gst_caps_copy_nth (caps, 0);
       GstStructure *structure = gst_caps_get_structure (copy, 0);
@@ -618,14 +575,16 @@ gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
   } else {
     flags = a52dec->using_channels;
   }
+
   /* process */
   flags |= A52_ADJUST_LEVEL;
   a52dec->level = 1;
   if (a52_frame (a52dec->state, data, &flags, &a52dec->level, a52dec->bias)) {
-    GST_WARNING ("a52_frame error");
-    a52dec->discont = TRUE;
-    return GST_FLOW_OK;
+    GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
+        ("a52_frame error"), result);
+    goto exit;
   }
+
   channels = flags & (A52_CHANNEL_MASK | A52_LFE);
   if (a52dec->using_channels != channels) {
     need_reneg = TRUE;
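
The GST_WARNING + discont pattern is replaced by GST_AUDIO_DECODER_ERROR, which feeds the base class error-tolerance accounting instead of silently skipping bad frames. A hedged sketch of how the macro is used (the helper name is made up): it adds the given weight to the decoder's error count and only turns the ret argument into a fatal flow return once the configured tolerance is exceeded.

    #include <gst/audio/gstaudiodecoder.h>

    static GstFlowReturn
    tolerant_decode_error_sketch (GstAudioDecoder * dec)
    {
      GstFlowReturn ret = GST_FLOW_OK;

      /* (element, weight, domain, code, text, debug, ret) */
      GST_AUDIO_DECODER_ERROR (dec, 1, STREAM, DECODE, (NULL),
          ("simulated decode error"), ret);

      return ret;   /* still GST_FLOW_OK while within tolerance */
    }
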
@@ -634,43 +593,74 @@ gst_a52dec_handle_frame (GstA52Dec * a52dec, guint8 * data,
   /* negotiate if required */
   if (need_reneg) {
-    GST_DEBUG ("a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
+    GST_DEBUG_OBJECT (a52dec,
+        "a52dec reneg: sample_rate:%d stream_chans:%d using_chans:%d",
         a52dec->sample_rate, a52dec->stream_channels, a52dec->using_channels);
-    if (!gst_a52dec_reneg (a52dec, a52dec->srcpad)) {
-      GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
-      return GST_FLOW_ERROR;
-    }
+    if (!gst_a52dec_reneg (a52dec, GST_AUDIO_DECODER_SRC_PAD (a52dec)))
+      goto failed_negotiation;
   }
 
   if (a52dec->dynamic_range_compression == FALSE) {
     a52_dynrng (a52dec->state, NULL, NULL);
   }
 
-  /* each frame consists of 6 blocks */
-  for (i = 0; i < 6; i++) {
+  flags &= (A52_CHANNEL_MASK | A52_LFE);
+  chans = gst_a52dec_channels (flags, NULL);
+  if (!chans)
+    goto invalid_flags;
+
+  /* handle decoded data;
+   * each frame has 6 blocks, each block is 256 samples */
+  result =
+      gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_DECODER_SRC_PAD (a52dec), 0,
+      256 * chans * (SAMPLE_WIDTH / 8) * num_blocks,
+      GST_PAD_CAPS (GST_AUDIO_DECODER_SRC_PAD (a52dec)), &outbuf);
+  if (result != GST_FLOW_OK)
+    goto exit;
+
+  data = GST_BUFFER_DATA (outbuf);
+  for (i = 0; i < num_blocks; i++) {
     if (a52_block (a52dec->state)) {
-      /* ignore errors but mark a discont */
-      GST_WARNING ("a52_block error %d", i);
-      a52dec->discont = TRUE;
+      /* also marks discont */
+      GST_AUDIO_DECODER_ERROR (a52dec, 1, STREAM, DECODE, (NULL),
+          ("error decoding block %d", i), result);
+      if (result != GST_FLOW_OK)
+        goto exit;
     } else {
-      GstFlowReturn ret;
+      gint n, c;
 
-      /* push on */
-      ret = gst_a52dec_push (a52dec, a52dec->srcpad, a52dec->using_channels,
-          a52dec->samples, a52dec->time);
-      if (ret != GST_FLOW_OK)
-        return ret;
+      for (n = 0; n < 256; n++) {
+        for (c = 0; c < chans; c++) {
+          ((sample_t *) data)[n * chans + c] = a52dec->samples[c * 256 + n];
+        }
+      }
     }
-    a52dec->time += 256 * GST_SECOND / a52dec->sample_rate;
+    data += 256 * chans * (SAMPLE_WIDTH / 8);
   }
 
-  return GST_FLOW_OK;
+  result = gst_audio_decoder_finish_frame (bdec, outbuf, 1);
+
+exit:
+  return result;
+
+  /* ERRORS */
+failed_negotiation:
+  {
+    GST_ELEMENT_ERROR (a52dec, CORE, NEGOTIATION, (NULL), (NULL));
+    return GST_FLOW_ERROR;
+  }
+invalid_flags:
+  {
+    GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
+        ("Invalid channel flags: %d", flags));
+    return GST_FLOW_ERROR;
+  }
 }
 
 static gboolean
-gst_a52dec_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_a52dec_set_format (GstAudioDecoder * bdec, GstCaps * caps)
 {
-  GstA52Dec *a52dec = GST_A52DEC (gst_pad_get_parent (pad));
+  GstA52Dec *a52dec = GST_A52DEC (bdec);
   GstStructure *structure;
 
   structure = gst_caps_get_structure (caps, 0);
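
liba52 hands back each block of 256 samples per channel in planar order, while the negotiated audio/x-raw-float output is interleaved, hence the n/c copy loop above; all six blocks of a frame are written into one buffer of 256 * chans * (SAMPLE_WIDTH / 8) * num_blocks bytes and finished with a single gst_audio_decoder_finish_frame() call. A stripped-down sketch of the interleave step, assuming sample_t is a 32-bit float as the SAMPLE_WIDTH-based sizing suggests:

    /* copy one planar 256-sample block (planar[channel][sample]) into the
     * packed, interleaved output */
    static void
    interleave_block_sketch (float * dst, const float * planar, int chans)
    {
      int n, c;

      for (n = 0; n < 256; n++)
        for (c = 0; c < chans; c++)
          dst[n * chans + c] = planar[c * 256 + n];
    }
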
@@ -680,8 +670,6 @@ gst_a52dec_sink_setcaps (GstPad * pad, GstCaps * caps)
   else
     a52dec->dvdmode = FALSE;
 
-  gst_object_unref (a52dec);
-
   return TRUE;
 }
@@ -689,20 +677,9 @@ static GstFlowReturn
 gst_a52dec_chain (GstPad * pad, GstBuffer * buf)
 {
   GstA52Dec *a52dec = GST_A52DEC (GST_PAD_PARENT (pad));
-  GstFlowReturn ret;
+  GstFlowReturn ret = GST_FLOW_OK;
   gint first_access;
 
-  if (GST_BUFFER_IS_DISCONT (buf)) {
-    GST_LOG_OBJECT (a52dec, "received DISCONT");
-    gst_a52dec_drain (a52dec);
-    /* clear cache on discont and mark a discont in the element */
-    if (a52dec->cache) {
-      gst_buffer_unref (a52dec->cache);
-      a52dec->cache = NULL;
-    }
-    a52dec->discont = TRUE;
-  }
-
   if (a52dec->dvdmode) {
     gint size = GST_BUFFER_SIZE (buf);
     guchar *data = GST_BUFFER_DATA (buf);
@@ -726,33 +703,36 @@ gst_a52dec_chain (GstPad * pad, GstBuffer * buf)
         goto bad_first_access_parameter;
 
       subbuf = gst_buffer_create_sub (buf, offset, len);
+      gst_buffer_copy_metadata (subbuf, buf, GST_BUFFER_COPY_ALL);
       GST_BUFFER_TIMESTAMP (subbuf) = GST_CLOCK_TIME_NONE;
-      ret = gst_a52dec_chain_raw (pad, subbuf);
-      if (ret != GST_FLOW_OK)
+      ret = a52dec->base_chain (pad, subbuf);
+      if (ret != GST_FLOW_OK) {
+        gst_buffer_unref (buf);
         goto done;
+      }
 
       offset += len;
       len = size - offset;
       if (len > 0) {
         subbuf = gst_buffer_create_sub (buf, offset, len);
+        gst_buffer_copy_metadata (subbuf, buf, GST_BUFFER_COPY_ALL);
         GST_BUFFER_TIMESTAMP (subbuf) = GST_BUFFER_TIMESTAMP (buf);
 
-        ret = gst_a52dec_chain_raw (pad, subbuf);
+        ret = a52dec->base_chain (pad, subbuf);
       }
+      gst_buffer_unref (buf);
     } else {
       /* first_access = 0 or 1, so if there's a timestamp it applies to the first byte */
       subbuf = gst_buffer_create_sub (buf, offset, size - offset);
-      GST_BUFFER_TIMESTAMP (subbuf) = GST_BUFFER_TIMESTAMP (buf);
-      ret = gst_a52dec_chain_raw (pad, subbuf);
+      gst_buffer_copy_metadata (subbuf, buf, GST_BUFFER_COPY_ALL);
+      ret = a52dec->base_chain (pad, subbuf);
     }
   } else {
-    gst_buffer_ref (buf);
-    ret = gst_a52dec_chain_raw (pad, buf);
+    ret = a52dec->base_chain (pad, buf);
   }
 
 done:
-  gst_buffer_unref (buf);
   return ret;
 
   /* ERRORS */
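
The intercepted chain function keeps the old DVD handling: in dvdmode a buffer starts with a 2-byte first_access pointer, and when first_access > 1 the payload is fed to the (saved) base class chain function as two sub-buffers, so that the buffer timestamp lands on the frame first_access points at rather than on the leading bytes. A compact sketch of the split arithmetic, with illustrative names:

    #include <glib.h>

    static void
    dvd_split_sketch (gint size, gint first_access, gint * len1, gint * len2)
    {
      gint offset = 2;                     /* skip the first_access field      */

      if (first_access > 1) {
        *len1 = first_access - 1;          /* pushed with no timestamp         */
        *len2 = size - (offset + *len1);   /* carries the buffer timestamp     */
      } else {
        *len1 = 0;                         /* nothing precedes the frame       */
        *len2 = size - offset;             /* timestamp applies to first byte  */
      }
    }
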
@@ -772,162 +752,6 @@ bad_first_access_parameter:
   }
 }
 
-static GstFlowReturn
-gst_a52dec_chain_raw (GstPad * pad, GstBuffer * buf)
-{
-  GstA52Dec *a52dec;
-  guint8 *data;
-  guint size;
-  gint length = 0, flags, sample_rate, bit_rate;
-  GstFlowReturn result = GST_FLOW_OK;
-
-  a52dec = GST_A52DEC (GST_PAD_PARENT (pad));
-
-  if (!a52dec->sent_segment) {
-    GstSegment segment;
-
-    /* Create a basic segment. Usually, we'll get a new-segment sent by
-     * another element that will know more information (a demuxer). If we're
-     * just looking at a raw AC3 stream, we won't - so we need to send one
-     * here, but we don't know much info, so just send a minimal TIME
-     * new-segment event
-     */
-    gst_segment_init (&segment, GST_FORMAT_TIME);
-    gst_pad_push_event (a52dec->srcpad, gst_event_new_new_segment (FALSE,
-            segment.rate, segment.format, segment.start,
-            segment.duration, segment.start));
-    a52dec->sent_segment = TRUE;
-  }
-
-  /* merge with cache, if any. Also make sure timestamps match */
-  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
-    a52dec->time = GST_BUFFER_TIMESTAMP (buf);
-    GST_DEBUG_OBJECT (a52dec,
-        "Received buffer with ts %" GST_TIME_FORMAT " duration %"
-        GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
-        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
-  }
-
-  if (a52dec->cache) {
-    buf = gst_buffer_join (a52dec->cache, buf);
-    a52dec->cache = NULL;
-  }
-
-  data = GST_BUFFER_DATA (buf);
-  size = GST_BUFFER_SIZE (buf);
-
-  /* find and read header */
-  bit_rate = a52dec->bit_rate;
-  sample_rate = a52dec->sample_rate;
-  flags = 0;
-  while (size >= 7) {
-    length = a52_syncinfo (data, &flags, &sample_rate, &bit_rate);
-    if (length == 0) {
-      /* no sync */
-      data++;
-      size--;
-    } else if (length <= size) {
-      GST_DEBUG ("Sync: %d", length);
-
-      if (flags != a52dec->prev_flags)
-        a52dec->flag_update = TRUE;
-      a52dec->prev_flags = flags;
-
-      result = gst_a52dec_handle_frame (a52dec, data,
-          length, flags, sample_rate, bit_rate);
-      if (result != GST_FLOW_OK) {
-        size = 0;
-        break;
-      }
-      size -= length;
-      data += length;
-    } else {
-      /* not enough data */
-      GST_LOG ("Not enough data available");
-      break;
-    }
-  }
-
-  /* keep cache */
-  if (length == 0) {
-    GST_LOG ("No sync found");
-  }
-
-  if (size > 0) {
-    a52dec->cache = gst_buffer_create_sub (buf,
-        GST_BUFFER_SIZE (buf) - size, size);
-  }
-
-  gst_buffer_unref (buf);
-
-  return result;
-}
-
-static GstStateChangeReturn
-gst_a52dec_change_state (GstElement * element, GstStateChange transition)
-{
-  GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS;
-  GstA52Dec *a52dec = GST_A52DEC (element);
-
-  switch (transition) {
-    case GST_STATE_CHANGE_NULL_TO_READY:{
-      GstA52DecClass *klass;
-
-      klass = GST_A52DEC_CLASS (G_OBJECT_GET_CLASS (a52dec));
-      a52dec->state = a52_init (klass->a52_cpuflags);
-      if (!a52dec->state) {
-        GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
-            ("Failed to initialize a52 state"));
-        ret = GST_STATE_CHANGE_FAILURE;
-      }
-      break;
-    }
-    case GST_STATE_CHANGE_READY_TO_PAUSED:
-      a52dec->samples = a52_samples (a52dec->state);
-      a52dec->bit_rate = -1;
-      a52dec->sample_rate = -1;
-      a52dec->stream_channels = A52_CHANNEL;
-      a52dec->using_channels = A52_CHANNEL;
-      a52dec->level = 1;
-      a52dec->bias = 0;
-      a52dec->time = 0;
-      a52dec->sent_segment = FALSE;
-      a52dec->flag_update = TRUE;
-      gst_segment_init (&a52dec->segment, GST_FORMAT_UNDEFINED);
-      break;
-    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
-      break;
-    default:
-      break;
-  }
-
-  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
-
-  switch (transition) {
-    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
-      break;
-    case GST_STATE_CHANGE_PAUSED_TO_READY:
-      a52dec->samples = NULL;
-      if (a52dec->cache) {
-        gst_buffer_unref (a52dec->cache);
-        a52dec->cache = NULL;
-      }
-      clear_queued (a52dec);
-      break;
-    case GST_STATE_CHANGE_READY_TO_NULL:
-      if (a52dec->state) {
-        a52_free (a52dec->state);
-        a52dec->state = NULL;
-      }
-      break;
-    default:
-      break;
-  }
-
-  return ret;
-}
-
 static void
 gst_a52dec_set_property (GObject * object, guint prop_id, const GValue * value,
     GParamSpec * pspec)

gsta52dec.h

@@ -22,6 +22,7 @@
 #define __GST_A52DEC_H__
 
 #include <gst/gst.h>
+#include <gst/audio/gstaudiodecoder.h>
 
 G_BEGIN_DECLS
@@ -40,41 +41,33 @@ typedef struct _GstA52Dec GstA52Dec;
 typedef struct _GstA52DecClass GstA52DecClass;
 
 struct _GstA52Dec {
-  GstElement element;
-
-  /* pads */
-  GstPad *sinkpad,
-      *srcpad;
+  GstAudioDecoder element;
 
-  GstSegment segment;
+  GstPadChainFunction base_chain;
 
   gboolean dvdmode;
-  gboolean sent_segment;
-  gboolean discont;
-
   gboolean flag_update;
   int prev_flags;
 
-  /* stream properties */
   int bit_rate;
   int sample_rate;
   int stream_channels;
   int request_channels;
   int using_channels;
-
-  /* decoding properties */
   sample_t level;
   sample_t bias;
   gboolean dynamic_range_compression;
   sample_t *samples;
   a52_state_t *state;
 
-  GstBuffer *cache;
-  GstClockTime time;
-
-  /* reverse */
-  GList *queued;
+  GstTagList *pending_tags;
 };
 
 struct _GstA52DecClass {
-  GstElementClass parent_class;
+  GstAudioDecoderClass parent_class;
 
   guint32 a52_cpuflags;
 };