Merge branch 'master' into 0.11

Conflicts:
	gst-libs/gst/audio/gstaudiodecoder.c
	gst-libs/gst/audio/gstaudioencoder.c
	gst/encoding/gstencodebin.c
Wim Taymans 2011-09-26 19:22:05 +02:00
commit f71511edd2
7 changed files with 357 additions and 48 deletions


@ -215,7 +215,8 @@ GST_AUDIO_ENCODER_SRC_PAD
gst_audio_encoder_finish_frame
gst_audio_encoder_get_audio_info
gst_audio_encoder_get_frame_max
gst_audio_encoder_get_frame_samples
gst_audio_encoder_get_frame_samples_min
gst_audio_encoder_get_frame_samples_max
gst_audio_encoder_get_hard_resync
gst_audio_encoder_get_latency
gst_audio_encoder_get_lookahead
@ -224,7 +225,8 @@ gst_audio_encoder_get_perfect_timestamp
gst_audio_encoder_get_tolerance
gst_audio_encoder_proxy_getcaps
gst_audio_encoder_set_frame_max
gst_audio_encoder_set_frame_samples
gst_audio_encoder_set_frame_samples_min
gst_audio_encoder_set_frame_samples_max
gst_audio_encoder_set_hard_resync
gst_audio_encoder_set_latency
gst_audio_encoder_set_lookahead


@ -259,6 +259,8 @@ struct _GstAudioDecoderPrivate
GstClockTime tolerance;
gboolean plc;
/* pending serialized sink events, will be sent from finish_frame() */
GList *pending_events;
};
@ -375,6 +377,8 @@ gst_audio_decoder_init (GstAudioDecoder * dec)
dec->priv->adapter_out = gst_adapter_new ();
g_queue_init (&dec->priv->frames);
g_static_rec_mutex_init (&dec->stream_lock);
/* property default */
dec->priv->latency = DEFAULT_LATENCY;
dec->priv->tolerance = DEFAULT_TOLERANCE;
@ -390,7 +394,7 @@ gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
{
GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset");
GST_OBJECT_LOCK (dec);
GST_AUDIO_DECODER_STREAM_LOCK (dec);
if (full) {
dec->priv->active = FALSE;
@ -409,6 +413,10 @@ gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
}
gst_segment_init (&dec->segment, GST_FORMAT_TIME);
g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
g_list_free (dec->priv->pending_events);
dec->priv->pending_events = NULL;
}
g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
@ -424,7 +432,7 @@ gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
dec->priv->discont = TRUE;
dec->priv->sync_flush = FALSE;
GST_OBJECT_UNLOCK (dec);
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
}
static void
@ -442,6 +450,8 @@ gst_audio_decoder_finalize (GObject * object)
g_object_unref (dec->priv->adapter_out);
}
g_static_rec_mutex_free (&dec->stream_lock);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
@ -455,6 +465,8 @@ gst_audio_decoder_src_setcaps (GstAudioDecoder * dec, GstCaps * caps)
GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps);
GST_AUDIO_DECODER_STREAM_LOCK (dec);
/* parse caps here to check subclass;
* also makes us aware of output format */
if (!gst_caps_is_fixed (caps))
@ -471,6 +483,9 @@ gst_audio_decoder_src_setcaps (GstAudioDecoder * dec, GstCaps * caps)
if (!gst_audio_info_from_caps (&dec->priv->ctx.info, caps))
goto refuse_caps;
done:
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
gst_object_unref (dec);
return res;
@ -478,8 +493,8 @@ gst_audio_decoder_src_setcaps (GstAudioDecoder * dec, GstCaps * caps)
refuse_caps:
{
GST_WARNING_OBJECT (dec, "rejected caps %" GST_PTR_FORMAT, caps);
gst_object_unref (dec);
return res;
res = FALSE;
goto done;
}
}
@ -493,6 +508,7 @@ gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps);
GST_AUDIO_DECODER_STREAM_LOCK (dec);
/* NOTE pbutils only needed here */
/* TODO maybe (only) upstream demuxer/parser etc should handle this ? */
#if 0
@ -506,6 +522,8 @@ gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
if (klass->set_format)
res = klass->set_format (dec, caps);
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
return res;
}
@ -525,7 +543,7 @@ gst_audio_decoder_setup (GstAudioDecoder * dec)
gst_query_unref (query);
/* normalize to bool */
dec->priv->agg = !!res;
dec->priv->agg = ! !res;
}
/* mini aggregator combining output buffers into fewer larger ones,
@ -677,6 +695,7 @@ gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
gint samples = 0;
GstClockTime ts, next_ts;
gsize size;
GstFlowReturn ret = GST_FLOW_OK;
/* subclass should know what it is producing by now */
g_return_val_if_fail (buf == NULL || gst_pad_has_current_caps (dec->srcpad),
@ -694,6 +713,20 @@ gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
GST_LOG_OBJECT (dec, "accepting %d bytes == %d samples for %d frames",
buf ? size : -1, buf ? size / ctx->info.bpf : -1, frames);
GST_AUDIO_DECODER_STREAM_LOCK (dec);
if (priv->pending_events) {
GList *pending_events, *l;
pending_events = priv->pending_events;
priv->pending_events = NULL;
GST_DEBUG_OBJECT (dec, "Pushing pending events");
for (l = pending_events; l; l = l->next)
gst_pad_push_event (dec->srcpad, l->data);
g_list_free (pending_events);
}
/* output should be a whole number of sample frames */
if (G_LIKELY (buf && ctx->info.bpf)) {
if (size % ctx->info.bpf)
@ -800,7 +833,11 @@ gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
dec->priv->error_count--;
exit:
return gst_audio_decoder_output (dec, buf);
ret = gst_audio_decoder_output (dec, buf);
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
return ret;
/* ERRORS */
wrong_buffer:
@ -808,7 +845,8 @@ wrong_buffer:
GST_ELEMENT_ERROR (dec, STREAM, ENCODE, (NULL),
("buffer size %d not a multiple of %d", size, ctx->info.bpf));
gst_buffer_unref (buf);
return GST_FLOW_ERROR;
ret = GST_FLOW_ERROR;
goto exit;
}
overflow:
{
@ -817,7 +855,8 @@ overflow:
priv->frames.length), (NULL));
if (buf)
gst_buffer_unref (buf);
return GST_FLOW_ERROR;
ret = GST_FLOW_ERROR;
goto exit;
}
}
@ -1221,6 +1260,8 @@ gst_audio_decoder_chain (GstPad * pad, GstBuffer * buffer)
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));
GST_AUDIO_DECODER_STREAM_LOCK (dec);
if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
gint64 samples, ts;
@ -1247,6 +1288,8 @@ gst_audio_decoder_chain (GstPad * pad, GstBuffer * buffer)
else
ret = gst_audio_decoder_chain_reverse (dec, buffer);
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
return ret;
}
@ -1269,6 +1312,7 @@ gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
{
GstSegment seg;
GST_AUDIO_DECODER_STREAM_LOCK (dec);
gst_event_copy_segment (event, &seg);
if (seg.format == GST_FORMAT_TIME) {
@ -1296,6 +1340,7 @@ gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
event = gst_event_new_segment (&seg);
} else {
GST_DEBUG_OBJECT (dec, "unsupported format; ignoring");
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
break;
}
}
@ -1339,8 +1384,10 @@ gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
/* and follow along with segment */
dec->segment = seg;
gst_pad_push_event (dec->srcpad, event);
dec->priv->pending_events =
g_list_append (dec->priv->pending_events, event);
handled = TRUE;
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
break;
}
@ -1348,12 +1395,20 @@ gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec, GstEvent * event)
break;
case GST_EVENT_FLUSH_STOP:
GST_AUDIO_DECODER_STREAM_LOCK (dec);
/* prepare for fresh start */
gst_audio_decoder_flush (dec, TRUE);
g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
g_list_free (dec->priv->pending_events);
dec->priv->pending_events = NULL;
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
break;
case GST_EVENT_EOS:
GST_AUDIO_DECODER_STREAM_LOCK (dec);
gst_audio_decoder_drain (dec);
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
break;
case GST_EVENT_CAPS:
@ -1393,8 +1448,27 @@ gst_audio_decoder_sink_event (GstPad * pad, GstEvent * event)
if (!handled)
handled = gst_audio_decoder_sink_eventfunc (dec, event);
if (!handled)
ret = gst_pad_event_default (pad, event);
if (!handled) {
/* Forward non-serialized events and EOS/FLUSH_STOP immediately.
* For EOS this is required because no buffer or serialized event
* will come after EOS and nothing could trigger another
* _finish_frame() call.
*
* For FLUSH_STOP this is required because it is expected
* to be forwarded immediately and no buffers are queued anyway.
*/
if (!GST_EVENT_IS_SERIALIZED (event)
|| GST_EVENT_TYPE (event) == GST_EVENT_EOS
|| GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) {
ret = gst_pad_event_default (pad, event);
} else {
GST_AUDIO_DECODER_STREAM_LOCK (dec);
dec->priv->pending_events =
g_list_append (dec->priv->pending_events, event);
GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
ret = TRUE;
}
}
GST_DEBUG_OBJECT (dec, "event handled");


@ -20,7 +20,6 @@
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef _GST_AUDIO_DECODER_H_
#define _GST_AUDIO_DECODER_H_
@ -85,6 +84,9 @@ G_BEGIN_DECLS
*/
#define GST_AUDIO_DECODER_SINK_PAD(obj) (((GstAudioDecoder *) (obj))->sinkpad)
#define GST_AUDIO_DECODER_STREAM_LOCK(dec) g_static_rec_mutex_lock (&GST_AUDIO_DECODER (dec)->stream_lock)
#define GST_AUDIO_DECODER_STREAM_UNLOCK(dec) g_static_rec_mutex_unlock (&GST_AUDIO_DECODER (dec)->stream_lock)
typedef struct _GstAudioDecoder GstAudioDecoder;
typedef struct _GstAudioDecoderClass GstAudioDecoderClass;
@ -146,6 +148,11 @@ struct _GstAudioDecoder
GstPad *sinkpad;
GstPad *srcpad;
/* protects all data processing, i.e. is locked
* in the chain function, finish_frame and when
* processing serialized events */
GStaticRecMutex stream_lock;
/* MT-protected (with STREAM_LOCK) */
GstSegment segment;
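The new stream lock macros are public, so subclasses may take the same lock. As an illustrative sketch only (the GstMyDec type, its mode field and the setter below are hypothetical, not part of this commit), a subclass setter that must not race against the chain function or finish_frame() could look like this:

#include <gst/audio/gstaudiodecoder.h>

/* hypothetical subclass carrying some decoding state of its own */
typedef struct
{
  GstAudioDecoder parent;
  gint mode;
} GstMyDec;

static void
gst_my_dec_set_mode (GstMyDec * self, gint mode)
{
  /* take the same lock the base class holds while processing data,
   * so the change cannot interleave with chain/finish_frame */
  GST_AUDIO_DECODER_STREAM_LOCK (self);
  self->mode = mode;
  GST_AUDIO_DECODER_STREAM_UNLOCK (self);
}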


@ -154,6 +154,7 @@
#include "gstaudioencoder.h"
#include <gst/base/gstadapter.h>
#include <gst/audio/audio.h>
#include <gst/pbutils/descriptions.h>
#include <stdlib.h>
#include <string.h>
@ -186,7 +187,7 @@ typedef struct _GstAudioEncoderContext
GstAudioInfo info;
/* output */
gint frame_samples;
gint frame_samples_min, frame_samples_max;
gint frame_max;
gint lookahead;
/* MT-protected (with LOCK) */
@ -238,6 +239,11 @@ struct _GstAudioEncoderPrivate
gboolean perfect_ts;
gboolean hard_resync;
gboolean granule;
/* pending tags */
GstTagList *tags;
/* pending serialized sink events, will be sent from finish_frame() */
GList *pending_events;
};
@ -380,6 +386,8 @@ gst_audio_encoder_init (GstAudioEncoder * enc, GstAudioEncoderClass * bclass)
enc->priv->adapter = gst_adapter_new ();
g_static_rec_mutex_init (&enc->stream_lock);
/* property default */
enc->priv->granule = DEFAULT_GRANULE;
enc->priv->perfect_ts = DEFAULT_PERFECT_TS;
@ -394,7 +402,9 @@ gst_audio_encoder_init (GstAudioEncoder * enc, GstAudioEncoderClass * bclass)
static void
gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full)
{
GST_OBJECT_LOCK (enc);
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
GST_LOG_OBJECT (enc, "reset full %d", full);
if (full) {
enc->priv->active = FALSE;
@ -402,6 +412,14 @@ gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full)
enc->priv->bytes_out = 0;
gst_audio_info_init (&enc->priv->ctx.info);
memset (&enc->priv->ctx, 0, sizeof (enc->priv->ctx));
if (enc->priv->tags)
gst_tag_list_free (enc->priv->tags);
enc->priv->tags = NULL;
g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL);
g_list_free (enc->priv->pending_events);
enc->priv->pending_events = NULL;
}
gst_segment_init (&enc->segment, GST_FORMAT_TIME);
@ -415,7 +433,7 @@ gst_audio_encoder_reset (GstAudioEncoder * enc, gboolean full)
enc->priv->samples = 0;
enc->priv->discont = FALSE;
GST_OBJECT_UNLOCK (enc);
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
}
static void
@ -425,6 +443,8 @@ gst_audio_encoder_finalize (GObject * object)
g_object_unref (enc->priv->adapter);
g_static_rec_mutex_free (&enc->stream_lock);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
@ -472,12 +492,41 @@ gst_audio_encoder_finish_frame (GstAudioEncoder * enc, GstBuffer * buf,
g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
GST_FLOW_ERROR);
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
if (G_UNLIKELY (enc->priv->tags)) {
GstTagList *tags;
/* add codec info to pending tags */
tags = enc->priv->tags;
/* no more pending */
enc->priv->tags = NULL;
gst_pb_utils_add_codec_description_to_tag_list (tags, GST_TAG_CODEC,
GST_PAD_CAPS (enc->srcpad));
gst_pb_utils_add_codec_description_to_tag_list (tags, GST_TAG_AUDIO_CODEC,
GST_PAD_CAPS (enc->srcpad));
GST_DEBUG_OBJECT (enc, "sending tags %" GST_PTR_FORMAT, tags);
gst_element_found_tags_for_pad (GST_ELEMENT (enc), enc->srcpad, tags);
}
GST_LOG_OBJECT (enc, "accepting %d bytes encoded data as %d samples",
buf ? gst_buffer_get_size (buf) : -1, samples);
/* mark subclass still alive and providing */
priv->got_data = TRUE;
if (priv->pending_events) {
GList *pending_events, *l;
pending_events = priv->pending_events;
priv->pending_events = NULL;
GST_DEBUG_OBJECT (enc, "Pushing pending events");
for (l = pending_events; l; l = l->next)
gst_pad_push_event (enc->srcpad, l->data);
g_list_free (pending_events);
}
/* remove corresponding samples from input */
if (samples < 0)
samples = (enc->priv->offset / ctx->info.bpf);
@ -627,6 +676,8 @@ gst_audio_encoder_finish_frame (GstAudioEncoder * enc, GstBuffer * buf,
}
exit:
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
return ret;
/* ERRORS */
@ -637,7 +688,8 @@ overflow:
samples, priv->offset / ctx->info.bpf), (NULL));
if (buf)
gst_buffer_unref (buf);
return GST_FLOW_ERROR;
ret = GST_FLOW_ERROR;
goto exit;
}
}
@ -670,9 +722,11 @@ gst_audio_encoder_push_buffers (GstAudioEncoder * enc, gboolean force)
g_assert (priv->offset <= av);
av -= priv->offset;
need = ctx->frame_samples > 0 ? ctx->frame_samples * ctx->info.bpf : av;
GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d",
av, need, force);
need =
ctx->frame_samples_min >
0 ? ctx->frame_samples_min * ctx->info.bpf : av;
GST_LOG_OBJECT (enc, "available: %d, needed: %d, force: %d", av, need,
force);
if ((need > av) || !av) {
if (G_UNLIKELY (force)) {
@ -685,14 +739,19 @@ gst_audio_encoder_push_buffers (GstAudioEncoder * enc, gboolean force)
priv->force = FALSE;
}
/* if we have some extra metadata,
* provide for integer multiple of frames to allow for better granularity
* of processing */
if (ctx->frame_samples > 0 && need) {
if (ctx->frame_max > 1)
need = need * MIN ((av / need), ctx->frame_max);
else if (ctx->frame_max == 0)
need = need * (av / need);
if (ctx->frame_samples_max > 0)
need = MIN (av, ctx->frame_samples_max * ctx->info.bpf);
if (ctx->frame_samples_min == ctx->frame_samples_max) {
/* if we have some extra metadata,
* provide for integer multiple of frames to allow for better granularity
* of processing */
if (ctx->frame_samples_min > 0 && need) {
if (ctx->frame_max > 1)
need = need * MIN ((av / need), ctx->frame_max);
else if (ctx->frame_max == 0)
need = need * (av / need);
}
}
if (need) {
@ -782,6 +841,8 @@ gst_audio_encoder_chain (GstPad * pad, GstBuffer * buffer)
priv = enc->priv;
ctx = &enc->priv->ctx;
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
/* should know what is coming by now */
if (!ctx->info.bpf)
goto not_negotiated;
@ -916,6 +977,9 @@ gst_audio_encoder_chain (GstPad * pad, GstBuffer * buffer)
done:
GST_LOG_OBJECT (enc, "chain leaving");
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
return ret;
/* ERRORS */
@ -924,7 +988,8 @@ not_negotiated:
GST_ELEMENT_ERROR (enc, CORE, NEGOTIATION, (NULL),
("encoder not initialized"));
gst_buffer_unref (buffer);
return GST_FLOW_NOT_NEGOTIATED;
ret = GST_FLOW_NOT_NEGOTIATED;
goto done;
}
wrong_buffer:
{
@ -932,7 +997,8 @@ wrong_buffer:
("buffer size %d not a multiple of %d", gst_buffer_get_size (buffer),
ctx->info.bpf));
gst_buffer_unref (buffer);
return GST_FLOW_ERROR;
ret = GST_FLOW_ERROR;
goto done;
}
}
@ -971,6 +1037,8 @@ gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc, GstCaps * caps)
ctx = &enc->priv->ctx;
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
GST_DEBUG_OBJECT (enc, "caps: %" GST_PTR_FORMAT, caps);
if (!gst_caps_is_fixed (caps))
@ -997,7 +1065,8 @@ gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc, GstCaps * caps)
gst_audio_encoder_drain (enc);
/* context defaults */
enc->priv->ctx.frame_samples = 0;
enc->priv->ctx.frame_samples_min = 0;
enc->priv->ctx.frame_samples_max = 0;
enc->priv->ctx.frame_max = 0;
enc->priv->ctx.lookahead = 0;
@ -1025,13 +1094,17 @@ gst_audio_encoder_sink_setcaps (GstAudioEncoder * enc, GstCaps * caps)
GST_DEBUG_OBJECT (enc, "new audio format identical to configured format");
}
exit:
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
return res;
/* ERRORS */
refuse_caps:
{
GST_WARNING_OBJECT (enc, "rejected caps %" GST_PTR_FORMAT, caps);
return res;
goto exit;
}
}
@ -1161,12 +1234,14 @@ gst_audio_encoder_sink_eventfunc (GstAudioEncoder * enc, GstEvent * event)
break;
}
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
/* finish current segment */
gst_audio_encoder_drain (enc);
/* reset partially for new segment */
gst_audio_encoder_reset (enc, FALSE);
/* and follow along with segment */
enc->segment = seg;
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
break;
}
@ -1174,18 +1249,46 @@ gst_audio_encoder_sink_eventfunc (GstAudioEncoder * enc, GstEvent * event)
break;
case GST_EVENT_FLUSH_STOP:
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
/* discard any pending stuff */
/* TODO route through drain ?? */
if (!enc->priv->drained && klass->flush)
klass->flush (enc);
/* and get (re)set for the sequel */
gst_audio_encoder_reset (enc, FALSE);
g_list_foreach (enc->priv->pending_events, (GFunc) gst_event_unref, NULL);
g_list_free (enc->priv->pending_events);
enc->priv->pending_events = NULL;
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
break;
case GST_EVENT_EOS:
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
gst_audio_encoder_drain (enc);
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
break;
case GST_EVENT_TAG:
{
GstTagList *tags;
gst_event_parse_tag (event, &tags);
tags = gst_tag_list_copy (tags);
gst_event_unref (event);
gst_tag_list_remove_tag (tags, GST_TAG_CODEC);
gst_tag_list_remove_tag (tags, GST_TAG_AUDIO_CODEC);
event = gst_event_new_tag (tags);
GST_OBJECT_LOCK (enc);
enc->priv->pending_events =
g_list_append (enc->priv->pending_events, event);
GST_OBJECT_UNLOCK (enc);
handled = TRUE;
break;
}
case GST_EVENT_CAPS:
{
GstCaps *caps;
@ -1224,8 +1327,27 @@ gst_audio_encoder_sink_event (GstPad * pad, GstEvent * event)
if (!handled)
handled = gst_audio_encoder_sink_eventfunc (enc, event);
if (!handled)
ret = gst_pad_event_default (pad, event);
if (!handled) {
/* Forward non-serialized events and EOS/FLUSH_STOP immediately.
* For EOS this is required because no buffer or serialized event
* will come after EOS and nothing could trigger another
* _finish_frame() call.
*
* For FLUSH_STOP this is required because it is expected
* to be forwarded immediately and no buffers are queued anyway.
*/
if (!GST_EVENT_IS_SERIALIZED (event)
|| GST_EVENT_TYPE (event) == GST_EVENT_EOS
|| GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) {
ret = gst_pad_event_default (pad, event);
} else {
GST_AUDIO_ENCODER_STREAM_LOCK (enc);
enc->priv->pending_events =
g_list_append (enc->priv->pending_events, event);
GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
ret = TRUE;
}
}
GST_DEBUG_OBJECT (enc, "event handled");
@ -1544,6 +1666,11 @@ gst_audio_encoder_activate (GstAudioEncoder * enc, gboolean active)
GST_DEBUG_OBJECT (enc, "activate %d", active);
if (active) {
if (enc->priv->tags)
gst_tag_list_free (enc->priv->tags);
enc->priv->tags = gst_tag_list_new ();
if (!enc->priv->active && klass->start)
result = klass->start (enc);
} else {
@ -1601,37 +1728,77 @@ gst_audio_encoder_get_audio_info (GstAudioEncoder * enc)
}
/**
* gst_audio_encoder_set_frame_samples:
* gst_audio_encoder_set_frame_samples_min:
* @enc: a #GstAudioEncoder
* @num: number of samples per frame
*
* Sets number of samples (per channel) subclass needs to be handed,
* or will be handed all available if 0.
* at least or will be handed all available if 0.
*
* If an exact number of samples is required, gst_audio_encoder_set_frame_samples_max()
* must be called with the same number.
*
* Since: 0.10.36
*/
void
gst_audio_encoder_set_frame_samples (GstAudioEncoder * enc, gint num)
gst_audio_encoder_set_frame_samples_min (GstAudioEncoder * enc, gint num)
{
g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
enc->priv->ctx.frame_samples = num;
enc->priv->ctx.frame_samples_min = num;
}
/**
* gst_audio_encoder_get_frame_samples:
* gst_audio_encoder_get_frame_samples_min:
* @enc: a #GstAudioEncoder
*
* Returns: currently requested samples per frame
* Returns: currently requested minimum samples per frame
*
* Since: 0.10.36
*/
gint
gst_audio_encoder_get_frame_samples (GstAudioEncoder * enc)
gst_audio_encoder_get_frame_samples_min (GstAudioEncoder * enc)
{
g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
return enc->priv->ctx.frame_samples;
return enc->priv->ctx.frame_samples_min;
}
/**
* gst_audio_encoder_set_frame_samples_max:
* @enc: a #GstAudioEncoder
* @num: number of samples per frame
*
* Sets number of samples (per channel) subclass needs to be handed,
* at most or will be handed all available if 0.
*
* If an exact number of samples is required, gst_audio_encoder_set_frame_samples_min()
* must be called with the same number.
*
* Since: 0.10.36
*/
void
gst_audio_encoder_set_frame_samples_max (GstAudioEncoder * enc, gint num)
{
g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
enc->priv->ctx.frame_samples_max = num;
}
/**
* gst_audio_encoder_get_frame_samples_max:
* @enc: a #GstAudioEncoder
*
* Returns: currently requested maximum samples per frame
*
* Since: 0.10.36
*/
gint
gst_audio_encoder_get_frame_samples_max (GstAudioEncoder * enc)
{
g_return_val_if_fail (GST_IS_AUDIO_ENCODER (enc), 0);
return enc->priv->ctx.frame_samples_max;
}
/**
@ -1639,7 +1806,8 @@ gst_audio_encoder_get_frame_samples (GstAudioEncoder * enc)
* @enc: a #GstAudioEncoder
* @num: number of frames
*
* Sets max number of frames accepted at once (assumed minimally 1)
* Sets max number of frames accepted at once (assumed minimally 1).
* Requires @frame_samples_min and @frame_samples_max to be equal.
*
* Since: 0.10.36
*/
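As a usage sketch (the element name and the 1024-sample frame size are hypothetical, and the set_format vfunc signature is assumed from this base class), a subclass whose codec consumes a fixed number of samples per frame would request the same minimum and maximum, which in turn lets frame_max take effect:

#include <gst/audio/gstaudioencoder.h>

static gboolean
gst_my_enc_set_format (GstAudioEncoder * enc, GstAudioInfo * info)
{
  /* codec consumes exactly 1024 samples (per channel) per frame */
  gst_audio_encoder_set_frame_samples_min (enc, 1024);
  gst_audio_encoder_set_frame_samples_max (enc, 1024);
  /* hand over at most one such frame per handle_frame call;
   * only meaningful because min == max above */
  gst_audio_encoder_set_frame_max (enc, 1);

  return TRUE;
}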
@ -1939,3 +2107,40 @@ gst_audio_encoder_get_tolerance (GstAudioEncoder * enc)
return result;
}
/**
* gst_audio_encoder_merge_tags:
* @enc: a #GstAudioEncoder
* @tags: a #GstTagList to merge
* @mode: the #GstTagMergeMode to use
*
* Adds tags to so-called pending tags, which will be processed
* before pushing out data downstream.
*
* Note that this is provided for convenience, and the subclass is
* not required to use this and can still do tag handling on its own,
* although it should be aware that baseclass already takes care
* of the usual CODEC/AUDIO_CODEC tags.
*
* MT safe.
*
* Since: 0.10.36
*/
void
gst_audio_encoder_merge_tags (GstAudioEncoder * enc,
const GstTagList * tags, GstTagMergeMode mode)
{
GstTagList *otags;
g_return_if_fail (GST_IS_AUDIO_ENCODER (enc));
g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
GST_OBJECT_LOCK (enc);
if (tags)
GST_DEBUG_OBJECT (enc, "merging tags %" GST_PTR_FORMAT, tags);
otags = enc->priv->tags;
enc->priv->tags = gst_tag_list_merge (enc->priv->tags, tags, mode);
if (otags)
gst_tag_list_free (otags);
GST_OBJECT_UNLOCK (enc);
}
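A hedged example of the new tag helper from a subclass (the function and the bitrate value are hypothetical; gst_tag_list_new()/gst_tag_list_free() are used as elsewhere in this tree): merge a bitrate tag into the pending tags and let the base class add the CODEC/AUDIO_CODEC descriptions itself:

#include <gst/audio/gstaudioencoder.h>

static void
gst_my_enc_update_bitrate (GstAudioEncoder * enc, guint bitrate)
{
  GstTagList *tags = gst_tag_list_new ();

  gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
      GST_TAG_BITRATE, bitrate, NULL);
  /* queued as pending tags; pushed before the next encoded data */
  gst_audio_encoder_merge_tags (enc, tags, GST_TAG_MERGE_REPLACE);
  gst_tag_list_free (tags);
}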


@ -87,6 +87,8 @@ G_BEGIN_DECLS
*/
#define GST_AUDIO_ENCODER_SEGMENT(obj) (GST_AUDIO_ENCODER_CAST (obj)->segment)
#define GST_AUDIO_ENCODER_STREAM_LOCK(enc) g_static_rec_mutex_lock (&GST_AUDIO_ENCODER (enc)->stream_lock)
#define GST_AUDIO_ENCODER_STREAM_UNLOCK(enc) g_static_rec_mutex_unlock (&GST_AUDIO_ENCODER (enc)->stream_lock)
typedef struct _GstAudioEncoder GstAudioEncoder;
typedef struct _GstAudioEncoderClass GstAudioEncoderClass;
@ -108,6 +110,11 @@ struct _GstAudioEncoder {
GstPad *sinkpad;
GstPad *srcpad;
/* protects all data processing, i.e. is locked
* in the chain function, finish_frame and when
* processing serialized events */
GStaticRecMutex stream_lock;
/* MT-protected (with STREAM_LOCK) */
GstSegment segment;
@ -196,9 +203,13 @@ GstCaps * gst_audio_encoder_proxy_getcaps (GstAudioEncoder * enc,
/* context parameters */
GstAudioInfo * gst_audio_encoder_get_audio_info (GstAudioEncoder * enc);
gint gst_audio_encoder_get_frame_samples (GstAudioEncoder * enc);
gint gst_audio_encoder_get_frame_samples_min (GstAudioEncoder * enc);
void gst_audio_encoder_set_frame_samples (GstAudioEncoder * enc, gint num);
void gst_audio_encoder_set_frame_samples_min (GstAudioEncoder * enc, gint num);
gint gst_audio_encoder_get_frame_samples_max (GstAudioEncoder * enc);
void gst_audio_encoder_set_frame_samples_max (GstAudioEncoder * enc, gint num);
gint gst_audio_encoder_get_frame_max (GstAudioEncoder * enc);
@ -238,6 +249,9 @@ void gst_audio_encoder_set_tolerance (GstAudioEncoder * enc,
gint64 gst_audio_encoder_get_tolerance (GstAudioEncoder * enc);
void gst_audio_encoder_merge_tags (GstAudioEncoder * enc,
const GstTagList * tags, GstTagMergeMode mode);
G_END_DECLS
#endif /* __GST_AUDIO_ENCODER_H__ */


@ -45,6 +45,10 @@
GST_DEBUG_CATEGORY_STATIC (type_find_debug);
#define GST_CAT_DEFAULT type_find_debug
/* so our code stays ready for 0.11 */
#define gst_type_find_peek(tf,off,len) \
((const guint8 *)gst_type_find_peek((tf),(off),(len)))
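With the wrapper in place, typefind helpers in this file can store the peek result in a const pointer as 0.11 expects. A minimal, hypothetical example (the RIFF check and the suggested media type are illustrative only, not taken from this commit):

#include <string.h>

static void
riff_type_find (GstTypeFind * tf, gpointer unused)
{
  const guint8 *data = gst_type_find_peek (tf, 0, 4);

  if (data != NULL && memcmp (data, "RIFF", 4) == 0)
    gst_type_find_suggest_simple (tf, GST_TYPE_FIND_MAXIMUM,
        "video/x-msvideo", NULL);
}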
/* DataScanCtx: helper for typefind functions that scan through data
* step-by-step, to avoid doing a peek at each and every offset */


@ -34,7 +34,8 @@ EXPORTS
gst_audio_encoder_finish_frame
gst_audio_encoder_get_audio_info
gst_audio_encoder_get_frame_max
gst_audio_encoder_get_frame_samples
gst_audio_encoder_get_frame_samples_max
gst_audio_encoder_get_frame_samples_min
gst_audio_encoder_get_hard_resync
gst_audio_encoder_get_latency
gst_audio_encoder_get_lookahead
@ -42,9 +43,11 @@ EXPORTS
gst_audio_encoder_get_perfect_timestamp
gst_audio_encoder_get_tolerance
gst_audio_encoder_get_type
gst_audio_encoder_merge_tags
gst_audio_encoder_proxy_getcaps
gst_audio_encoder_set_frame_max
gst_audio_encoder_set_frame_samples
gst_audio_encoder_set_frame_samples_max
gst_audio_encoder_set_frame_samples_min
gst_audio_encoder_set_hard_resync
gst_audio_encoder_set_latency
gst_audio_encoder_set_lookahead