audioaggregator: implement input conversion

https://bugzilla.gnome.org/show_bug.cgi?id=786344
Mathieu Duponchelle 2017-07-22 20:32:20 +02:00 committed by Mathieu Duponchelle
parent 9a128603c9
commit 536cb12577
6 changed files with 857 additions and 447 deletions


@ -29,6 +29,38 @@
* aggregating their buffers for raw audio
* @see_also: #GstAggregator
*
* #GstAudioAggregator will perform conversion on the data arriving
* on its sink pads, based on the format expected downstream.
*
* Subclasses can opt out of the conversion behaviour by setting
* #GstAudioAggregatorClass.convert_buffer() to %NULL.
*
* Subclasses that wish to use the default conversion implementation
* should use a (subclass of) #GstAudioAggregatorConvertPad as their
* #GstAggregatorClass.sinkpads_type, as it will cache the created
* #GstAudioConverter and install a property for configuring it,
* #GstAudioAggregatorConvertPad:converter-config.
*
* Subclasses that wish to perform custom conversion should override
* #GstAudioAggregatorClass.convert_buffer().
*
* When conversion is enabled, #GstAudioAggregator will accept
* any type of raw audio caps and convert the data arriving on
* its sink pads to whatever format downstream expects.
*
* If the downstream caps are not fully fixated, it will use
* the first configured sink pad to finish fixating its source pad
* caps.
*
* Additionally, handling audio conversion directly in the element
* means that this base class supports safely reconfiguring its
* source pad.
*
* A notable exception for now is the sample rate: sink pads must
* have the same sample rate as either the downstream requirement,
* or the first configured pad, or a combination of both (when
* downstream specifies a range or a set of acceptable rates).
*/
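A minimal sketch of what this wiring can look like in a subclass's class_init. The MyMixer names are hypothetical placeholders; the sketch only assumes the GstAggregatorClass.sinkpads_type field referenced above and the header added by this commit.

#include "gstaudioaggregator.h"

/* Hypothetical subclass class structure, for illustration only */
typedef struct
{
  GstAudioAggregatorClass parent_class;
} MyMixerClass;

static void
my_mixer_class_init (MyMixerClass * klass)
{
  GstAggregatorClass *agg_class = (GstAggregatorClass *) klass;

  /* Use the provided convert pad: input buffers are converted with a
   * cached GstAudioConverter, configurable through the converter-config
   * property installed on GstAudioAggregatorConvertPad. */
  agg_class->sinkpads_type = GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD;

  /* To opt out of conversion entirely instead (as audiointerleave does
   * further down in this commit), a subclass would set:
   *   ((GstAudioAggregatorClass *) klass)->convert_buffer = NULL;
   */
}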
@ -47,7 +79,7 @@ struct _GstAudioAggregatorPadPrivate
{
/* All members are protected by the pad object lock */
GstBuffer *buffer; /* current input buffer we're mixing, for
GstBuffer *buffer; /* current buffer we're mixing, for
comparison with a new input buffer from
aggregator to see if we need to update our
cached values. */
@ -55,6 +87,8 @@ struct _GstAudioAggregatorPadPrivate
guint position, size; /* position in the input buffer and size of the
input buffer in number of samples */
GstBuffer *input_buffer;
guint64 output_offset; /* Sample offset in output segment relative to
pad.segment.start that position refers to
in the current buffer. */
@ -76,6 +110,12 @@ struct _GstAudioAggregatorPadPrivate
G_DEFINE_TYPE (GstAudioAggregatorPad, gst_audio_aggregator_pad,
GST_TYPE_AGGREGATOR_PAD);
enum
{
PROP_PAD_0,
PROP_PAD_CONVERTER_CONFIG,
};
static GstFlowReturn
gst_audio_aggregator_pad_flush_pad (GstAggregatorPad * aggpad,
GstAggregator * aggregator);
@ -86,6 +126,7 @@ gst_audio_aggregator_pad_finalize (GObject * object)
GstAudioAggregatorPad *pad = (GstAudioAggregatorPad *) object;
gst_buffer_replace (&pad->priv->buffer, NULL);
gst_buffer_replace (&pad->priv->input_buffer, NULL);
G_OBJECT_CLASS (gst_audio_aggregator_pad_parent_class)->finalize (object);
}
@ -112,6 +153,7 @@ gst_audio_aggregator_pad_init (GstAudioAggregatorPad * pad)
gst_audio_info_init (&pad->info);
pad->priv->buffer = NULL;
pad->priv->input_buffer = NULL;
pad->priv->position = 0;
pad->priv->size = 0;
pad->priv->output_offset = -1;
@ -131,13 +173,182 @@ gst_audio_aggregator_pad_flush_pad (GstAggregatorPad * aggpad,
pad->priv->output_offset = pad->priv->next_offset = -1;
pad->priv->discont_time = GST_CLOCK_TIME_NONE;
gst_buffer_replace (&pad->priv->buffer, NULL);
gst_buffer_replace (&pad->priv->input_buffer, NULL);
GST_OBJECT_UNLOCK (aggpad);
return GST_FLOW_OK;
}
struct _GstAudioAggregatorConvertPadPrivate
{
/* All members are protected by the pad object lock */
GstAudioConverter *converter;
GstStructure *converter_config;
gboolean converter_config_changed;
};
G_DEFINE_TYPE (GstAudioAggregatorConvertPad, gst_audio_aggregator_convert_pad,
GST_TYPE_AUDIO_AGGREGATOR_PAD);
static void
gst_audio_aggregator_convert_pad_update_converter (GstAudioAggregatorConvertPad
* aaggcpad, GstAudioInfo * in_info, GstAudioInfo * out_info)
{
if (!aaggcpad->priv->converter_config_changed)
return;
if (aaggcpad->priv->converter) {
gst_audio_converter_free (aaggcpad->priv->converter);
aaggcpad->priv->converter = NULL;
}
if (gst_audio_info_is_equal (in_info, out_info) ||
in_info->finfo->format == GST_AUDIO_FORMAT_UNKNOWN) {
if (aaggcpad->priv->converter) {
gst_audio_converter_free (aaggcpad->priv->converter);
aaggcpad->priv->converter = NULL;
}
} else {
/* If we haven't received caps yet, this pad should not have
* a buffer to convert anyway */
aaggcpad->priv->converter =
gst_audio_converter_new (GST_AUDIO_CONVERTER_FLAG_NONE,
in_info, out_info,
aaggcpad->priv->converter_config ? gst_structure_copy (aaggcpad->
priv->converter_config) : NULL);
}
aaggcpad->priv->converter_config_changed = FALSE;
}
static GstBuffer *
gst_audio_aggregator_convert_pad_convert_buffer (GstAudioAggregatorConvertPad *
aaggcpad, GstAudioInfo * in_info, GstAudioInfo * out_info,
GstBuffer * input_buffer)
{
GstBuffer *res;
gst_audio_aggregator_convert_pad_update_converter (aaggcpad, in_info,
out_info);
if (aaggcpad->priv->converter) {
gint insize = gst_buffer_get_size (input_buffer);
gsize insamples = insize / in_info->bpf;
gsize outsamples =
gst_audio_converter_get_out_frames (aaggcpad->priv->converter,
insamples);
gint outsize = outsamples * out_info->bpf;
GstMapInfo inmap, outmap;
res = gst_buffer_new_allocate (NULL, outsize, NULL);
/* We create a perfectly similar buffer, except obviously for
* its converted contents */
gst_buffer_copy_into (res, input_buffer,
GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
GST_BUFFER_COPY_META, 0, -1);
gst_buffer_map (input_buffer, &inmap, GST_MAP_READ);
gst_buffer_map (res, &outmap, GST_MAP_WRITE);
gst_audio_converter_samples (aaggcpad->priv->converter,
GST_AUDIO_CONVERTER_FLAG_NONE,
(gpointer *) & inmap.data, insamples,
(gpointer *) & outmap.data, outsamples);
gst_buffer_unmap (input_buffer, &inmap);
gst_buffer_unmap (res, &outmap);
} else {
res = gst_buffer_ref (input_buffer);
}
return res;
}
static void
gst_audio_aggregator_convert_pad_finalize (GObject * object)
{
GstAudioAggregatorConvertPad *pad = (GstAudioAggregatorConvertPad *) object;
if (pad->priv->converter)
gst_audio_converter_free (pad->priv->converter);
if (pad->priv->converter_config)
gst_structure_free (pad->priv->converter_config);
G_OBJECT_CLASS (gst_audio_aggregator_convert_pad_parent_class)->finalize
(object);
}
static void
gst_audio_aggregator_convert_pad_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstAudioAggregatorConvertPad *pad = GST_AUDIO_AGGREGATOR_CONVERT_PAD (object);
switch (prop_id) {
case PROP_PAD_CONVERTER_CONFIG:
GST_OBJECT_LOCK (pad);
if (pad->priv->converter_config)
g_value_set_boxed (value, pad->priv->converter_config);
GST_OBJECT_UNLOCK (pad);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audio_aggregator_convert_pad_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstAudioAggregatorConvertPad *pad = GST_AUDIO_AGGREGATOR_CONVERT_PAD (object);
switch (prop_id) {
case PROP_PAD_CONVERTER_CONFIG:
GST_OBJECT_LOCK (pad);
if (pad->priv->converter_config)
gst_structure_free (pad->priv->converter_config);
pad->priv->converter_config = g_value_dup_boxed (value);
pad->priv->converter_config_changed = TRUE;
GST_OBJECT_UNLOCK (pad);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audio_aggregator_convert_pad_class_init (GstAudioAggregatorConvertPadClass *
klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
g_type_class_add_private (klass,
sizeof (GstAudioAggregatorConvertPadPrivate));
gobject_class->set_property = gst_audio_aggregator_convert_pad_set_property;
gobject_class->get_property = gst_audio_aggregator_convert_pad_get_property;
g_object_class_install_property (gobject_class, PROP_PAD_CONVERTER_CONFIG,
g_param_spec_boxed ("converter-config", "Converter configuration",
"A GstStructure describing the configuration that should be used "
"when converting this pad's audio buffers",
GST_TYPE_STRUCTURE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
gobject_class->finalize = gst_audio_aggregator_convert_pad_finalize;
}
static void
gst_audio_aggregator_convert_pad_init (GstAudioAggregatorConvertPad * pad)
{
pad->priv =
G_TYPE_INSTANCE_GET_PRIVATE (pad, GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD,
GstAudioAggregatorConvertPadPrivate);
}
/**************************************
* GstAudioAggregator implementation *
**************************************/
@ -179,6 +390,9 @@ static gboolean gst_audio_aggregator_sink_event (GstAggregator * agg,
GstAggregatorPad * aggpad, GstEvent * event);
static gboolean gst_audio_aggregator_src_query (GstAggregator * agg,
GstQuery * query);
static gboolean
gst_audio_aggregator_sink_query (GstAggregator * agg, GstAggregatorPad * aggpad,
GstQuery * query);
static gboolean gst_audio_aggregator_start (GstAggregator * agg);
static gboolean gst_audio_aggregator_stop (GstAggregator * agg);
static GstFlowReturn gst_audio_aggregator_flush (GstAggregator * agg);
@ -192,6 +406,11 @@ static GstFlowReturn gst_audio_aggregator_aggregate (GstAggregator * agg,
static gboolean sync_pad_values (GstElement * aagg, GstPad * pad, gpointer ud);
static gboolean gst_audio_aggregator_negotiated_src_caps (GstAggregator * agg,
GstCaps * caps);
static GstFlowReturn
gst_audio_aggregator_update_src_caps (GstAggregator * agg,
GstCaps * caps, GstCaps ** ret);
static GstCaps *gst_audio_aggregator_fixate_src_caps (GstAggregator * agg,
GstCaps * caps);
#define DEFAULT_OUTPUT_BUFFER_DURATION (10 * GST_MSECOND)
#define DEFAULT_ALIGNMENT_THRESHOLD (40 * GST_MSECOND)
@ -229,6 +448,66 @@ gst_audio_aggregator_get_next_time (GstAggregator * agg)
return next_time;
}
static GstBuffer *
gst_audio_aggregator_convert_once (GstAudioAggregator * aagg, GstPad * pad,
GstAudioInfo * in_info, GstAudioInfo * out_info, GstBuffer * buffer)
{
GstAudioConverter *converter =
gst_audio_converter_new (GST_AUDIO_CONVERTER_FLAG_NONE,
in_info, out_info, NULL);
gint insize = gst_buffer_get_size (buffer);
gsize insamples = insize / in_info->bpf;
gsize outsamples = gst_audio_converter_get_out_frames (converter,
insamples);
gint outsize = outsamples * out_info->bpf;
GstMapInfo inmap, outmap;
GstBuffer *converted = gst_buffer_new_allocate (NULL, outsize, NULL);
gst_buffer_copy_into (converted, buffer,
GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
GST_BUFFER_COPY_META, 0, -1);
gst_buffer_map (buffer, &inmap, GST_MAP_READ);
gst_buffer_map (converted, &outmap, GST_MAP_WRITE);
gst_audio_converter_samples (converter,
GST_AUDIO_CONVERTER_FLAG_NONE,
(gpointer *) & inmap.data, insamples,
(gpointer *) & outmap.data, outsamples);
gst_buffer_unmap (buffer, &inmap);
gst_buffer_unmap (converted, &outmap);
gst_audio_converter_free (converter);
return converted;
}
static GstBuffer *
gst_audio_aggregator_default_convert_buffer (GstAudioAggregator * aagg,
GstPad * pad, GstAudioInfo * in_info, GstAudioInfo * out_info,
GstBuffer * buffer)
{
if (GST_IS_AUDIO_AGGREGATOR_CONVERT_PAD (pad))
return
gst_audio_aggregator_convert_pad_convert_buffer
(GST_AUDIO_AGGREGATOR_CONVERT_PAD (pad),
&GST_AUDIO_AGGREGATOR_PAD (pad)->info, out_info, buffer);
else
return gst_audio_aggregator_convert_once (aagg, pad, in_info, out_info,
buffer);
}
static GstBuffer *
gst_audio_aggregator_convert_buffer (GstAudioAggregator * aagg, GstPad * pad,
GstAudioInfo * in_info, GstAudioInfo * out_info, GstBuffer * buffer)
{
GstAudioAggregatorClass *klass = GST_AUDIO_AGGREGATOR_GET_CLASS (aagg);
g_assert (klass->convert_buffer);
return klass->convert_buffer (aagg, pad, in_info, out_info, buffer);
}
static void
gst_audio_aggregator_class_init (GstAudioAggregatorClass * klass)
{
@ -247,6 +526,7 @@ gst_audio_aggregator_class_init (GstAudioAggregatorClass * klass)
GST_DEBUG_FUNCPTR (gst_audio_aggregator_sink_event);
gstaggregator_class->src_query =
GST_DEBUG_FUNCPTR (gst_audio_aggregator_src_query);
gstaggregator_class->sink_query = gst_audio_aggregator_sink_query;
gstaggregator_class->start = gst_audio_aggregator_start;
gstaggregator_class->stop = gst_audio_aggregator_stop;
gstaggregator_class->flush = gst_audio_aggregator_flush;
@ -254,10 +534,14 @@ gst_audio_aggregator_class_init (GstAudioAggregatorClass * klass)
GST_DEBUG_FUNCPTR (gst_audio_aggregator_aggregate);
gstaggregator_class->clip = GST_DEBUG_FUNCPTR (gst_audio_aggregator_do_clip);
gstaggregator_class->get_next_time = gst_audio_aggregator_get_next_time;
gstaggregator_class->update_src_caps =
GST_DEBUG_FUNCPTR (gst_audio_aggregator_update_src_caps);
gstaggregator_class->fixate_src_caps = gst_audio_aggregator_fixate_src_caps;
gstaggregator_class->negotiated_src_caps =
gst_audio_aggregator_negotiated_src_caps;
klass->create_output_buffer = gst_audio_aggregator_create_output_buffer;
klass->convert_buffer = gst_audio_aggregator_default_convert_buffer;
GST_DEBUG_CATEGORY_INIT (audio_aggregator_debug, "audioaggregator",
GST_DEBUG_FG_MAGENTA, "GstAudioAggregator");
@ -361,6 +645,263 @@ gst_audio_aggregator_get_property (GObject * object, guint prop_id,
}
}
/* Caps negotiation */
/* Unref after usage */
static GstAudioAggregatorPad *
gst_audio_aggregator_get_first_configured_pad (GstAggregator * agg)
{
GstAudioAggregatorPad *res = NULL;
GList *l;
GST_OBJECT_LOCK (agg);
for (l = GST_ELEMENT (agg)->sinkpads; l; l = l->next) {
GstAudioAggregatorPad *aaggpad = l->data;
if (GST_AUDIO_INFO_FORMAT (&aaggpad->info) != GST_AUDIO_FORMAT_UNKNOWN) {
res = gst_object_ref (aaggpad);
break;
}
}
GST_OBJECT_UNLOCK (agg);
return res;
}
static GstCaps *
gst_audio_aggregator_sink_getcaps (GstPad * pad, GstAggregator * agg,
GstCaps * filter)
{
GstAudioAggregatorPad *first_configured_pad =
gst_audio_aggregator_get_first_configured_pad (agg);
GstCaps *sink_template_caps = gst_pad_get_pad_template_caps (pad);
GstCaps *downstream_caps = gst_pad_get_allowed_caps (agg->srcpad);
GstCaps *sink_caps;
GstStructure *s, *s2;
gint downstream_rate;
sink_template_caps = gst_caps_make_writable (sink_template_caps);
s = gst_caps_get_structure (sink_template_caps, 0);
if (downstream_caps && !gst_caps_is_empty (downstream_caps))
s2 = gst_caps_get_structure (downstream_caps, 0);
else
s2 = NULL;
if (s2 && gst_structure_get_int (s2, "rate", &downstream_rate)) {
gst_structure_fixate_field_nearest_int (s, "rate", downstream_rate);
} else if (first_configured_pad) {
gst_structure_fixate_field_nearest_int (s, "rate",
first_configured_pad->info.rate);
}
if (first_configured_pad)
gst_object_unref (first_configured_pad);
sink_caps = filter ? gst_caps_intersect (sink_template_caps,
filter) : gst_caps_ref (sink_template_caps);
GST_INFO_OBJECT (pad, "Getting caps with filter %" GST_PTR_FORMAT, filter);
GST_DEBUG_OBJECT (pad, "sink template caps : %" GST_PTR_FORMAT,
sink_template_caps);
GST_DEBUG_OBJECT (pad, "downstream caps %" GST_PTR_FORMAT, downstream_caps);
GST_INFO_OBJECT (pad, "returned sink caps : %" GST_PTR_FORMAT, sink_caps);
gst_caps_unref (sink_template_caps);
if (downstream_caps)
gst_caps_unref (downstream_caps);
return sink_caps;
}
static gboolean
gst_audio_aggregator_sink_setcaps (GstAudioAggregatorPad * aaggpad,
GstAggregator * agg, GstCaps * caps)
{
GstAudioAggregatorPad *first_configured_pad =
gst_audio_aggregator_get_first_configured_pad (agg);
GstCaps *downstream_caps = gst_pad_get_allowed_caps (agg->srcpad);
GstAudioInfo info;
gboolean ret = TRUE;
gint downstream_rate;
GstStructure *s;
if (!downstream_caps || gst_caps_is_empty (downstream_caps)) {
ret = FALSE;
goto done;
}
gst_audio_info_from_caps (&info, caps);
s = gst_caps_get_structure (downstream_caps, 0);
/* TODO: handle different rates on sinkpads, a bit complex
* because offsets will have to be updated, and audio resampling
* has a latency to take into account
*/
if ((gst_structure_get_int (s, "rate", &downstream_rate)
&& info.rate != downstream_rate) || (first_configured_pad
&& info.rate != first_configured_pad->info.rate)) {
gst_pad_push_event (GST_PAD (aaggpad), gst_event_new_reconfigure ());
ret = FALSE;
} else {
GST_OBJECT_LOCK (aaggpad);
gst_audio_info_from_caps (&aaggpad->info, caps);
if (GST_IS_AUDIO_AGGREGATOR_CONVERT_PAD (aaggpad))
GST_AUDIO_AGGREGATOR_CONVERT_PAD (aaggpad)->
priv->converter_config_changed = TRUE;
GST_OBJECT_UNLOCK (aaggpad);
}
done:
if (first_configured_pad)
gst_object_unref (first_configured_pad);
if (downstream_caps)
gst_caps_unref (downstream_caps);
return ret;
}
static GstFlowReturn
gst_audio_aggregator_update_src_caps (GstAggregator * agg,
GstCaps * caps, GstCaps ** ret)
{
GstCaps *src_template_caps = gst_pad_get_pad_template_caps (agg->srcpad);
GstCaps *downstream_caps =
gst_pad_peer_query_caps (agg->srcpad, src_template_caps);
gst_caps_unref (src_template_caps);
*ret = gst_caps_intersect (caps, downstream_caps);
GST_INFO ("Updated src caps to %" GST_PTR_FORMAT, *ret);
if (downstream_caps)
gst_caps_unref (downstream_caps);
return GST_FLOW_OK;
}
/* If the caps are not fixed at this point, downstream did not have fully
* specified requirements; we just go ahead and fixate the raw audio fields
* using our first configured pad. We don't need a more complicated
* heuristic for now.
*/
static GstCaps *
gst_audio_aggregator_fixate_src_caps (GstAggregator * agg, GstCaps * caps)
{
GstAudioAggregatorClass *aaggclass = GST_AUDIO_AGGREGATOR_GET_CLASS (agg);
GstAudioAggregatorPad *first_configured_pad;
if (!aaggclass->convert_buffer)
return
GST_AGGREGATOR_CLASS
(gst_audio_aggregator_parent_class)->fixate_src_caps (agg, caps);
first_configured_pad = gst_audio_aggregator_get_first_configured_pad (agg);
if (first_configured_pad) {
GstStructure *s, *s2;
GstCaps *first_configured_caps =
gst_audio_info_to_caps (&first_configured_pad->info);
gint first_configured_rate, first_configured_channels;
caps = gst_caps_make_writable (caps);
s = gst_caps_get_structure (caps, 0);
s2 = gst_caps_get_structure (first_configured_caps, 0);
gst_structure_get_int (s2, "rate", &first_configured_rate);
gst_structure_get_int (s2, "channels", &first_configured_channels);
gst_structure_fixate_field_string (s, "format",
gst_structure_get_string (s2, "format"));
gst_structure_fixate_field_string (s, "layout",
gst_structure_get_string (s2, "layout"));
gst_structure_fixate_field_nearest_int (s, "rate", first_configured_rate);
gst_structure_fixate_field_nearest_int (s, "channels",
first_configured_channels);
gst_caps_unref (first_configured_caps);
gst_object_unref (first_configured_pad);
}
if (!gst_caps_is_fixed (caps))
caps = gst_caps_fixate (caps);
GST_INFO_OBJECT (agg, "Fixated src caps to %" GST_PTR_FORMAT, caps);
return caps;
}
/* Must be called with OBJECT_LOCK taken */
static void
gst_audio_aggregator_update_converters (GstAudioAggregator * aagg,
GstAudioInfo * new_info)
{
GList *l;
for (l = GST_ELEMENT (aagg)->sinkpads; l; l = l->next) {
GstAudioAggregatorPad *aaggpad = l->data;
if (GST_IS_AUDIO_AGGREGATOR_CONVERT_PAD (aaggpad))
GST_AUDIO_AGGREGATOR_CONVERT_PAD (aaggpad)->
priv->converter_config_changed = TRUE;
/* If we currently were mixing a buffer, we need to convert it to the new
* format */
if (aaggpad->priv->buffer) {
GstBuffer *new_converted_buffer =
gst_audio_aggregator_convert_buffer (aagg, GST_PAD (aaggpad),
&aaggpad->info, new_info, aaggpad->priv->input_buffer);
gst_buffer_replace (&aaggpad->priv->buffer, new_converted_buffer);
}
}
}
/* We now have our final output caps, we can create the required converters */
static gboolean
gst_audio_aggregator_negotiated_src_caps (GstAggregator * agg, GstCaps * caps)
{
GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (agg);
GstAudioAggregatorClass *aaggclass = GST_AUDIO_AGGREGATOR_GET_CLASS (agg);
GstAudioInfo info;
GST_INFO_OBJECT (agg, "src caps negotiated %" GST_PTR_FORMAT, caps);
if (!gst_audio_info_from_caps (&info, caps)) {
GST_WARNING_OBJECT (aagg, "Rejecting invalid caps: %" GST_PTR_FORMAT, caps);
return FALSE;
}
GST_AUDIO_AGGREGATOR_LOCK (aagg);
GST_OBJECT_LOCK (aagg);
if (aaggclass->convert_buffer) {
gst_audio_aggregator_update_converters (aagg, &info);
if (aagg->priv->current_buffer
&& !gst_audio_info_is_equal (&aagg->info, &info)) {
GstBuffer *converted =
gst_audio_aggregator_convert_buffer (aagg, agg->srcpad, &aagg->info,
&info, aagg->priv->current_buffer);
gst_buffer_unref (aagg->priv->current_buffer);
aagg->priv->current_buffer = converted;
}
}
if (!gst_audio_info_is_equal (&info, &aagg->info)) {
GST_INFO_OBJECT (aagg, "setting caps to %" GST_PTR_FORMAT, caps);
gst_caps_replace (&aagg->current_caps, caps);
memcpy (&aagg->info, &info, sizeof (info));
}
GST_OBJECT_UNLOCK (aagg);
GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
return
GST_AGGREGATOR_CLASS
(gst_audio_aggregator_parent_class)->negotiated_src_caps (agg, caps);
}
/* event handling */
@ -439,6 +980,7 @@ static gboolean
gst_audio_aggregator_sink_event (GstAggregator * agg,
GstAggregatorPad * aggpad, GstEvent * event)
{
GstAudioAggregatorPad *aaggpad = GST_AUDIO_AGGREGATOR_PAD (aggpad);
gboolean res = TRUE;
GST_DEBUG_OBJECT (aggpad, "Got %s event on sink pad",
@ -484,6 +1026,17 @@ gst_audio_aggregator_sink_event (GstAggregator * agg,
break;
}
case GST_EVENT_CAPS:
{
GstCaps *caps;
gst_event_parse_caps (event, &caps);
GST_INFO_OBJECT (aggpad, "Got caps %" GST_PTR_FORMAT, caps);
res = gst_audio_aggregator_sink_setcaps (aaggpad, agg, caps);
gst_event_unref (event);
event = NULL;
break;
}
default:
break;
}
@ -496,6 +1049,35 @@ gst_audio_aggregator_sink_event (GstAggregator * agg,
return res;
}
static gboolean
gst_audio_aggregator_sink_query (GstAggregator * agg, GstAggregatorPad * aggpad,
GstQuery * query)
{
gboolean res = FALSE;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CAPS:
{
GstCaps *filter, *caps;
gst_query_parse_caps (query, &filter);
caps = gst_audio_aggregator_sink_getcaps (GST_PAD (aggpad), agg, filter);
gst_query_set_caps_result (query, caps);
gst_caps_unref (caps);
res = TRUE;
break;
}
default:
res =
GST_AGGREGATOR_CLASS (gst_audio_aggregator_parent_class)->sink_query
(agg, aggpad, query);
break;
}
return res;
}
/* FIXME, the duration query should reflect how long you will produce
* data, that is the amount of stream time until you will emit EOS.
*
@ -658,39 +1240,6 @@ gst_audio_aggregator_set_sink_caps (GstAudioAggregator * aagg,
#endif
}
static gboolean
gst_audio_aggregator_negotiated_src_caps (GstAggregator * agg, GstCaps * caps)
{
GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (agg);
GstAudioInfo info;
if (!gst_audio_info_from_caps (&info, caps)) {
GST_WARNING_OBJECT (aagg, "Rejecting invalid caps: %" GST_PTR_FORMAT, caps);
return FALSE;
}
GST_AUDIO_AGGREGATOR_LOCK (aagg);
GST_OBJECT_LOCK (aagg);
if (!gst_audio_info_is_equal (&info, &aagg->info)) {
GST_INFO_OBJECT (aagg, "setting caps to %" GST_PTR_FORMAT, caps);
gst_caps_replace (&aagg->current_caps, caps);
memcpy (&aagg->info, &info, sizeof (info));
}
GST_OBJECT_UNLOCK (aagg);
GST_AUDIO_AGGREGATOR_UNLOCK (aagg);
/* send caps event later, after stream-start event */
return
GST_AGGREGATOR_CLASS
(gst_audio_aggregator_parent_class)->negotiated_src_caps (agg, caps);
}
/* Must hold object lock and aagg lock to call */
static void
@ -769,9 +1318,10 @@ gst_audio_aggregator_do_clip (GstAggregator * agg,
* values.
*/
static gboolean
gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
GstAudioAggregatorPad * pad, GstBuffer * inbuf)
gst_audio_aggregator_fill_buffer (GstAudioAggregator * aagg,
GstAudioAggregatorPad * pad)
{
GstAudioAggregatorClass *aaggclass = GST_AUDIO_AGGREGATOR_GET_CLASS (aagg);
GstClockTime start_time, end_time;
gboolean discont = FALSE;
guint64 start_offset, end_offset;
@ -780,27 +1330,31 @@ gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
GstAggregator *agg = GST_AGGREGATOR (aagg);
GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
g_assert (pad->priv->buffer == NULL);
rate = GST_AUDIO_INFO_RATE (&pad->info);
bpf = GST_AUDIO_INFO_BPF (&pad->info);
if (aaggclass->convert_buffer) {
rate = GST_AUDIO_INFO_RATE (&aagg->info);
bpf = GST_AUDIO_INFO_BPF (&aagg->info);
} else {
rate = GST_AUDIO_INFO_RATE (&pad->info);
bpf = GST_AUDIO_INFO_BPF (&pad->info);
}
pad->priv->position = 0;
pad->priv->size = gst_buffer_get_size (inbuf) / bpf;
pad->priv->size = gst_buffer_get_size (pad->priv->buffer) / bpf;
if (pad->priv->size == 0) {
if (!GST_BUFFER_DURATION_IS_VALID (inbuf) ||
!GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_GAP)) {
if (!GST_BUFFER_DURATION_IS_VALID (pad->priv->buffer) ||
!GST_BUFFER_FLAG_IS_SET (pad->priv->buffer, GST_BUFFER_FLAG_GAP)) {
GST_WARNING_OBJECT (pad, "Dropping 0-sized buffer missing either a"
" duration or a GAP flag: %" GST_PTR_FORMAT, inbuf);
" duration or a GAP flag: %" GST_PTR_FORMAT, pad->priv->buffer);
return FALSE;
}
pad->priv->size = gst_util_uint64_scale (GST_BUFFER_DURATION (inbuf), rate,
pad->priv->size =
gst_util_uint64_scale (GST_BUFFER_DURATION (pad->priv->buffer), rate,
GST_SECOND);
}
if (!GST_BUFFER_PTS_IS_VALID (inbuf)) {
if (!GST_BUFFER_PTS_IS_VALID (pad->priv->buffer)) {
if (pad->priv->output_offset == -1)
pad->priv->output_offset = aagg->priv->offset;
if (pad->priv->next_offset == -1)
@ -810,7 +1364,7 @@ gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
goto done;
}
start_time = GST_BUFFER_PTS (inbuf);
start_time = GST_BUFFER_PTS (pad->priv->buffer);
end_time =
start_time + gst_util_uint64_scale_ceil (pad->priv->size, GST_SECOND,
rate);
@ -823,8 +1377,8 @@ gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
GST_SECOND);
end_offset = start_offset + pad->priv->size;
if (GST_BUFFER_IS_DISCONT (inbuf)
|| GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_RESYNC)
if (GST_BUFFER_IS_DISCONT (pad->priv->buffer)
|| GST_BUFFER_FLAG_IS_SET (pad->priv->buffer, GST_BUFFER_FLAG_RESYNC)
|| pad->priv->new_segment || pad->priv->next_offset == -1) {
discont = TRUE;
pad->priv->new_segment = FALSE;
@ -905,8 +1459,6 @@ gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
if (start_output_offset == -1 && end_output_offset == -1) {
/* Outside output segment, drop */
gst_buffer_unref (inbuf);
pad->priv->buffer = NULL;
pad->priv->position = 0;
pad->priv->size = 0;
pad->priv->output_offset = -1;
@ -919,9 +1471,6 @@ gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
end_output_offset = start_output_offset + pad->priv->size;
if (end_output_offset < aagg->priv->offset) {
/* Before output segment, drop */
gst_buffer_unref (inbuf);
pad->priv->buffer = NULL;
pad->priv->position = 0;
pad->priv->size = 0;
pad->priv->output_offset = -1;
@ -950,8 +1499,6 @@ gst_audio_aggregator_queue_new_buffer (GstAudioAggregator * aagg,
pad->priv->position += diff;
if (pad->priv->position >= pad->priv->size) {
/* Empty buffer, drop */
gst_buffer_unref (inbuf);
pad->priv->buffer = NULL;
pad->priv->position = 0;
pad->priv->size = 0;
pad->priv->output_offset = -1;
@ -978,7 +1525,6 @@ done:
GST_LOG_OBJECT (pad,
"Queued new buffer at offset %" G_GUINT64_FORMAT,
pad->priv->output_offset);
pad->priv->buffer = inbuf;
return TRUE;
}
@ -1013,6 +1559,7 @@ gst_audio_aggregator_mix_buffer (GstAudioAggregator * aagg,
pad->priv->position = pad->priv->size;
gst_buffer_replace (&pad->priv->buffer, NULL);
gst_buffer_replace (&pad->priv->input_buffer, NULL);
return FALSE;
}
@ -1042,6 +1589,7 @@ gst_audio_aggregator_mix_buffer (GstAudioAggregator * aagg,
if (pad->priv->position == pad->priv->size) {
/* Buffer done, drop it */
gst_buffer_replace (&pad->priv->buffer, NULL);
gst_buffer_replace (&pad->priv->input_buffer, NULL);
GST_LOG_OBJECT (pad, "Finished mixing buffer, waiting for next");
return FALSE;
}
@ -1060,6 +1608,9 @@ gst_audio_aggregator_create_output_buffer (GstAudioAggregator * aagg,
gst_aggregator_get_allocator (GST_AGGREGATOR (aagg), &allocator, &params);
GST_DEBUG ("Creating output buffer with size %d",
num_frames * GST_AUDIO_INFO_BPF (&aagg->info));
outbuf = gst_buffer_new_allocate (allocator, num_frames *
GST_AUDIO_INFO_BPF (&aagg->info), &params);
@ -1220,7 +1771,6 @@ gst_audio_aggregator_aggregate (GstAggregator * agg, gboolean timeout)
aagg->priv->offset, GST_TIME_ARGS (agg->segment.position));
for (iter = element->sinkpads; iter; iter = iter->next) {
GstBuffer *inbuf;
GstAudioAggregatorPad *pad = (GstAudioAggregatorPad *) iter->data;
GstAggregatorPad *aggpad = (GstAggregatorPad *) iter->data;
gboolean pad_eos = gst_aggregator_pad_is_eos (aggpad);
@ -1228,10 +1778,10 @@ gst_audio_aggregator_aggregate (GstAggregator * agg, gboolean timeout)
if (!pad_eos)
is_eos = FALSE;
inbuf = gst_aggregator_pad_get_buffer (aggpad);
pad->priv->input_buffer = gst_aggregator_pad_get_buffer (aggpad);
GST_OBJECT_LOCK (pad);
if (!inbuf) {
if (!pad->priv->input_buffer) {
if (timeout) {
if (pad->priv->output_offset < next_offset) {
gint64 diff = next_offset - pad->priv->output_offset;
@ -1247,19 +1797,28 @@ gst_audio_aggregator_aggregate (GstAggregator * agg, gboolean timeout)
continue;
}
g_assert (!pad->priv->buffer || pad->priv->buffer == inbuf);
/* New buffer? */
if (!pad->priv->buffer) {
/* Takes ownership of buffer */
if (!gst_audio_aggregator_queue_new_buffer (aagg, pad, inbuf)) {
if (GST_IS_AUDIO_AGGREGATOR_CONVERT_PAD (pad))
pad->priv->buffer =
gst_audio_aggregator_convert_buffer
(aagg, GST_PAD (pad), &pad->info, &aagg->info,
pad->priv->input_buffer);
else
pad->priv->buffer = gst_buffer_ref (pad->priv->input_buffer);
if (!gst_audio_aggregator_fill_buffer (aagg, pad)) {
gst_buffer_replace (&pad->priv->buffer, NULL);
gst_buffer_replace (&pad->priv->input_buffer, NULL);
pad->priv->buffer = NULL;
dropped = TRUE;
GST_OBJECT_UNLOCK (pad);
gst_aggregator_pad_drop_buffer (aggpad);
continue;
}
} else {
gst_buffer_unref (inbuf);
gst_buffer_unref (pad->priv->input_buffer);
}
if (!pad->priv->buffer && !dropped && pad_eos) {
@ -1288,6 +1847,7 @@ gst_audio_aggregator_aggregate (GstAggregator * agg, gboolean timeout)
GST_AUDIO_INFO_RATE (&aagg->info))), pad->priv->buffer);
/* Buffer done, drop it */
gst_buffer_replace (&pad->priv->buffer, NULL);
gst_buffer_replace (&pad->priv->input_buffer, NULL);
dropped = TRUE;
GST_OBJECT_UNLOCK (pad);
gst_aggregator_pad_drop_buffer (aggpad);


@ -67,7 +67,7 @@ typedef struct _GstAudioAggregatorPadPrivate GstAudioAggregatorPadPrivate;
* @parent: The parent #GstAggregatorPad
* @info: The audio info for this pad set from the incoming caps
*
* The implementation the GstPad to use with #GstAudioAggregator
* The default implementation of GstPad used with #GstAudioAggregator
*/
struct _GstAudioAggregatorPad
{
@ -86,7 +86,7 @@ struct _GstAudioAggregatorPad
*
*/
struct _GstAudioAggregatorPadClass
{
GstAggregatorPadClass parent_class;
/*< private >*/
@ -96,6 +96,54 @@ struct _GstAudioAggregatorPadClass
GST_EXPORT
GType gst_audio_aggregator_pad_get_type (void);
#define GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD (gst_audio_aggregator_convert_pad_get_type())
#define GST_AUDIO_AGGREGATOR_CONVERT_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD, GstAudioAggregatorConvertPad))
#define GST_AUDIO_AGGREGATOR_CONVERT_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD, GstAudioAggregatorConvertPadClass))
#define GST_AUDIO_AGGREGATOR_CONVERT_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj),GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD, GstAudioAggregatorConvertPadClass))
#define GST_IS_AUDIO_AGGREGATOR_CONVERT_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD))
#define GST_IS_AUDIO_AGGREGATOR_CONVERT_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD))
/****************************
* GstAudioAggregatorConvertPad Structs *
***************************/
typedef struct _GstAudioAggregatorConvertPad GstAudioAggregatorConvertPad;
typedef struct _GstAudioAggregatorConvertPadClass GstAudioAggregatorConvertPadClass;
typedef struct _GstAudioAggregatorConvertPadPrivate GstAudioAggregatorConvertPadPrivate;
/**
* GstAudioAggregatorConvertPad:
* @parent: The parent #GstAudioAggregatorPad
*
* An implementation of GstPad that can be used with #GstAudioAggregator.
*
* See #GstAudioAggregator for more details.
*/
struct _GstAudioAggregatorConvertPad
{
GstAudioAggregatorPad parent;
/*< private >*/
GstAudioAggregatorConvertPadPrivate * priv;
gpointer _gst_reserved[GST_PADDING];
};
/**
* GstAudioAggregatorConvertPadClass:
*
*/
struct _GstAudioAggregatorConvertPadClass
{
GstAudioAggregatorPadClass parent_class;
/*< private >*/
gpointer _gst_reserved[GST_PADDING];
};
GST_EXPORT
GType gst_audio_aggregator_convert_pad_get_type (void);
/**************************
* GstAudioAggregator API *
**************************/
@ -137,6 +185,10 @@ struct _GstAudioAggregator
* buffer. The in_offset and out_offset are in "frames", which is
* the size of a sample times the number of channels. Returns TRUE if
* any non-silence was added to the buffer
* @convert_buffer: Convert a buffer from one format to another. The pad
* is either a sinkpad, when converting an input buffer, or the source pad,
* when converting the output buffer after a downstream format change is
* requested.
*/
struct _GstAudioAggregatorClass {
GstAggregatorClass parent_class;
@ -146,6 +198,11 @@ struct _GstAudioAggregatorClass {
gboolean (* aggregate_one_buffer) (GstAudioAggregator * aagg,
GstAudioAggregatorPad * pad, GstBuffer * inbuf, guint in_offset,
GstBuffer * outbuf, guint out_offset, guint num_frames);
GstBuffer * (* convert_buffer) (GstAudioAggregator *aagg,
GstPad * pad,
GstAudioInfo *in_info,
GstAudioInfo *out_info,
GstBuffer * buffer);
/*< private >*/
gpointer _gst_reserved[GST_PADDING_LARGE];
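For reference, here is a hedged sketch of a custom convert_buffer implementation matching the vfunc documented above. The my_agg_convert_buffer name is hypothetical; the body follows the same pattern as the gst_audio_aggregator_convert_once() helper added in the .c file, and a real implementation would typically cache the converter per pad, as GstAudioAggregatorConvertPad does.

/* assumes this header and <gst/audio/audio.h> are included */
static GstBuffer *
my_agg_convert_buffer (GstAudioAggregator * aagg, GstPad * pad,
    GstAudioInfo * in_info, GstAudioInfo * out_info, GstBuffer * buffer)
{
  GstAudioConverter *converter =
      gst_audio_converter_new (GST_AUDIO_CONVERTER_FLAG_NONE,
      in_info, out_info, NULL);
  gsize in_frames = gst_buffer_get_size (buffer) / in_info->bpf;
  gsize out_frames =
      gst_audio_converter_get_out_frames (converter, in_frames);
  GstBuffer *converted =
      gst_buffer_new_allocate (NULL, out_frames * out_info->bpf, NULL);
  GstMapInfo inmap, outmap;

  /* Carry over flags, timestamps and metas from the input buffer */
  gst_buffer_copy_into (converted, buffer,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
      GST_BUFFER_COPY_META, 0, -1);

  gst_buffer_map (buffer, &inmap, GST_MAP_READ);
  gst_buffer_map (converted, &outmap, GST_MAP_WRITE);
  gst_audio_converter_samples (converter, GST_AUDIO_CONVERTER_FLAG_NONE,
      (gpointer *) &inmap.data, in_frames,
      (gpointer *) &outmap.data, out_frames);
  gst_buffer_unmap (buffer, &inmap);
  gst_buffer_unmap (converted, &outmap);

  gst_audio_converter_free (converter);

  return converted;
}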
@ -163,6 +220,9 @@ void gst_audio_aggregator_set_sink_caps (GstAudioAggregator * aagg,
GstAudioAggregatorPad * pad,
GstCaps * caps);
GST_EXPORT
void gst_audio_aggregator_class_perform_conversion (GstAudioAggregatorClass * klass);
G_END_DECLS
#endif /* __GST_AUDIO_AGGREGATOR_H__ */


@ -580,7 +580,7 @@ gst_audio_interleave_class_init (GstAudioInterleaveClass * klass)
agg_class->negotiated_src_caps = gst_audio_interleave_negotiated_src_caps;
aagg_class->aggregate_one_buffer = gst_audio_interleave_aggregate_one_buffer;
aagg_class->convert_buffer = NULL;
/**
* GstInterleave:channel-positions


@ -31,12 +31,17 @@
* Unlike the adder element audiomixer properly synchronises all input streams
* and also handles live inputs such as capture sources or RTP properly.
*
* Caps negotiation is inherently racy with the audiomixer element. You can set
* the "caps" property to force audiomixer to operate in a specific audio
* format, sample rate and channel count. In this case you may also need
* audioconvert and/or audioresample elements for each input stream before the
* audiomixer element to make sure the input branch can produce the forced
* format.
* The audiomixer element can accept any sort of raw audio data; it will
* be converted to the target format if necessary, with the exception
* of the sample rate, which has to be identical either to what downstream
* expects or to the sample rate of the first configured pad. Use a
* capsfilter after the audiomixer element if you want to precisely control
* the format that comes out of it; audiomixer supports changing the format
* of its output while playing.
*
* If you want to control the way incoming data gets converted, see the
* #GstAudioAggregatorConvertPad:converter-config property, which lets you,
* for example, change the way channels are remapped (see the sketch below).
*
* The input pads are from a GstPad subclass and have additional
* properties to mute each pad individually and set the volume:
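As an illustration of the converter-config property mentioned above, a minimal usage sketch; configure_mixer_pad_dither is a hypothetical helper, and the dither method is just one example of a GstAudioConverter configuration option.

#include <gst/gst.h>
#include <gst/audio/audio.h>

/* Hypothetical helper: request an audiomixer sink pad and set a converter
 * configuration on it (here, TPDF dithering). */
static void
configure_mixer_pad_dither (GstElement * mixer)
{
  GstPad *sinkpad = gst_element_get_request_pad (mixer, "sink_%u");
  GstStructure *config = gst_structure_new ("converter-config",
      GST_AUDIO_CONVERTER_OPT_DITHER_METHOD, GST_TYPE_AUDIO_DITHER_METHOD,
      GST_AUDIO_DITHER_TPDF, NULL);

  /* The property is boxed; the pad keeps its own copy of the structure */
  g_object_set (sinkpad, "converter-config", config, NULL);

  gst_structure_free (config);
  gst_object_unref (sinkpad);
}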
@ -89,7 +94,7 @@ enum
};
G_DEFINE_TYPE (GstAudioMixerPad, gst_audiomixer_pad,
GST_TYPE_AUDIO_AGGREGATOR_PAD);
GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD);
static void
gst_audiomixer_pad_get_property (GObject * object, guint prop_id,
@ -163,20 +168,19 @@ gst_audiomixer_pad_init (GstAudioMixerPad * pad)
enum
{
PROP_0,
PROP_FILTER_CAPS
PROP_0
};
/* elementfactory information */
/* These are the formats we can mix natively */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define CAPS \
GST_AUDIO_CAPS_MAKE ("{ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }") \
", layout = (string) { interleaved, non-interleaved }"
", layout = interleaved"
#else
#define CAPS \
GST_AUDIO_CAPS_MAKE ("{ S32BE, U32BE, S16BE, U16BE, S8, U8, F32BE, F64BE }") \
", layout = (string) { interleaved, non-interleaved }"
", layout = interleaved"
#endif
static GstStaticPadTemplate gst_audiomixer_src_template =
@ -186,12 +190,15 @@ GST_STATIC_PAD_TEMPLATE ("src",
GST_STATIC_CAPS (CAPS)
);
#define SINK_CAPS \
GST_STATIC_CAPS (GST_AUDIO_CAPS_MAKE (GST_AUDIO_FORMATS_ALL) \
", layout=interleaved")
static GstStaticPadTemplate gst_audiomixer_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS (CAPS)
);
SINK_CAPS);
static void gst_audiomixer_child_proxy_init (gpointer g_iface,
gpointer iface_data);
@ -201,14 +208,6 @@ G_DEFINE_TYPE_WITH_CODE (GstAudioMixer, gst_audiomixer,
GST_TYPE_AUDIO_AGGREGATOR, G_IMPLEMENT_INTERFACE (GST_TYPE_CHILD_PROXY,
gst_audiomixer_child_proxy_init));
static void gst_audiomixer_dispose (GObject * object);
static void gst_audiomixer_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_audiomixer_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static gboolean gst_audiomixer_setcaps (GstAudioMixer * audiomixer,
GstPad * pad, GstCaps * caps);
static GstPad *gst_audiomixer_request_new_pad (GstElement * element,
GstPadTemplate * temp, const gchar * req_name, const GstCaps * caps);
static void gst_audiomixer_release_pad (GstElement * element, GstPad * pad);
@ -219,287 +218,12 @@ gst_audiomixer_aggregate_one_buffer (GstAudioAggregator * aagg,
GstBuffer * outbuf, guint out_offset, guint num_samples);
/* we can only accept caps that we and downstream can handle.
* if we have filtercaps set, use those to constrain the target caps.
*/
static GstCaps *
gst_audiomixer_sink_getcaps (GstAggregator * agg, GstPad * pad,
GstCaps * filter)
{
GstAudioAggregator *aagg;
GstAudioMixer *audiomixer;
GstCaps *result, *peercaps, *current_caps, *filter_caps;
GstStructure *s;
gint i, n;
audiomixer = GST_AUDIO_MIXER (agg);
aagg = GST_AUDIO_AGGREGATOR (agg);
GST_OBJECT_LOCK (audiomixer);
/* take filter */
if ((filter_caps = audiomixer->filter_caps)) {
if (filter)
filter_caps =
gst_caps_intersect_full (filter, filter_caps,
GST_CAPS_INTERSECT_FIRST);
else
gst_caps_ref (filter_caps);
} else {
filter_caps = filter ? gst_caps_ref (filter) : NULL;
}
GST_OBJECT_UNLOCK (audiomixer);
if (filter_caps && gst_caps_is_empty (filter_caps)) {
GST_WARNING_OBJECT (pad, "Empty filter caps");
return filter_caps;
}
/* get the downstream possible caps */
peercaps = gst_pad_peer_query_caps (agg->srcpad, filter_caps);
/* get the allowed caps on this sinkpad */
GST_OBJECT_LOCK (audiomixer);
current_caps = aagg->current_caps ? gst_caps_ref (aagg->current_caps) : NULL;
if (current_caps == NULL) {
current_caps = gst_pad_get_pad_template_caps (pad);
if (!current_caps)
current_caps = gst_caps_new_any ();
}
GST_OBJECT_UNLOCK (audiomixer);
if (peercaps) {
/* if the peer has caps, intersect */
GST_DEBUG_OBJECT (audiomixer, "intersecting peer and our caps");
result =
gst_caps_intersect_full (peercaps, current_caps,
GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (peercaps);
gst_caps_unref (current_caps);
} else {
/* the peer has no caps (or there is no peer), just use the allowed caps
* of this sinkpad. */
/* restrict with filter-caps if any */
if (filter_caps) {
GST_DEBUG_OBJECT (audiomixer, "no peer caps, using filtered caps");
result =
gst_caps_intersect_full (filter_caps, current_caps,
GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (current_caps);
} else {
GST_DEBUG_OBJECT (audiomixer, "no peer caps, using our caps");
result = current_caps;
}
}
result = gst_caps_make_writable (result);
n = gst_caps_get_size (result);
for (i = 0; i < n; i++) {
GstStructure *sref;
s = gst_caps_get_structure (result, i);
sref = gst_structure_copy (s);
gst_structure_set (sref, "channels", GST_TYPE_INT_RANGE, 0, 2, NULL);
if (gst_structure_is_subset (s, sref)) {
/* This field is irrelevant when in mono or stereo */
gst_structure_remove_field (s, "channel-mask");
}
gst_structure_free (sref);
}
if (filter_caps)
gst_caps_unref (filter_caps);
GST_LOG_OBJECT (audiomixer, "getting caps on pad %p,%s to %" GST_PTR_FORMAT,
pad, GST_PAD_NAME (pad), result);
return result;
}
static gboolean
gst_audiomixer_sink_query (GstAggregator * agg, GstAggregatorPad * aggpad,
GstQuery * query)
{
gboolean res = FALSE;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CAPS:
{
GstCaps *filter, *caps;
gst_query_parse_caps (query, &filter);
caps = gst_audiomixer_sink_getcaps (agg, GST_PAD (aggpad), filter);
gst_query_set_caps_result (query, caps);
gst_caps_unref (caps);
res = TRUE;
break;
}
default:
res =
GST_AGGREGATOR_CLASS (parent_class)->sink_query (agg, aggpad, query);
break;
}
return res;
}
/* the first caps we receive on any of the sinkpads will define the caps for all
* the other sinkpads because we can only mix streams with the same caps.
*/
static gboolean
gst_audiomixer_setcaps (GstAudioMixer * audiomixer, GstPad * pad,
GstCaps * orig_caps)
{
GstAggregator *agg = GST_AGGREGATOR (audiomixer);
GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (audiomixer);
GstCaps *caps;
GstAudioInfo info;
GstStructure *s;
gint channels = 0;
caps = gst_caps_copy (orig_caps);
s = gst_caps_get_structure (caps, 0);
if (gst_structure_get_int (s, "channels", &channels))
if (channels <= 2)
gst_structure_remove_field (s, "channel-mask");
if (!gst_audio_info_from_caps (&info, caps))
goto invalid_format;
if (channels == 1) {
GstCaps *filter;
GstCaps *downstream_caps;
if (audiomixer->filter_caps)
filter = gst_caps_intersect_full (caps, audiomixer->filter_caps,
GST_CAPS_INTERSECT_FIRST);
else
filter = gst_caps_ref (caps);
downstream_caps = gst_pad_peer_query_caps (agg->srcpad, filter);
gst_caps_unref (filter);
if (downstream_caps) {
gst_caps_unref (caps);
caps = downstream_caps;
if (gst_caps_is_empty (caps)) {
gst_caps_unref (caps);
return FALSE;
}
caps = gst_caps_fixate (caps);
}
}
GST_OBJECT_LOCK (audiomixer);
/* don't allow reconfiguration for now; there's still a race between the
* different upstream threads doing query_caps + accept_caps + sending
* (possibly different) CAPS events, but there's not much we can do about
* that, upstream needs to deal with it. */
if (aagg->current_caps != NULL) {
if (gst_audio_info_is_equal (&info, &aagg->info)) {
GST_OBJECT_UNLOCK (audiomixer);
gst_caps_unref (caps);
gst_audio_aggregator_set_sink_caps (aagg, GST_AUDIO_AGGREGATOR_PAD (pad),
orig_caps);
return TRUE;
} else {
GST_DEBUG_OBJECT (pad, "got input caps %" GST_PTR_FORMAT ", but "
"current caps are %" GST_PTR_FORMAT, caps, aagg->current_caps);
GST_OBJECT_UNLOCK (audiomixer);
gst_pad_push_event (pad, gst_event_new_reconfigure ());
gst_caps_unref (caps);
return FALSE;
}
} else {
gst_caps_replace (&aagg->current_caps, caps);
aagg->info = info;
gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (agg));
}
GST_OBJECT_UNLOCK (audiomixer);
gst_audio_aggregator_set_sink_caps (aagg, GST_AUDIO_AGGREGATOR_PAD (pad),
orig_caps);
GST_INFO_OBJECT (pad, "handle caps change to %" GST_PTR_FORMAT, caps);
gst_caps_unref (caps);
return TRUE;
/* ERRORS */
invalid_format:
{
gst_caps_unref (caps);
GST_WARNING_OBJECT (audiomixer, "invalid format set as caps");
return FALSE;
}
}
static GstFlowReturn
gst_audiomixer_update_src_caps (GstAggregator * agg, GstCaps * caps,
GstCaps ** ret)
{
GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (agg);
if (aagg->current_caps == NULL)
return GST_AGGREGATOR_FLOW_NEED_DATA;
*ret = gst_caps_ref (aagg->current_caps);
return GST_FLOW_OK;
}
static gboolean
gst_audiomixer_sink_event (GstAggregator * agg, GstAggregatorPad * aggpad,
GstEvent * event)
{
GstAudioMixer *audiomixer = GST_AUDIO_MIXER (agg);
gboolean res = TRUE;
GST_DEBUG_OBJECT (aggpad, "Got %s event on sink pad",
GST_EVENT_TYPE_NAME (event));
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_CAPS:
{
GstCaps *caps;
gst_event_parse_caps (event, &caps);
res = gst_audiomixer_setcaps (audiomixer, GST_PAD_CAST (aggpad), caps);
gst_event_unref (event);
event = NULL;
break;
}
default:
break;
}
if (event != NULL)
return GST_AGGREGATOR_CLASS (parent_class)->sink_event (agg, aggpad, event);
return res;
}
static void
gst_audiomixer_class_init (GstAudioMixerClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
GstElementClass *gstelement_class = (GstElementClass *) klass;
GstAggregatorClass *agg_class = (GstAggregatorClass *) klass;
GstAudioAggregatorClass *aagg_class = (GstAudioAggregatorClass *) klass;
gobject_class->set_property = gst_audiomixer_set_property;
gobject_class->get_property = gst_audiomixer_get_property;
gobject_class->dispose = gst_audiomixer_dispose;
g_object_class_install_property (gobject_class, PROP_FILTER_CAPS,
g_param_spec_boxed ("caps", "Target caps",
"Set target format for mixing (NULL means ANY). "
"Setting this property takes a reference to the supplied GstCaps "
"object", GST_TYPE_CAPS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
gst_element_class_add_static_pad_template (gstelement_class,
&gst_audiomixer_src_template);
gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
@ -513,80 +237,12 @@ gst_audiomixer_class_init (GstAudioMixerClass * klass)
gstelement_class->release_pad =
GST_DEBUG_FUNCPTR (gst_audiomixer_release_pad);
agg_class->sink_query = GST_DEBUG_FUNCPTR (gst_audiomixer_sink_query);
agg_class->sink_event = GST_DEBUG_FUNCPTR (gst_audiomixer_sink_event);
agg_class->update_src_caps =
GST_DEBUG_FUNCPTR (gst_audiomixer_update_src_caps);
aagg_class->aggregate_one_buffer = gst_audiomixer_aggregate_one_buffer;
}
static void
gst_audiomixer_init (GstAudioMixer * audiomixer)
{
audiomixer->filter_caps = NULL;
}
static void
gst_audiomixer_dispose (GObject * object)
{
GstAudioMixer *audiomixer = GST_AUDIO_MIXER (object);
gst_caps_replace (&audiomixer->filter_caps, NULL);
G_OBJECT_CLASS (parent_class)->dispose (object);
}
static void
gst_audiomixer_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstAudioMixer *audiomixer = GST_AUDIO_MIXER (object);
switch (prop_id) {
case PROP_FILTER_CAPS:{
GstCaps *new_caps = NULL;
GstCaps *old_caps;
const GstCaps *new_caps_val = gst_value_get_caps (value);
if (new_caps_val != NULL) {
new_caps = (GstCaps *) new_caps_val;
gst_caps_ref (new_caps);
}
GST_OBJECT_LOCK (audiomixer);
old_caps = audiomixer->filter_caps;
audiomixer->filter_caps = new_caps;
GST_OBJECT_UNLOCK (audiomixer);
if (old_caps)
gst_caps_unref (old_caps);
GST_DEBUG_OBJECT (audiomixer, "set new caps %" GST_PTR_FORMAT, new_caps);
break;
}
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audiomixer_get_property (GObject * object, guint prop_id, GValue * value,
GParamSpec * pspec)
{
GstAudioMixer *audiomixer = GST_AUDIO_MIXER (object);
switch (prop_id) {
case PROP_FILTER_CAPS:
GST_OBJECT_LOCK (audiomixer);
gst_value_set_caps (value, audiomixer->filter_caps);
GST_OBJECT_UNLOCK (audiomixer);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static GstPad *


@ -50,9 +50,6 @@ typedef struct _GstAudioMixerPadClass GstAudioMixerPadClass;
*/
struct _GstAudioMixer {
GstAudioAggregator element;
/* target caps (set via property) */
GstCaps *filter_caps;
};
struct _GstAudioMixerClass {
@ -69,7 +66,7 @@ GType gst_audiomixer_get_type (void);
#define GST_AUDIO_MIXER_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_AUDIO_MIXER_PAD,GstAudioMixerPadClass))
struct _GstAudioMixerPad {
GstAudioAggregatorPad parent;
GstAudioAggregatorConvertPad parent;
gdouble volume;
gint volume_i32;
@ -79,7 +76,7 @@ struct _GstAudioMixerPad {
};
struct _GstAudioMixerPadClass {
GstAudioAggregatorPadClass parent_class;
GstAudioAggregatorConvertPadClass parent_class;
};
GType gst_audiomixer_pad_get_type (void);


@ -59,7 +59,7 @@ test_teardown (void)
/* some test helpers */
static GstElement *
setup_pipeline (GstElement * audiomixer, gint num_srcs)
setup_pipeline (GstElement * audiomixer, gint num_srcs, GstElement * capsfilter)
{
GstElement *pipeline, *src, *sink;
gint i;
@ -71,7 +71,13 @@ setup_pipeline (GstElement * audiomixer, gint num_srcs)
sink = gst_element_factory_make ("fakesink", "sink");
gst_bin_add_many (GST_BIN (pipeline), audiomixer, sink, NULL);
gst_element_link (audiomixer, sink);
if (capsfilter) {
gst_bin_add (GST_BIN (pipeline), capsfilter);
gst_element_link_many (audiomixer, capsfilter, sink, NULL);
} else {
gst_element_link (audiomixer, sink);
}
for (i = 0; i < num_srcs; i++) {
src = gst_element_factory_make ("audiotestsrc", NULL);
@ -198,7 +204,7 @@ GST_START_TEST (test_caps)
GstCaps *caps;
/* build pipeline */
pipeline = setup_pipeline (NULL, 1);
pipeline = setup_pipeline (NULL, 1, NULL);
/* prepare playing */
set_state_and_wait (pipeline, GST_STATE_PAUSED);
@ -217,7 +223,7 @@ GST_END_TEST;
/* check that caps set on the capsfilter are honoured */
GST_START_TEST (test_filter_caps)
{
GstElement *pipeline, *audiomixer;
GstElement *pipeline, *audiomixer, *capsfilter;
GstCaps *filter_caps, *caps;
filter_caps = gst_caps_new_simple ("audio/x-raw",
@ -226,10 +232,12 @@ GST_START_TEST (test_filter_caps)
"rate", G_TYPE_INT, 44100, "channels", G_TYPE_INT, 1,
"channel-mask", GST_TYPE_BITMASK, (guint64) 0x04, NULL);
capsfilter = gst_element_factory_make ("capsfilter", NULL);
/* build pipeline */
audiomixer = gst_element_factory_make ("audiomixer", NULL);
g_object_set (audiomixer, "caps", filter_caps, NULL);
pipeline = setup_pipeline (audiomixer, 1);
g_object_set (capsfilter, "caps", filter_caps, NULL);
pipeline = setup_pipeline (audiomixer, 1, capsfilter);
/* prepare playing */
set_state_and_wait (pipeline, GST_STATE_PAUSED);
@ -411,7 +419,7 @@ GST_START_TEST (test_play_twice)
/* build pipeline */
audiomixer = gst_element_factory_make ("audiomixer", "audiomixer");
bin = setup_pipeline (audiomixer, 2);
bin = setup_pipeline (audiomixer, 2, NULL);
bus = gst_element_get_bus (bin);
gst_bus_add_signal_watch_full (bus, G_PRIORITY_HIGH);
@ -471,7 +479,7 @@ GST_START_TEST (test_play_twice_then_add_and_play_again)
/* build pipeline */
audiomixer = gst_element_factory_make ("audiomixer", "audiomixer");
bin = setup_pipeline (audiomixer, 2);
bin = setup_pipeline (audiomixer, 2, NULL);
bus = gst_element_get_bus (bin);
gst_bus_add_signal_watch_full (bus, G_PRIORITY_HIGH);
@ -1098,7 +1106,7 @@ GST_START_TEST (test_loop)
GST_INFO ("preparing test");
/* build pipeline */
bin = setup_pipeline (NULL, 2);
bin = setup_pipeline (NULL, 2, NULL);
bus = gst_element_get_bus (bin);
gst_bus_add_signal_watch_full (bus, G_PRIORITY_HIGH);
@ -1713,6 +1721,134 @@ GST_START_TEST (test_sinkpad_property_controller)
GST_END_TEST;
static void
change_src_caps (GstElement * fakesink, GstBuffer * buffer, GstPad * pad,
GstElement * capsfilter)
{
GstCaps *caps = gst_caps_new_simple ("audio/x-raw",
"format", G_TYPE_STRING, GST_AUDIO_NE (S32),
"layout", G_TYPE_STRING, "interleaved",
"rate", G_TYPE_INT, 10, "channels", G_TYPE_INT, 1, NULL);
g_object_set (capsfilter, "caps", caps, NULL);
g_signal_connect (fakesink, "handoff", (GCallback) handoff_buffer_cb, NULL);
g_signal_handlers_disconnect_by_func (fakesink, change_src_caps, capsfilter);
}
/* In this test, we create an input buffer with a duration of 2 seconds,
* and require the audiomixer to output 1-second-long buffers.
* The input buffer will thus be mixed twice, and the audiomixer will
* output two buffers.
*
* After the audiomixer has output a first buffer, we change its output
* format from S8 to S32. As our sample rate stays at 10 samples per second
* and we use mono, the first buffer should be 10 bytes long (10 S8 samples)
* and the second 40 bytes (10 S32 samples).
*
* The input buffer is made up of 15 0-valued bytes followed by 5 1-valued
* bytes. We verify that the second output buffer contains 5 0-valued
* integers followed by 5 integers with the value 1 << 24.
*/
GST_START_TEST (test_change_output_caps)
{
GstSegment segment;
GstElement *bin, *audiomixer, *capsfilter, *sink;
GstBus *bus;
GstPad *sinkpad;
gboolean res;
GstStateChangeReturn state_res;
GstFlowReturn ret;
GstEvent *event;
GstBuffer *buffer;
GstCaps *caps;
GstQuery *drain = gst_query_new_drain ();
GstMapInfo inmap;
GstMapInfo outmap;
gsize i;
bin = gst_pipeline_new ("pipeline");
bus = gst_element_get_bus (bin);
gst_bus_add_signal_watch_full (bus, G_PRIORITY_HIGH);
g_signal_connect (bus, "message::error", (GCallback) message_received, bin);
g_signal_connect (bus, "message::warning", (GCallback) message_received, bin);
g_signal_connect (bus, "message::eos", (GCallback) message_received, bin);
audiomixer = gst_element_factory_make ("audiomixer", "audiomixer");
g_object_set (audiomixer, "output-buffer-duration", GST_SECOND, NULL);
capsfilter = gst_element_factory_make ("capsfilter", NULL);
sink = gst_element_factory_make ("fakesink", "sink");
g_object_set (sink, "signal-handoffs", TRUE, NULL);
g_signal_connect (sink, "handoff", (GCallback) change_src_caps, capsfilter);
gst_bin_add_many (GST_BIN (bin), audiomixer, capsfilter, sink, NULL);
res = gst_element_link_many (audiomixer, capsfilter, sink, NULL);
fail_unless (res == TRUE, NULL);
state_res = gst_element_set_state (bin, GST_STATE_PLAYING);
ck_assert_int_ne (state_res, GST_STATE_CHANGE_FAILURE);
sinkpad = gst_element_get_request_pad (audiomixer, "sink_%u");
fail_if (sinkpad == NULL, NULL);
gst_pad_send_event (sinkpad, gst_event_new_stream_start ("test"));
caps = gst_caps_new_simple ("audio/x-raw",
"format", G_TYPE_STRING, "S8",
"layout", G_TYPE_STRING, "interleaved",
"rate", G_TYPE_INT, 10, "channels", G_TYPE_INT, 1, NULL);
gst_pad_set_caps (sinkpad, caps);
g_object_set (capsfilter, "caps", caps, NULL);
gst_caps_unref (caps);
gst_segment_init (&segment, GST_FORMAT_TIME);
segment.start = 0;
segment.stop = 2 * GST_SECOND;
segment.time = 0;
event = gst_event_new_segment (&segment);
gst_pad_send_event (sinkpad, event);
gst_buffer_replace (&handoff_buffer, NULL);
buffer = new_buffer (20, 0, 0, 2 * GST_SECOND, 0);
gst_buffer_map (buffer, &inmap, GST_MAP_WRITE);
memset (inmap.data + 15, 1, 5);
gst_buffer_unmap (buffer, &inmap);
ret = gst_pad_chain (sinkpad, buffer);
ck_assert_int_eq (ret, GST_FLOW_OK);
gst_pad_query (sinkpad, drain);
fail_unless (handoff_buffer != NULL);
fail_unless_equals_int (gst_buffer_get_size (handoff_buffer), 40);
gst_buffer_map (handoff_buffer, &outmap, GST_MAP_READ);
for (i = 0; i < 10; i++) {
guint32 sample;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
sample = GUINT32_FROM_LE (((guint32 *) outmap.data)[i]);
#else
sample = GUINT32_FROM_BE (((guint32 *) outmap.data)[i]);
#endif
if (i < 5) {
fail_unless_equals_int (sample, 0);
} else {
fail_unless_equals_int (sample, 1 << 24);
}
}
gst_buffer_unmap (handoff_buffer, &outmap);
gst_element_release_request_pad (audiomixer, sinkpad);
gst_object_unref (sinkpad);
gst_element_set_state (bin, GST_STATE_NULL);
gst_bus_remove_signal_watch (bus);
gst_object_unref (bus);
gst_object_unref (bin);
gst_query_unref (drain);
}
GST_END_TEST;
static Suite *
audiomixer_suite (void)
{
@ -1739,6 +1875,7 @@ audiomixer_suite (void)
tcase_add_test (tc_chain, test_segment_base_handling);
tcase_add_test (tc_chain, test_sinkpad_property_controller);
tcase_add_checked_fixture (tc_chain, test_setup, test_teardown);
tcase_add_test (tc_chain, test_change_output_caps);
/* Use a longer timeout */
#ifdef HAVE_VALGRIND