audiomixer: remove, moved to -base

https://bugzilla.gnome.org/show_bug.cgi?id=791218
Tim-Philipp Müller 2018-02-13 00:28:36 +00:00
parent 843f118523
commit c180f8ffed
20 changed files with 3 additions and 7763 deletions


@@ -63,6 +63,7 @@ CRUFT_FILES = \
$(top_builddir)/ext/qt/.libs/*.{so,dll,DLL,dylib} \
$(top_builddir)/gst/aacparse/.libs/*.{so,dll,DLL,dylib} \
$(top_builddir)/gst/amrparse/.libs/*.{so,dll,DLL,dylib} \
$(top_builddir)/gst/audiomixer/.libs/*.{so,dll,DLL,dylib} \
$(top_builddir)/gst/audioparsers/.libs/*.{so,dll,DLL,dylib} \
$(top_builddir)/gst/camerabin2/.libs/libgstcamerabin2.so \
$(top_builddir)/gst/flacparse/.libs/*.{so,dll,DLL,dylib} \
@@ -105,6 +106,7 @@ CRUFT_DIRS = \
$(top_srcdir)/docs/plugins/tmpl \
$(top_srcdir)/gst/aacparse \
$(top_srcdir)/gst/amrparse \
$(top_srcdir)/gst/audiomixer \
$(top_srcdir)/gst/camerabin \
$(top_srcdir)/gst/dataurisrc \
$(top_srcdir)/gst/flacparse \


@@ -424,7 +424,6 @@ AG_GST_CHECK_PLUGIN(videoframe_audiolevel)
AG_GST_CHECK_PLUGIN(asfmux)
AG_GST_CHECK_PLUGIN(audiobuffersplit)
AG_GST_CHECK_PLUGIN(audiofxbad)
AG_GST_CHECK_PLUGIN(audiomixer)
AG_GST_CHECK_PLUGIN(audiomixmatrix)
AG_GST_CHECK_PLUGIN(compositor)
AG_GST_CHECK_PLUGIN(audiovisualizers)
@@ -2486,7 +2485,6 @@ gst/videoframe_audiolevel/Makefile
gst/asfmux/Makefile
gst/audiobuffersplit/Makefile
gst/audiofxbad/Makefile
gst/audiomixer/Makefile
gst/audiomixmatrix/Makefile
gst/audiovisualizers/Makefile
gst/autoconvert/Makefile


@@ -21,8 +21,6 @@
<xi:include href="xml/element-aiffparse.xml" />
<xi:include href="xml/element-aiffmux.xml" />
<xi:include href="xml/element-assrender.xml" />
<xi:include href="xml/element-audiointerleave.xml" />
<xi:include href="xml/element-audiomixer.xml" />
<xi:include href="xml/element-audioparse.xml" />
<xi:include href="xml/element-autoconvert.xml" />
<xi:include href="xml/element-bs2b.xml" />
@@ -118,7 +116,6 @@
<chapter>
<title>gst-plugins-bad Plugins</title>
<xi:include href="xml/plugin-aiff.xml" />
<xi:include href="xml/plugin-audiomixer.xml" />
<xi:include href="xml/plugin-audiovisualizers.xml" />
<xi:include href="xml/plugin-autoconvert.xml" />
<xi:include href="xml/plugin-assrender.xml" />


@@ -190,38 +190,6 @@ GST_TYPE_AUDIO_CHANNEL_MIX
gst_audio_channel_mix_get_type
</SECTION>
<SECTION>
<FILE>element-audiointerleave</FILE>
<TITLE>audiointerleave</TITLE>
GstAudioInterleave
<SUBSECTION Standard>
GstAudioInterleaveClass
GST_AUDIO_INTERLEAVE
GST_AUDIO_INTERLEAVE_CAST
GST_IS_AUDIO_INTERLEAVE
GST_AUDIO_INTERLEAVE_CLASS
GST_IS_AUDIO_INTERLEAVE_CLASS
GST_TYPE_AUDIO_INTERLEAVE
<SUBSECTION Private>
gst_audio_interleave_get_type
</SECTION>
<SECTION>
<FILE>element-audiomixer</FILE>
<TITLE>audiomixer</TITLE>
GstAudioMixer
<SUBSECTION Standard>
GstAudioMixerClass
GST_AUDIO_MIXER
GST_AUDIO_MIXER_CAST
GST_IS_AUDIO_MIXER
GST_AUDIO_MIXER_CLASS
GST_IS_AUDIO_MIXER_CLASS
GST_TYPE_AUDIO_MIXER
<SUBSECTION Private>
gst_audio_mixer_get_type
</SECTION>
<SECTION>
<FILE>element-audiomixmatrix</FILE>
<TITLE>audiomixmatrix</TITLE>


@@ -1,76 +0,0 @@
<plugin>
<name>audiomixer</name>
<description>Mixes multiple audio streams</description>
<filename>../../gst/audiomixer/.libs/libgstaudiomixer.so</filename>
<basename>libgstaudiomixer.so</basename>
<version>1.13.0.1</version>
<license>LGPL</license>
<source>gst-plugins-bad</source>
<package>GStreamer Bad Plug-ins git</package>
<origin>Unknown package origin</origin>
<elements>
<element>
<name>audiointerleave</name>
<longname>AudioInterleave</longname>
<class>Generic/Audio</class>
<description>Mixes multiple audio streams</description>
<author>Olivier Crete &lt;olivier.crete@collabora.com&gt;</author>
<pads>
<caps>
<name>sink_%u</name>
<direction>sink</direction>
<presence>request</presence>
<details>audio/x-raw, rate=(int)[ 1, 2147483647 ], channels=(int)1, format=(string){ S8, U8, S16LE, S16BE, U16LE, U16BE, S24_32LE, S24_32BE, U24_32LE, U24_32BE, S32LE, S32BE, U32LE, U32BE, S24LE, S24BE, U24LE, U24BE, S20LE, S20BE, U20LE, U20BE, S18LE, S18BE, U18LE, U18BE, F32LE, F32BE, F64LE, F64BE }, layout=(string){ non-interleaved, interleaved }</details>
</caps>
<caps>
<name>src</name>
<direction>source</direction>
<presence>always</presence>
<details>audio/x-raw, rate=(int)[ 1, 2147483647 ], channels=(int)[ 1, 2147483647 ], format=(string){ S8, U8, S16LE, S16BE, U16LE, U16BE, S24_32LE, S24_32BE, U24_32LE, U24_32BE, S32LE, S32BE, U32LE, U32BE, S24LE, S24BE, U24LE, U24BE, S20LE, S20BE, U20LE, U20BE, S18LE, S18BE, U18LE, U18BE, F32LE, F32BE, F64LE, F64BE }, layout=(string)interleaved</details>
</caps>
</pads>
</element>
<element>
<name>audiomixer</name>
<longname>AudioMixer</longname>
<class>Generic/Audio</class>
<description>Mixes multiple audio streams</description>
<author>Sebastian Dröge &lt;sebastian@centricular.com&gt;</author>
<pads>
<caps>
<name>sink_%u</name>
<direction>sink</direction>
<presence>request</presence>
<details>audio/x-raw, format=(string){ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }, rate=(int)[ 1, 2147483647 ], channels=(int)[ 1, 2147483647 ], layout=(string){ interleaved, non-interleaved }</details>
</caps>
<caps>
<name>src</name>
<direction>source</direction>
<presence>always</presence>
<details>audio/x-raw, format=(string){ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }, rate=(int)[ 1, 2147483647 ], channels=(int)[ 1, 2147483647 ], layout=(string){ interleaved, non-interleaved }</details>
</caps>
</pads>
</element>
<element>
<name>liveadder</name>
<longname>AudioMixer</longname>
<class>Generic/Audio</class>
<description>Mixes multiple audio streams</description>
<author>Sebastian Dröge &lt;sebastian@centricular.com&gt;</author>
<pads>
<caps>
<name>sink_%u</name>
<direction>sink</direction>
<presence>request</presence>
<details>audio/x-raw, format=(string){ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }, rate=(int)[ 1, 2147483647 ], channels=(int)[ 1, 2147483647 ], layout=(string){ interleaved, non-interleaved }</details>
</caps>
<caps>
<name>src</name>
<direction>source</direction>
<presence>always</presence>
<details>audio/x-raw, format=(string){ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }, rate=(int)[ 1, 2147483647 ], channels=(int)[ 1, 2147483647 ], layout=(string){ interleaved, non-interleaved }</details>
</caps>
</pads>
</element>
</elements>
</plugin>


@@ -1,21 +0,0 @@
plugin_LTLIBRARIES = libgstaudiomixer.la
ORC_SOURCE=gstaudiomixerorc
include $(top_srcdir)/common/orc.mak
libgstaudiomixer_la_SOURCES = gstaudiomixer.c gstaudiointerleave.c
nodist_libgstaudiomixer_la_SOURCES = $(ORC_NODIST_SOURCES)
libgstaudiomixer_la_CFLAGS = \
-I$(top_srcdir)/gst-libs \
-I$(top_builddir)/gst-libs \
$(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS) \
$(GST_CFLAGS) $(ORC_CFLAGS)
libgstaudiomixer_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS)
libgstaudiomixer_la_LIBADD = \
$(top_builddir)/gst-libs/gst/audio/libgstbadaudio-$(GST_API_VERSION).la \
$(GST_PLUGINS_BASE_LIBS) -lgstaudio-@GST_API_VERSION@ \
$(GST_BASE_LIBS) $(GST_LIBS) $(ORC_LIBS)
noinst_HEADERS = gstaudiomixer.h gstaudiointerleave.h


@@ -1,902 +0,0 @@
/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2000 Wim Taymans <wtay@chello.be>
* 2005 Wim Taymans <wim@fluendo.com>
* 2007 Andy Wingo <wingo at pobox.com>
* 2008 Sebastian Dröge <slomo@circular-chaos.org>
* 2014 Collabora
* Olivier Crete <olivier.crete@collabora.com>
*
* gstaudiointerleave.c: audiointerleave element, N in, one out,
* mono input channels are interleaved into one output stream
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
/**
* SECTION:element-audiointerleave
* @title: audiointerleave
*
*/
/* FIXME 0.11: suppress warnings for deprecated API such as GValueArray
* with newer GLib versions (>= 2.31.0) */
#define GLIB_DISABLE_DEPRECATION_WARNINGS
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gstaudiointerleave.h"
#include <gst/audio/audio.h>
#include <string.h>
#define GST_CAT_DEFAULT gst_audio_interleave_debug
GST_DEBUG_CATEGORY_STATIC (GST_CAT_DEFAULT);
enum
{
PROP_PAD_0,
PROP_PAD_CHANNEL
};
G_DEFINE_TYPE (GstAudioInterleavePad, gst_audio_interleave_pad,
GST_TYPE_AUDIO_AGGREGATOR_PAD);
static void
gst_audio_interleave_pad_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstAudioInterleavePad *pad = GST_AUDIO_INTERLEAVE_PAD (object);
switch (prop_id) {
case PROP_PAD_CHANNEL:
g_value_set_uint (value, pad->channel);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audio_interleave_pad_class_init (GstAudioInterleavePadClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
gobject_class->get_property = gst_audio_interleave_pad_get_property;
g_object_class_install_property (gobject_class,
PROP_PAD_CHANNEL,
g_param_spec_uint ("channel",
"Channel number",
"Number of the channel of this pad in the output", 0, G_MAXUINT, 0,
G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
}
static void
gst_audio_interleave_pad_init (GstAudioInterleavePad * pad)
{
}
enum
{
PROP_0,
PROP_CHANNEL_POSITIONS,
PROP_CHANNEL_POSITIONS_FROM_INPUT
};
/* elementfactory information */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define CAPS \
GST_AUDIO_CAPS_MAKE ("{ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }") \
", layout = (string) { interleaved, non-interleaved }"
#else
#define CAPS \
GST_AUDIO_CAPS_MAKE ("{ S32BE, U32BE, S16BE, U16BE, S8, U8, F32BE, F64BE }") \
", layout = (string) { interleaved, non-interleaved }"
#endif
static GstStaticPadTemplate gst_audio_interleave_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
GST_STATIC_CAPS ("audio/x-raw, "
"rate = (int) [ 1, MAX ], "
"channels = (int) 1, "
"format = (string) " GST_AUDIO_FORMATS_ALL ", "
"layout = (string) {non-interleaved, interleaved}")
);
static GstStaticPadTemplate gst_audio_interleave_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS ("audio/x-raw, "
"rate = (int) [ 1, MAX ], "
"channels = (int) [ 1, MAX ], "
"format = (string) " GST_AUDIO_FORMATS_ALL ", "
"layout = (string) interleaved")
);
static void gst_audio_interleave_child_proxy_init (gpointer g_iface,
gpointer iface_data);
#define gst_audio_interleave_parent_class parent_class
G_DEFINE_TYPE_WITH_CODE (GstAudioInterleave, gst_audio_interleave,
GST_TYPE_AUDIO_AGGREGATOR, G_IMPLEMENT_INTERFACE (GST_TYPE_CHILD_PROXY,
gst_audio_interleave_child_proxy_init));
static void gst_audio_interleave_finalize (GObject * object);
static void gst_audio_interleave_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_audio_interleave_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static gboolean gst_audio_interleave_setcaps (GstAudioInterleave * self,
GstPad * pad, GstCaps * caps);
static GstPad *gst_audio_interleave_request_new_pad (GstElement * element,
GstPadTemplate * temp, const gchar * req_name, const GstCaps * caps);
static void gst_audio_interleave_release_pad (GstElement * element,
GstPad * pad);
static gboolean gst_audio_interleave_stop (GstAggregator * agg);
static gboolean
gst_audio_interleave_aggregate_one_buffer (GstAudioAggregator * aagg,
GstAudioAggregatorPad * aaggpad, GstBuffer * inbuf, guint in_offset,
GstBuffer * outbuf, guint out_offset, guint num_samples);
static void
__remove_channels (GstCaps * caps)
{
GstStructure *s;
gint i, size;
size = gst_caps_get_size (caps);
for (i = 0; i < size; i++) {
s = gst_caps_get_structure (caps, i);
gst_structure_remove_field (s, "channel-mask");
gst_structure_remove_field (s, "channels");
}
}
static void
__set_channels (GstCaps * caps, gint channels)
{
GstStructure *s;
gint i, size;
size = gst_caps_get_size (caps);
for (i = 0; i < size; i++) {
s = gst_caps_get_structure (caps, i);
if (channels > 0)
gst_structure_set (s, "channels", G_TYPE_INT, channels, NULL);
else
gst_structure_set (s, "channels", GST_TYPE_INT_RANGE, 1, G_MAXINT, NULL);
}
}
/* we can only accept caps that we and downstream can handle.
* if we have filtercaps set, use those to constrain the target caps.
*/
static GstCaps *
gst_audio_interleave_sink_getcaps (GstAggregator * agg, GstPad * pad,
GstCaps * filter)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (agg);
GstCaps *result = NULL, *peercaps, *sinkcaps;
GST_OBJECT_LOCK (self);
/* If we already have caps on one of the sink pads return them */
if (self->sinkcaps)
result = gst_caps_copy (self->sinkcaps);
GST_OBJECT_UNLOCK (self);
if (result == NULL) {
/* get the downstream possible caps */
peercaps = gst_pad_peer_query_caps (agg->srcpad, NULL);
/* get the allowed caps on this sinkpad */
sinkcaps = gst_caps_copy (gst_pad_get_pad_template_caps (pad));
__remove_channels (sinkcaps);
if (peercaps) {
peercaps = gst_caps_make_writable (peercaps);
__remove_channels (peercaps);
/* if the peer has caps, intersect */
GST_DEBUG_OBJECT (pad, "intersecting peer and template caps");
result = gst_caps_intersect (peercaps, sinkcaps);
gst_caps_unref (peercaps);
gst_caps_unref (sinkcaps);
} else {
/* the peer has no caps (or there is no peer), just use the allowed caps
* of this sinkpad. */
GST_DEBUG_OBJECT (pad, "no peer caps, using sinkcaps");
result = sinkcaps;
}
__set_channels (result, 1);
}
if (filter != NULL) {
GstCaps *caps = result;
GST_LOG_OBJECT (pad, "intersecting filter caps %" GST_PTR_FORMAT " with "
"preliminary result %" GST_PTR_FORMAT, filter, caps);
result = gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (caps);
}
GST_DEBUG_OBJECT (pad, "Returning caps %" GST_PTR_FORMAT, result);
return result;
}
static gboolean
gst_audio_interleave_sink_query (GstAggregator * agg, GstAggregatorPad * aggpad,
GstQuery * query)
{
gboolean res = FALSE;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_CAPS:
{
GstCaps *filter, *caps;
gst_query_parse_caps (query, &filter);
caps = gst_audio_interleave_sink_getcaps (agg, GST_PAD (aggpad), filter);
gst_query_set_caps_result (query, caps);
gst_caps_unref (caps);
res = TRUE;
break;
}
default:
res =
GST_AGGREGATOR_CLASS (parent_class)->sink_query (agg, aggpad, query);
break;
}
return res;
}
static gint
compare_positions (gconstpointer a, gconstpointer b, gpointer user_data)
{
const gint i = *(const gint *) a;
const gint j = *(const gint *) b;
const gint *pos = (const gint *) user_data;
if (pos[i] < pos[j])
return -1;
else if (pos[i] > pos[j])
return 1;
else
return 0;
}
static gboolean
gst_audio_interleave_channel_positions_to_mask (GValueArray * positions,
gint default_ordering_map[64], guint64 * mask)
{
gint i;
guint channels;
GstAudioChannelPosition *pos;
gboolean ret;
channels = positions->n_values;
pos = g_new (GstAudioChannelPosition, channels);
for (i = 0; i < channels; i++) {
GValue *val;
val = g_value_array_get_nth (positions, i);
pos[i] = g_value_get_enum (val);
}
/* sort the default ordering map according to the position order */
for (i = 0; i < channels; i++) {
default_ordering_map[i] = i;
}
g_qsort_with_data (default_ordering_map, channels,
sizeof (*default_ordering_map), compare_positions, pos);
ret = gst_audio_channel_positions_to_mask (pos, channels, FALSE, mask);
g_free (pos);
return ret;
}
/* Must be called with the object lock held */
static guint64
gst_audio_interleave_get_channel_mask (GstAudioInterleave * self)
{
guint64 channel_mask = 0;
if (self->channels <= 64 &&
self->channel_positions != NULL &&
self->channels == self->channel_positions->n_values) {
if (!gst_audio_interleave_channel_positions_to_mask
(self->channel_positions, self->default_channels_ordering_map,
&channel_mask)) {
GST_WARNING_OBJECT (self, "Invalid channel positions, using NONE");
channel_mask = 0;
}
} else if (self->channels <= 64) {
GST_WARNING_OBJECT (self, "Using NONE channel positions");
}
return channel_mask;
}
#define MAKE_FUNC(type) \
static void interleave_##type (guint##type *out, guint##type *in, \
guint stride, guint nframes) \
{ \
gint i; \
\
for (i = 0; i < nframes; i++) { \
*out = in[i]; \
out += stride; \
} \
}
MAKE_FUNC (8);
MAKE_FUNC (16);
MAKE_FUNC (32);
MAKE_FUNC (64);
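/* Illustrative note (not in the original source): MAKE_FUNC (16) expands to
 * interleave_16 (), which copies nframes 16-bit samples of one input channel
 * into every stride-th slot of the output, e.g. with stride == 2 the samples
 * land in out[0], out[2], out[4], ...; 24-bit audio needs the byte-wise
 * variant below because there is no guint24 type. */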
static void
interleave_24 (guint8 * out, guint8 * in, guint stride, guint nframes)
{
gint i;
for (i = 0; i < nframes; i++) {
memcpy (out, in, 3);
out += stride * 3;
in += 3;
}
}
static void
gst_audio_interleave_set_process_function (GstAudioInterleave * self,
GstAudioInfo * info)
{
switch (GST_AUDIO_INFO_WIDTH (info)) {
case 8:
self->func = (GstInterleaveFunc) interleave_8;
break;
case 16:
self->func = (GstInterleaveFunc) interleave_16;
break;
case 24:
self->func = (GstInterleaveFunc) interleave_24;
break;
case 32:
self->func = (GstInterleaveFunc) interleave_32;
break;
case 64:
self->func = (GstInterleaveFunc) interleave_64;
break;
default:
g_assert_not_reached ();
break;
}
}
/* the first caps we receive on any of the sinkpads will define the caps for all
* the other sinkpads because we can only mix streams with the same caps.
*/
static gboolean
gst_audio_interleave_setcaps (GstAudioInterleave * self, GstPad * pad,
GstCaps * caps)
{
GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (self);
GstAudioInfo info;
GValue *val;
guint channel;
gboolean new = FALSE;
if (!gst_audio_info_from_caps (&info, caps))
goto invalid_format;
GST_OBJECT_LOCK (self);
if (self->sinkcaps && !gst_caps_is_subset (caps, self->sinkcaps))
goto cannot_change_caps;
if (!self->sinkcaps) {
GstCaps *sinkcaps = gst_caps_copy (caps);
GstStructure *s = gst_caps_get_structure (sinkcaps, 0);
gst_structure_remove_field (s, "channel-mask");
GST_DEBUG_OBJECT (self, "setting sinkcaps %" GST_PTR_FORMAT, sinkcaps);
gst_caps_replace (&self->sinkcaps, sinkcaps);
gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (aagg));
gst_caps_unref (sinkcaps);
new = TRUE;
}
if (self->channel_positions_from_input
&& GST_AUDIO_INFO_CHANNELS (&info) == 1) {
channel = GST_AUDIO_INTERLEAVE_PAD (pad)->channel;
val = g_value_array_get_nth (self->input_channel_positions, channel);
g_value_set_enum (val, GST_AUDIO_INFO_POSITION (&info, 0));
}
GST_OBJECT_UNLOCK (self);
gst_audio_aggregator_set_sink_caps (aagg, GST_AUDIO_AGGREGATOR_PAD (pad),
caps);
if (!new)
return TRUE;
GST_INFO_OBJECT (pad, "handle caps change to %" GST_PTR_FORMAT, caps);
return TRUE;
/* ERRORS */
invalid_format:
{
GST_WARNING_OBJECT (self, "invalid format set as caps: %" GST_PTR_FORMAT,
caps);
return FALSE;
}
cannot_change_caps:
{
GST_OBJECT_UNLOCK (self);
GST_WARNING_OBJECT (self, "caps of %" GST_PTR_FORMAT " already set, can't "
"change", self->sinkcaps);
return FALSE;
}
}
static gboolean
gst_audio_interleave_sink_event (GstAggregator * agg, GstAggregatorPad * aggpad,
GstEvent * event)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (agg);
gboolean res = TRUE;
GST_DEBUG_OBJECT (aggpad, "Got %s event on sink pad",
GST_EVENT_TYPE_NAME (event));
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_CAPS:
{
GstCaps *caps;
gst_event_parse_caps (event, &caps);
res = gst_audio_interleave_setcaps (self, GST_PAD_CAST (aggpad), caps);
gst_event_unref (event);
event = NULL;
break;
}
default:
break;
}
if (event != NULL)
return GST_AGGREGATOR_CLASS (parent_class)->sink_event (agg, aggpad, event);
return res;
}
static GstFlowReturn
gst_audio_interleave_update_src_caps (GstAggregator * agg, GstCaps * caps,
GstCaps ** ret)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (agg);
GstStructure *s;
/* This means that either no caps have been set on the sink pad (if
* sinkcaps is NULL) or that there is no sink pad (if channels == 0).
*/
GST_OBJECT_LOCK (self);
if (self->sinkcaps == NULL || self->channels == 0) {
GST_OBJECT_UNLOCK (self);
return GST_FLOW_NOT_NEGOTIATED;
}
*ret = gst_caps_copy (self->sinkcaps);
s = gst_caps_get_structure (*ret, 0);
gst_structure_set (s, "channels", G_TYPE_INT, self->channels, "layout",
G_TYPE_STRING, "interleaved", "channel-mask", GST_TYPE_BITMASK,
gst_audio_interleave_get_channel_mask (self), NULL);
GST_OBJECT_UNLOCK (self);
return GST_FLOW_OK;
}
static gboolean
gst_audio_interleave_negotiated_src_caps (GstAggregator * agg, GstCaps * caps)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (agg);
GstAudioAggregator *aagg = GST_AUDIO_AGGREGATOR (self);
if (!GST_AGGREGATOR_CLASS (parent_class)->negotiated_src_caps (agg, caps))
return FALSE;
gst_audio_interleave_set_process_function (self, &aagg->info);
return TRUE;
}
static void
gst_audio_interleave_class_init (GstAudioInterleaveClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
GstElementClass *gstelement_class = (GstElementClass *) klass;
GstAggregatorClass *agg_class = (GstAggregatorClass *) klass;
GstAudioAggregatorClass *aagg_class = (GstAudioAggregatorClass *) klass;
GST_DEBUG_CATEGORY_INIT (GST_CAT_DEFAULT, "audiointerleave", 0,
"audio interleaving element");
gobject_class->set_property = gst_audio_interleave_set_property;
gobject_class->get_property = gst_audio_interleave_get_property;
gobject_class->finalize = gst_audio_interleave_finalize;
gst_element_class_add_static_pad_template (gstelement_class,
&gst_audio_interleave_src_template);
gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
&gst_audio_interleave_sink_template, GST_TYPE_AUDIO_INTERLEAVE_PAD);
gst_element_class_set_static_metadata (gstelement_class, "AudioInterleave",
"Generic/Audio", "Mixes multiple audio streams",
"Olivier Crete <olivier.crete@collabora.com>");
gstelement_class->request_new_pad =
GST_DEBUG_FUNCPTR (gst_audio_interleave_request_new_pad);
gstelement_class->release_pad =
GST_DEBUG_FUNCPTR (gst_audio_interleave_release_pad);
agg_class->sink_query = GST_DEBUG_FUNCPTR (gst_audio_interleave_sink_query);
agg_class->sink_event = GST_DEBUG_FUNCPTR (gst_audio_interleave_sink_event);
agg_class->stop = gst_audio_interleave_stop;
agg_class->update_src_caps = gst_audio_interleave_update_src_caps;
agg_class->negotiated_src_caps = gst_audio_interleave_negotiated_src_caps;
aagg_class->aggregate_one_buffer = gst_audio_interleave_aggregate_one_buffer;
aagg_class->convert_buffer = NULL;
/**
* GstInterleave:channel-positions
*
* Channel positions: This property controls the channel positions
* that are used on the src caps. The number of elements should be
* the same as the number of sink pads and the array should contain
* a valid list of channel positions. The n-th element of the array
* is the position of the n-th sink pad.
*
* These channel positions will only be used if they're valid and the
* number of elements is the same as the number of channels. If this
* is not given, a NONE layout will be used.
*
*/
g_object_class_install_property (gobject_class, PROP_CHANNEL_POSITIONS,
g_param_spec_value_array ("channel-positions", "Channel positions",
"Channel positions used on the output",
g_param_spec_enum ("channel-position", "Channel position",
"Channel position of the n-th input",
GST_TYPE_AUDIO_CHANNEL_POSITION,
GST_AUDIO_CHANNEL_POSITION_NONE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS),
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
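/* A usage sketch (added for illustration; "interleave" is a hypothetical
 * element instance with two sink pads, and GValueArray, while deprecated in
 * GLib, is what this property expects):
 *
 *   GValueArray *positions = g_value_array_new (2);
 *   GValue v = G_VALUE_INIT;
 *
 *   g_value_init (&v, GST_TYPE_AUDIO_CHANNEL_POSITION);
 *   g_value_set_enum (&v, GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT);
 *   g_value_array_append (positions, &v);
 *   g_value_set_enum (&v, GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT);
 *   g_value_array_append (positions, &v);
 *   g_object_set (interleave, "channel-positions", positions, NULL);
 *   g_value_array_free (positions);
 */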
/**
* GstInterleave:channel-positions-from-input
*
* Channel positions from input: If this property is set to %TRUE, the channel
* positions will be taken from the input caps if valid channel positions for
* the output can be constructed from them. Setting the channel-positions
* property switches this property back to %FALSE.
*
*/
g_object_class_install_property (gobject_class,
PROP_CHANNEL_POSITIONS_FROM_INPUT,
g_param_spec_boolean ("channel-positions-from-input",
"Channel positions from input",
"Take channel positions from the input", TRUE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
}
static void
gst_audio_interleave_init (GstAudioInterleave * self)
{
self->input_channel_positions = g_value_array_new (0);
self->channel_positions_from_input = TRUE;
self->channel_positions = self->input_channel_positions;
}
static void
gst_audio_interleave_finalize (GObject * object)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (object);
if (self->channel_positions
&& self->channel_positions != self->input_channel_positions) {
g_value_array_free (self->channel_positions);
self->channel_positions = NULL;
}
if (self->input_channel_positions) {
g_value_array_free (self->input_channel_positions);
self->input_channel_positions = NULL;
}
G_OBJECT_CLASS (parent_class)->finalize (object);
}
static void
gst_audio_interleave_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (object);
switch (prop_id) {
case PROP_CHANNEL_POSITIONS:
g_return_if_fail (
((GValueArray *) g_value_get_boxed (value))->n_values > 0);
if (self->channel_positions &&
self->channel_positions != self->input_channel_positions)
g_value_array_free (self->channel_positions);
self->channel_positions = g_value_dup_boxed (value);
self->channel_positions_from_input = FALSE;
break;
case PROP_CHANNEL_POSITIONS_FROM_INPUT:
self->channel_positions_from_input = g_value_get_boolean (value);
if (self->channel_positions_from_input) {
if (self->channel_positions &&
self->channel_positions != self->input_channel_positions)
g_value_array_free (self->channel_positions);
self->channel_positions = self->input_channel_positions;
}
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audio_interleave_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (object);
switch (prop_id) {
case PROP_CHANNEL_POSITIONS:
g_value_set_boxed (value, self->channel_positions);
break;
case PROP_CHANNEL_POSITIONS_FROM_INPUT:
g_value_set_boolean (value, self->channel_positions_from_input);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static gboolean
gst_audio_interleave_stop (GstAggregator * agg)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (agg);
if (!GST_AGGREGATOR_CLASS (parent_class)->stop (agg))
return FALSE;
gst_caps_replace (&self->sinkcaps, NULL);
return TRUE;
}
static GstPad *
gst_audio_interleave_request_new_pad (GstElement * element,
GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (element);
GstAudioInterleavePad *newpad;
gchar *pad_name;
gint channel, padnumber;
GValue val = { 0, };
/* FIXME: We ignore req_name, this is evil! */
GST_OBJECT_LOCK (self);
padnumber = g_atomic_int_add (&self->padcounter, 1);
channel = self->channels++;
if (!self->channel_positions_from_input)
channel = padnumber;
GST_OBJECT_UNLOCK (self);
pad_name = g_strdup_printf ("sink_%u", padnumber);
newpad = (GstAudioInterleavePad *)
GST_ELEMENT_CLASS (parent_class)->request_new_pad (element,
templ, pad_name, caps);
g_free (pad_name);
if (newpad == NULL)
goto could_not_create;
newpad->channel = channel;
gst_pad_use_fixed_caps (GST_PAD (newpad));
gst_child_proxy_child_added (GST_CHILD_PROXY (element), G_OBJECT (newpad),
GST_OBJECT_NAME (newpad));
g_value_init (&val, GST_TYPE_AUDIO_CHANNEL_POSITION);
g_value_set_enum (&val, GST_AUDIO_CHANNEL_POSITION_NONE);
self->input_channel_positions =
g_value_array_append (self->input_channel_positions, &val);
g_value_unset (&val);
/* Update the src caps if we already have them */
gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (self));
return GST_PAD_CAST (newpad);
could_not_create:
{
GST_DEBUG_OBJECT (element, "could not create/add pad");
return NULL;
}
}
static void
gst_audio_interleave_release_pad (GstElement * element, GstPad * pad)
{
GstAudioInterleave *self;
gint position;
GList *l;
self = GST_AUDIO_INTERLEAVE (element);
/* Take lock to make sure we're not changing this when processing buffers */
GST_OBJECT_LOCK (self);
self->channels--;
position = GST_AUDIO_INTERLEAVE_PAD (pad)->channel;
g_value_array_remove (self->input_channel_positions, position);
/* Update channel numbers */
/* Taken above, GST_OBJECT_LOCK (self); */
for (l = GST_ELEMENT_CAST (self)->sinkpads; l != NULL; l = l->next) {
GstAudioInterleavePad *ipad = GST_AUDIO_INTERLEAVE_PAD (l->data);
if (GST_AUDIO_INTERLEAVE_PAD (pad)->channel < ipad->channel)
ipad->channel--;
}
gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (self));
GST_OBJECT_UNLOCK (self);
GST_DEBUG_OBJECT (self, "release pad %s:%s", GST_DEBUG_PAD_NAME (pad));
gst_child_proxy_child_removed (GST_CHILD_PROXY (self), G_OBJECT (pad),
GST_OBJECT_NAME (pad));
GST_ELEMENT_CLASS (parent_class)->release_pad (element, pad);
}
/* Called with object lock and pad object lock held */
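/* Illustrative example (added, not from the original code): with a 2-channel
 * S16 interleaved output, out_bpf is 4 and out_width is 2, so the pad mapped
 * to channel 1 starts writing at outmap.data + out_offset * 4 + 2, and
 * self->func () then places its samples out_channels frames apart, i.e. at
 * byte offsets 2, 6, 10, ... within the output region. */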
static gboolean
gst_audio_interleave_aggregate_one_buffer (GstAudioAggregator * aagg,
GstAudioAggregatorPad * aaggpad, GstBuffer * inbuf, guint in_offset,
GstBuffer * outbuf, guint out_offset, guint num_frames)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (aagg);
GstAudioInterleavePad *pad = GST_AUDIO_INTERLEAVE_PAD (aaggpad);
GstMapInfo inmap;
GstMapInfo outmap;
gint out_width, in_bpf, out_bpf, out_channels, channel;
guint8 *outdata;
GST_OBJECT_LOCK (aagg);
GST_OBJECT_LOCK (aaggpad);
out_width = GST_AUDIO_INFO_WIDTH (&aagg->info) / 8;
in_bpf = GST_AUDIO_INFO_BPF (&aaggpad->info);
out_bpf = GST_AUDIO_INFO_BPF (&aagg->info);
out_channels = GST_AUDIO_INFO_CHANNELS (&aagg->info);
gst_buffer_map (outbuf, &outmap, GST_MAP_READWRITE);
gst_buffer_map (inbuf, &inmap, GST_MAP_READ);
GST_LOG_OBJECT (pad, "interleaves %u frames on channel %d/%d at offset %u"
" from offset %u", num_frames, pad->channel, out_channels,
out_offset * out_bpf, in_offset * in_bpf);
if (self->channels > 64) {
channel = pad->channel;
} else {
channel = self->default_channels_ordering_map[pad->channel];
}
outdata = outmap.data + (out_offset * out_bpf) + (out_width * channel);
self->func (outdata, inmap.data + (in_offset * in_bpf), out_channels,
num_frames);
gst_buffer_unmap (inbuf, &inmap);
gst_buffer_unmap (outbuf, &outmap);
GST_OBJECT_UNLOCK (aaggpad);
GST_OBJECT_UNLOCK (aagg);
return TRUE;
}
/* GstChildProxy implementation */
static GObject *
gst_audio_interleave_child_proxy_get_child_by_index (GstChildProxy *
child_proxy, guint index)
{
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (child_proxy);
GObject *obj = NULL;
GST_OBJECT_LOCK (self);
obj = g_list_nth_data (GST_ELEMENT_CAST (self)->sinkpads, index);
if (obj)
gst_object_ref (obj);
GST_OBJECT_UNLOCK (self);
return obj;
}
static guint
gst_audio_interleave_child_proxy_get_children_count (GstChildProxy *
child_proxy)
{
guint count = 0;
GstAudioInterleave *self = GST_AUDIO_INTERLEAVE (child_proxy);
GST_OBJECT_LOCK (self);
count = GST_ELEMENT_CAST (self)->numsinkpads;
GST_OBJECT_UNLOCK (self);
GST_INFO_OBJECT (self, "Children Count: %d", count);
return count;
}
static void
gst_audio_interleave_child_proxy_init (gpointer g_iface, gpointer iface_data)
{
GstChildProxyInterface *iface = g_iface;
GST_INFO ("intializing child proxy interface");
iface->get_child_by_index =
gst_audio_interleave_child_proxy_get_child_by_index;
iface->get_children_count =
gst_audio_interleave_child_proxy_get_children_count;
}


@@ -1,100 +0,0 @@
/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2000 Wim Taymans <wtay@chello.be>
* Copyright (C) 2013 Sebastian Dröge <slomo@circular-chaos.org>
*
* gstaudiointerleave.h: Header for audiointerleave element
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef __GST_AUDIO_INTERLEAVE_H__
#define __GST_AUDIO_INTERLEAVE_H__
#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <gst/audio/gstaudioaggregator.h>
G_BEGIN_DECLS
#define GST_TYPE_AUDIO_INTERLEAVE (gst_audio_interleave_get_type())
#define GST_AUDIO_INTERLEAVE(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_INTERLEAVE,GstAudioInterleave))
#define GST_IS_AUDIO_INTERLEAVE(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_INTERLEAVE))
#define GST_AUDIO_INTERLEAVE_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_AUDIO_INTERLEAVE,GstAudioInterleaveClass))
#define GST_IS_AUDIO_INTERLEAVE_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_AUDIO_INTERLEAVE))
#define GST_AUDIO_INTERLEAVE_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_AUDIO_INTERLEAVE,GstAudioInterleaveClass))
typedef struct _GstAudioInterleave GstAudioInterleave;
typedef struct _GstAudioInterleaveClass GstAudioInterleaveClass;
typedef struct _GstAudioInterleavePad GstAudioInterleavePad;
typedef struct _GstAudioInterleavePadClass GstAudioInterleavePadClass;
typedef void (*GstInterleaveFunc) (gpointer out, gpointer in, guint stride,
guint nframes);
/**
* GstAudioInterleave:
*
* The GstAudioInterleave object structure.
*/
struct _GstAudioInterleave {
GstAudioAggregator parent;
gint padcounter;
guint channels; /* object lock */
GstCaps *sinkcaps;
GValueArray *channel_positions;
GValueArray *input_channel_positions;
gboolean channel_positions_from_input;
gint default_channels_ordering_map[64];
GstInterleaveFunc func;
};
struct _GstAudioInterleaveClass {
GstAudioAggregatorClass parent_class;
};
GType gst_audio_interleave_get_type (void);
#define GST_TYPE_AUDIO_INTERLEAVE_PAD (gst_audio_interleave_pad_get_type())
#define GST_AUDIO_INTERLEAVE_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_INTERLEAVE_PAD,GstAudioInterleavePad))
#define GST_IS_AUDIO_INTERLEAVE_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_INTERLEAVE_PAD))
#define GST_AUDIO_INTERLEAVE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_AUDIO_INTERLEAVE_PAD,GstAudioInterleavePadClass))
#define GST_IS_AUDIO_INTERLEAVE_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_AUDIO_INTERLEAVE_PAD))
#define GST_AUDIO_INTERLEAVE_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_AUDIO_INTERLEAVE_PAD,GstAudioInterleavePadClass))
struct _GstAudioInterleavePad {
GstAudioAggregatorPad parent;
guint channel;
};
struct _GstAudioInterleavePadClass {
GstAudioAggregatorPadClass parent_class;
};
GType gst_audio_interleave_pad_get_type (void);
G_END_DECLS
#endif /* __GST_AUDIO_INTERLEAVE_H__ */


@@ -1,577 +0,0 @@
/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2001 Thomas <thomas@apestaart.org>
* 2005,2006 Wim Taymans <wim@fluendo.com>
* 2013 Sebastian Dröge <sebastian@centricular.com>
*
* audiomixer.c: AudioMixer element, N in, one out, samples are added
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
/**
* SECTION:element-audiomixer
* @title: audiomixer
*
* The audiomixer element mixes several streams into one by adding the data.
* Mixed data is clamped to the min/max values of the data format.
*
* Unlike the adder element, audiomixer properly synchronises all input streams
* and also correctly handles live inputs such as capture sources or RTP.
*
* The audiomixer element can accept any sort of raw audio data; it will
* be converted to the target format if necessary, with the exception
* of the sample rate, which has to be identical to either what downstream
* expects, or the sample rate of the first configured pad. Use a capsfilter
* after the audiomixer element if you want to precisely control the format
* that comes out of the audiomixer, which supports changing the format of
* its output while playing.
*
* If you want to control the manner in which incoming data gets converted,
* see the #GstAudioAggregatorPad:converter-config property, which will let
* you for example change the way in which channels may get remapped.
*
* The input pads are from a GstPad subclass and have additional
* properties to mute each pad individually and set the volume:
*
* * "mute": Whether to mute the pad or not (#gboolean)
* * "volume": The volume of the pad, between 0.0 and 10.0 (#gdouble)
*
* ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! alsasink audiotestsrc freq=500 ! mix.
* ]| This pipeline produces two sine waves mixed together.
*
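* Since audiomixer implements #GstChildProxy, the per-pad properties can also
* be set straight from gst-launch-1.0 (a sketch added for illustration,
* assuming gst-launch-1.0's child-proxy property syntax; not from the
* original documentation):
* |[
* gst-launch-1.0 audiomixer name=mix sink_1::volume=0.5 ! audioconvert ! autoaudiosink audiotestsrc freq=440 ! mix.sink_0 audiotestsrc freq=880 ! mix.sink_1
* ]| Here the second input is mixed in at half volume.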
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gstaudiomixer.h"
#include <gst/audio/audio.h>
#include <string.h> /* strcmp */
#include "gstaudiomixerorc.h"
#include "gstaudiointerleave.h"
#define GST_CAT_DEFAULT gst_audiomixer_debug
GST_DEBUG_CATEGORY_STATIC (GST_CAT_DEFAULT);
#define DEFAULT_PAD_VOLUME (1.0)
#define DEFAULT_PAD_MUTE (FALSE)
/* some defines for audio processing */
/* the volume factor is a range from 0.0 to (arbitrary) VOLUME_MAX_DOUBLE = 10.0
* we map 1.0 to VOLUME_UNITY_INT*
*/
#define VOLUME_UNITY_INT8 8 /* internal int for unity 2^(8-5) */
#define VOLUME_UNITY_INT8_BIT_SHIFT 3 /* number of bits to shift for unity */
#define VOLUME_UNITY_INT16 2048 /* internal int for unity 2^(16-5) */
#define VOLUME_UNITY_INT16_BIT_SHIFT 11 /* number of bits to shift for unity */
#define VOLUME_UNITY_INT24 524288 /* internal int for unity 2^(24-5) */
#define VOLUME_UNITY_INT24_BIT_SHIFT 19 /* number of bits to shift for unity */
#define VOLUME_UNITY_INT32 134217728 /* internal int for unity 2^(32-5) */
#define VOLUME_UNITY_INT32_BIT_SHIFT 27
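/* Worked example (added for clarity): a pad "volume" of 0.5 gives
 * volume_i16 = 0.5 * VOLUME_UNITY_INT16 = 1024; the integer mixing kernels
 * multiply each 16-bit sample by that factor and shift the product back by
 * VOLUME_UNITY_INT16_BIT_SHIFT (11), so the pad's contribution is halved. */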
enum
{
PROP_PAD_0,
PROP_PAD_VOLUME,
PROP_PAD_MUTE
};
G_DEFINE_TYPE (GstAudioMixerPad, gst_audiomixer_pad,
GST_TYPE_AUDIO_AGGREGATOR_CONVERT_PAD);
static void
gst_audiomixer_pad_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec)
{
GstAudioMixerPad *pad = GST_AUDIO_MIXER_PAD (object);
switch (prop_id) {
case PROP_PAD_VOLUME:
g_value_set_double (value, pad->volume);
break;
case PROP_PAD_MUTE:
g_value_set_boolean (value, pad->mute);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audiomixer_pad_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
GstAudioMixerPad *pad = GST_AUDIO_MIXER_PAD (object);
switch (prop_id) {
case PROP_PAD_VOLUME:
GST_OBJECT_LOCK (pad);
pad->volume = g_value_get_double (value);
pad->volume_i8 = pad->volume * VOLUME_UNITY_INT8;
pad->volume_i16 = pad->volume * VOLUME_UNITY_INT16;
pad->volume_i32 = pad->volume * VOLUME_UNITY_INT32;
GST_OBJECT_UNLOCK (pad);
break;
case PROP_PAD_MUTE:
GST_OBJECT_LOCK (pad);
pad->mute = g_value_get_boolean (value);
GST_OBJECT_UNLOCK (pad);
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_audiomixer_pad_class_init (GstAudioMixerPadClass * klass)
{
GObjectClass *gobject_class = (GObjectClass *) klass;
gobject_class->set_property = gst_audiomixer_pad_set_property;
gobject_class->get_property = gst_audiomixer_pad_get_property;
g_object_class_install_property (gobject_class, PROP_PAD_VOLUME,
g_param_spec_double ("volume", "Volume", "Volume of this pad",
0.0, 10.0, DEFAULT_PAD_VOLUME,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PROP_PAD_MUTE,
g_param_spec_boolean ("mute", "Mute", "Mute this pad",
DEFAULT_PAD_MUTE,
G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
}
static void
gst_audiomixer_pad_init (GstAudioMixerPad * pad)
{
pad->volume = DEFAULT_PAD_VOLUME;
pad->mute = DEFAULT_PAD_MUTE;
}
enum
{
PROP_0
};
/* These are the formats we can mix natively */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
#define CAPS \
GST_AUDIO_CAPS_MAKE ("{ S32LE, U32LE, S16LE, U16LE, S8, U8, F32LE, F64LE }") \
", layout = interleaved"
#else
#define CAPS \
GST_AUDIO_CAPS_MAKE ("{ S32BE, U32BE, S16BE, U16BE, S8, U8, F32BE, F64BE }") \
", layout = interleaved"
#endif
static GstStaticPadTemplate gst_audiomixer_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
GST_STATIC_CAPS (CAPS)
);
#define SINK_CAPS \
GST_STATIC_CAPS (GST_AUDIO_CAPS_MAKE (GST_AUDIO_FORMATS_ALL) \
", layout=interleaved")
static GstStaticPadTemplate gst_audiomixer_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink_%u",
GST_PAD_SINK,
GST_PAD_REQUEST,
SINK_CAPS);
static void gst_audiomixer_child_proxy_init (gpointer g_iface,
gpointer iface_data);
#define gst_audiomixer_parent_class parent_class
G_DEFINE_TYPE_WITH_CODE (GstAudioMixer, gst_audiomixer,
GST_TYPE_AUDIO_AGGREGATOR, G_IMPLEMENT_INTERFACE (GST_TYPE_CHILD_PROXY,
gst_audiomixer_child_proxy_init));
static GstPad *gst_audiomixer_request_new_pad (GstElement * element,
GstPadTemplate * temp, const gchar * req_name, const GstCaps * caps);
static void gst_audiomixer_release_pad (GstElement * element, GstPad * pad);
static gboolean
gst_audiomixer_aggregate_one_buffer (GstAudioAggregator * aagg,
GstAudioAggregatorPad * aaggpad, GstBuffer * inbuf, guint in_offset,
GstBuffer * outbuf, guint out_offset, guint num_samples);
static void
gst_audiomixer_class_init (GstAudioMixerClass * klass)
{
GstElementClass *gstelement_class = (GstElementClass *) klass;
GstAudioAggregatorClass *aagg_class = (GstAudioAggregatorClass *) klass;
gst_element_class_add_static_pad_template (gstelement_class,
&gst_audiomixer_src_template);
gst_element_class_add_static_pad_template_with_gtype (gstelement_class,
&gst_audiomixer_sink_template, GST_TYPE_AUDIO_MIXER_PAD);
gst_element_class_set_static_metadata (gstelement_class, "AudioMixer",
"Generic/Audio", "Mixes multiple audio streams",
"Sebastian Dröge <sebastian@centricular.com>");
gstelement_class->request_new_pad =
GST_DEBUG_FUNCPTR (gst_audiomixer_request_new_pad);
gstelement_class->release_pad =
GST_DEBUG_FUNCPTR (gst_audiomixer_release_pad);
aagg_class->aggregate_one_buffer = gst_audiomixer_aggregate_one_buffer;
}
static void
gst_audiomixer_init (GstAudioMixer * audiomixer)
{
}
static GstPad *
gst_audiomixer_request_new_pad (GstElement * element, GstPadTemplate * templ,
const gchar * req_name, const GstCaps * caps)
{
GstAudioMixerPad *newpad;
newpad = (GstAudioMixerPad *)
GST_ELEMENT_CLASS (parent_class)->request_new_pad (element,
templ, req_name, caps);
if (newpad == NULL)
goto could_not_create;
gst_child_proxy_child_added (GST_CHILD_PROXY (element), G_OBJECT (newpad),
GST_OBJECT_NAME (newpad));
return GST_PAD_CAST (newpad);
could_not_create:
{
GST_DEBUG_OBJECT (element, "could not create/add pad");
return NULL;
}
}
static void
gst_audiomixer_release_pad (GstElement * element, GstPad * pad)
{
GstAudioMixer *audiomixer;
audiomixer = GST_AUDIO_MIXER (element);
GST_DEBUG_OBJECT (audiomixer, "release pad %s:%s", GST_DEBUG_PAD_NAME (pad));
gst_child_proxy_child_removed (GST_CHILD_PROXY (audiomixer), G_OBJECT (pad),
GST_OBJECT_NAME (pad));
GST_ELEMENT_CLASS (parent_class)->release_pad (element, pad);
}
static gboolean
gst_audiomixer_aggregate_one_buffer (GstAudioAggregator * aagg,
GstAudioAggregatorPad * aaggpad, GstBuffer * inbuf, guint in_offset,
GstBuffer * outbuf, guint out_offset, guint num_frames)
{
GstAudioMixerPad *pad = GST_AUDIO_MIXER_PAD (aaggpad);
GstMapInfo inmap;
GstMapInfo outmap;
gint bpf;
GST_OBJECT_LOCK (aagg);
GST_OBJECT_LOCK (aaggpad);
if (pad->mute || pad->volume < G_MINDOUBLE) {
GST_DEBUG_OBJECT (pad, "Skipping muted pad");
GST_OBJECT_UNLOCK (aaggpad);
GST_OBJECT_UNLOCK (aagg);
return FALSE;
}
bpf = GST_AUDIO_INFO_BPF (&aagg->info);
gst_buffer_map (outbuf, &outmap, GST_MAP_READWRITE);
gst_buffer_map (inbuf, &inmap, GST_MAP_READ);
GST_LOG_OBJECT (pad, "mixing %u bytes at offset %u from offset %u",
num_frames * bpf, out_offset * bpf, in_offset * bpf);
/* further buffers, need to add them */
if (pad->volume == 1.0) {
switch (aagg->info.finfo->format) {
case GST_AUDIO_FORMAT_U8:
audiomixer_orc_add_u8 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_S8:
audiomixer_orc_add_s8 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_U16:
audiomixer_orc_add_u16 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_S16:
audiomixer_orc_add_s16 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_U32:
audiomixer_orc_add_u32 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_S32:
audiomixer_orc_add_s32 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_F32:
audiomixer_orc_add_f32 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_F64:
audiomixer_orc_add_f64 ((gpointer) (outmap.data + out_offset * bpf),
(gpointer) (inmap.data + in_offset * bpf),
num_frames * aagg->info.channels);
break;
default:
g_assert_not_reached ();
break;
}
} else {
switch (aagg->info.finfo->format) {
case GST_AUDIO_FORMAT_U8:
audiomixer_orc_add_volume_u8 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume_i8, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_S8:
audiomixer_orc_add_volume_s8 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume_i8, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_U16:
audiomixer_orc_add_volume_u16 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume_i16, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_S16:
audiomixer_orc_add_volume_s16 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume_i16, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_U32:
audiomixer_orc_add_volume_u32 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume_i32, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_S32:
audiomixer_orc_add_volume_s32 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume_i32, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_F32:
audiomixer_orc_add_volume_f32 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume, num_frames * aagg->info.channels);
break;
case GST_AUDIO_FORMAT_F64:
audiomixer_orc_add_volume_f64 ((gpointer) (outmap.data +
out_offset * bpf), (gpointer) (inmap.data + in_offset * bpf),
pad->volume, num_frames * aagg->info.channels);
break;
default:
g_assert_not_reached ();
break;
}
}
gst_buffer_unmap (inbuf, &inmap);
gst_buffer_unmap (outbuf, &outmap);
GST_OBJECT_UNLOCK (aaggpad);
GST_OBJECT_UNLOCK (aagg);
return TRUE;
}
/* GstChildProxy implementation */
static GObject *
gst_audiomixer_child_proxy_get_child_by_index (GstChildProxy * child_proxy,
guint index)
{
GstAudioMixer *audiomixer = GST_AUDIO_MIXER (child_proxy);
GObject *obj = NULL;
GST_OBJECT_LOCK (audiomixer);
obj = g_list_nth_data (GST_ELEMENT_CAST (audiomixer)->sinkpads, index);
if (obj)
gst_object_ref (obj);
GST_OBJECT_UNLOCK (audiomixer);
return obj;
}
static guint
gst_audiomixer_child_proxy_get_children_count (GstChildProxy * child_proxy)
{
guint count = 0;
GstAudioMixer *audiomixer = GST_AUDIO_MIXER (child_proxy);
GST_OBJECT_LOCK (audiomixer);
count = GST_ELEMENT_CAST (audiomixer)->numsinkpads;
GST_OBJECT_UNLOCK (audiomixer);
GST_INFO_OBJECT (audiomixer, "Children Count: %d", count);
return count;
}
static void
gst_audiomixer_child_proxy_init (gpointer g_iface, gpointer iface_data)
{
GstChildProxyInterface *iface = g_iface;
GST_INFO ("intializing child proxy interface");
iface->get_child_by_index = gst_audiomixer_child_proxy_get_child_by_index;
iface->get_children_count = gst_audiomixer_child_proxy_get_children_count;
}
/* Empty liveadder alias with non-zero latency */
typedef GstAudioMixer GstLiveAdder;
typedef GstAudioMixerClass GstLiveAdderClass;
static GType gst_live_adder_get_type (void);
#define GST_TYPE_LIVE_ADDER gst_live_adder_get_type ()
G_DEFINE_TYPE (GstLiveAdder, gst_live_adder, GST_TYPE_AUDIO_MIXER);
enum
{
LIVEADDER_PROP_LATENCY = 1
};
static void
gst_live_adder_init (GstLiveAdder * self)
{
}
static void
gst_live_adder_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
switch (prop_id) {
case LIVEADDER_PROP_LATENCY:
{
GParamSpec *parent_spec =
g_object_class_find_property (G_OBJECT_CLASS
(gst_live_adder_parent_class), "latency");
GObjectClass *pspec_class = g_type_class_peek (parent_spec->owner_type);
GValue v = { 0 };
g_value_init (&v, G_TYPE_UINT64);
g_value_set_uint64 (&v, g_value_get_uint (value) * GST_MSECOND);
G_OBJECT_CLASS (pspec_class)->set_property (object,
parent_spec->param_id, &v, parent_spec);
break;
}
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_live_adder_get_property (GObject * object, guint prop_id, GValue * value,
GParamSpec * pspec)
{
switch (prop_id) {
case LIVEADDER_PROP_LATENCY:
{
GParamSpec *parent_spec =
g_object_class_find_property (G_OBJECT_CLASS
(gst_live_adder_parent_class), "latency");
GObjectClass *pspec_class = g_type_class_peek (parent_spec->owner_type);
GValue v = { 0 };
g_value_init (&v, G_TYPE_UINT64);
G_OBJECT_CLASS (pspec_class)->get_property (object,
parent_spec->param_id, &v, parent_spec);
g_value_set_uint (value, g_value_get_uint64 (&v) / GST_MSECOND);
break;
}
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
}
static void
gst_live_adder_class_init (GstLiveAdderClass * klass)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
gobject_class->set_property = gst_live_adder_set_property;
gobject_class->get_property = gst_live_adder_get_property;
g_object_class_install_property (gobject_class, LIVEADDER_PROP_LATENCY,
g_param_spec_uint ("latency", "Buffer latency",
"Additional latency in live mode to allow upstream "
"to take longer to produce buffers for the current "
"position (in milliseconds)", 0, G_MAXUINT,
30, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT));
}
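/* Added note: a pipeline string such as "liveadder latency=60" therefore ends
 * up setting the parent aggregator "latency" property to
 * 60 * GST_MSECOND = 60000000 ns, and reading the property back converts the
 * value to milliseconds again. */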
static gboolean
plugin_init (GstPlugin * plugin)
{
GST_DEBUG_CATEGORY_INIT (GST_CAT_DEFAULT, "audiomixer", 0,
"audio mixing element");
if (!gst_element_register (plugin, "audiomixer", GST_RANK_NONE,
GST_TYPE_AUDIO_MIXER))
return FALSE;
if (!gst_element_register (plugin, "liveadder", GST_RANK_NONE,
GST_TYPE_LIVE_ADDER))
return FALSE;
if (!gst_element_register (plugin, "audiointerleave", GST_RANK_NONE,
GST_TYPE_AUDIO_INTERLEAVE))
return FALSE;
return TRUE;
}
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
GST_VERSION_MINOR,
audiomixer,
"Mixes multiple audio streams",
plugin_init, VERSION, "LGPL", GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)


@@ -1,87 +0,0 @@
/* GStreamer
* Copyright (C) 1999,2000 Erik Walthinsen <omega@cse.ogi.edu>
* 2000 Wim Taymans <wtay@chello.be>
* Copyright (C) 2013 Sebastian Dröge <slomo@circular-chaos.org>
*
* gstaudiomixer.h: Header for GstAudioMixer element
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifndef __GST_AUDIO_MIXER_H__
#define __GST_AUDIO_MIXER_H__
#include <gst/gst.h>
#include <gst/audio/audio.h>
#include <gst/audio/gstaudioaggregator.h>
G_BEGIN_DECLS
#define GST_TYPE_AUDIO_MIXER (gst_audiomixer_get_type())
#define GST_AUDIO_MIXER(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_MIXER,GstAudioMixer))
#define GST_IS_AUDIO_MIXER(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_MIXER))
#define GST_AUDIO_MIXER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_AUDIO_MIXER,GstAudioMixerClass))
#define GST_IS_AUDIO_MIXER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_AUDIO_MIXER))
#define GST_AUDIO_MIXER_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_AUDIO_MIXER,GstAudioMixerClass))
typedef struct _GstAudioMixer GstAudioMixer;
typedef struct _GstAudioMixerClass GstAudioMixerClass;
typedef struct _GstAudioMixerPad GstAudioMixerPad;
typedef struct _GstAudioMixerPadClass GstAudioMixerPadClass;
/**
* GstAudioMixer:
*
* The audiomixer object structure.
*/
struct _GstAudioMixer {
GstAudioAggregator element;
};
struct _GstAudioMixerClass {
GstAudioAggregatorClass parent_class;
};
GType gst_audiomixer_get_type (void);
#define GST_TYPE_AUDIO_MIXER_PAD (gst_audiomixer_pad_get_type())
#define GST_AUDIO_MIXER_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AUDIO_MIXER_PAD,GstAudioMixerPad))
#define GST_IS_AUDIO_MIXER_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AUDIO_MIXER_PAD))
#define GST_AUDIO_MIXER_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass) ,GST_TYPE_AUDIO_MIXER_PAD,GstAudioMixerPadClass))
#define GST_IS_AUDIO_MIXER_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass) ,GST_TYPE_AUDIO_MIXER_PAD))
#define GST_AUDIO_MIXER_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj) ,GST_TYPE_AUDIO_MIXER_PAD,GstAudioMixerPadClass))
struct _GstAudioMixerPad {
GstAudioAggregatorConvertPad parent;
gdouble volume;
gint volume_i32;
gint volume_i16;
gint volume_i8;
gboolean mute;
};
struct _GstAudioMixerPadClass {
GstAudioAggregatorConvertPadClass parent_class;
};
GType gst_audiomixer_pad_get_type (void);
G_END_DECLS
#endif /* __GST_AUDIO_MIXER_H__ */

File diff suppressed because it is too large


@@ -1,106 +0,0 @@
/* autogenerated from gstaudiomixerorc.orc */
#ifndef _GSTAUDIOMIXERORC_H_
#define _GSTAUDIOMIXERORC_H_
#include <glib.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ORC_INTEGER_TYPEDEFS_
#define _ORC_INTEGER_TYPEDEFS_
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#include <stdint.h>
typedef int8_t orc_int8;
typedef int16_t orc_int16;
typedef int32_t orc_int32;
typedef int64_t orc_int64;
typedef uint8_t orc_uint8;
typedef uint16_t orc_uint16;
typedef uint32_t orc_uint32;
typedef uint64_t orc_uint64;
#define ORC_UINT64_C(x) UINT64_C(x)
#elif defined(_MSC_VER)
typedef signed __int8 orc_int8;
typedef signed __int16 orc_int16;
typedef signed __int32 orc_int32;
typedef signed __int64 orc_int64;
typedef unsigned __int8 orc_uint8;
typedef unsigned __int16 orc_uint16;
typedef unsigned __int32 orc_uint32;
typedef unsigned __int64 orc_uint64;
#define ORC_UINT64_C(x) (x##Ui64)
#define inline __inline
#else
#include <limits.h>
typedef signed char orc_int8;
typedef short orc_int16;
typedef int orc_int32;
typedef unsigned char orc_uint8;
typedef unsigned short orc_uint16;
typedef unsigned int orc_uint32;
#if INT_MAX == LONG_MAX
typedef long long orc_int64;
typedef unsigned long long orc_uint64;
#define ORC_UINT64_C(x) (x##ULL)
#else
typedef long orc_int64;
typedef unsigned long orc_uint64;
#define ORC_UINT64_C(x) (x##UL)
#endif
#endif
typedef union { orc_int16 i; orc_int8 x2[2]; } orc_union16;
typedef union { orc_int32 i; float f; orc_int16 x2[2]; orc_int8 x4[4]; } orc_union32;
typedef union { orc_int64 i; double f; orc_int32 x2[2]; float x2f[2]; orc_int16 x4[4]; } orc_union64;
#endif
#ifndef ORC_RESTRICT
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define ORC_RESTRICT restrict
#elif defined(__GNUC__) && __GNUC__ >= 4
#define ORC_RESTRICT __restrict__
#else
#define ORC_RESTRICT
#endif
#endif
#ifndef ORC_INTERNAL
#if defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
#define ORC_INTERNAL __attribute__((visibility("hidden")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550)
#define ORC_INTERNAL __hidden
#elif defined (__GNUC__)
#define ORC_INTERNAL __attribute__((visibility("hidden")))
#else
#define ORC_INTERNAL
#endif
#endif
void audiomixer_orc_add_s32 (gint32 * ORC_RESTRICT d1, const gint32 * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_s16 (gint16 * ORC_RESTRICT d1, const gint16 * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_s8 (gint8 * ORC_RESTRICT d1, const gint8 * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_u32 (guint32 * ORC_RESTRICT d1, const guint32 * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_u16 (guint16 * ORC_RESTRICT d1, const guint16 * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_u8 (guint8 * ORC_RESTRICT d1, const guint8 * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_f32 (float * ORC_RESTRICT d1, const float * ORC_RESTRICT s1, int n);
void audiomixer_orc_add_f64 (double * ORC_RESTRICT d1, const double * ORC_RESTRICT s1, int n);
void audiomixer_orc_volume_u8 (guint8 * ORC_RESTRICT d1, int p1, int n);
void audiomixer_orc_add_volume_u8 (guint8 * ORC_RESTRICT d1, const guint8 * ORC_RESTRICT s1, int p1, int n);
void audiomixer_orc_add_volume_s8 (gint8 * ORC_RESTRICT d1, const gint8 * ORC_RESTRICT s1, int p1, int n);
void audiomixer_orc_add_volume_u16 (guint16 * ORC_RESTRICT d1, const guint16 * ORC_RESTRICT s1, int p1, int n);
void audiomixer_orc_add_volume_s16 (gint16 * ORC_RESTRICT d1, const gint16 * ORC_RESTRICT s1, int p1, int n);
void audiomixer_orc_add_volume_u32 (guint32 * ORC_RESTRICT d1, const guint32 * ORC_RESTRICT s1, int p1, int n);
void audiomixer_orc_add_volume_s32 (gint32 * ORC_RESTRICT d1, const gint32 * ORC_RESTRICT s1, int p1, int n);
void audiomixer_orc_add_volume_f32 (float * ORC_RESTRICT d1, const float * ORC_RESTRICT s1, float p1, int n);
void audiomixer_orc_add_volume_f64 (double * ORC_RESTRICT d1, const double * ORC_RESTRICT s1, double p1, int n);
#ifdef __cplusplus
}
#endif
#endif

View file

@@ -1,176 +0,0 @@
.function audiomixer_orc_add_s32
.dest 4 d1 gint32
.source 4 s1 gint32
addssl d1, d1, s1
.function audiomixer_orc_add_s16
.dest 2 d1 gint16
.source 2 s1 gint16
addssw d1, d1, s1
.function audiomixer_orc_add_s8
.dest 1 d1 gint8
.source 1 s1 gint8
addssb d1, d1, s1
.function audiomixer_orc_add_u32
.dest 4 d1 guint32
.source 4 s1 guint32
addusl d1, d1, s1
.function audiomixer_orc_add_u16
.dest 2 d1 guint16
.source 2 s1 guint16
addusw d1, d1, s1
.function audiomixer_orc_add_u8
.dest 1 d1 guint8
.source 1 s1 guint8
addusb d1, d1, s1
.function audiomixer_orc_add_f32
.dest 4 d1 float
.source 4 s1 float
addf d1, d1, s1
.function audiomixer_orc_add_f64
.dest 8 d1 double
.source 8 s1 double
addd d1, d1, s1
.function audiomixer_orc_volume_u8
.dest 1 d1 guint8
.param 1 p1
.const 1 c1 0x80
.temp 2 t1
.temp 1 t2
xorb t2, d1, c1
mulsbw t1, t2, p1
shrsw t1, t1, 3
convssswb t2, t1
xorb d1, t2, c1
.function audiomixer_orc_add_volume_u8
.dest 1 d1 guint8
.source 1 s1 guint8
.param 1 p1
.const 1 c1 0x80
.temp 2 t1
.temp 1 t2
xorb t2, s1, c1
mulsbw t1, t2, p1
shrsw t1, t1, 3
convssswb t2, t1
xorb t2, t2, c1
addusb d1, d1, t2
.function audiomixer_orc_add_volume_s8
.dest 1 d1 gint8
.source 1 s1 gint8
.param 1 p1
.temp 2 t1
.temp 1 t2
mulsbw t1, s1, p1
shrsw t1, t1, 3
convssswb t2, t1
addssb d1, d1, t2
.function audiomixer_orc_add_volume_u16
.dest 2 d1 guint16
.source 2 s1 guint16
.param 2 p1
.const 2 c1 0x8000
.temp 4 t1
.temp 2 t2
xorw t2, s1, c1
mulswl t1, t2, p1
shrsl t1, t1, 11
convssslw t2, t1
xorw t2, t2, c1
addusw d1, d1, t2
.function audiomixer_orc_add_volume_s16
.dest 2 d1 gint16
.source 2 s1 gint16
.param 2 p1
.temp 4 t1
.temp 2 t2
mulswl t1, s1, p1
shrsl t1, t1, 11
convssslw t2, t1
addssw d1, d1, t2
.function audiomixer_orc_add_volume_u32
.dest 4 d1 guint32
.source 4 s1 guint32
.param 4 p1
.const 4 c1 0x80000000
.temp 8 t1
.temp 4 t2
xorl t2, s1, c1
mulslq t1, t2, p1
shrsq t1, t1, 27
convsssql t2, t1
xorl t2, t2, c1
addusl d1, d1, t2
.function audiomixer_orc_add_volume_s32
.dest 4 d1 gint32
.source 4 s1 gint32
.param 4 p1
.temp 8 t1
.temp 4 t2
mulslq t1, s1, p1
shrsq t1, t1, 27
convsssql t2, t1
addssl d1, d1, t2
.function audiomixer_orc_add_volume_f32
.dest 4 d1 float
.source 4 s1 float
.floatparam 4 p1
.temp 4 t1
mulf t1, s1, p1
addf d1, d1, t1
.function audiomixer_orc_add_volume_f64
.dest 8 d1 double
.source 8 s1 double
.doubleparam 8 p1
.temp 8 t1
muld t1, s1, p1
addd d1, d1, t1

View file

@@ -1,32 +0,0 @@
audiomixer_sources = [
'gstaudiomixer.c',
'gstaudiointerleave.c',
]
orcsrc = 'gstaudiomixerorc'
if have_orcc
orc_h = custom_target(orcsrc + '.h',
input : orcsrc + '.orc',
output : orcsrc + '.h',
command : orcc_args + ['--header', '-o', '@OUTPUT@', '@INPUT@'])
orc_c = custom_target(orcsrc + '.c',
input : orcsrc + '.orc',
output : orcsrc + '.c',
command : orcc_args + ['--implementation', '-o', '@OUTPUT@', '@INPUT@'])
else
orc_h = configure_file(input : orcsrc + '-dist.h',
output : orcsrc + '.h',
configuration : configuration_data())
orc_c = configure_file(input : orcsrc + '-dist.c',
output : orcsrc + '.c',
configuration : configuration_data())
endif
gstaudiomixer = library('gstaudiomixer',
audiomixer_sources, orc_c, orc_h,
c_args : gst_plugins_bad_args + [ '-DGST_USE_UNSTABLE_API' ],
include_directories : [configinc],
dependencies : [gstbadaudio_dep, gstaudio_dep, gstbase_dep, orc_dep],
install : true,
install_dir : plugins_install_dir,
)

View file

@@ -5,7 +5,6 @@ subdir('aiff')
subdir('asfmux')
subdir('audiobuffersplit')
subdir('audiofxbad')
subdir('audiomixer')
subdir('audiomixmatrix')
subdir('audiovisualizers')
subdir('autoconvert')

View file

@@ -127,7 +127,7 @@ check_kate=
endif
if HAVE_ORC
check_orc = orc/bayer orc/audiomixer orc/compositor
check_orc = orc/bayer orc/compositor
else
check_orc =
endif
@@ -257,8 +257,6 @@ check_PROGRAMS = \
elements/videoframe-audiolevel \
elements/autoconvert \
elements/autovideoconvert \
elements/audiointerleave \
elements/audiomixer \
elements/asfmux \
elements/camerabin \
elements/gdppay \
@@ -313,12 +311,6 @@ LDADD = $(GST_CHECK_LIBS)
generic_states_CFLAGS = $(AM_CFLAGS) $(GLIB_CFLAGS)
generic_states_LDADD = $(LDADD) $(GLIB_LIBS)
elements_audiomixer_LDADD = $(GST_BASE_LIBS) $(GST_CONTROLLER_LIBS) -lgstbase-@GST_API_VERSION@ $(LDADD)
elements_audiomixer_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS) $(GST_CONTROLLER_CFLAGS) $(AM_CFLAGS)
elements_audiointerleave_LDADD = $(GST_BASE_LIBS) -lgstbase-@GST_API_VERSION@ $(GST_AUDIO_LIBS) $(LDADD)
elements_audiointerleave_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS) $(AM_CFLAGS)
elements_pnm_CFLAGS = \
$(GST_PLUGINS_BASE_CFLAGS) \
$(GST_BASE_CFLAGS) $(GST_CFLAGS) $(AM_CFLAGS)
@@ -542,14 +534,6 @@ orc/bayer.c: $(top_srcdir)/gst/bayer/gstbayerorc.orc
$(MKDIR_P) orc
$(ORCC) --test -o $@ $<
orc_audiomixer_CFLAGS = $(ORC_CFLAGS)
orc_audiomixer_LDADD = $(ORC_LIBS) -lorc-test-0.4
nodist_orc_audiomixer_SOURCES = orc/audiomixer.c
orc/audiomixer.c: $(top_srcdir)/gst/audiomixer/gstaudiomixerorc.orc
$(MKDIR_P) orc
$(ORCC) --test -o $@ $<
elements_compositor_LDADD = \
$(GST_PLUGINS_BASE_LIBS) $(GST_VIDEO_LIBS) $(GST_BASE_LIBS) $(LDADD)
elements_compositor_CFLAGS = \

View file

@@ -2,8 +2,6 @@
aiffparse
asfmux
assrender
audiointerleave
audiomixer
autoconvert
autovideoconvert
baseaudiovisualizer

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -18,8 +18,6 @@ base_tests = [
[['elements/aiffparse.c']],
[['elements/asfmux.c']],
[['elements/assrender.c'], not ass_dep.found(), [ass_dep]],
[['elements/audiointerleave.c']],
[['elements/audiomixer.c']],
[['elements/autoconvert.c']],
[['elements/autovideoconvert.c']],
[['elements/camerabin.c']],