audio-converter: rework the main processing loop

Rework the main processing loop. We now create an audio processing
chain from small core functions. This is very similar to how the
video-converter core works and allows us to statically calculate an
optimal allocation strategy for all possible combinations of operations.
Make sure we support non-interleaved data everywhere.
Add functions to calculate in and out frames and latency.
This commit is contained in:
Wim Taymans 2015-12-16 10:45:48 +01:00
parent 8bcf183c7f
commit 08734e7598
3 changed files with 575 additions and 173 deletions

View file

@ -74,8 +74,9 @@ ensure_debug_category (void)
#define ensure_debug_category() /* NOOP */
#endif /* GST_DISABLE_GST_DEBUG */
typedef void (*AudioConvertFunc) (gpointer dst, const gpointer src, gint count);
typedef struct _AudioChain AudioChain;
typedef void (*AudioConvertFunc) (gpointer dst, const gpointer src, gint count);
/* int/int int/float float/int float/float
*
* unpack S32 S32 F64 F64
@ -84,6 +85,11 @@ typedef void (*AudioConvertFunc) (gpointer dst, const gpointer src, gint count);
* convert F64->S32
* quantize S32 S32
* pack S32 F64 S32 F64
*
*
* interleave
* deinterleave
* resample
*/
struct _GstAudioConverter
{
@ -92,26 +98,136 @@ struct _GstAudioConverter
GstStructure *config;
GstAudioConverterFlags flags;
GstAudioFormat current_format;
GstAudioLayout current_layout;
gint current_channels;
gpointer *in_data;
gpointer *out_data;
/* unpack */
gboolean in_default;
gboolean unpack_ip;
AudioChain *unpack_chain;
/* convert in */
AudioConvertFunc convert_in;
AudioChain *convert_in_chain;
/* channel mix */
gboolean mix_passthrough;
GstAudioChannelMix *mix;
AudioChain *mix_chain;
/* convert out */
AudioConvertFunc convert_out;
AudioChain *convert_out_chain;
/* quant */
GstAudioQuantize *quant;
AudioChain *quant_chain;
/* pack */
gboolean out_default;
AudioChain *pack_chain;
gboolean passthrough;
gpointer tmpbuf;
gpointer tmpbuf2;
gint tmpbufsize;
};
typedef gboolean (*AudioChainFunc) (AudioChain * chain, gsize samples,
gpointer user_data);
typedef gpointer *(*AudioChainAllocFunc) (AudioChain * chain, gsize samples,
gpointer user_data);
static gpointer *get_output_samples (AudioChain * chain, gsize samples,
gpointer user_data);
struct _AudioChain
{
AudioChain *prev;
AudioChainFunc make_func;
gpointer make_func_data;
GDestroyNotify make_func_notify;
gint stride;
gint inc;
gint blocks;
gboolean pass_alloc;
gboolean allow_ip;
AudioChainAllocFunc alloc_func;
gpointer alloc_data;
gpointer *tmp;
gsize tmpsize;
gpointer *samples;
};
/* Allocate a new processing-chain element linked to @prev. Block count,
 * per-sample increment and byte stride are derived from the converter's
 * current layout, channel count and sample format. */
static AudioChain *
audio_chain_new (AudioChain * prev, GstAudioConverter * convert)
{
  const GstAudioFormatInfo *finfo;
  AudioChain *res;
  gboolean planar;

  res = g_slice_new0 (AudioChain);
  res->prev = prev;

  planar = (convert->current_layout == GST_AUDIO_LAYOUT_NON_INTERLEAVED);
  /* planar data: one block per channel, samples advance by 1;
   * interleaved data: one block, samples advance by the channel count */
  res->blocks = planar ? convert->current_channels : 1;
  res->inc = planar ? 1 : convert->current_channels;

  finfo = gst_audio_format_get_info (convert->current_format);
  res->stride = (finfo->width * res->inc) / 8;

  return res;
}
/* Install the function (with user data and destroy notify) that will be
 * invoked to produce samples for @chain when they are requested. */
static void
audio_chain_set_make_func (AudioChain * chain,
    AudioChainFunc make_func, gpointer user_data, GDestroyNotify notify)
{
  chain->make_func_notify = notify;
  chain->make_func_data = user_data;
  chain->make_func = make_func;
}
/* Release a chain element: run the destroy notify for the make-func user
 * data, free the temporary sample storage, then the struct itself. */
static void
audio_chain_free (AudioChain * chain)
{
  GST_LOG ("free chain %p", chain);

  if (chain->make_func_notify != NULL)
    chain->make_func_notify (chain->make_func_data);

  g_free (chain->tmp);
  g_slice_free (AudioChain, chain);
}
/* Ask the allocator configured on @chain for room to hold @samples samples
 * and return the resulting array of block pointers. */
static gpointer *
audio_chain_alloc_samples (AudioChain * chain, guint samples)
{
  AudioChainAllocFunc func = chain->alloc_func;

  return func (chain, samples, chain->alloc_data);
}
/* Pull the produced samples out of @chain, invoking its make function until
 * samples become available. The samples field is cleared afterwards so the
 * next request triggers fresh production.
 * NOTE(review): the gboolean result of make_func is ignored; if a make
 * function ever returned without setting chain->samples this loop would not
 * terminate — presumably all make functions always succeed, verify. */
static gpointer *
audio_chain_get_samples (AudioChain * chain, guint samples)
{
  gpointer *res;

  while (!chain->samples)
    chain->make_func (chain, samples, chain->make_func_data);

  res = chain->samples;
  chain->samples = NULL;
  return res;
}
/*
static guint
get_opt_uint (GstAudioConverter * convert, const gchar * opt, guint def)
@ -202,6 +318,351 @@ gst_audio_converter_get_config (GstAudioConverter * convert)
return convert->config;
}
/* Chain step: unpack the input samples into the unpack format.
 * When the input already is in the unpack format (in_default), the source is
 * writable and in-place operation is allowed, the caller's input buffers are
 * handed downstream untouched; otherwise the data is unpacked either into
 * the writable input buffers or into freshly allocated scratch space. */
static gboolean
do_unpack (AudioChain * chain, gsize samples, gpointer user_data)
{
  GstAudioConverter *convert = user_data;
  gpointer *tmp;
  gboolean src_writable;

  src_writable = (convert->flags & GST_AUDIO_CONVERTER_FLAG_SOURCE_WRITABLE);

  if (!chain->allow_ip || !src_writable || !convert->in_default) {
    gint i;

    /* a writable source can serve as the unpack destination, otherwise
     * get scratch memory from the chain allocator */
    if (src_writable && chain->allow_ip)
      tmp = convert->in_data;
    else
      tmp = audio_chain_alloc_samples (chain, samples);

    GST_LOG ("unpack %p %p, %" G_GSIZE_FORMAT, tmp, convert->in_data, samples);

    for (i = 0; i < chain->blocks; i++) {
      convert->in.finfo->unpack_func (convert->in.finfo,
          GST_AUDIO_PACK_FLAG_TRUNCATE_RANGE, tmp[i], convert->in_data[i],
          samples * chain->inc);
    }
  } else {
    /* input is already in the unpack format, pass it through as-is */
    tmp = convert->in_data;
    GST_LOG ("get in samples %p", tmp);
  }
  chain->samples = tmp;

  return TRUE;
}
/* Chain step: run the input conversion (S32 -> F64) over every block of
 * samples coming from the previous element. */
static gboolean
do_convert_in (AudioChain * chain, gsize samples, gpointer user_data)
{
  GstAudioConverter *convert = user_data;
  gpointer *src, *dst;
  gint b;

  src = audio_chain_get_samples (chain->prev, samples);
  if (chain->allow_ip)
    dst = src;
  else
    dst = audio_chain_alloc_samples (chain, samples);

  GST_LOG ("convert in %p, %p %" G_GSIZE_FORMAT, src, dst, samples);

  for (b = 0; b < chain->blocks; b++)
    convert->convert_in (dst[b], src[b], samples * chain->inc);

  chain->samples = dst;

  return TRUE;
}
/* Chain step: remap/mix channels with the prepared channel-mix matrix. */
static gboolean
do_mix (AudioChain * chain, gsize samples, gpointer user_data)
{
  GstAudioConverter *convert = user_data;
  gpointer *src, *dst;

  src = audio_chain_get_samples (chain->prev, samples);
  if (chain->allow_ip)
    dst = src;
  else
    dst = audio_chain_alloc_samples (chain, samples);

  GST_LOG ("mix %p %p,%" G_GSIZE_FORMAT, src, dst, samples);

  gst_audio_channel_mix_samples (convert->mix, src, dst, samples);

  chain->samples = dst;

  return TRUE;
}
/* Chain step: run the output conversion (F64 -> S32) over every block
 * ahead of the quantize and pack stages. */
static gboolean
do_convert_out (AudioChain * chain, gsize samples, gpointer user_data)
{
  GstAudioConverter *convert = user_data;
  gpointer *src, *dst;
  gint b;

  src = audio_chain_get_samples (chain->prev, samples);
  if (chain->allow_ip)
    dst = src;
  else
    dst = audio_chain_alloc_samples (chain, samples);

  GST_LOG ("convert out %p, %p %" G_GSIZE_FORMAT, src, dst, samples);

  for (b = 0; b < chain->blocks; b++)
    convert->convert_out (dst[b], src[b], samples * chain->inc);

  chain->samples = dst;

  return TRUE;
}
/* Chain step: run the quantizer (dither / noise shaping / rounding) over
 * the samples coming from the previous element. */
static gboolean
do_quantize (AudioChain * chain, gsize samples, gpointer user_data)
{
  GstAudioConverter *convert = user_data;
  gpointer *src, *dst;

  src = audio_chain_get_samples (chain->prev, samples);
  if (chain->allow_ip)
    dst = src;
  else
    dst = audio_chain_alloc_samples (chain, samples);

  GST_LOG ("quantize %p, %p %" G_GSIZE_FORMAT, src, dst, samples);

  gst_audio_quantize_samples (convert->quant, src, dst, samples);

  chain->samples = dst;

  return TRUE;
}
/* Build the first chain element: unpacking the input format into its
 * canonical unpack format. Also primes the converter's notion of the
 * current format, layout and channel count from the input info. */
static AudioChain *
chain_unpack (GstAudioConverter * convert)
{
  AudioChain *prev;
  GstAudioInfo *in = &convert->in;
  const GstAudioFormatInfo *fup;

  convert->current_format = in->finfo->unpack_format;
  convert->current_layout = in->layout;
  convert->current_channels = in->channels;
  /* TRUE when the input already is its own unpack format, so the unpack
   * step may be skipped at runtime */
  convert->in_default = in->finfo->unpack_format == in->finfo->format;

  GST_INFO ("unpack format %s to %s",
      gst_audio_format_to_string (in->finfo->format),
      gst_audio_format_to_string (convert->current_format));

  fup = gst_audio_format_get_info (in->finfo->unpack_format);

  prev = convert->unpack_chain = audio_chain_new (NULL, convert);
  /* in-place only works when unpacked samples are not wider than the
   * packed input samples */
  prev->allow_ip = fup->width <= in->finfo->width;
  prev->pass_alloc = FALSE;
  audio_chain_set_make_func (prev, do_unpack, convert, NULL);

  return prev;
}
/* Optionally insert a S32 -> F64 conversion element. Added when the input
 * unpacks to integers but the output is a float format, so the rest of the
 * processing runs in double precision. */
static AudioChain *
chain_convert_in (GstAudioConverter * convert, AudioChain * prev)
{
  gboolean in_int, out_int;
  GstAudioInfo *in = &convert->in;
  GstAudioInfo *out = &convert->out;

  in_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (in->finfo);
  out_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (out->finfo);

  if (in_int && !out_int) {
    GST_INFO ("convert S32 to F64");
    convert->convert_in = (AudioConvertFunc) audio_orc_s32_to_double;
    convert->current_format = GST_AUDIO_FORMAT_F64;

    prev = convert->convert_in_chain = audio_chain_new (prev, convert);
    /* doubles are wider than the S32 input, cannot convert in place */
    prev->allow_ip = FALSE;
    prev->pass_alloc = FALSE;
    audio_chain_set_make_func (prev, do_convert_in, convert, NULL);
  }
  return prev;
}
/* Set up the channel mixer and, when it is not passthrough, insert a mix
 * element into the chain. Updates current_channels to the output channel
 * count for the elements that follow. */
static AudioChain *
chain_mix (GstAudioConverter * convert, AudioChain * prev)
{
  GstAudioChannelMixFlags flags;
  GstAudioInfo *in = &convert->in;
  GstAudioInfo *out = &convert->out;
  GstAudioFormat format = convert->current_format;

  flags =
      GST_AUDIO_INFO_IS_UNPOSITIONED (in) ?
      GST_AUDIO_CHANNEL_MIX_FLAGS_UNPOSITIONED_IN : 0;
  flags |=
      GST_AUDIO_INFO_IS_UNPOSITIONED (out) ?
      GST_AUDIO_CHANNEL_MIX_FLAGS_UNPOSITIONED_OUT : 0;

  convert->current_channels = out->channels;

  convert->mix =
      gst_audio_channel_mix_new (flags, format, in->channels, in->position,
      out->channels, out->position);
  convert->mix_passthrough =
      gst_audio_channel_mix_is_passthrough (convert->mix);
  GST_INFO ("mix format %s, passthrough %d, in_channels %d, out_channels %d",
      gst_audio_format_to_string (format), convert->mix_passthrough,
      in->channels, out->channels);

  if (!convert->mix_passthrough) {
    prev = convert->mix_chain = audio_chain_new (prev, convert);
    /* we can only do in-place when in >= out, else we don't have enough
     * memory. */
    prev->allow_ip = in->channels >= out->channels;
    prev->pass_alloc = in->channels <= out->channels;
    audio_chain_set_make_func (prev, do_mix, convert, NULL);
  }
  return prev;
}
/* Optionally insert a F64 -> S32 conversion element. Added when processing
 * ran in doubles but the output format is integer, so quantize/pack can
 * work on S32 samples. */
static AudioChain *
chain_convert_out (GstAudioConverter * convert, AudioChain * prev)
{
  gboolean in_int, out_int;
  GstAudioInfo *in = &convert->in;
  GstAudioInfo *out = &convert->out;

  in_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (in->finfo);
  out_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (out->finfo);

  if (!in_int && out_int) {
    convert->convert_out = (AudioConvertFunc) audio_orc_double_to_s32;
    convert->current_format = GST_AUDIO_FORMAT_S32;
    GST_INFO ("convert F64 to S32");

    prev = convert->convert_out_chain = audio_chain_new (prev, convert);
    /* S32 is not wider than the F64 input, conversion can be in-place */
    prev->allow_ip = TRUE;
    prev->pass_alloc = FALSE;
    audio_chain_set_make_func (prev, do_convert_out, convert, NULL);
  }
  return prev;
}
/* Optionally insert a quantize element for integer output with a depth
 * below 32 bits, choosing dither and noise-shaping settings from the
 * configuration and the in/out depths. */
static AudioChain *
chain_quantize (GstAudioConverter * convert, AudioChain * prev)
{
  GstAudioInfo *in = &convert->in;
  GstAudioInfo *out = &convert->out;
  gint in_depth, out_depth;
  gboolean in_int, out_int;
  GstAudioDitherMethod dither;
  GstAudioNoiseShapingMethod ns;

  dither = GET_OPT_DITHER_METHOD (convert);
  ns = GET_OPT_NOISE_SHAPING_METHOD (convert);

  in_depth = GST_AUDIO_FORMAT_INFO_DEPTH (in->finfo);
  out_depth = GST_AUDIO_FORMAT_INFO_DEPTH (out->finfo);
  GST_INFO ("depth in %d, out %d", in_depth, out_depth);

  in_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (in->finfo);
  out_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (out->finfo);

  /* Don't dither or apply noise shaping if target depth is bigger than 20 bits
   * as DA converters only can do a SNR up to 20 bits in reality.
   * Also don't dither or apply noise shaping if target depth is larger than
   * source depth. */
  if (out_depth > 20 || (in_int && out_depth >= in_depth)) {
    dither = GST_AUDIO_DITHER_NONE;
    ns = GST_AUDIO_NOISE_SHAPING_NONE;
    GST_INFO ("using no dither and noise shaping");
  } else {
    GST_INFO ("using dither %d and noise shaping %d", dither, ns);
    /* Use simple error feedback when output sample rate is smaller than
     * 32000 as the other methods might move the noise to audible ranges */
    if (ns > GST_AUDIO_NOISE_SHAPING_ERROR_FEEDBACK && out->rate < 32000)
      ns = GST_AUDIO_NOISE_SHAPING_ERROR_FEEDBACK;
  }
  /* we still want to run the quantization step when reducing bits to get
   * the rounding correct */
  if (out_int && out_depth < 32) {
    GST_INFO ("quantize to %d bits, dither %d, ns %d", out_depth, dither, ns);
    convert->quant =
        gst_audio_quantize_new (dither, ns, 0, convert->current_format,
        out->channels, 1U << (32 - out_depth));

    prev = convert->quant_chain = audio_chain_new (prev, convert);
    /* quantizing can always run in place and reuse upstream allocation */
    prev->allow_ip = TRUE;
    prev->pass_alloc = TRUE;
    audio_chain_set_make_func (prev, do_quantize, convert, NULL);
  }
  return prev;
}
/* Final step of chain construction: record the packing parameters. No new
 * chain element is created; packing is performed directly on the samples
 * pulled from the last element. */
static AudioChain *
chain_pack (GstAudioConverter * convert, AudioChain * prev)
{
  GstAudioInfo *out = &convert->out;
  GstAudioFormat fmt = convert->current_format;

  convert->current_format = out->finfo->format;

  g_assert (out->finfo->unpack_format == fmt);
  convert->out_default = (fmt == out->finfo->format);
  GST_INFO ("pack format %s to %s", gst_audio_format_to_string (fmt),
      gst_audio_format_to_string (out->finfo->format));

  return prev;
}
/* Allocator that hands out the caller-provided output buffers so the last
 * processing step writes straight into the destination. */
static gpointer *
get_output_samples (AudioChain * chain, gsize samples, gpointer user_data)
{
  GstAudioConverter *self = user_data;

  GST_LOG ("output samples %" G_GSIZE_FORMAT, samples);

  return self->out_data;
}
/* Allocator that keeps per-chain scratch memory. A single allocation holds
 * the block pointer table immediately followed by the sample data; the
 * table entries are re-pointed into the data area whenever the buffer has
 * to grow. The memory is owned by the chain and freed in audio_chain_free. */
static gpointer *
get_temp_samples (AudioChain * chain, gsize samples, gpointer user_data)
{
  gsize needed;

  /* first part contains the pointers, second part the data */
  needed = (samples * chain->stride + sizeof (gpointer)) * chain->blocks;

  if (needed > chain->tmpsize) {
    gint i;
    guint8 *s;

    GST_DEBUG ("alloc samples %" G_GSIZE_FORMAT, needed);
    /* g_realloc aborts on failure, no NULL check needed */
    chain->tmp = g_realloc (chain->tmp, needed);
    chain->tmpsize = needed;

    /* jump to the data */
    s = (guint8 *) & chain->tmp[chain->blocks];
    /* set up the pointers */
    for (i = 0; i < chain->blocks; i++)
      chain->tmp[i] = s + (i * samples * chain->stride);
  }
  return chain->tmp;
}
/* Statically decide, for every chain element, where its output samples are
 * written: directly into the caller's output buffers or into per-chain
 * temporary storage, and whether the element may operate in place. Walks
 * the chain backwards from the pack step so the final element can target
 * the destination when possible. */
static void
setup_allocators (GstAudioConverter * convert)
{
  AudioChain *chain;
  AudioChainAllocFunc alloc_func;
  gboolean allow_ip;

  /* start with using dest if we can directly write into it */
  if (convert->out_default) {
    alloc_func = get_output_samples;
    allow_ip = FALSE;
  } else {
    alloc_func = get_temp_samples;
    allow_ip = TRUE;
  }
  /* now walk backwards, we try to write into the dest samples directly
   * and keep track if the source needs to be writable */
  for (chain = convert->pack_chain; chain; chain = chain->prev) {
    chain->alloc_func = alloc_func;
    chain->alloc_data = convert;
    /* an element may only work in place when both it and the allocation
     * strategy chosen so far permit it */
    chain->allow_ip = allow_ip && chain->allow_ip;

    if (!chain->pass_alloc) {
      /* can't pass allocator, make new temp line allocator */
      alloc_func = get_temp_samples;
      allow_ip = TRUE;
    }
  }
}
/**
* gst_audio_converter_new: (skip)
* @in: a source #GstAudioInfo
@ -221,12 +682,7 @@ gst_audio_converter_new (GstAudioInfo * in, GstAudioInfo * out,
GstStructure * config)
{
GstAudioConverter *convert;
gint in_depth, out_depth;
GstAudioChannelMixFlags flags;
gboolean in_int, out_int;
GstAudioFormat format;
GstAudioDitherMethod dither;
GstAudioNoiseShapingMethod ns;
AudioChain *prev;
g_return_val_if_fail (in != NULL, FALSE);
g_return_val_if_fail (out != NULL, FALSE);
@ -249,85 +705,20 @@ gst_audio_converter_new (GstAudioInfo * in, GstAudioInfo * out,
if (config)
gst_audio_converter_set_config (convert, config);
dither = GET_OPT_DITHER_METHOD (convert);
ns = GET_OPT_NOISE_SHAPING_METHOD (convert);
GST_INFO ("unitsizes: %d -> %d", in->bpf, out->bpf);
in_depth = GST_AUDIO_FORMAT_INFO_DEPTH (in->finfo);
out_depth = GST_AUDIO_FORMAT_INFO_DEPTH (out->finfo);
GST_INFO ("depth in %d, out %d", in_depth, out_depth);
in_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (in->finfo);
out_int = GST_AUDIO_FORMAT_INFO_IS_INTEGER (out->finfo);
flags =
GST_AUDIO_INFO_IS_UNPOSITIONED (in) ?
GST_AUDIO_CHANNEL_MIX_FLAGS_UNPOSITIONED_IN : 0;
flags |=
GST_AUDIO_INFO_IS_UNPOSITIONED (out) ?
GST_AUDIO_CHANNEL_MIX_FLAGS_UNPOSITIONED_OUT : 0;
/* step 1, unpack */
format = in->finfo->unpack_format;
convert->in_default = in->finfo->unpack_format == in->finfo->format;
GST_INFO ("unpack format %s to %s",
gst_audio_format_to_string (in->finfo->format),
gst_audio_format_to_string (format));
prev = chain_unpack (convert);
/* step 2, optional convert from S32 to F64 for channel mix */
if (in_int && !out_int) {
GST_INFO ("convert S32 to F64");
convert->convert_in = (AudioConvertFunc) audio_orc_s32_to_double;
format = GST_AUDIO_FORMAT_F64;
}
prev = chain_convert_in (convert, prev);
/* step 3, channel mix */
convert->mix =
gst_audio_channel_mix_new (flags, format, in->channels, in->position,
out->channels, out->position);
convert->mix_passthrough =
gst_audio_channel_mix_is_passthrough (convert->mix);
GST_INFO ("mix format %s, passthrough %d, in_channels %d, out_channels %d",
gst_audio_format_to_string (format), convert->mix_passthrough,
in->channels, out->channels);
prev = chain_mix (convert, prev);
/* step 4, optional convert for quantize */
if (!in_int && out_int) {
GST_INFO ("convert F64 to S32");
convert->convert_out = (AudioConvertFunc) audio_orc_double_to_s32;
format = GST_AUDIO_FORMAT_S32;
}
prev = chain_convert_out (convert, prev);
/* step 5, optional quantize */
/* Don't dither or apply noise shaping if target depth is bigger than 20 bits
* as DA converters only can do a SNR up to 20 bits in reality.
* Also don't dither or apply noise shaping if target depth is larger than
* source depth. */
if (out_depth > 20 || (in_int && out_depth >= in_depth)) {
dither = GST_AUDIO_DITHER_NONE;
ns = GST_AUDIO_NOISE_SHAPING_NONE;
GST_INFO ("using no dither and noise shaping");
} else {
GST_INFO ("using dither %d and noise shaping %d", dither, ns);
/* Use simple error feedback when output sample rate is smaller than
* 32000 as the other methods might move the noise to audible ranges */
if (ns > GST_AUDIO_NOISE_SHAPING_ERROR_FEEDBACK && out->rate < 32000)
ns = GST_AUDIO_NOISE_SHAPING_ERROR_FEEDBACK;
}
/* we still want to run the quantization step when reducing bits to get
* the rounding correct */
if (out_int && out_depth < 32) {
GST_INFO ("quantize to %d bits, dither %d, ns %d", out_depth, dither, ns);
convert->quant = gst_audio_quantize_new (dither, ns, 0, format,
out->channels, 1U << (32 - out_depth));
}
prev = chain_quantize (convert, prev);
/* step 6, pack */
g_assert (out->finfo->unpack_format == format);
convert->out_default = format == out->finfo->format;
GST_INFO ("pack format %s to %s", gst_audio_format_to_string (format),
gst_audio_format_to_string (out->finfo->format));
convert->pack_chain = chain_pack (convert, prev);
/* optimize */
if (out->finfo->format == in->finfo->format && convert->mix_passthrough) {
@ -335,6 +726,9 @@ gst_audio_converter_new (GstAudioInfo * in, GstAudioInfo * out,
convert->passthrough = TRUE;
}
setup_allocators (convert);
return convert;
/* ERRORS */
@ -356,6 +750,17 @@ gst_audio_converter_free (GstAudioConverter * convert)
{
g_return_if_fail (convert != NULL);
if (convert->unpack_chain)
audio_chain_free (convert->unpack_chain);
if (convert->convert_in_chain)
audio_chain_free (convert->convert_in_chain);
if (convert->mix_chain)
audio_chain_free (convert->mix_chain);
if (convert->convert_out_chain)
audio_chain_free (convert->convert_out_chain);
if (convert->quant_chain)
audio_chain_free (convert->quant_chain);
if (convert->quant)
gst_audio_quantize_free (convert->quant);
if (convert->mix)
@ -363,13 +768,61 @@ gst_audio_converter_free (GstAudioConverter * convert)
gst_audio_info_init (&convert->in);
gst_audio_info_init (&convert->out);
g_free (convert->tmpbuf);
g_free (convert->tmpbuf2);
gst_structure_free (convert->config);
g_slice_free (GstAudioConverter, convert);
}
/**
 * gst_audio_converter_get_out_frames:
 * @convert: a #GstAudioConverter
 * @in_frames: number of input frames
 *
 * Calculate how many output frames can be produced when @in_frames input
 * frames are given to @convert.
 *
 * Returns: the number of output frames
 */
gsize
gst_audio_converter_get_out_frames (GstAudioConverter * convert,
    gsize in_frames)
{
  /* no rate conversion is performed, so frames map 1:1 */
  return in_frames;
}
/**
 * gst_audio_converter_get_in_frames:
 * @convert: a #GstAudioConverter
 * @out_frames: number of output frames
 *
 * Calculate how many input frames are currently needed by @convert to produce
 * @out_frames of output frames.
 *
 * Returns: the number of input frames
 */
gsize
gst_audio_converter_get_in_frames (GstAudioConverter * convert,
    gsize out_frames)
{
  /* no rate conversion is performed, so frames map 1:1 */
  return out_frames;
}
/**
 * gst_audio_converter_get_max_latency:
 * @convert: a #GstAudioConverter
 *
 * Get the maximum number of input frames that the converter would
 * need before producing output.
 *
 * Returns: the latency of @convert as expressed in the number of
 * frames.
 */
gsize
gst_audio_converter_get_max_latency (GstAudioConverter * convert)
{
  /* none of the current processing steps introduces latency */
  return 0;
}
/**
* gst_audio_converter_samples:
* @convert: a #GstAudioConverter
@ -402,8 +855,9 @@ gst_audio_converter_samples (GstAudioConverter * convert,
gpointer out[], gsize out_samples, gsize * in_consumed,
gsize * out_produced)
{
guint size;
gpointer outbuf, tmpbuf, tmpbuf2, inp, outp;
AudioChain *chain;
gpointer *tmp;
gint i;
g_return_val_if_fail (convert != NULL, FALSE);
g_return_val_if_fail (in != NULL, FALSE);
@ -419,93 +873,31 @@ gst_audio_converter_samples (GstAudioConverter * convert,
return TRUE;
}
inp = in[0];
outp = out[0];
chain = convert->pack_chain;
if (convert->passthrough) {
memcpy (outp, inp, in_samples * convert->in.bpf);
for (i = 0; i < chain->blocks; i++)
memcpy (out[i], in[i], in_samples * chain->inc);
*out_produced = in_samples;
*in_consumed = in_samples;
return TRUE;
}
size =
sizeof (gdouble) * in_samples * MAX (convert->in.channels,
convert->out.channels);
convert->flags = flags;
convert->in_data = in;
convert->out_data = out;
if (size > convert->tmpbufsize) {
convert->tmpbuf = g_realloc (convert->tmpbuf, size);
convert->tmpbuf2 = g_realloc (convert->tmpbuf2, size);
convert->tmpbufsize = size;
}
tmpbuf = convert->tmpbuf;
tmpbuf2 = convert->tmpbuf2;
/* get samples to pack */
tmp = audio_chain_get_samples (chain, in_samples);
/* 1. unpack */
if (!convert->in_default) {
if (!convert->convert_in && convert->mix_passthrough
&& !convert->convert_out && !convert->quant && convert->out_default)
outbuf = outp;
else
outbuf = tmpbuf;
convert->in.finfo->unpack_func (convert->in.finfo,
GST_AUDIO_PACK_FLAG_TRUNCATE_RANGE, outbuf, inp,
in_samples * convert->in.channels);
inp = outbuf;
}
/* 2. optionally convert for mixing */
if (convert->convert_in) {
if (convert->mix_passthrough && !convert->convert_out && !convert->quant
&& convert->out_default)
outbuf = outp;
else if (inp == tmpbuf)
outbuf = tmpbuf2;
else
outbuf = tmpbuf;
convert->convert_in (outbuf, inp, in_samples * convert->in.channels);
inp = outbuf;
}
/* step 3, channel mix if not passthrough */
if (!convert->mix_passthrough) {
if (!convert->convert_out && !convert->quant && convert->out_default)
outbuf = outp;
else
outbuf = tmpbuf;
gst_audio_channel_mix_samples (convert->mix, &inp, &outbuf, in_samples);
inp = outbuf;
}
/* step 4, optional convert F64 -> S32 for quantize */
if (convert->convert_out) {
if (!convert->quant && convert->out_default)
outbuf = outp;
else
outbuf = tmpbuf;
convert->convert_out (outbuf, inp, in_samples * convert->out.channels);
inp = outbuf;
}
/* step 5, optional quantize */
if (convert->quant) {
if (convert->out_default)
outbuf = outp;
else
outbuf = tmpbuf;
gst_audio_quantize_samples (convert->quant, &inp, &outbuf, in_samples);
inp = outbuf;
}
/* step 6, pack */
if (!convert->out_default) {
convert->out.finfo->pack_func (convert->out.finfo, 0, inp, outp,
in_samples * convert->out.channels);
GST_LOG ("pack %p, %p %" G_GSIZE_FORMAT, tmp, out, in_samples);
/* and pack if needed */
for (i = 0; i < chain->blocks; i++)
convert->out.finfo->pack_func (convert->out.finfo, 0, tmp[i], out[i],
in_samples * chain->inc);
}
*out_produced = in_samples;
*in_consumed = in_samples;

View file

@ -78,6 +78,13 @@ void gst_audio_converter_free (GstAudioConverter * con
gboolean gst_audio_converter_set_config (GstAudioConverter * convert, GstStructure *config);
const GstStructure * gst_audio_converter_get_config (GstAudioConverter * convert);
gsize gst_audio_converter_get_out_frames (GstAudioConverter *convert,
gsize in_frames);
gsize gst_audio_converter_get_in_frames (GstAudioConverter *convert,
gsize out_frames);
gsize gst_audio_converter_get_max_latency (GstAudioConverter *convert);
gboolean gst_audio_converter_samples (GstAudioConverter * convert,
GstAudioConverterFlags flags,

View file

@ -49,6 +49,9 @@ EXPORTS
gst_audio_converter_flags_get_type
gst_audio_converter_free
gst_audio_converter_get_config
gst_audio_converter_get_in_frames
gst_audio_converter_get_max_latency
gst_audio_converter_get_out_frames
gst_audio_converter_new
gst_audio_converter_samples
gst_audio_converter_set_config