mirror of https://gitlab.freedesktop.org/gstreamer/gstreamer.git
avauddec: Add support for planar audio formats

commit e092f3d411 (parent 47647e1cac)
3 changed files with 89 additions and 11 deletions
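For context (not part of the commit itself): libav decoders can hand back audio either interleaved, with the samples of all channels alternating in a single buffer, or planar (the *P sample formats), with one separate plane per channel in frame.data[0..channels-1]. The raw caps this element negotiates carry interleaved samples, so planar frames have to be copied and interleaved before they can be pushed downstream, which is what the second hunk below implements. A minimal illustration of that interleaving, using a hypothetical helper over plain C arrays rather than the element's real structures:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: interleave 'channels' planar 16-bit planes into one
 * packed buffer.  planes[c][i] is sample i of channel c; out receives
 * nsamples * channels samples laid out frame by frame. */
static void
interleave_s16 (const int16_t * const *planes, int16_t * out,
    size_t nsamples, size_t channels)
{
  size_t i, c;

  for (i = 0; i < nsamples; i++) {
    for (c = 0; c < channels; c++)
      out[i * channels + c] = planes[c][i];
  }
}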
@@ -229,14 +229,20 @@ gst_ffmpegauddec_get_buffer (AVCodecContext * context, AVFrame * frame)
 {
   GstFFMpegAudDec *ffmpegdec;
   GstAudioInfo *info;
-  BufferInfo *buffer_info = g_slice_new (BufferInfo);
+  BufferInfo *buffer_info;

   ffmpegdec = (GstFFMpegAudDec *) context->opaque;
   if (G_UNLIKELY (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)))
     goto negotiate_failed;

+  /* Always use the default allocator for planar audio formats because
+   * we will have to copy and deinterleave later anyway */
+  if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt))
+    goto fallback;
+
   info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (ffmpegdec));

+  buffer_info = g_slice_new (BufferInfo);
   buffer_info->buffer =
       gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec),
       frame->nb_samples * info->bpf);
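The early fallback above relies on libavutil's av_sample_fmt_is_planar (), which reports whether a sample format keeps each channel in its own plane. A small standalone check of that call (libavutil usage only, not taken from the patch):

#include <stdio.h>
#include <libavutil/samplefmt.h>

int
main (void)
{
  enum AVSampleFormat fmt = AV_SAMPLE_FMT_FLTP;       /* planar float */

  /* av_sample_fmt_is_planar () returns non-zero for the *P formats */
  printf ("%s: planar=%d, bytes per sample=%d\n",
      av_get_sample_fmt_name (fmt),
      av_sample_fmt_is_planar (fmt), av_get_bytes_per_sample (fmt));
  return 0;
}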
@@ -451,22 +457,87 @@ gst_ffmpegauddec_audio_frame (GstFFMpegAudDec * ffmpegdec,
   if (len >= 0 && have_data > 0) {
     BufferInfo *buffer_info = frame.opaque;

-    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
-    if (buffer_info) {
-      *outbuf = gst_buffer_ref (buffer_info->buffer);
-    } else {
-      *outbuf = gst_buffer_new_and_alloc (frame.linesize[0]);
-      gst_buffer_fill (*outbuf, 0, frame.data[0], frame.linesize[0]);
-    }
-    ffmpegdec->context->release_buffer (ffmpegdec->context, &frame);
-
     if (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)) {
       gst_buffer_unref (*outbuf);
       *outbuf = NULL;
       *ret = GST_FLOW_NOT_NEGOTIATED;
       len = -1;
       goto beach;
     }

+    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
+    if (buffer_info) {
+      *outbuf = gst_buffer_ref (buffer_info->buffer);
+    } else if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt)) {
+      gint i, j;
+      gint nsamples, channels;
+      GstMapInfo minfo;
+
+      channels = ffmpegdec->info.channels;
+
+      *outbuf =
+          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
+          (ffmpegdec), frame.linesize[0] * channels);
+
+      gst_buffer_map (*outbuf, &minfo, GST_MAP_WRITE);
+      nsamples = frame.nb_samples;
+      switch (ffmpegdec->info.finfo->width) {
+        case 8:{
+          guint8 *odata = minfo.data;
+
+          for (i = 0; i < nsamples; i++) {
+            for (j = 0; j < channels; j++) {
+              odata[j] = ((const guint8 *) frame.data[j])[i];
+            }
+            odata += channels;
+          }
+          break;
+        }
+        case 16:{
+          guint16 *odata = (guint16 *) minfo.data;
+
+          for (i = 0; i < nsamples; i++) {
+            for (j = 0; j < channels; j++) {
+              odata[j] = ((const guint16 *) frame.data[j])[i];
+            }
+            odata += channels;
+          }
+          break;
+        }
+        case 32:{
+          guint32 *odata = (guint32 *) minfo.data;
+
+          for (i = 0; i < nsamples; i++) {
+            for (j = 0; j < channels; j++) {
+              odata[j] = ((const guint32 *) frame.data[j])[i];
+            }
+            odata += channels;
+          }
+          break;
+        }
+        case 64:{
+          guint64 *odata = (guint64 *) minfo.data;
+
+          for (i = 0; i < nsamples; i++) {
+            for (j = 0; j < channels; j++) {
+              odata[j] = ((const guint64 *) frame.data[j])[i];
+            }
+            odata += channels;
+          }
+          break;
+        }
+        default:
+          g_assert_not_reached ();
+          break;
+      }
+      gst_buffer_unmap (*outbuf, &minfo);
+    } else {
+      *outbuf =
+          gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER
+          (ffmpegdec), frame.linesize[0]);
+      gst_buffer_fill (*outbuf, 0, frame.data[0], frame.linesize[0]);
+    }
+    ffmpegdec->context->release_buffer (ffmpegdec->context, &frame);
+
+    GST_DEBUG_OBJECT (ffmpegdec, "Buffer created. Size: %d", have_data);

     /* Reorder channels to the GStreamer channel order */
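A note on the copy loop in this hunk: it is specialised per sample width (8/16/32/64 bits) so that every inner assignment moves a whole sample, which is cheap because the negotiated GstAudioInfo already fixes the width. A width-agnostic alternative, sketched here only to illustrate the trade-off and not part of the patch, would memcpy one sample at a time at the cost of extra per-sample overhead:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Sketch: generic interleave where bps is the bytes per sample (1, 2, 4 or 8).
 * Copies sample i of every channel c, frame by frame, into 'out'. */
static void
interleave_any (uint8_t * const *planes, uint8_t * out,
    size_t nsamples, size_t channels, size_t bps)
{
  size_t i, c;

  for (i = 0; i < nsamples; i++) {
    for (c = 0; c < channels; c++) {
      memcpy (out, planes[c] + i * bps, bps);
      out += bps;
    }
  }
}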
@@ -1747,6 +1747,8 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
 GstAudioFormat
 gst_ffmpeg_smpfmt_to_audioformat (enum AVSampleFormat sample_fmt)
 {
+  sample_fmt = av_get_packed_sample_fmt (sample_fmt);
+
   switch (sample_fmt) {
     case AV_SAMPLE_FMT_S16:
       return GST_AUDIO_FORMAT_S16;
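The single av_get_packed_sample_fmt () call added here folds every planar format onto its packed counterpart before the existing switch runs, so AV_SAMPLE_FMT_S16P resolves to the same GstAudioFormat as AV_SAMPLE_FMT_S16, and likewise for the other formats. A quick standalone check of that folding (libavutil only, not part of the patch):

#include <stdio.h>
#include <libavutil/samplefmt.h>

int
main (void)
{
  const enum AVSampleFormat planar[] = {
    AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
    AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP
  };
  size_t i;

  /* Print each planar format next to its packed equivalent */
  for (i = 0; i < sizeof (planar) / sizeof (planar[0]); i++)
    printf ("%-5s -> %s\n", av_get_sample_fmt_name (planar[i]),
        av_get_sample_fmt_name (av_get_packed_sample_fmt (planar[i])));
  return 0;
}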
@@ -46,16 +46,21 @@ av_smp_format_depth (enum AVSampleFormat smp_fmt)
   gint depth = -1;
   switch (smp_fmt) {
     case AV_SAMPLE_FMT_U8:
+    case AV_SAMPLE_FMT_U8P:
       depth = 1;
       break;
     case AV_SAMPLE_FMT_S16:
+    case AV_SAMPLE_FMT_S16P:
       depth = 2;
       break;
     case AV_SAMPLE_FMT_S32:
+    case AV_SAMPLE_FMT_S32P:
     case AV_SAMPLE_FMT_FLT:
+    case AV_SAMPLE_FMT_FLTP:
       depth = 4;
       break;
     case AV_SAMPLE_FMT_DBL:
+    case AV_SAMPLE_FMT_DBLP:
       depth = 8;
       break;
     default:
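As an aside (an observation, not something the patch changes): the depth table above keeps the same per-format byte sizes that libavutil itself reports through av_get_bytes_per_sample (), which covers both the packed and the planar variants. A hypothetical equivalent that defers to libavutil could look like:

#include <glib.h>
#include <libavutil/samplefmt.h>

/* Hypothetical variant of av_smp_format_depth () that leans on libavutil
 * instead of an explicit switch; returns -1 for unknown formats, like the
 * helper above. */
static gint
smp_format_depth_alt (enum AVSampleFormat smp_fmt)
{
  gint bps = av_get_bytes_per_sample (smp_fmt);

  return bps > 0 ? bps : -1;
}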