avaudenc: Add support for planar audio formats

Sebastian Dröge 2012-12-05 19:28:33 +01:00
parent 2d7ebf9514
commit cab519f370
2 changed files with 169 additions and 9 deletions
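
Background note (not part of the original commit message): packed/interleaved sample formats store all channels sample by sample (L0 R0 L1 R1 ...), while planar formats keep one contiguous buffer per channel. Encoders that only accept planar input therefore need the interleaved data GStreamer hands in to be split into per-channel planes first, which is what the first hunk below does. A minimal standalone sketch of that conversion for 16-bit stereo, assuming only GLib:

/* Minimal standalone sketch (not part of the commit): deinterleave a packed
 * stereo S16 buffer into two per-channel planes, which is what the encoder
 * change below does generically for 8/16/32/64-bit sample widths. */
#include <glib.h>

static void
deinterleave_s16 (const gint16 * packed, gint16 ** planes, gint channels,
    gint nsamples)
{
  gint i, j;

  for (i = 0; i < nsamples; i++)
    for (j = 0; j < channels; j++)
      planes[j][i] = packed[i * channels + j];
}

int
main (void)
{
  /* packed:  L0 R0 L1 R1   ->   planar:  plane 0 = L0 L1, plane 1 = R0 R1 */
  const gint16 packed[] = { 10, -10, 20, -20 };
  gint16 left[2], right[2];
  gint16 *planes[2] = { left, right };

  deinterleave_s16 (packed, planes, 2, 2);
  g_assert (left[0] == 10 && left[1] == 20);
  g_assert (right[0] == -10 && right[1] == -20);
  return 0;
}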

@@ -367,6 +367,7 @@ gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
  GstAudioInfo *info;
  AVPacket pkt;
  AVFrame frame;
  gboolean planar;

  enc = GST_AUDIO_ENCODER (ffmpegaudenc);
@@ -378,11 +379,84 @@ gst_ffmpegaudenc_encode_audio (GstFFMpegAudEnc * ffmpegaudenc,
  memset (&frame, 0, sizeof (frame));
  info = gst_audio_encoder_get_audio_info (enc);

  frame.data[0] = audio_in;
  frame.linesize[0] = in_size;
  frame.nb_samples = in_size / info->bpf;

  planar = av_sample_fmt_is_planar (ffmpegaudenc->context->sample_fmt);

  if (planar && info->channels > 1) {
    gint channels, nsamples;
    gint i, j;

    nsamples = frame.nb_samples = in_size / info->bpf;
    channels = info->channels;

    frame.data[0] = g_malloc (in_size);
    frame.linesize[0] = in_size / channels;
    for (i = 1; i < channels; i++) {
      frame.data[i] = frame.data[i - 1] + frame.linesize[0];
      frame.linesize[i] = frame.linesize[0];
    }

    switch (info->finfo->width) {
      case 8:{
        const guint8 *idata = (const guint8 *) audio_in;

        for (i = 0; i < nsamples; i++) {
          for (j = 0; j < channels; j++) {
            ((guint8 *) frame.data[j])[i] = idata[j];
          }
          idata += channels;
        }
        break;
      }
      case 16:{
        const guint16 *idata = (const guint16 *) audio_in;

        for (i = 0; i < nsamples; i++) {
          for (j = 0; j < channels; j++) {
            ((guint16 *) frame.data[j])[i] = idata[j];
          }
          idata += channels;
        }
        break;
      }
      case 32:{
        const guint32 *idata = (const guint32 *) audio_in;

        for (i = 0; i < nsamples; i++) {
          for (j = 0; j < channels; j++) {
            ((guint32 *) frame.data[j])[i] = idata[j];
          }
          idata += channels;
        }
        break;
      }
      case 64:{
        const guint64 *idata = (const guint64 *) audio_in;

        for (i = 0; i < nsamples; i++) {
          for (j = 0; j < channels; j++) {
            ((guint64 *) frame.data[j])[i] = idata[j];
          }
          idata += channels;
        }
        break;
      }
      default:
        g_assert_not_reached ();
        break;
    }
  } else {
    frame.data[0] = audio_in;
    frame.linesize[0] = in_size;
    frame.nb_samples = in_size / info->bpf;
  }

  res = avcodec_encode_audio2 (ctx, &pkt, &frame, have_data);

  if (planar && info->channels > 1)
    g_free (frame.data[0]);

  if (res < 0) {
    char error_str[128] = { 0, };
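
A note on the hunk above (illustration, not part of the commit): the deinterleaving path is only taken when av_sample_fmt_is_planar() reports the negotiated encoder format as planar and there is more than one channel; mono data has the same layout either way, and the temporary planes all live in one g_malloc'd block that is released through frame.data[0] after encoding. The following standalone program, using only public libavutil API, lists which sample formats report themselves as planar:

/* Illustrative sketch (not part of the commit): list which libavutil sample
 * formats are planar, using av_get_sample_fmt_name() and
 * av_sample_fmt_is_planar(). */
#include <stdio.h>
#include <libavutil/samplefmt.h>

int
main (void)
{
  enum AVSampleFormat fmt;

  for (fmt = 0; fmt < AV_SAMPLE_FMT_NB; fmt++)
    printf ("%-6s planar=%d\n", av_get_sample_fmt_name (fmt),
        av_sample_fmt_is_planar (fmt));
  return 0;
}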

@@ -1747,19 +1747,25 @@ gst_ffmpeg_pixfmt_to_caps (enum PixelFormat pix_fmt, AVCodecContext * context,
GstAudioFormat
gst_ffmpeg_smpfmt_to_audioformat (enum AVSampleFormat sample_fmt)
{
  sample_fmt = av_get_packed_sample_fmt (sample_fmt);

  switch (sample_fmt) {
    case AV_SAMPLE_FMT_U8:
    case AV_SAMPLE_FMT_U8P:
      return GST_AUDIO_FORMAT_U8;
      break;
    case AV_SAMPLE_FMT_S16:
    case AV_SAMPLE_FMT_S16P:
      return GST_AUDIO_FORMAT_S16;
      break;
    case AV_SAMPLE_FMT_S32:
    case AV_SAMPLE_FMT_S32P:
      return GST_AUDIO_FORMAT_S32;
      break;
    case AV_SAMPLE_FMT_FLT:
    case AV_SAMPLE_FMT_FLTP:
      return GST_AUDIO_FORMAT_F32;
      break;
    case AV_SAMPLE_FMT_DBL:
    case AV_SAMPLE_FMT_DBLP:
      return GST_AUDIO_FORMAT_F64;
      break;
    default:
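
The hunk above normalizes the incoming value with av_get_packed_sample_fmt() before the switch, so the packed and planar variants of each format map to the same GstAudioFormat; the added *P case labels spell the same mapping out explicitly. A short standalone check of that helper's behaviour (illustration only, not part of the commit):

/* Illustrative sketch (not part of the commit): av_get_packed_sample_fmt()
 * folds a planar format onto its packed counterpart. */
#include <assert.h>
#include <libavutil/samplefmt.h>

int
main (void)
{
  assert (av_get_packed_sample_fmt (AV_SAMPLE_FMT_S16P) == AV_SAMPLE_FMT_S16);
  assert (av_get_packed_sample_fmt (AV_SAMPLE_FMT_FLTP) == AV_SAMPLE_FMT_FLT);
  /* Already-packed formats are returned unchanged. */
  assert (av_get_packed_sample_fmt (AV_SAMPLE_FMT_S16) == AV_SAMPLE_FMT_S16);
  return 0;
}
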
@@ -2201,25 +2207,105 @@ gst_ffmpeg_videoinfo_to_context (GstVideoInfo * info, AVCodecContext * context)
void
gst_ffmpeg_audioinfo_to_context (GstAudioInfo * info, AVCodecContext * context)
{
  const AVCodec *codec;
  const enum AVSampleFormat *smpl_fmts;
  enum AVSampleFormat smpl_fmt = -1;

  context->channels = info->channels;
  context->sample_rate = info->rate;

  codec = context->codec;
  smpl_fmts = codec->sample_fmts;

  switch (info->finfo->format) {
    case GST_AUDIO_FORMAT_F32:
      context->sample_fmt = AV_SAMPLE_FMT_FLT;
      if (smpl_fmts) {
        while (*smpl_fmts != -1) {
          if (*smpl_fmts == AV_SAMPLE_FMT_FLT) {
            smpl_fmt = *smpl_fmts;
            break;
          } else if (*smpl_fmts == AV_SAMPLE_FMT_FLTP) {
            smpl_fmt = *smpl_fmts;
          }
          smpl_fmts++;
        }
      } else {
        smpl_fmt = AV_SAMPLE_FMT_FLT;
      }
      break;
    case GST_AUDIO_FORMAT_F64:
      context->sample_fmt = AV_SAMPLE_FMT_DBL;
      if (smpl_fmts) {
        while (*smpl_fmts != -1) {
          if (*smpl_fmts == AV_SAMPLE_FMT_DBL) {
            smpl_fmt = *smpl_fmts;
            break;
          } else if (*smpl_fmts == AV_SAMPLE_FMT_DBLP) {
            smpl_fmt = *smpl_fmts;
          }
          smpl_fmts++;
        }
      } else {
        smpl_fmt = AV_SAMPLE_FMT_DBL;
      }
      break;
    case GST_AUDIO_FORMAT_S32:
      context->sample_fmt = AV_SAMPLE_FMT_S32;
      if (smpl_fmts) {
        while (*smpl_fmts != -1) {
          if (*smpl_fmts == AV_SAMPLE_FMT_S32) {
            smpl_fmt = *smpl_fmts;
            break;
          } else if (*smpl_fmts == AV_SAMPLE_FMT_S32P) {
            smpl_fmt = *smpl_fmts;
          }
          smpl_fmts++;
        }
      } else {
        smpl_fmt = AV_SAMPLE_FMT_S32;
      }
      break;
    case GST_AUDIO_FORMAT_S16:
      context->sample_fmt = AV_SAMPLE_FMT_S16;
      if (smpl_fmts) {
        while (*smpl_fmts != -1) {
          if (*smpl_fmts == AV_SAMPLE_FMT_S16) {
            smpl_fmt = *smpl_fmts;
            break;
          } else if (*smpl_fmts == AV_SAMPLE_FMT_S16P) {
            smpl_fmt = *smpl_fmts;
          }
          smpl_fmts++;
        }
      } else {
        smpl_fmt = AV_SAMPLE_FMT_S16;
      }
      break;
    case GST_AUDIO_FORMAT_U8:
      if (smpl_fmts) {
        while (*smpl_fmts != -1) {
          if (*smpl_fmts == AV_SAMPLE_FMT_U8) {
            smpl_fmt = *smpl_fmts;
            break;
          } else if (*smpl_fmts == AV_SAMPLE_FMT_U8P) {
            smpl_fmt = *smpl_fmts;
          }
          smpl_fmts++;
        }
      } else {
        smpl_fmt = AV_SAMPLE_FMT_U8;
      }
      break;
    default:
      break;
  }

  g_assert (smpl_fmt != -1);

  context->sample_fmt = smpl_fmt;
}

/* Convert a GstCaps and a FFMPEG codec Type to a
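
Each branch of the switch in the hunk above follows the same pattern: walk the codec's sample_fmts list, prefer the packed variant of the requested format, remember the planar variant as a fallback, and assume the packed format when the codec does not advertise a list at all. A hypothetical refactoring sketch of that pattern (the helper name pick_sample_fmt is made up for illustration and is not part of the commit):

#include <libavutil/samplefmt.h>

/* Hypothetical helper (not in the commit): scan a codec's sample_fmts list,
 * preferring the packed format and falling back to its planar variant. */
static enum AVSampleFormat
pick_sample_fmt (const enum AVSampleFormat *fmts,
    enum AVSampleFormat packed, enum AVSampleFormat planar)
{
  enum AVSampleFormat result = AV_SAMPLE_FMT_NONE;

  if (!fmts)
    return packed;              /* no list published: assume packed works */

  while (*fmts != AV_SAMPLE_FMT_NONE) {
    if (*fmts == packed)
      return packed;            /* exact packed match wins immediately */
    else if (*fmts == planar)
      result = planar;          /* remember planar as a fallback */
    fmts++;
  }

  return result;                /* AV_SAMPLE_FMT_NONE if neither is offered */
}

/* Intended use, mirroring e.g. the S16 branch above:
 *   context->sample_fmt =
 *       pick_sample_fmt (codec->sample_fmts, AV_SAMPLE_FMT_S16,
 *       AV_SAMPLE_FMT_S16P);
 */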